id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value)
---|---|---|
3495217
|
#!/usr/bin/python
# a timing script for FFTs and convolutions using OpenMP
import sys, getopt
import numpy as np
from math import *
from subprocess import * # for popen, running processes
import os
import re # regexp package
import shutil
def mvals_from_file(filename):
mvals = []
if os.path.isfile(filename):
with open(filename, 'r') as fin:
for line in fin:
if not line.startswith("#"):
mvals.append(int(line.split()[0]))
return mvals
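# The timing files parsed above are plain text: lines starting with "#" are
# comments, and each data line begins with the problem size m followed by the
# measured times. A hedged sketch of the assumed layout (column meanings inferred):
#
#   # <command line>    2024-01-01   abc1234 (2024-01-01)
#   1024  0.00123  0.00011
#   2048  0.00251  0.00017
#
# mvals_from_file() only reads the first token of each non-comment line.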
def max_m(p, RAM, runtype):
print "program:", p
print "runtype:", runtype
print "ram:", RAM
b = 0
if "transpose" in p:
# NB: assumes Z=1 and out-of-place
return int(floor(log(RAM / 32) / log(2) / 2))
if "cconv2" in p:
if runtype == "implicit":
# A * 2m^2 * 16
return int(floor(log(RAM / 64) / ( 2 * log(2)) ))
else:
# A * 4m^2 * 16
return int(floor(log(RAM / 128) / (2 * log(2)) ))
if "cconv3" in p:
if runtype == "implicit":
# A * 2m^3 * 16
return int(floor( log(RAM / 64) / (3 * log(2)) ))
else:
# A * 8m^3 * 16
return int(floor( log(RAM / 256) / (3 * log(2)) ))
if "cconv" in p:
b = int(floor(log(RAM / 4) / log(2)))
b = min(b, 20) # because we aren't crazy
return b
if "tconv2" in p:
if runtype == "implicit":
# A * 6m^2 * 16
return int(floor( log(RAM / 192) / (2 * log(2)) ))
else:
# A * 12m^2 * 16
return int(floor( log(RAM / 768) / (2 * log(2)) ))
if "tconv" in p:
b = int(floor(log(RAM / 6) / log(2)))
b = min(b, 20) # because we aren't crazy
return b
if "conv2" in p:
if runtype == "implicit":
# A * 3 m^2 * 16
return int(floor(log(RAM / 96) / (2 * log(2)) ))
else:
# A * 4.5 m^2 * 16
return int(floor(log(RAM / 144) / (2 * log(2)) ))
if "conv3" in p:
# A * 6 m^3 * 16
return int(floor(log(RAM / 192) / (3 * log(2)) ))
if "conv" in p:
b = int(floor(log(RAM / 6) / log(2)))
b = min(b, 20) # because we aren't crazy
return b
if "mft1" in p:
return int(floor(0.5 * log(RAM / 64) / log(2)))
if "fft1" in p:
return int(floor(0.5 * log(RAM / 64) / log(2)))
if p == "fft2":
return int(floor(log(RAM / 32) / log(2) / 2))
if p == "fft2r":
return int(floor(log(RAM / 32) / log(2) / 2))
if p == "fft3":
return int(floor(log(RAM / 32) / log(2) / 3))
if p == "fft3r":
return int(floor(log(RAM / 32) / log(2) / 3))
if p == "transpose":
return int(floor(log(RAM / 32) / log(2) / 2))
print "Error! Failed to determine b."
return 0
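# Worked example (a sketch, not part of the original script): for "cconv2" with
# runtype "implicit", the comment "A * 2m^2 * 16" means the run needs roughly
# RAM = 64 * m^2 bytes, so m_max = sqrt(RAM / 64) and
# b = floor(log(RAM / 64) / (2 * log(2))).
# With RAM = 16 GiB = 2**34 bytes: RAM / 64 = 2**28, so b = 28 / 2 = 14,
# i.e. the largest problem size attempted is m = 2**14.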
def default_outdir(p):
outdir=""
if p == "cconv":
outdir = "timings1c"
if p == "cconv2":
outdir = "timings2c"
if p == "cconv3":
outdir = "timings3c"
if p == "conv":
outdir = "timings1r"
if p == "conv2":
outdir = "timings2r"
if p == "conv3":
outdir = "timings3r"
if p == "tconv":
outdir = "timings1t"
if p == "tconv2":
outdir="timings2t"
if p == "fft1":
outdir = "timingsf1"
if p == "mfft1":
outdir = "timingsmf1"
if p == "fft2":
outdir = "timingsf2"
if p == "transpose":
outdir="transpose2"
return outdir
def main(argv):
usage = '''Usage:
\ntimings.py
-a<start>
-b<stop>
-p<cconv,cconv2,cconv3,conv,conv2,conv3,tconv,tconv2>
-T<number of threads>
-A<quoted arg list for timed program>
-B<pre-commands (eg srun)>
-r<implicit/explicit/pruned/fft>
-R<ram in gigabytes>
-d dry run
-o<output file name>
-D<output directory>
-P<path to executable>
-g<grep string>
-N<int> Number of tests to perform
-e<0 or 1>: append to the timing data already existent (skipping
already-done problem sizes).
-c<string>: extra commentary for output file.
-v: verbose output
'''
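# Example invocation (hypothetical paths and values, for illustration only):
#   ./timings.py -pcconv2 -rimplicit -a6 -b10 -T4
# times the 2D complex convolution with implicit dealiasing for m = 2^6 .. 2^10
# using 4 OpenMP threads; passing -R<GB> instead of -b lets max_m() pick the
# largest problem size that fits in the given amount of RAM.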
dryrun = False
#dryrun = True
bset = 0
dorun = True
T = 0 # number of threads
p = "" # program name
B = [] # precommands
A = [] # postcommands
E = [] # environment variables (eg: -EGOMP_CPU_AFFINITY -E"0 1 2 3")
a = 6 # minimum log of problem size
b = 0 # maximum log of problem size
runtype = "implicit" # type of run
RAM = 0 # ram limit in GB
outdir = "" # output directory
outfile = "" # output filename
rname = "" # output grep string
N = 0 # number of tests
appendtofile = False
stats = 0
path = "./"
verbose = False
extracomment = ""
try:
opts, args = getopt.getopt(argv,"hdp:T:a:b:c:A:B:E:e:r:R:S:o:P:D:g:N:v")
except getopt.GetoptError:
print "error in parsing arguments."
print usage
sys.exit(2)
for opt, arg in opts:
if opt in ("-p"):
p = arg
if opt in ("-T"):
T = arg
elif opt in ("-a"):
a = int(arg)
elif opt in ("-N"):
N = int(arg)
elif opt in ("-b"):
b = int(arg)
elif opt in ("-c"):
extracomment = arg
elif opt in ("-A"):
A += [str(arg)]
elif opt in ("-B"):
B += [str(arg)]
elif opt in ("-E"):
E += [str(arg)]
elif opt in ("-e"):
appendtofile = (int(arg) == 1)
elif opt in ("-r"):
runtype = str(arg)
elif opt in ("-R"):
print "ram arg:", arg
RAM = float(arg)*2**30
elif opt in ("-S"):
stats = int(arg)
elif opt in ("-d"):
dryrun = True
elif opt in ("-o"):
outfile = str(arg)
elif opt in ("-P"):
path = arg
elif opt in ("-D"):
outdir = str(arg)
elif opt in ("-g"):
rname = str(arg)
elif opt in ("-v"):
verbose = True
elif opt in ("-h"):
print usage
sys.exit(0)
if dryrun:
print "Dry run! No output actually created."
if p == "":
print "please specify a program with -p"
print usage
sys.exit(2)
print "RAM:", RAM
# if both the max problem size and the ram are unset, go up to 2^8
if (b == 0 and RAM == 0):
b = 8
hermitian = False
ternary = False
if p == "cconv":
if(runtype == "pruned"):
print p + " has no pruned option"
dorun = False
if p == "conv":
hermitian = True
if(runtype == "pruned"):
print p + " has no pruned option"
dorun = False
if p == "conv2":
hermitian = True
if p == "conv3":
hermitian = True
if(runtype != "implicit"):
print p + " has no " + r + " option"
dorun = False
if p == "tconv":
ternary = True
if(runtype == "pruned"):
print p + " has no pruned option"
dorun = False
if p == "tconv2":
ternary = True
if p == "fft1":
runtype = "fft"
if p == "mfft1":
runtype = "fft"
if p == "fft2":
runtype = "fft"
if p == "transpose":
runtype = "transpose"
if outdir == "":
outdir = default_outdir(p)
if outdir == "":
print "empty outdir: please select a program or specify an outdir (-D)"
print
print usage
sys.exit(2)
if RAM != 0:
b = max_m(p, RAM, runtype)
print "max value of b with ram provided:", b
if outfile == "":
outfile = "implicit"
goodruns = []
badruns = []
if dorun:
if RAM != 0:
print "max problem size is "+str(2**b)
if rname == "":
if runtype == "implicit":
rname = "Implicit"
if runtype == "explicit":
rname = "Explicit"
if runtype == "pruned":
rname = "rune"
if runtype == "fft":
rname = "fft"
if runtype == "transpose":
rname = "transpose"
print "Search string for timing: " + rname
filename = outdir + "/" + outfile
print "output in", filename
mdone = mvals_from_file(filename)
print "problem sizes already done:", mdone
print "environment variables:", E
if not dryrun:
os.system("mkdir -p " + outdir)
with open(outdir + "/log", "a") as logfile:
logfile.write(str(sys.argv))
logfile.write("\n")
logfile.write("intial exponent: " + str(a) + "\n")
logfile.write("final exponent: " + str(b) + "\n")
if not appendtofile:
os.system("rm -f " + filename)
cmd = []
i = 0
while i < len(B):
cmd.append(B[i]);
i += 1
cmd += [path + str(p)]
if not os.path.isfile(path + str(p)):
print path + str(p), "does not exist!"
sys.exit(1)
if not "fft" in p:
if(runtype == "explicit"):
cmd.append("-e")
if(runtype == "pruned"):
cmd.append("-p")
if(runtype == "implicit"):
cmd.append("-i")
cmd.append("-S" + str(stats))
if(N > 0):
cmd.append("-N" + str(N))
if(T > 0):
cmd.append("-T" + T)
# Add the extra arguments to the program being timed.
i = 0
while i < len(A):
cmd.append(A[i]);
i += 1
print " ".join(cmd)
if not dryrun and stats == -1:
try:
os.remove("timing.dat")
except OSError:
pass
if not dryrun:
comment = "#"
# Add the base run command as a comment
comment += " " + " ".join(cmd)
# Add the run date as a comment
import time
date = time.strftime("%Y-%m-%d")
comment += "\t" + date
if extracomment == "":
# If we can get the commit and commit date, add as a comment
vcmd = []
vcmd.append("git")
vcmd.append("log")
vcmd.append("-1")
vcmd.append("--format=%h")
vp = Popen(vcmd, stdout = PIPE, stderr = PIPE)
vp.wait()
prc = vp.returncode
if prc == 0:
out, err = vp.communicate()
comment += "\t" + out.rstrip()
vcmd = []
vcmd.append("git")
vcmd.append("log")
vcmd.append("-1")
vcmd.append("--format=%ci")
vp = Popen(vcmd, stdout = PIPE, stderr = PIPE)
vp.wait()
prc = vp.returncode
if prc == 0:
out, err = vp.communicate()
out = out.rstrip()
comment += " (" + out[0:10] + ")"
else:
comment += "\t" + extracomment
comment += "\n"
if(appendtofile):
if os.path.isfile(filename):
with open(filename, "a") as myfile:
myfile.write(comment)
else:
with open(filename, "a") as myfile:
myfile.write(comment)
else:
if stats == -1:
with open("timing.dat", "w") as myfile:
myfile.write(comment)
else:
with open(filename, "w") as myfile:
myfile.write(comment)
for i in range(a, b + 1):
if not hermitian or runtype == "implicit":
m = str(int(pow(2, i)))
else:
if not ternary:
m = str(int(floor((pow(2, i + 1) + 2) / 3)))
else:
m = str(int(floor((pow(2, i + 2) + 3) / 4)))
print str(i) + " m=" + str(m)
dothism = True
if appendtofile and int(m) in mdone:
print "problem size", m, "is already done; skipping."
dothism = False
if dothism:
mcmd = cmd + ["-m" + str(m)]
if dryrun:
#print mcmd
print " ".join(mcmd)
else:
denv = dict(os.environ)
i = 0
while i < len(E):
denv[E[i]] = E[i + 1]
i += 2
p = Popen(mcmd, stdout = PIPE, stderr = PIPE, env = denv)
p.wait() # sets the return code
prc = p.returncode
out, err = p.communicate() # capture output
if(verbose):
print "Output from timing.py's popen:"
#print " ".join(mcmd)
print "cwd:" , os.getcwd()
print "out:"
print out
print "err:"
print err
# copy the output and error to a log file.
with open(outdir + "/log", "a") as logfile:
logfile.write(" ".join(mcmd))
logfile.write("\n")
logfile.write(out)
logfile.write(err)
if (prc == 0): # did the process succeed?
#print out
outlines = out.split('\n')
itline = 0
dataline = ""
while itline < len(outlines):
line = outlines[itline]
#print line
re.search(rname, line)
if re.search(rname, line) is not None:
print "\t" + str(outlines[itline])
print "\t" + str(outlines[itline + 1])
dataline = outlines[itline + 1]
itline = len(outlines)
itline += 1
if not stats == -1:
if not dataline == "":
goodruns.append(mcmd)
# append to output file
with open(filename, "a") as myfile:
myfile.write(dataline + "\n")
else:
print "ERROR: no timing data found"
badruns.append(mcmd)
else:
goodruns.append(mcmd)
else:
print "FAILURE:"
print cmd
print "with, return code:"
print prc
print "output:"
print out
print "error:"
print err
badruns.append(mcmd)
if not dryrun and (stats == -1 and os.path.isfile("timing.dat")):
if(appendtofile):
# Append the new data to the output.
with open(filename, "a") as fout:
with open("timing.dat") as fin:
lines = []
for line in fin:
lines.append(line)
fout.write(lines[len(lines) - 1])
else:
shutil.copyfile("timing.dat", filename)
if not dryrun and stats == -1:
try:
os.remove("timing.dat")
except OSError:
pass
if not dryrun:
with open(outdir + "/log", "a") as logfile:
goodbads = ""
if len(goodruns) > 0:
goodbads += "Successful runs:\n"
for mcmd in goodruns:
goodbads += " ".join(mcmd) + "\n"
if len(badruns) > 0:
goodbads += "Unsuccessful runs:\n"
for mcmd in badruns:
goodbads += " ".join(mcmd) + "\n"
logfile.write(goodbads)
print goodbads
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
3283254
|
<reponame>ndkruif/8dm40-machine-learning10
import numpy as np
def lsq(X, y):
"""
Least squares linear regression
:param X: Input data matrix
:param y: Target vector
:return: Estimated coefficient vector for the linear regression
"""
# add column of ones for the intercept
ones = np.ones((len(X), 1))
X = np.concatenate((ones, X), axis=1)
# calculate the coefficients
beta = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y))
return beta
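# Minimal usage sketch (not part of the original file): lsq solves the normal
# equations beta = (X^T X)^{-1} X^T y with an intercept column prepended, so on
# well-conditioned data it should match an ordinary least-squares fit.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(100, 2))
    y_demo = 3.0 + 2.0 * X_demo[:, [0]] - 1.0 * X_demo[:, [1]]
    print(lsq(X_demo, y_demo))  # approximately [[3.], [2.], [-1.]]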
|
StarcoderdataPython
|
12845598
|
<filename>paprika/actions/files/Pipe.py
from paprika.repositories.FileRepository import FileRepository
from paprika.repositories.ProcessPropertyRepository import ProcessPropertyRepository
from paprika.repositories.ProcessRepository import ProcessRepository
from paprika.system.logger.Logger import Logger
from paprika.actions.Actionable import Actionable
class Pipe(Actionable):
def __init__(self):
Actionable.__init__(self)
def execute(self, connector, process_action):
job_name = process_action['job_name']
logger = Logger(connector, self)
file_repository = FileRepository(connector)
process_repository = ProcessRepository(connector)
process_property_repository = ProcessPropertyRepository(connector)
# retrieve the file properties
process = process_repository.find_by_id(process_action['pcs_id'])
file_id = process_property_repository.get_property(process, 'file_id')
file = file_repository.find_by_id(file_id)
filename = file['filename']
locked = file_repository.locked(file)
if locked:
logger.info(job_name, 'file: ' + filename + " locked ")
return process_action
else:
logger.info(job_name, 'file: ' + filename + " not locked ")
logger.info(job_name, filename + " state: " + file['state'])
|
StarcoderdataPython
|
158568
|
<gh_stars>0
import utility
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
import static_sim_functions as smf
# import ts_preprocessing as ts_data
import numpy as np
import os
from pathlib import Path
import ast
'''
This module simulates models for the UI, in particular for similarity comparison.
It uses the same functionality as the other ML modelling classes but is maintained as a separate file.
The simulated data itself is created from the Jupyter notebook file Synthethic Test Data.ipynb.
'''
def common_processing(df):
# Getting percentage between 0 to 1 rather than score values
df["tschq12"] = df["tschq12"].apply(lambda x: x / 100)
df["tschq16"] = df["tschq16"].apply(lambda x: x / 100)
df["tschq17"] = df["tschq17"].apply(lambda x: x / 100)
df["tschq04"] = df.apply(create_cols_family_hist, axis=1)
return df
# Common elements
# Feature engineering family history
def create_cols_family_hist(x):
if x["tschq04-1"] == "YES":
if isinstance(x["tschq04-2"], str):
res = ast.literal_eval(x["tschq04-2"])
else:
res = x["tschq04-2"]
lst_sorted = sorted(res)
list_to_str = "_".join([val for val in lst_sorted])
return list_to_str
else:
return x["tschq04-1"]
def get_common_cols(col1, col2):
common_elements = set(col1).intersection(col2)
return common_elements
import properties
import pandas as pd
def check_access(location):
if location.exists() and location.is_file():
return True
else:
return False
def initial_processing():
# Read the csv of the tschq data and make the necessary things
tschq = pd.read_pickle(properties.registration_file_location)
hq = pd.read_pickle(properties.hearing_file_location)
# If simulation file for tchq dataset exists add it.
path_access = Path(properties.simulate_registration_file_location)
hearing_path_access = Path(properties.simulate_hearing_file_location)
if check_access(path_access):
simulation_reg_file = pd.read_pickle(properties.simulate_registration_file_location)
# Append the simulation file alongside when True
tschq = tschq.append(simulation_reg_file)
else:
print("Simulated registration file is not created !!!")
if check_access(hearing_path_access):
simulation_hearing_file = pd.read_pickle(properties.simulate_hearing_file_location)
hq = hq.append(simulation_hearing_file)
else:
print("Simulated hearing file is not created !!!")
# Dropping users who do not have their time series
drop_indexs = []
drop_user_ids = [54, 60, 140, 170, 4, 6, 7, 9, 12, 19, 25, 53, 59, 130, 144, 145, 148, 156, 167]
# indexes to be obtained
for val in drop_user_ids:
drop_indexs.append(tschq[tschq["user_id"] == val].index[0])
# Drop those indexes of the users who do not have their time recordings
tschq.drop(drop_indexs, inplace=True)
tschq.reset_index(inplace=True, drop=True)
# Clean the tschq05 question: rows holding a stray integer value get the most common value instead.
def filter_age(x):
if isinstance(x, int):
# Append the most common value obtained
return tschq["tschq05"].value_counts().head(1).index[0]
else:
return x
tschq["tschq05"] = tschq["tschq05"].apply(filter_age)
# Drop the questionnaire_id and created_at
tschq.drop(["questionnaire_id", "created_at"], axis=1, inplace=True)
hq.isna().sum(axis=0)
# By looking at the output we are sure that hq05 and hq06 do not contribute much and can be dropped
hq.drop(["hq05", "hq06"], axis=1, inplace=True)
hq_df = hq.set_index("user_id")
df = tschq.join(hq_df.iloc[:, 2:], on="user_id")
# Repeated code but it should be okay
# Looking at the output, we can drop tschq25, tschq07-2, tschq04-2
drop_cols = ["tschq01", "tschq25", "tschq07-2",
"tschq13", "tschq04-1", "tschq04-2"]
# Getting percentage between 0 to 1 rather than score values
df["tschq12"] = df["tschq12"].apply(lambda x: x / 100)
df["tschq16"] = df["tschq16"].apply(lambda x: x / 100)
df["tschq17"] = df["tschq17"].apply(lambda x: x / 100)
df["tschq04"] = df.apply(create_cols_family_hist, axis=1)
df.drop(drop_cols, axis=1, inplace=True)
# Set the heom object, while using the required similarity
# Alternative
# Categorical boolean mask
categorical_feature_mask = df.iloc[:, 1:].infer_objects().dtypes == object
other_feature_mask = df.iloc[:, 1:].infer_objects().dtypes != object
# filter categorical columns using mask and turn it into a list
categorical_cols = df.iloc[:, 1:].columns[categorical_feature_mask].tolist()
num_cols = df.iloc[:, 1:].columns[other_feature_mask].tolist()
cat_idx = [df.iloc[:, 1:].columns.get_loc(val) for val in categorical_cols]
num_idx = [df.iloc[:, 1:].columns.get_loc(val) for val in num_cols]
return cat_idx, num_idx, df
import os
import traceback
def save_data_objs(df, quest_cmbs="all"):
try:
if not os.path.isdir(properties.model_location + "/simulate/" + quest_cmbs):
os.makedirs(properties.model_location + "/simulate/" + quest_cmbs)
utility.save_model("".join("/simulate/" + quest_cmbs + "/" + quest_cmbs + "_stat_q_data"), df)
encoded_combined_df = smf.preprocess(df, quest_cmbs, age_bin=False,
process_model_name="".join("/simulate/" + quest_cmbs + "/" +
quest_cmbs + "_stat_q_data_oe_model"),
prediction=False)
# Save this encoded_data
utility.save_model("".join("/simulate/" + quest_cmbs + "/" +
quest_cmbs + "_stat_q_data_encoded"), encoded_combined_df)
return encoded_combined_df
# Use this data to build the data model over static data.
except Exception:
print(traceback.print_exc())
def weighted_average(distress_list):
average = np.asarray(distress_list, dtype=float).mean()
return average
# Function computes the weighted average as predictions for given prediction time point
def compute_weighted_avg(n_idx, encoded_data, pred_at_list, method="mean", dist_nn=None, wt_flag=False):
preds = list()
# Prediction for four time points
for pval in pred_at_list:
distress_list = list()
for vals in n_idx:
# print(user_id)
# For this user now get the time series.
# might have to iterate over as well.
u_id = encoded_data["user_id"].iloc[vals]
user_ts = tsg_data.get_usr_mday_ts_predict(int(u_id))
# 3rd val of the series is s03 of the neighbor
print("{}, {} Values ".format(int(pval), int(u_id)))
if len(user_ts) > int(pval):
value = user_ts[int(pval), :][3]
elif len(user_ts) <= int(pval):
value = user_ts[len(user_ts) - 1, :][3]
distress_list.append(value)
if wt_flag:
print("Calling by weighted distance prediction for distress")
preds.append(weighted_distance_prediction(distress_list, dist_nn))
else:
print("Calling weighted average to predict distress")
preds.append(weighted_average(distress_list))
return preds
def weighted_distance_prediction(p_preds, distance):
# Invert the distances so that the highest weight goes to the nearest neighbour and the lowest to the farthest
inv_dist = np.divide(1, distance)
# s03 - tinnitus distress weighted by distance is given as
s03_pred = (np.sum(np.multiply(p_preds, inv_dist)) / (np.sum(inv_dist)))
return s03_pred
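# Worked example (illustrative numbers only): with neighbour predictions
# p_preds = [2.0, 4.0] and distances = [1.0, 3.0], the inverse distances are
# [1.0, 1/3], so s03_pred = (2.0*1.0 + 4.0/3) / (1.0 + 1/3) = 2.5 -- the closer
# neighbour dominates the estimate.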
def compute(test_nn, encoded_data,
pred_list, method="mean", dist_nn=None, wt_dist=False):
# test_nn = [0, 3, 4]
# pred_list = [0.1,0.23,0.27]
from sklearn.linear_model import LinearRegression
preds = list()
for point in pred_list:
nn_preds = list()
intercepts_list = list()
coeff_list = list()
for nn in test_nn:
u_id = encoded_data["user_id"].iloc[nn]
user_ts = tsg_data.get_usr_mday_ts_predict(int(u_id))
# Obtain the time series until time point and fit the data for linear regression
diff_arr = np.abs(np.subtract(point, user_ts[:, 1]))
diff_near_idx = np.where(diff_arr == diff_arr.min())
print("minimum to the time point is at -- ", diff_near_idx)
# difference near index. Handling for the length of users
usr_idx = diff_near_idx[0][0]
user_ts_p = user_ts[:usr_idx]
user_ts_df = pd.DataFrame(user_ts_p, columns=["day", "day_sess_index",
"s02", "s03", "s04",
"s05", "s06", "s07"])
X = user_ts_df[["day_sess_index"]]
# We show for tinnitus distress. This can be extended to other physiological variables as well.
y = user_ts_df[["s03"]]
# Fit on X axis as time and Y as the s03 predictive value.
reg_fit = LinearRegression(normalize=True)
reg_fit.fit(X, y)
# If weighted_distance is true, then predict by each of the nn_user and add to list. This will be used for
# calculating weighted_distance_predictions.
if wt_dist:
nn_pred = reg_fit.predict(np.asarray(point).reshape(1, -1))
nn_preds.append(nn_pred[0][0])
else:
intercepts_list.append(reg_fit.intercept_)
coeff_list.append(reg_fit.coef_)
if wt_dist:
print("Predicting the value of s03 for the user by a weighted average weighted by distance")
preds.append(weighted_distance_prediction(nn_preds, dist_nn))
else:
print("Predicting the value of s3 over the averaged slope and intercepts of "
"observations of the neighbors")
# y = mx + c, where m is the average slope of the neighbors and c is the average intercept obtained.
print("The equation to estimate s03 for the user is {}".format("".join(str(np.asarray(coeff_list).mean())) +
"* time_index + " +
str(np.asarray(intercepts_list).mean())))
y = np.multiply(np.asarray(coeff_list).mean(), point) + np.asarray(intercepts_list).mean()
preds.append(y)
return preds
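# Worked example of the "averaged slope and intercept" branch above (numbers are
# illustrative): if two neighbours yield fits y = 0.5*t + 1.0 and y = 1.5*t + 3.0,
# the averaged model is y = 1.0*t + 2.0, so the s03 estimate at time index t = 4
# is 6.0. With wt_dist=True the per-neighbour predictions are instead combined by
# weighted_distance_prediction().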
# Create test label as ground truth at prediction point.
def create_y_labels(test_data, prediction_at, method="mean"):
y_test = list()
for i in range(0, len(test_data)):
test_ts_test1 = tsg_data.get_usr_mday_ts_predict(int(test_data.iloc[i]["user_id"]))
# print(len(test_ts_test1))
if len(test_ts_test1) >= prediction_at:
y_test.append(test_ts_test1[prediction_at - 1][2])
elif len(test_ts_test1) < prediction_at:
y_test.append(test_ts_test1[len(test_ts_test1) - 1][2])
return y_test
# Create reference points for multiple reference predictions
def get_pred_ref_points(user_id, ndays, method="mean"):
# Using the default tsg which is mean observations of the user
test_user_ts = tsg_data.get_usr_mday_ts_predict(user_id)
# user_ts = tsg.get_usr_mday_ts_index_corrected(int(user_id))
user_ts_idx = test_user_ts[:, 1]
# ["date", "time_idx", "s02", "s03", "s04", "s05", "s06", "s07]
user_distress = test_user_ts[:, 3]
# Near evaluation. Change this for farther evaluations
# Near -> 0.25
# Far -> 1 - (Near)
# Near points are early in the observation sequence because we are sure all users stay until here.
prediction_at = 10
# Far prediction point is the last N% of the test user time series
# prediction_at = round(len(user_ts_idx) * 0.80)
y_labels = user_distress[prediction_at:prediction_at + ndays].tolist()
prediction_at_list = user_ts_idx[prediction_at:prediction_at + ndays].tolist()
return y_labels, prediction_at_list
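# Example of what the helper above returns (shape only, values illustrative):
# with prediction_at = 10 and ndays = 3, y_labels are the user's s03 distress
# values at observations 10, 11 and 12, and prediction_at_list holds the
# corresponding day_sess_index time points used as forecast reference points.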
def do_test(test_data, out_writer, csv_out_writer,
ndays, near_idxs, encoded_data, fold_count="final",
method="mean", dist_nn=None, wt_dist_flag=False):
for i in range(0, len(test_data)):
user_id = int(test_data.iloc[i]["user_id"])
print("User- Id ", user_id)
y_labels, prediction_at_list = get_pred_ref_points(user_id, ndays, method=method)
# y_labels = create_y_labels(X_test, preds, method="mean")
if wt_dist_flag:
test_user_nn = near_idxs[i]
test_user_dist = dist_nn[i]
pred_weighted_average = compute_weighted_avg(test_user_nn, encoded_data, prediction_at_list,
method=method, dist_nn=test_user_dist, wt_flag=wt_dist_flag)
pred_lr = compute(test_user_nn, encoded_data, prediction_at_list,
method=method, dist_nn=test_user_dist, wt_dist=wt_dist_flag)
else:
test_user_nn = near_idxs[i]
pred_weighted_average = compute_weighted_avg(test_user_nn, encoded_data, prediction_at_list,
method=method, dist_nn=None, wt_flag=False)
pred_lr = compute(test_user_nn, encoded_data, prediction_at_list,
method=method, dist_nn=None, wt_dist=False)
# calculate MAE, MSE, RMSE
if not fold_count == "final":
print("Evaluating for the fold-" + str(count) + " for the forecast reference points - " +
str(prediction_at_list))
out_writer.write("Evaluating for the fold-" + str(count) + " for the forecast reference points -- " +
str(prediction_at_list) + "for the method evaluation -- " + str(method) + "\n")
else:
print("Evaluating for the final model over the " + " forecast reference points - " +
str(prediction_at_list))
out_writer.write("Evaluating for the final model over the" + " forecast reference points -- " +
str(prediction_at_list) + "for the method evaluation -- " + str(method) + "\n")
print("Computing MAE, MSE, RMSE for weighted average based predictions on the User -- " + str(user_id))
out_writer.write("Computing MAE, MSE, RMSE for weighted average based predictions"
" plain and on N days on the User -- " + str(user_id) + "\n")
print("---------------------------------------------------------------")
out_writer.write("---------------------------------------------------------------\n")
print("MAE -- ", mean_absolute_error(y_labels, pred_weighted_average))
out_writer.write("MAE -- " + str(mean_absolute_error(y_labels, pred_weighted_average)) + "\n")
# MAE for N days
print("MAE for N days -- ",
str(mean_absolute_error(y_labels, pred_weighted_average) / ndays))
out_writer.write("MAE for N days -- "
+ str(mean_absolute_error(y_labels, pred_weighted_average) / ndays) + "\n")
print("MSE -- ", mean_squared_error(y_labels, pred_weighted_average))
out_writer.write("MSE -- " + str(mean_squared_error(y_labels, pred_weighted_average)) + "\n")
# MSE for N days
print("MSE for N days-- ", str(mean_squared_error(y_labels, pred_weighted_average) / ndays))
out_writer.write(
"MSE for N days -- " + str(mean_squared_error(y_labels, pred_weighted_average) / ndays) + "\n")
print("RMSE -- ", np.sqrt(mean_squared_error(y_labels, pred_weighted_average)))
out_writer.write("RMSE -- " + str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "\n")
# RMSE for N days
print("RMSE for N days -- ", str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average)) / ndays))
out_writer.write("RMSE for N days -- " + str(
np.sqrt(mean_squared_error(y_labels, pred_weighted_average)) / ndays) + "\n")
# pred_lr = compute_linear_regression(test_user_nn, encoded_data, prediction_at_list , method="mean")
m_count = 0
# Writing to csv file
if not fold_count == "final":
csv_out_writer.write("".join(str(user_id) + "," +
str(count) + "," +
str(mean_absolute_error(y_labels, pred_weighted_average)) + "," +
str(mean_squared_error(y_labels, pred_weighted_average)) + "," +
str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "," +
"weighted_average" + ","
# str(y_labels) + "," +
# str(pred_weighted_average)
+ str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2])
+ "," + str(pred_weighted_average[0]) + "," + str(pred_weighted_average[1])
+ "," + str(pred_weighted_average[2]) + "\n"))
else:
csv_out_writer.write("".join(str(user_id) + "," +
str("test") + "," +
str(mean_absolute_error(y_labels, pred_weighted_average)) + "," +
str(mean_squared_error(y_labels, pred_weighted_average)) + "," +
str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + "," +
"weighted_average" + ","
+ str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2])
+ "," + str(pred_weighted_average[0]) + "," + str(pred_weighted_average[1])
+ "," + str(pred_weighted_average[2]) + "\n"))
# + str(y_labels) + str(pred_weighted_average)
print("---------------------------------------------------------------")
out_writer.write("---------------------------------------------------------------\n")
print("Computing MAE, MSE, RMSE for {} {} based predictions for the user -- {}"
.format(str("weighted_distance" + str(wt_dist_flag)), str("linear_regression"), str(user_id)))
out_writer.write("Computing MAE, MSE, RMSE for {} {} based predictions for the user -- {} \n"
.format(str("weighted_distance" + str(wt_dist_flag)), str("linear_regression"), str(user_id)))
print("MAE -- ", mean_absolute_error(y_labels, pred_lr))
out_writer.write("MAE -- " + str(mean_absolute_error(y_labels, pred_lr)) + "\n")
print("MSE -- ", mean_squared_error(y_labels, pred_lr))
out_writer.write("MSE -- " + str(mean_squared_error(y_labels, pred_lr)) + "\n")
print("RMSE -- ", np.sqrt(mean_squared_error(y_labels, pred_lr)))
out_writer.write("RMSE -- " + str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "\n")
print("---------------------------------------------------------------")
out_writer.write("---------------------------------------------------------------\n")
# Write to csv file
if not fold_count == "final":
csv_out_writer.write("".join(str(user_id) + "," +
str(count) + "," +
str(mean_absolute_error(y_labels, pred_lr)) + "," +
str(mean_squared_error(y_labels, pred_lr)) + "," +
str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "," +
str("lr") + ","
+ str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2])
+ "," + str(pred_lr[0]) + "," + str(pred_lr[1]) + "," + str(
pred_lr[2]) + "\n"))
else:
csv_out_writer.write("".join(str(user_id) + "," +
str("test") + "," +
str(mean_absolute_error(y_labels, pred_lr)) + "," +
str(mean_squared_error(y_labels, pred_lr)) + "," +
str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + "," +
str("lr") + ","
+ str(y_labels[0]) + "," + str(y_labels[1]) + "," + str(y_labels[2])
+ "," + str(pred_lr[0]) + "," + str(pred_lr[1]) + "," + str(
pred_lr[2]) + "\n"))
import properties
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
# Create prediction reference points
from sklearn.metrics import *
# Here, to change to different evaluations
from time_series_grp import TimeSeriesGroupProcessing
from HEOM import HEOM
from scipy.spatial.distance import pdist, squareform
# Change method and execute to get the predictions appropriately, these are configurations
eval_method = "mean"
wt_distance = False
# Random Neighbors
rand_neighbors = False
# Default day readings for all test users are taken at the mean; predictions lie between min - mean - max
tsg_data = TimeSeriesGroupProcessing(method=eval_method)
# For all combinations evaluation it must be set to True
quest_cmb_all = True
# Same random state needs to be maintained to get consistent test data over all combinations and repeatable results
random_state = 1220
# Setting for how far ahead tinnitus distress is predicted; 3 means 3 days ahead.
# The gap between observations ranges from a day up to about 60 days, which is not a usual scenario.
ndays = 3
# Load user build models over the time series observations
# user_best_models = utility.load_ts_model("best_params_usr_models")
# user_best_estimators = utility.load_ts_model("cross_val_estimators")
# KFOLDS - Evaluation over K=5 folds are done.
# Build the default model with all the combination.
if not quest_cmb_all:
for key, val in properties.quest_comb.items():
# Build model for each category
print("Building model for the question combination -- " + str(key))
out_writer = open("".join("output/output_simulate_" + str(key) + "_" + str(eval_method) + "_heom_norm.txt"), "w+")
csv_out_writer = open("".join("output/output__simulate_" + str(key) + "_" + str(eval_method) + "_heom_norm.csv"), "w+")
# key = "bg_tinnitus_history"
# val = properties.quest_comb[key]
cat_idx, num_idx, combined_df = smf.initial_processing(key, val, append_synthethic=True)
# Build and get the knn model for prediction over test instances.
# Save the data objs
encoded_data = save_data_objs(combined_df, key)
csv_out_writer.write("".join("user_id,fold,mae,mse,rmse,algorithm,"
"ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\n"))
# Create a specific test set as per requirements to contain digital twin, outlier and normal instances
random_user_ids = encoded_data["user_id"].sample(n=3, random_state=42).to_list()
"""
10 test users in following format:
1. Outliers -- [8,20,27,149]
2. DT - [44428, 444154, 444133]
3. Random Users - random_user_ids with random state same so always same test set is retrieved.
"""
test_simulation_ids = [8, 20, 27, 149, 44428, 444154, 444133] + random_user_ids
test = encoded_data[encoded_data["user_id"].isin(test_simulation_ids)]
X = encoded_data[~encoded_data["user_id"].isin(test_simulation_ids)]
def filter_train_ids(x):
# print(x)
if x["user_id"] in train_user_ids:
return x
def filter_test_ids(x):
# print(x)
if x["user_id"] in test_user_ids:
return x
train_user_ids = X["user_id"].to_list()
X_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type="broadcast").dropna()
X_train_data_ui["user_id"] = X_train_data_ui["user_id"].apply(int)
# Save the non encoded train data for visualization purposes
utility.save_model("".join("/simulate/" + key + "/" + key + "_train_stat_q_data"), X_train_data_ui)
# filter and get the data to show to the UI for the test data.
test_user_ids = test["user_id"].to_list()
X_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type="broadcast").dropna()
X_test_data_ui["user_id"] = X_test_data_ui["user_id"].apply(int)
# Save the data_ui object as json
test_data = {}
test_data["users"] = X_test_data_ui.to_dict("r")
utility.save_data("".join("simulate/test_data_ui_" + key), test_data)
heom = HEOM(X.to_numpy(), cat_idx, num_idx)
sim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance)
mean_heom_distance = sim_matrix.mean()
knn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance)
knn.fit(X.iloc[:, 1:])
dist, test_idx = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5)
do_test(test, out_writer, csv_out_writer, ndays, test_idx, X,
fold_count="final", method=eval_method, dist_nn=None, wt_dist_flag=wt_distance)
utility.save_model("".join("simulate/" + key + "/" + "knn_static"), knn)
utility.save_model("".join("simulate/" + key + "/" + "train_sim_data.pckl"), X)
out_writer.close()
csv_out_writer.close()
else:
cat_idx, num_idx, combined_df = initial_processing()
# Build model for each category
print("Building model for the question combination -- " + str("overall"))
# Take this combined_df and split into train and test.
# Split some data out of test as part unseen from the UI
# data_ui_val, data = combined_df.iloc[:5, :], combined_df.iloc[5:, :]
# Save the data objs
encoded_data = save_data_objs(combined_df, "overall")
random_user_ids = encoded_data["user_id"].sample(n=3, random_state=42).to_list()
test_simulation_ids = [8, 20, 27, 149, 44428, 444154, 444133] + random_user_ids
test = encoded_data[encoded_data["user_id"].isin(test_simulation_ids)]
X = encoded_data[~encoded_data["user_id"].isin(test_simulation_ids)]
def filter_train_ids(x):
# print(x)
if x["user_id"] in train_user_ids:
return x
def filter_test_ids(x):
# print(x)
if x["user_id"] in test_user_ids:
return x
train_user_ids = X["user_id"].to_list()
X_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type="broadcast").dropna()
X_train_data_ui["user_id"] = X_train_data_ui["user_id"].apply(int)
utility.save_model("".join("/simulate/" + "overall" + "/" + "overall" + "_train_stat_q_data"), X_train_data_ui)
# filter and get the data to show to the UI for the test data.
test_user_ids = test["user_id"].to_list()
X_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type="broadcast").dropna()
X_test_data_ui["user_id"] = X_test_data_ui["user_id"].apply(int)
# Save the data_ui object as json
test_data = {}
test_data["users"] = X_test_data_ui.to_dict("r")
utility.save_data("simulate/test_data_ui_x_test", test_data)
count = 0
out_writer = open("output/simulate_overall_output_folds_" + str(eval_method) + ".txt", "w+")
csv_out_writer = open("output/simulate_overall_output_folds_" + str(eval_method) + ".csv", "w+")
# First get the time series for a given test patient and the reference point and iterate to evaluate
csv_out_writer.write("user_id,fold,mae,mse,rmse,algorithm,"
"ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\n")
# Split the data into train and test and evaluate as a final model
from sklearn.model_selection import train_test_split
import utility
from HEOM import HEOM
# Can be done at prediction too.
from sklearn.metrics.pairwise import cosine_distances
from sklearn.linear_model import LinearRegression
from scipy.spatial.distance import pdist, squareform
heom = HEOM(X.to_numpy()[:, 1:], cat_idx, num_idx)
sim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance)
mean_heom_distance = sim_matrix.mean()
knn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance)
knn.fit(X.to_numpy()[:, 1:])
dist, idx_test = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5)
do_test(test, out_writer, csv_out_writer, ndays, idx_test, X,
fold_count="final", method=eval_method, dist_nn=None, wt_dist_flag=wt_distance)
out_writer.close()
csv_out_writer.close()
# Save the simulated neighborhood results
utility.save_model("".join("/simulate/overall/" + "knn_static"), knn)
utility.save_model("".join("/simulate/overall" + "/" + "train_sim_data.pckl"), X)
|
StarcoderdataPython
|
6641437
|
##############################################################################
# Copyright 2016-2019 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import sys
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
import networkx as nx
import numpy as np
from pyquil.quilatom import Parameter, unpack_qubit
from pyquil.quilbase import Gate
if sys.version_info < (3, 7):
from pyquil.external.dataclasses import dataclass
else:
from dataclasses import dataclass
DEFAULT_QUBIT_TYPE = "Xhalves"
DEFAULT_EDGE_TYPE = "CZ"
THETA = Parameter("theta")
"Used as the symbolic parameter in RZ, CPHASE gates."
@dataclass
class MeasureInfo:
operator: Optional[str] = None
qubit: Optional[Union[int, str]] = None
target: Optional[Union[int, str]] = None
duration: Optional[float] = None
fidelity: Optional[float] = None
@dataclass
class GateInfo:
operator: Optional[str] = None
parameters: Optional[Sequence[Union[str, float]]] = None
arguments: Optional[Sequence[Union[str, float]]] = None
duration: Optional[float] = None
fidelity: Optional[float] = None
@dataclass
class Qubit:
id: int
type: Optional[str] = None
dead: Optional[bool] = None
gates: Optional[Sequence[Union[GateInfo, MeasureInfo]]] = None
@dataclass
class Edge:
targets: Tuple[int, ...]
type: Optional[Union[List[str], str]] = None
dead: Optional[bool] = None
gates: Optional[Sequence[GateInfo]] = None
@dataclass
class ISA:
"""
Basic Instruction Set Architecture specification.
:ivar qubits: The qubits associated with the ISA.
:ivar edges: The multi-qubit gates.
"""
qubits: Sequence[Qubit]
edges: Sequence[Edge]
def to_dict(self) -> Dict[str, Any]:
"""
Create a JSON-serializable representation of the ISA.
The dictionary representation is of the form::
{
"1Q": {
"0": {
"type": "Xhalves"
},
"1": {
"type": "Xhalves",
"dead": True
},
...
},
"2Q": {
"1-4": {
"type": "CZ"
},
"1-5": {
"type": "CZ"
},
...
},
...
}
:return: A dictionary representation of self.
"""
def _maybe_configure(o: Union[Qubit, Edge], t: Union[str, List[str]]) -> Dict[str, Any]:
"""
Exclude default values from generated dictionary.
:param o: The object to serialize
:param t: The default value for ``o.type``.
:return: d
"""
d: Dict[str, Any] = {}
if o.gates is None or len(o.gates) == 0:
inferred_type = o.type if (o.type is not None and o.type != t) else t
inferred_gates = convert_gate_type_to_gate_information(inferred_type)
else:
inferred_gates = cast(List[Union[GateInfo, MeasureInfo]], o.gates)
d["gates"] = [
{
"operator": i.operator,
"parameters": i.parameters,
"arguments": i.arguments,
"fidelity": i.fidelity,
"duration": i.duration,
}
if isinstance(i, GateInfo)
else {
"operator": "MEASURE",
"qubit": i.qubit,
"target": i.target,
"duration": i.duration,
"fidelity": i.fidelity,
}
for i in inferred_gates
]
if o.dead:
d["dead"] = o.dead
return d
return {
"1Q": {"{}".format(q.id): _maybe_configure(q, DEFAULT_QUBIT_TYPE) for q in self.qubits},
"2Q": {
"{}-{}".format(*edge.targets): _maybe_configure(edge, DEFAULT_EDGE_TYPE)
for edge in self.edges
},
}
@staticmethod
def from_dict(d: Dict[str, Any]) -> "ISA":
"""
Re-create the ISA from a dictionary representation.
:param d: The dictionary representation.
:return: The restored ISA.
"""
return ISA(
qubits=sorted(
[
Qubit(
id=int(qid),
type=q.get("type", DEFAULT_QUBIT_TYPE),
dead=q.get("dead", False),
)
for qid, q in d["1Q"].items()
],
key=lambda qubit: qubit.id,
),
edges=sorted(
[
Edge(
targets=tuple(int(q) for q in eid.split("-")),
type=e.get("type", DEFAULT_EDGE_TYPE),
dead=e.get("dead", False),
)
for eid, e in d["2Q"].items()
],
key=lambda edge: edge.targets,
),
)
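# A minimal round-trip sketch (qubit/edge ids chosen for illustration):
#
#   isa = ISA.from_dict({"1Q": {"0": {}, "1": {"dead": True}},
#                        "2Q": {"0-1": {"type": "CZ"}}})
#   isa.qubits     # [Qubit(id=0, type='Xhalves', dead=False), Qubit(id=1, ..., dead=True)]
#   isa.edges      # [Edge(targets=(0, 1), type='CZ', dead=False)]
#   isa.to_dict()  # serialises back, filling in gate information for each entry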
def convert_gate_type_to_gate_information(
gate_type: Union[str, List[str]]
) -> List[Union[GateInfo, MeasureInfo]]:
if isinstance(gate_type, str):
gate_type = [gate_type]
gate_information: List[Union[GateInfo, MeasureInfo]] = []
for type_keyword in gate_type:
if type_keyword == "Xhalves":
gate_information.extend(
[
GateInfo("I", [], ["_"]),
GateInfo("RX", [np.pi / 2], ["_"]),
GateInfo("RX", [-np.pi / 2], ["_"]),
GateInfo("RX", [np.pi], ["_"]),
GateInfo("RX", [-np.pi], ["_"]),
GateInfo("RZ", ["theta"], ["_"]),
MeasureInfo(operator="MEASURE", qubit="_", target="_"),
MeasureInfo(operator="MEASURE", qubit="_", target=None),
]
)
elif type_keyword == "WILDCARD":
gate_information.extend([GateInfo("_", "_", ["_"]), GateInfo("_", "_", ["_", "_"])])
elif type_keyword == "CZ":
gate_information.extend([GateInfo("CZ", [], ["_", "_"])])
elif type_keyword == "ISWAP":
gate_information.extend([GateInfo("ISWAP", [], ["_", "_"])])
elif type_keyword == "CPHASE":
gate_information.extend([GateInfo("CPHASE", ["theta"], ["_", "_"])])
elif type_keyword == "XY":
gate_information.extend([GateInfo("XY", ["theta"], ["_", "_"])])
else:
raise ValueError("Unknown edge type: {}".format(type_keyword))
return gate_information
def gates_in_isa(isa: ISA) -> List[Gate]:
"""
Generate the full gateset associated with an ISA.
:param isa: The instruction set architecture for a QPU.
:return: A sequence of Gate objects encapsulating all gates compatible with the ISA.
"""
gates = []
for q in isa.qubits:
if q.dead:
# TODO: dead qubits may in the future lead to some implicit re-indexing
continue
if q.type == "Xhalves":
gates.extend(
[
Gate("I", [], [unpack_qubit(q.id)]),
Gate("RX", [np.pi / 2], [unpack_qubit(q.id)]),
Gate("RX", [-np.pi / 2], [unpack_qubit(q.id)]),
Gate("RX", [np.pi], [unpack_qubit(q.id)]),
Gate("RX", [-np.pi], [unpack_qubit(q.id)]),
Gate("RZ", [THETA], [unpack_qubit(q.id)]),
]
)
elif q.type == "WILDCARD":
gates.extend([Gate("_", "_", [unpack_qubit(q.id)])])
else: # pragma no coverage
raise ValueError("Unknown qubit type: {}".format(q.type))
for e in isa.edges:
if e.dead:
continue
targets = [unpack_qubit(t) for t in e.targets]
assert e.type is not None
edge_type = e.type if isinstance(e.type, list) else [e.type]
if "CZ" in edge_type:
gates.append(Gate("CZ", [], targets))
gates.append(Gate("CZ", [], targets[::-1]))
continue
if "ISWAP" in edge_type:
gates.append(Gate("ISWAP", [], targets))
gates.append(Gate("ISWAP", [], targets[::-1]))
continue
if "CPHASE" in edge_type:
gates.append(Gate("CPHASE", [THETA], targets))
gates.append(Gate("CPHASE", [THETA], targets[::-1]))
continue
if "XY" in edge_type:
gates.append(Gate("XY", [THETA], targets))
gates.append(Gate("XY", [THETA], targets[::-1]))
continue
assert e.type is not None
if "WILDCARD" in e.type:
gates.append(Gate("_", "_", targets))
gates.append(Gate("_", "_", targets[::-1]))
continue
raise ValueError("Unknown edge type: {}".format(e.type))
return gates
def isa_from_graph(
graph: nx.Graph, oneq_type: str = "Xhalves", twoq_type: Optional[Union[str, List[str]]] = None
) -> ISA:
"""
Generate an ISA object from a NetworkX graph.
:param graph: The graph
:param oneq_type: The type of 1-qubit gate. Currently 'Xhalves'
:param twoq_type: The type of 2-qubit gate. One or more of 'CZ', 'CPHASE', 'ISWAP', 'XY'.
The default, None, is a synonym for ["CZ", "XY"].
"""
all_qubits = list(range(max(graph.nodes) + 1))
qubits = [Qubit(i, type=oneq_type, dead=i not in graph.nodes) for i in all_qubits]
edges = [
Edge(
tuple(sorted((a, b))), type=["CZ", "XY"] if twoq_type is None else twoq_type, dead=False
)
for a, b in graph.edges
]
return ISA(qubits, edges)
def isa_to_graph(isa: ISA) -> nx.Graph:
"""
Construct a NetworkX qubit topology from an ISA object.
This discards information about supported gates.
:param isa: The ISA.
"""
return nx.from_edgelist(e.targets for e in isa.edges if not e.dead)
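# A small usage sketch (graph chosen for illustration): a three-qubit line.
#
#   g = nx.path_graph(3)
#   isa = isa_from_graph(g)          # qubits 0-2 alive, edges (0,1) and (1,2) typed CZ/XY
#   sorted(isa_to_graph(isa).edges)  # [(0, 1), (1, 2)] -- gate info is discarded
#   gates_in_isa(isa)                # the concrete Gate list the ISA supports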
|
StarcoderdataPython
|
12828348
|
#!/usr/bin/env python3
import json
import logging
import os
import traceback
import git
import requests
import yaml
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(logging.StreamHandler())
def post(path: str, data: dict, params=None):
if params is None:
params = {}
headers = {'Content-Type': 'application/json'}
params['token'] = TOKEN
if DEBUG:
logging.info("post: {} data: {}".format(path, data))
return None
# Attempt to contact the pipeline API
try:
res = requests.post(
'http://pipeline-api:5000' + path,
headers=headers,
params=params,
json=data,
)
except:
logging.error('UNABLE TO REPORT POST TO PIPELINE API')
exit(0)
# If the call to the api failed we're in trouble,
# and need to abort.
if res.status_code != 200:
logging.error('UNABLE TO REPORT POST TO PIPELINE API')
exit(0)
return res
def report_panic(message: str, traceback: str, ):
"""
Report an error to the API
:param message: error message
:param traceback: optional traceback
:return:
"""
data = {
'token': TOKEN,
'commit': COMMIT,
'message': message,
'traceback': traceback,
}
print(traceback)
logging.info('report_error {}'.format(json.dumps(data, indent=2)))
post('/pipeline/report/panic/{}'.format(SUBMISSION_ID), data)
try:
import assignment
except ImportError:
report_panic('Unable to import assignment', traceback.format_exc())
exit(0)
from utils import registered_tests, build_function
from utils import fix_permissions, Panic, DEBUG
git_creds = os.environ.get('GIT_CRED', default=None)
if git_creds is not None:
del os.environ['GIT_CRED']
with open(os.environ.get('HOME') + '/.git-credentials', 'w') as f:
f.write(git_creds)
f.close()
with open(os.environ.get('HOME') + '/.gitconfig', 'w') as f:
f.write('[credential]\n')
f.write('\thelper = store\n')
f.close()
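# The credential helper configured above expects ~/.git-credentials to hold one
# URL per line in git's credential-store format, e.g. (placeholder values):
#   https://<username>:<token>@github.com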
TOKEN = os.environ.get('TOKEN')
COMMIT = os.environ.get('COMMIT')
GIT_REPO = os.environ.get('GIT_REPO')
SUBMISSION_ID = os.environ.get('SUBMISSION_ID')
del os.environ['TOKEN']
def report_state(state: str, params=None):
"""
Report a state update for the current submission
:param params:
:param state: text representation of state
:return:
"""
data = {
'token': TOKEN,
'commit': COMMIT,
'state': state,
}
logging.info('report_state {}'.format(json.dumps(data, indent=2)))
post('/pipeline/report/state/{}'.format(SUBMISSION_ID), data, params=params)
def report_build_results(stdout: str, passed: bool):
"""
Report the results of a given build.
:param stdout:
:param passed:
:return:
"""
data = {
'token': TOKEN,
'commit': COMMIT,
# 'stdout': base64.b16encode(stdout).decode(),
'stdout': stdout,
'passed': passed,
}
logging.info('report_build {}'.format(json.dumps(data, indent=2)))
post('/pipeline/report/build/{}'.format(SUBMISSION_ID), data)
def report_test_results(test_name: str, stdout: str, message: str, passed: bool):
"""
Report a single test result to the pipeline API.
:param test_name:
:param stdout:
:param message:
:param passed:
:return:
"""
data = {
'token': TOKEN,
'commit': COMMIT,
'test_name': test_name,
# 'stdout': base64.b16encode(stdout).decode(),
'stdout': stdout,
'message': message,
'passed': passed,
}
logging.info('report_test_results {}'.format(json.dumps(data, indent=2)))
post('/pipeline/report/test/{}'.format(SUBMISSION_ID), data)
def get_assignment_data() -> dict:
"""
Load the assignment metadata out from the assignment yaml file
:return:
"""
# Figure out filename
assignment_filename = None
for assignment_filename_option in ['meta.yml', 'meta.yaml']:
if os.path.isfile(assignment_filename_option):
assignment_filename = assignment_filename_option
break
# Make sure we figured out the metadata filename
if assignment_filename is None:
report_panic('No meta.yml was found', '')
exit(0)
# Load yaml
with open(assignment_filename, 'r') as f:
try:
assignment_data = yaml.safe_load(f.read())
except yaml.YAMLError:
report_panic('Unable to read assignment yaml', traceback.format_exc())
logging.info(assignment_data)
return assignment_data
def clone():
"""
Clone the assignment repo into the student folder.
File permissions will need to be updated.
:return:
"""
report_state('Cloning repo')
# Clone
try:
repo = git.Repo.clone_from(GIT_REPO, './student')
if COMMIT.lower() != 'null':
repo.git.checkout(COMMIT)
except git.exc.GitCommandError:
report_panic('Git error', traceback.format_exc())
exit(0)
fix_permissions()
os.system('rm -rf ./student/.git')
os.system('rm -rf /home/anubis/.git-credentials')
os.system('rm -rf /home/anubis/.gitconfig')
def run_build(assignment_data: dict):
"""
Build the student repo.
:param assignment_data: assignment meta
:return:
"""
# build
report_state('Running Build...')
result = build_function()
report_build_results(result.stdout, result.passed)
if not result.passed:
exit(0)
def run_tests(assignment_data: dict):
"""
Run the assignment test scripts. Update submission state as you go.
:param assignment_data:
:return:
"""
# Tests
for test_name in registered_tests:
report_state('Running test: {}'.format(test_name))
result = registered_tests[test_name]()
report_test_results(test_name, result.stdout, result.message, result.passed)
def main():
try:
assignment_data = get_assignment_data()
clone()
os.chdir('./student')
run_build(assignment_data)
run_tests(assignment_data)
report_state('Finished!', params={'processed': '1'})
except Panic as e:
report_panic(repr(e), traceback.format_exc())
except Exception as e:
report_panic(repr(e), traceback.format_exc())
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3373397
|
<reponame>eherr/vis_utils
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from ...graphics.renderer import lines
from ...graphics.renderer import primitive_shapes
from ..scene_object import SceneObject
from ...graphics.utils import get_translation_matrix
class CoordinateSystemObject(SceneObject):
def __init__(self, scale=1.0):
SceneObject.__init__(self)
#super(self, CoordinateSystemObject).__init__()
self.visualization = lines.CoordinateSystemRenderer(scale)
self.active = True
def setPosition(self, position):
self.transformation = get_translation_matrix(position)
def draw(self, viewMatrix, projectionMatrix, lightSources):
if self.active:
self.visualization.draw(self.transformation, viewMatrix, projectionMatrix)
class PlaneObject(SceneObject):
def __init__(self,width = 1000, depth =1000,segments =10, r =0.5,g=0.5,b=0.5):
SceneObject.__init__(self)
self.visualization = lines.WireframePlaneRenderer(width, depth, segments, r, g, b)
def draw(self,viewMatrix,projectionMatrix,lightSources):
self.visualization.draw(self.transformation,viewMatrix,projectionMatrix)
def intersectRay(self,start,rayDir):
return self.visualization.intersectRay(start,rayDir)
class TravelledPathObject(SceneObject):
def __init__(self,r = 1.0,g = 1.0,b = 1.0,maxLength = 2000):
SceneObject.__init__(self)
self.line = lines.ExtendingLineRenderer(r, g, b, maxLength)
def addPoint(self,point):
self.line.addPoint(point)
def clear(self):
self.line.clear()
def draw(self,viewMatrix,projectionMatrix,lightSources):
self.line.draw(self.transformation,viewMatrix, projectionMatrix)
class PointCloudObject(SceneObject):
def __init__(self):
SceneObject.__init__(self)
self.sphere = primitive_shapes.SphereRenderer(20, 20, 1.0, material=CustomShaders.redMaterial)
self.transformations = []
return
def addPoint(self,point):
print("got point", point)
transformation = get_translation_matrix(point)
self.transformations.append(transformation)
def clear(self):
self.transformations.clear()
def draw(self,viewMatrix,projectionMatrix,lightSources):
for transformation in self.transformations:
self.sphere.draw(transformation,viewMatrix, projectionMatrix,lightSources)
class TravelledPathWithCorrespondencesObject(SceneObject):
def __init__(self,r = 1.0,g = 1.0,b = 1.0,maxLength = 2000):
SceneObject.__init__(self)
self.line = lines.ExtendingLineRenderer(r, g, b, maxLength)
self.correspondences = lines.ExtendingLineRenderer(1.0, 0.0, 0.0, maxLength * 2)
def addPoint(self,point):
self.line.addPoint(point)
def addCorrespondence(self,point,correspondence):
self.correspondences.addPoint(point)
self.correspondences.addPoint(correspondence)
def clear(self):
self.line.clear()
self.correspondences.clear()
def draw(self,viewMatrix,projectionMatrix,lightSources):
self.line.draw(self.transformation,viewMatrix, projectionMatrix)
self.correspondences.draw(self.transformation,viewMatrix, projectionMatrix)
class TravelledPathWithMarkersObject(SceneObject):
def __init__(self):
SceneObject.__init__(self)
self.line = lines.ExtendingLineRenderer(0.0, 1.0, 0.0)
self.markers = lines.ExtendingMarkerListRenderer(1.0, 0.0, 0.0)
def addMarker(self,point):
self.markers.addPoint(point)
def addPoint(self,point):
self.line.addPoint(point)
def clear(self):
self.line.clear()
self.markers.clear()
def draw(self,viewMatrix,projectionMatrix,lightSources):
self.line.draw(self.transformation,viewMatrix, projectionMatrix)
self.markers.draw(self.transformation,viewMatrix, projectionMatrix)
class ListOfLocalCoordinateSystemsObject(SceneObject):
def __init__(self,scaleFactor = 0.5):
SceneObject.__init__(self)
self.scaleFactor = scaleFactor
self.coordinateSystems =[]
def addCoordinateSystem(self,transformation):
cs = lines.CoordinateSystemRenderer(scale = self.scaleFactor)
print("transformation matrix",transformation.shape)
self.coordinateSystems.append((cs,transformation))
def clear(self):
self.coordinateSystems = []
def draw(self, viewMatrix, projectionMatrix, lightSources):
for cs,transformation in self.coordinateSystems:
#print "draw cs"
cs.draw(transformation,viewMatrix, projectionMatrix)
|
StarcoderdataPython
|
9608210
|
<gh_stars>1-10
#!/usr/bin/env python3
from datetime import datetime as dt2
from dateutil.relativedelta import *
from dateutil.rrule import *
import os, sys, re, traceback, calendar, datetime
import json, unicodedata
from bpaTools import Logger
def isInteger(s:str) -> bool:
try:
n:int = int(s)
return True
except ValueError:
return False
def isFloat(s:str) -> bool:
try:
n:float = float(s)
return True
except ValueError:
return False
def cleanAccent(s:str) -> str:
return unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore').decode('ASCII')  # decode so the declared str return type holds
def str2bool(v:str) -> bool:
return bool(v.lower() in ("true","t","vrai","v","yes","y","1","-1"))
def theQuit():
sys.exit()
return
def sysExit():
sys.exit()
return
def sysExitError(sError:str) -> None:
sys.stderr.write(sError)
traceback.print_exc(file=sys.stdout)
sys.exit(sError)
return
def ctrlPythonVersion() -> None:
pyMajorVersion, pyMinorVersion = sys.version_info[:2]
if not (pyMajorVersion==3 and pyMinorVersion>=5):
sysExitError("Sorry, only Python 3.5 or and higher are supported at this time.\n")
return
#Use: bpaTools.initEvent(__file__)
def initEvent(sFile:str, oLog:Logger=None, isSilent:bool=False) -> None:
msg = "({0}) Initialisation".format(getFileName(sFile))
if oLog:
oLog.debug(msg, level=9, outConsole=True)
elif not isSilent:
print(msg)
return
#Extract data, samples:
#   getContentOf('{"1":"Value"}', "{", "}", bRetSep=True) -> '{"1":"Value"}'
# getContentOf("*AActiv [HX] ...", "[", "]") -> "HX"
# getContentOf("UTC(01/01->31/12)", "(", ")") -> "01/01->31/12"
# getContentOf("tyty et toto sur ", "(", ")") -> None
# getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")") -> 120.700
# getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")", iNoInst=1) -> 120.700
# getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")", iNoInst=2) -> GLIDER
# getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")", iNoInst=3) -> None
def getContentOf(sSrc:str, sSepLeft:str, sSepRight:str, iNoInst:int=1, bRetSep:bool=False) -> str:
lIdxLeft:int = -1
lIdxRight:int = -1
for idx in range(0, iNoInst):
lIdxLeft = sSrc.find(sSepLeft, lIdxLeft+1)
lIdxRight = sSrc.find(sSepRight, lIdxLeft+1)
if lIdxLeft<0 or lIdxRight<0: break
if lIdxLeft>=0 and lIdxRight>=0:
if bRetSep:
return sSrc[lIdxLeft:lIdxRight+len(sSepRight)]
else:
return sSrc[lIdxLeft+len(sSepLeft):lIdxRight]
else:
return None
#Extract data, samples:
# getLeftOf("UTC(01/01->31/12)", "(") -> "UTC"
# getLeftOf("tyty et toto sur ", "(",) -> None
def getLeftOf(sSrc:str, sFind:str) -> str:
lIdxFind = sSrc.find(sFind)
if lIdxFind>=0:
return sSrc[:lIdxFind]
else:
return None
#Extract data, samples:
# getRightOf("*AActiv [HX] tyty et toto", "]") -> " tyty et toto"
# getRightOf("tyty et toto sur ", ")",) -> None
def getRightOf(sSrc:str, sFind:str) -> str:
lIdxFind = sSrc.find(sFind)
if lIdxFind>=0:
        return sSrc[lIdxFind+len(sFind):]
else:
return None
def getFileName(sFile:str) -> str:
return os.path.basename(sFile).split(".")[0]
def getFileExt(sFile:str) -> str:
return os.path.splitext(sFile)[1]
def getFilePath(sFile:str) -> str:
    #return os.path.dirname(sFile) + "/"                        #Not reliable on Linux
    return os.path.dirname(os.path.abspath(sFile)) + "/"        #Works on Linux
def getFileModificationDate(sFile:str) -> datetime:
try:
mtime:float = os.path.getmtime(sFile)
except OSError:
mtime:float = 0
return dt2.fromtimestamp(mtime)
def getFileCreationDate(sFile:str) -> datetime:
try:
ctime:float = os.path.getctime(sFile)
except OSError:
ctime:float = 0
return dt2.fromtimestamp(ctime)
#Samples
# str(bpaTools.getNow()) -> "2020-11-16 12:50:03.726297"
# bpaTools.getNow().strftime("%Y%m%d-%H%M%S") -> Specific format "20201208-164204"
def getNow() -> datetime:
return datetime.datetime.now()
#Sample bpaTools.getNowISO() -> ISO Format "2020-11-16T12:45:29.405387"
def getNowISO() -> str:
return datetime.datetime.now().isoformat()
#Samples
# bpaTools.getDateNow() -> "20201116"
# bpaTools.getDateNow(frmt="ymd") -> "20201116"
# bpaTools.getDateNow(frmt="dmy") -> "16112020"
# bpaTools.getDateNow(frmt="dmy") -> "16112020"
# bpaTools.getDateNow(sep="/", frmt="dmy") -> "16/11/2020"
# bpaTools.getDateNow(frmt="%Y%m%d-%H%M%S") -> Specific format "20201208-164204"
def getDateNow(sep:str="", frmt="ymd") -> str:
return getDate(datetime.datetime.now(), sep=sep, frmt=frmt)
#Samples
# bpaTools.getDate(datetime.datetime.now()) -> "20201116"
# bpaTools.getDate(datetime.datetime.now(), frmt="ymd") -> "20201116"
# bpaTools.getDate(datetime.datetime.now(), frmt="dmy") -> "16112020"
# bpaTools.getDate(datetime.datetime.now(), frmt="dmy") -> "16112020"
# bpaTools.getDate(datetime.datetime.now(), sep="/", frmt="dmy") -> "16/11/2020"
# bpaTools.getDate(datetime.datetime.now(), "%Y%m%d-%H%M%S") -> Specific format "20201208-164204"
def getDate(date:datetime, sep:str="", frmt="ymd") -> str:
if frmt=="ymd":
sFrmt = "%Y" + sep + "%m" + sep + "%d"
elif frmt=="dmy":
sFrmt = "%d" + sep + "%m" + sep + "%Y"
else:
sFrmt = frmt #Specific format
return date.strftime(sFrmt)
#Samples
# theDate = datetime.datetime.now() #or datetime.date.today() or datetime(2021,2,16)
# print("Now ", addDatetime(theDate))
# print("minutes= -1", addDatetime(theDate, minutes=-1))
# print("minutes=+10", addDatetime(theDate, minutes=+10))
# print(" hours= -1", addDatetime(theDate, hours=-1))
# print(" hours= +2", addDatetime(theDate, hours=+2))
# print(" days= -1", addDatetime(theDate, days=-1))
# print(" days= +2", addDatetime(theDate, days=+2))
# print(" days=+31", addDatetime(theDate, days=+31))
# print(" weeks= -1", addDatetime(theDate, weeks=-1))
# print(" weeks= +2", addDatetime(theDate, weeks=+2))
# print(" months= -1", addDatetime(theDate, months=-1))
# print(" months= +2", addDatetime(theDate, months=+2))
# print(" months=+12", addDatetime(theDate, months=+12))
# print(" months=+24", addDatetime(theDate, months=+24))
# print(" years= -1" , addDatetime(theDate, years=-1))
# print(" years= +2" , addDatetime(theDate, years=+2))
# print("last day of month " , addDatetime(theDate, day=31))
# print("last day of last month " , addDatetime(addDatetime(theDate, months=-1), day=31))
# print("last day of next month " , addDatetime(addDatetime(theDate, months=+1), day=31))
# print("previous day of next month" , addDatetime(addDatetime(theDate, months=+1), days=-1))
# #Use datetime.strftime("%Y/%d/%m") -> datetime.datetime(2021, 1, 28).strftime("%Y/%m/%d")
def addDatetime(srcdate:datetime, minutes:int=0, hours:int=0, day:int=0, days:int=0, weeks:int=0, months:int=0, years:int=0) -> datetime:
ret:datetime = srcdate
if minutes: ret += datetime.timedelta(minutes=minutes)
if hours: ret += datetime.timedelta(hours=hours)
if day: ret += relativedelta(day=day)
if days: ret += datetime.timedelta(days=days)
if weeks: ret += datetime.timedelta(weeks=weeks)
if months: ret += relativedelta(months=months)
if years: ret += relativedelta(years=years)
return ret
def getVersionFile(versionPath:str="", versionFile:str="_version.py") -> str:
fileContent = open(versionPath + versionFile, "rt").read()
token = r"^__version__ = ['\"](.*)['\"]" #old - r"^__version__ = ['\"]([^'\"]*)['\"]"
oFound = re.search(token, fileContent, re.M)
if oFound:
sVersion = oFound.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (versionFile,))
return sVersion
def getParamTxtFile(sFile:str, paramVar:str, valueType:str="str") -> str:
fileContent = open(sFile, "rt").read()
token = r"^" + paramVar
if valueType=="str":
token += " = ['\"](.*)['\"]" #get __version__ = "2.1.3"
elif valueType=="lst":
token += " = ['](.*)[']" #get __webPublicationDates__ = '["02/01/2014","28/01/2014", ... , "14/06/2014"]'
oFound = re.search(token, fileContent, re.M)
if oFound:
sParam = oFound.group(1)
else:
raise RuntimeError("Unable to find param:{0} in file:{1}".format(paramVar, sFile))
return sParam
def getParamJsonFile(sFile:str, paramVar:str):
fileContent:dict = readJsonFile(sFile)
if paramVar in fileContent:
oRet = fileContent[paramVar]
else:
raise RuntimeError("Unable to find param:{0} in file:{1}".format(paramVar, sFile))
return oRet
def readJsonFile(sFile:str) -> dict:
if os.path.exists(sFile):
jsonFile = open(sFile, "rt", encoding="utf-8")
jdata = json.load(jsonFile)
jsonFile.close()
else:
jdata = {}
return jdata
def writeJsonFile(sFile:str, jdata:dict) -> None:
jsonFile = open(sFile, "w", encoding="utf-8")
json.dump(jdata, jsonFile, ensure_ascii=False)
jsonFile.close()
return
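# Minimal usage sketch (added for illustration): round-tripping a small dict through
# the JSON helpers above. The file name and contents are placeholders.
def _exampleJsonRoundTrip() -> dict:
    writeJsonFile("example_settings.json", {"lang": "fr", "level": 3})
    return readJsonFile("example_settings.json")    #-> {"lang": "fr", "level": 3}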
def writeTextFile(sFile:str, stext:str, sencoding:str="cp1252"):
textFile = open(sFile, "w", encoding=sencoding, errors="replace")
textFile.write(stext)
textFile.close()
return
### Default file encoding
def defaultEncoding() -> str:
return encodingUTF8()
def encodingUTF8() -> str:
return 'utf-8'
### Create folder if not exists
def createFolder(path:str) -> None:
try:
if not os.path.exists(path):
os.mkdir(path)
except OSError as e:
print("Erreur en création du dossier {0}. ".format(e))
return
### Remove file if exists
def deleteFile(file:str) -> None:
try:
if os.path.exists(file):
os.remove(file)
except OSError as e:
print("Erreur en supression du fichier. {0}".format(e))
return
### Collect Command-Line Options in a dictionary
# Samples:
# python aixmParser.py -h
# argv = ['aixmParser.py', '-h']
# opts = {'-h': '-h'}
# python aixmParser.py -json srcfile
# argv = ['aixmParser.py', 'srcfile', '-json']
# opts = {'-json': '-json'}
#   python aixmParser.py srcfile -json -Openair -CleanLog
#       argv = ['aixmParser.py', 'srcfile', '-json', '-Openair', '-CleanLog']
#       opts = {'-json': '-json', '-Openair': '-Openair', '-CleanLog': '-CleanLog'}
def getCommandLineOptions(argv) -> dict:
#print(argv)
opts = dict()
while argv:
if argv[0][0]=='-': #Found a "-name value" pair
aSplit = argv[0].split("=") #find combined attribute
if len(aSplit)==2:
opts[aSplit[0]] = aSplit[1]
else:
opts[argv[0]] = argv[0] #Add key and value to the dictionary
argv = argv[1:] #Reduce the argument list
#print(opts)
return opts
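# Minimal usage sketch (added for illustration): parsing a fabricated argv list with
# getCommandLineOptions(). The option names below are illustrative only.
def _exampleCommandLineOptions() -> dict:
    argv = ["aixmParser.py", "srcfile", "-json", "-Lang=fr"]
    return getCommandLineOptions(argv)    #-> {'-json': '-json', '-Lang': 'fr'}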
if __name__ == '__main__':
#sFile = __file__
#dCre = getFileCreationDate(sFile)
#dMod = getFileModificationDate(sFile)
#print(sFile, "\n", dCre, "\n", dMod, "\n", getDate(dMod))
"""
print(addMonths(datetime.date.today(), -1))
print(addMonths(datetime.date.today(), +0))
print(addMonths(datetime.date.today(), +1))
print(addMonths(datetime.date.today(), +2))
print(addMonths(datetime.date.today(), +3))
print(addMonths(datetime.date.today(), 12))
print(addMonths(datetime.date.today(), 25))
print(datetime.date.today() + datetime.timedelta(days=-1))
print(datetime.date.today() + datetime.timedelta(days=0))
print(datetime.date.today() + datetime.timedelta(days=1))
print(datetime.date.today() + datetime.timedelta(days=10))
print(datetime.date.today() + datetime.timedelta(days=20))
print(datetime.date.today() + datetime.timedelta(days=31))
"""
theDate = datetime.datetime.now() #or datetime.date.today() or datetime(2021,2,16)
print("Now ", addDatetime(theDate))
print("minutes= -1", addDatetime(theDate, minutes=-1))
print("minutes=+10", addDatetime(theDate, minutes=+10))
print(" hours= -1", addDatetime(theDate, hours=-1))
print(" hours= +2", addDatetime(theDate, hours=+2))
print(" days= -1", addDatetime(theDate, days=-1))
print(" days= +2", addDatetime(theDate, days=+2))
print(" days=+31", addDatetime(theDate, days=+31))
print(" weeks= -1", addDatetime(theDate, weeks=-1))
print(" weeks= +2", addDatetime(theDate, weeks=+2))
print(" months= -1", addDatetime(theDate, months=-1))
print(" months= +2", addDatetime(theDate, months=+2))
print(" months=+12", addDatetime(theDate, months=+12))
print(" months=+24", addDatetime(theDate, months=+24))
print(" years= -1" , addDatetime(theDate, years=-1))
print(" years= +2" , addDatetime(theDate, years=+2))
print("last day of month " , addDatetime(theDate, day=31))
print("last day of last month " , addDatetime(addDatetime(theDate, months=-1), day=31))
print("last day of next month " , addDatetime(addDatetime(theDate, months=+1), day=31))
print("previous day of next month" , addDatetime(addDatetime(theDate, months=+1), days=-1))
print()
#Calculate the last day of last month
theDate = theDate + relativedelta(months=-1)
theDate = theDate + relativedelta(day=31)
#Generate a list of the last day for 9 months from the calculated date
x = list(rrule(freq=MONTHLY, count=9, dtstart=theDate, bymonthday=(-1,)))
print("Last day")
for ld in x:
print(ld)
#Generate a list of the 2nd Tuesday in each of the next 9 months from the calculated date
print("\n2nd Tuesday")
x = list(rrule(freq=MONTHLY, count=9, dtstart=theDate, byweekday=TU(2)))
for tuesday in x:
print(tuesday)
"""
print(getContentOf('{"1":"Value"}', "{", "}", bRetSep=True))
print(getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")"))
print(getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", " (", ")"))
print(getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")", iNoInst=1))
print(getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")", iNoInst=2))
print(getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")", iNoInst=3))
print(getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")", iNoInst=5))
print(getContentOf("FFVP-Prot RMZ Selestat App(120.700) (GLIDER)", "(", ")", iNoInst=10))
"""
|
StarcoderdataPython
|
4863446
|
import os
import pickle
import sys
from typing import List
import numpy as np
import pandas as pd
os.environ["OPENBLAS_NUM_THREADS"] = "1"
sys.path.append("../../")
from advertising.data_structure.Campaign import Campaign
from advertising.optimizers.CampaignOptimizer import CampaignOptimizer
from environments.Settings.EnvironmentManager import EnvironmentManager
from environments.Settings.Scenario import Scenario
from utils.folder_management import handle_folder_creation
from utils.stats.StochasticFunction import IStochasticFunction
FOLDER_RESULT = "../../report/csv/advertising_2/"
CSV_CUM_REGRET = True
SCENARIO_NAME = "linear_scenario"
N_ARMS_ADV = 11
BUDGET = 1000
PRICE_PLOT_N_POINTS = 100
ADS_PLOT_N_POINTS = 100
MIN_PRICE = 15
MAX_PRICE = 25
FIXED_COST = 12
REWARD_FILE_LIST = ["../../report/project_point_2/Jun30_09-05-01/reward_GPBandit.pkl",
"../../report/project_point_2/Jun30_09-19-33/reward_GaussianBandit.pkl"]
BANDIT_NAME = ["GPBandit", "GaussianBandit"]
n_bandit = len(BANDIT_NAME)
_, folder_path_with_date = handle_folder_creation(result_path=FOLDER_RESULT, retrieve_text_file=False)
assert len(REWARD_FILE_LIST) == len(BANDIT_NAME), "Number of bandits and file list does not match"
# Reading file list
total_reward_list = []
for bandit_idx, _ in enumerate(BANDIT_NAME):
    rewards = []
    with (open(REWARD_FILE_LIST[bandit_idx], "rb")) as openfile:
while True:
try:
rewards.append(pickle.load(openfile))
except EOFError:
break
rewards = rewards[0]
total_reward_list.append(rewards)
# Compute N-days
n_days = len(total_reward_list[0][0][0])
# Compute mean and standard deviation for each day
mean_reward = np.zeros(shape=(n_bandit + 1, n_days))
std_reward = np.zeros(shape=(n_bandit + 1, n_days))
mean_reward[-1] = np.arange(n_days) + 1
std_reward[-1] = np.arange(n_days) + 1
for bandit_idx in range(len(BANDIT_NAME)):
n_exp = len(total_reward_list[bandit_idx])
for curr_day in range(n_days):
daily_values = []
for exp in range(n_exp):
daily_values.append(total_reward_list[bandit_idx][exp][0][curr_day])
mean_reward[bandit_idx][curr_day] = np.array(daily_values).mean()
std_reward[bandit_idx][curr_day] = np.array(daily_values).std()
mean_df = pd.DataFrame(mean_reward.transpose())
std_df = pd.DataFrame(std_reward.transpose())
for bandit_idx, name in enumerate(BANDIT_NAME):
mean_df.rename(columns={bandit_idx: "mean_reward_{}".format(name)}, inplace=True)
mean_df.rename(columns={n_bandit: "day"}, inplace=True)
for bandit_idx, name in enumerate(BANDIT_NAME):
std_df.rename(columns={bandit_idx: "mean_std_{}".format(name)}, inplace=True)
std_df.rename(columns={n_bandit: "day"}, inplace=True)
total_df = mean_df.merge(std_df, left_on="day", right_on="day")
total_df.to_csv("{}instant_reward.csv".format(folder_path_with_date), index=False)
# Load the mean scenario and the number-of-clicks functions of its first phase
mean_scenario: Scenario = EnvironmentManager.load_scenario(SCENARIO_NAME, get_mean_function=True)
click_function_list: List[IStochasticFunction] = mean_scenario.get_phases()[0].get_n_clicks_function()
# Optimal point computation
campaign = Campaign(n_sub_campaigns=mean_scenario.get_n_subcampaigns(), cum_budget=BUDGET, n_arms=N_ARMS_ADV)
for i in range(campaign.get_n_sub_campaigns()):
sub_campaign_values = [click_function_list[i].draw_sample(b) for b in np.linspace(0, BUDGET, N_ARMS_ADV)]
campaign.set_sub_campaign_values(i, sub_campaign_values)
max_clicks, best_budgets = CampaignOptimizer.find_best_budgets(campaign)
# # Compute regret
# if CSV_CUM_REGRET:
# mean_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
# std_regret_data = np.zeros(shape=(n_bandit + 1, n_days))
# mean_regret_data[-1] = np.arange(n_days) + 1
# std_regret_data[-1] = np.arange(n_days) + 1
#
# for bandit_idx in range(len(BANDIT_NAME)):
# n_exp = len(total_reward_list[bandit_idx])
#
# for curr_day in range(n_days):
# daily_values = []
# for exp in range(n_exp):
# daily_values.append(max_clicks - total_reward_list[bandit_idx][exp][0][curr_day])
#
# mean_regret_data[bandit_idx][curr_day] = np.array(daily_values).mean()
# std_regret_data[bandit_idx][curr_day] = np.array(daily_values).std()
#
# mean_df = pd.DataFrame(np.cumsum(mean_regret_data).transpose())
# std_df = pd.DataFrame(std_regret_data.transpose())
#
# for bandit_idx, name in enumerate(BANDIT_NAME):
# mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
#
# mean_df.rename(columns={n_bandit: "day"}, inplace=True)
# for bandit_idx, name in enumerate(BANDIT_NAME):
# std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
# std_df.rename(columns={n_bandit: "day"}, inplace=True)
#
# total_df = mean_df.merge(std_df, left_on="day", right_on="day")
# total_df.to_csv("{}daily_discrete_regret.csv".format(folder_path_with_date), index=False)
if CSV_CUM_REGRET:
mean_data = np.zeros(shape=(n_bandit + 1, n_days))
std_data = np.zeros(shape=(n_bandit + 1, n_days))
mean_data[-1] = np.arange(n_days)
std_data[-1] = np.arange(n_days)
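    # Cumulative pseudo-regret: for each day, compare the clairvoyant optimum
    # ((day + 1) * max_clicks) with the reward accumulated so far, then average
    # over experiments (mean) and report the spread (std).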
for bandit_idx in range(n_bandit):
n_exp = len(total_reward_list[bandit_idx])
values = [[] for _ in range(n_days)]
for exp in range(n_exp):
curr_exp_value = 0
for day in range(n_days):
curr_exp_value += total_reward_list[bandit_idx][exp][0][day]
values[day].append((day + 1) * max_clicks - curr_exp_value)
for day in range(n_days):
mean_data[bandit_idx][day] = np.array(values[day]).mean()
std_data[bandit_idx][day] = np.array(values[day]).std()
mean_df = pd.DataFrame(mean_data.transpose())
std_df = pd.DataFrame(std_data.transpose())
for bandit_idx, name in enumerate(BANDIT_NAME):
mean_df.rename(columns={bandit_idx: "mean_regret_{}".format(name)}, inplace=True)
mean_df.rename(columns={n_bandit: "day"}, inplace=True)
for bandit_idx, name in enumerate(BANDIT_NAME):
std_df.rename(columns={bandit_idx: "std_regret_{}".format(name)}, inplace=True)
std_df.rename(columns={n_bandit: "day"}, inplace=True)
total_df = mean_df.merge(std_df, left_on="day", right_on="day")
total_df.to_csv("{}discrete_cum_regret.csv".format(folder_path_with_date), index=False)
|
StarcoderdataPython
|
3311101
|
<filename>datamessage.py<gh_stars>1-10
import json
import telegram
import sys
def notify_ending(message):
token = 'XXXX'
chat_id = 'XXXX'
bot = telegram.Bot(token=token)
bot.sendMessage(chat_id=chat_id, text=message)
f = open('data_file.json')
data = json.load(f)
length = len(data)
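# Only the first five entries of the price feed are checked against the user's input symbol.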
for i in range(0,5):
name = data.get('data')[i].get('name')
user_input= sys.argv[1]
if user_input == name:
price = data.get('data')[i].get('quote').get('USD').get('price')
        percentage24 = data.get('data')[i].get('quote').get('USD').get('percent_change_24h')
        percentage1 = data.get('data')[i].get('quote').get('USD').get('percent_change_1h')
        message = str(str(name) + ' price: ' + str(price) + '\nLast hour: ' + str(percentage1) +'%'+'\nLast 24 hours: ' + str(percentage24) +'%')
print(message)
notify_ending(message)
quit()
|
StarcoderdataPython
|
3584350
|
"""
utils.py
Contains helper functions.
"""
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
EPSILON_DOUBLE = torch.tensor(2.220446049250313e-16, dtype=torch.float64)
EPSILON_SINGLE = torch.tensor(1.19209290E-07, dtype=torch.float32)
SQRT_TWO_DOUBLE = torch.tensor(math.sqrt(2), dtype=torch.float64)
SQRT_TWO_SINGLE = SQRT_TWO_DOUBLE.to(torch.float32)
def str2bool(v):
v = v.lower()
if v in ('yes', 'true', 't', '1'):
return True
elif v in ('no', 'false', 'f', '0'):
return False
raise ValueError('Boolean argument needs to be true or false. '
'Instead, it is %s.' % v)
def get_basename_without_ext(paths):
"""Return array of base names."""
return np.array([os.path.splitext(os.path.basename(p))[0] for p in paths])
def create_dir_if_necessary(path, is_dir=False):
"""Create directory to path if necessary."""
parent_dir = get_parent_dir(path) if not is_dir else path
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
def get_parent_dir(path):
"""Return parent directory of path."""
return os.path.abspath(os.path.join(path, os.pardir))
def write_imdb(split_dir, gt_file, out_file):
"""
Write an imdb file containing paths and labels.
Args:
split_dir: String, path to image directory for dataset split.
gt_file: String, path to ground truth file with relative image names.
out_file: String, path of output file.
"""
f = open(gt_file)
rel_paths = []
labels = []
for line in f.readlines():
s = line.split()
rel_paths.append(s[0])
labels.append(int(s[1]))
abs_paths = []
for p in rel_paths:
abs_paths.append(os.path.join(split_dir, p))
out_f = open(out_file, 'w')
for i in range(len(abs_paths)):
out_f.write('%s %d\n' % (abs_paths[i], labels[i]))
out_f.close()
def read_imdb(imdb_file):
"""Load (paths, labels) from imdb file."""
f = open(imdb_file)
paths = []
labels = []
for line in f.readlines():
s = line.split()
paths.append(s[0])
labels.append(int(s[1]))
return (np.array(paths), np.array(labels))
def save_dummy_bbox_file(out_file = 'data/dummy_bb_file.txt'):
"""Save a dummy bounding box file."""
_, labels = read_imdb('data/val.txt')
num_examples = len(labels)
synsets = np.loadtxt('data/synsets.txt', dtype='str', delimiter='\t')
labels_as_synsets = np.array([synsets[l] for l in labels])[:, None]
assert(labels_as_synsets.shape[0] == num_examples)
bbs = np.random.randint(50, 100, size=(num_examples, 2))
deltas = np.random.randint(1, 100, size=(num_examples, 2))
labels_and_bbs = np.hstack((labels_as_synsets,
bbs[:, 0, None],
bbs[:, 1, None],
bbs[:, 0, None] + deltas[:, 0, None],
                                bbs[:, 1, None] + deltas[:, 1, None]))
np.savetxt(out_file, labels_and_bbs, fmt='%s %s %s %s %s')
def imsmooth(x, sigma, stride=1, padding=0, padding_mode='constant', padding_value=0):
r"""Apply a Gaussian filter to a batch of 2D images
Args:
x (Tensor): :math:`N\times C\times H\times W` image tensor.
sigma (float): standard deviation of the Gaussian kernel.
stride (int, optional): subsampling factor (default: 1).
padding (int, optional): extra padding (default: 0).
padding_mode (str, optional): `constant`, `reflect` or `replicate` (default: `constant`).
padding_value (float, optional): constant value for the `constant` padding mode (default: 0).
Returns:
Tensor: :math:`N\times C\times H\times W` tensor with the smoothed images.
"""
assert sigma >= 0
W = math.ceil(4 * sigma)
filt = torch.arange(-W, W+1, dtype=torch.float32, device=x.device) / \
(SQRT_TWO_SINGLE * sigma + EPSILON_SINGLE)
filt = torch.exp(- filt*filt)
filt /= torch.sum(filt)
num_channels = x.shape[1]
W = W + padding
if padding_mode == 'constant' and padding_value == 0:
P = W
y = x
else:
# pad: (before, after) pairs starting from last dimension backward
y = F.pad(x, (W, W, W, W), mode=padding_mode, value=padding_value)
P = 0
padding = 0
y = F.conv2d(y, filt.reshape((1, 1, -1, 1)).expand(num_channels, -1, -1, -1),
padding=(P, padding), stride=(stride, 1), groups=num_channels)
y = F.conv2d(y, filt.reshape((1, 1, 1, -1)).expand(num_channels, -1, -1, -1),
padding=(padding, P), stride=(1, stride), groups=num_channels)
return y
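# Minimal usage sketch (added for illustration, not part of the original module):
# smoothing a random batch of single-channel images with imsmooth. The shapes and
# sigma are placeholders; with stride=1 and padding=0 the spatial size is preserved.
def _example_imsmooth():
    x = torch.rand(2, 1, 32, 32)
    y = imsmooth(x, sigma=2.0)
    return y.shape  # torch.Size([2, 1, 32, 32])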
def normalize(arr):
"""Normalize array values to be between 0 and 1
Args:
arr (numpy array): non-normalized array
Return:
normalized array
"""
if (arr == 0).all() or (arr == 1).all():
return arr
min_value = np.min(arr)
max_value = np.max(arr)
norm_arr = (arr - min_value) / (max_value - min_value)
return norm_arr
|
StarcoderdataPython
|
1872914
|
<gh_stars>0
N, K, Q = [int(x) for x in input().split()]
seikai = [0] * N
for _ in range(Q):
a = int(input())
seikai[a-1] += 1
for i in range(N):
if K - (Q - seikai[i]) > 0:
print("Yes")
else:
print("No")
|
StarcoderdataPython
|
4847300
|
from collections import deque
def build_array_from_tree(root):
    "Convert a tree into an array of levels (flatten a tree breadth-first)"
    current_stack = []
    output = []
    next_queue = deque()
    current_stack.append(root)
    # Process one level at a time: empty the current stack into the current level,
    # queue up each node's children, then refill the stack from the queue.
    while len(current_stack) > 0 or len(next_queue) > 0:
        output.append([])
        while len(current_stack) > 0:
            item = current_stack.pop()
            output[-1].append(item)
            for child in item.children():
                next_queue.append(child)
        while len(next_queue) > 0:
            current_stack.append(next_queue.popleft())
    return output
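# Minimal usage sketch (added for illustration): a tiny node type exposing the
# children() method assumed by build_array_from_tree, flattened level by level.
class _ExampleNode:
    def __init__(self, value, children=None):
        self.value = value
        self._children = children or []
    def children(self):
        return self._children
def _example_build_array():
    root = _ExampleNode("root", [_ExampleNode("a"), _ExampleNode("b")])
    levels = build_array_from_tree(root)
    # levels[0] holds the root node, levels[1] holds its two children
    return levels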
|
StarcoderdataPython
|
11284926
|
'''
Name: time_tracker.py
Author: <NAME>
Date: 28/05/2020
Version: 0.0
DESCRIPTION
---------------
Simple time tracker script to track your time
'''
import argparse
import datetime
import json
import os
import time
from PySide2.QtWidgets import *
import psutil
import win32gui
import win32process
class TimeTracker:
ui: QWidget
def __init__(self):
self.windows = {}
self.windows_table = []
self.i = 0
self.ui = None
today_date = datetime.datetime.today().strftime("%a_%d_%b_%Y")
self.document_path = os.path.join(os.environ["USERPROFILE"], "Documents", "time_track",
today_date)
if not os.path.isdir(self.document_path):
print("[+] Creating folder {}".format(self.document_path))
os.makedirs(self.document_path)
self.data_out_json = ""
def __print_seconds(self, seconds):
minutes, secs = divmod(seconds, 60)
hour, minutes = divmod(minutes, 60)
return "%d:%02d:%02d" % (hour, minutes, secs)
def start_tracking_ui(self, task_name="Default"):
# let's check if a json file already exists
self.i = 0
self.data_out_json = os.path.join(self.document_path, task_name + '.json')
if os.path.isfile(self.data_out_json):
print("[+] Found an existing {}".format(self.data_out_json))
print("[+] Loading previous time track data")
# If the file exists we want to load the dictionary data instead of starting from scratch
try:
with open(self.data_out_json) as f:
self.windows = json.load(f)
except ValueError:
print("[-] Problem reading {}. Storing json into {}_temp".format(self.data_out_json, task_name))
self.data_out_json = os.path.join(self.document_path, task_name + '_temp.json')
else:
print("[+] Creating a new time track data instance")
while self.i <= 1:
time.sleep(1)
w = win32gui
w.GetWindowText(w.GetForegroundWindow())
pid = win32process.GetWindowThreadProcessId(w.GetForegroundWindow())
try:
process_name = psutil.Process(pid[-1]).name()
except psutil.NoSuchProcess as e:
process_name = "Unknown process:{}".format(pid[-1])
try:
time_spent = self.windows[process_name] + 1
except KeyError as e:
# no value in the dictionary
time_spent = 0
self.windows[process_name] = time_spent
total_time_seconds = 0
print("[+] For task {}".format(task_name))
for key, value in self.windows.items():
print("[+]\t\t {} : {}".format(key, self.__print_seconds(value)))
total_time_seconds += value
self.ui.lcdNumber.display(self.__print_seconds(total_time_seconds))
print("[+] Total time spent :{}".format(self.__print_seconds(total_time_seconds)))
            # dump data every minute, to avoid losing info
if total_time_seconds % 60 == 0:
self.__dump_data()
def start_tracking(self, task_name="Default"):
# let's check if a json file already exists
self.data_out_json = os.path.join(self.document_path, task_name + '.json')
if os.path.isfile(self.data_out_json):
print("[+] Found an existing {}".format(self.data_out_json))
print("[+] Loading previous time track data")
# If the file exists we want to load the dictionary data instead of starting from scratch
try:
with open(self.data_out_json) as f:
self.windows = json.load(f)
except ValueError:
print("[-] Problem reading {}. Storing json into {}_temp".format(self.data_out_json, task_name))
self.data_out_json = os.path.join(self.document_path, task_name + '_temp.json')
else:
print("[+] Creating a new time track data instance")
while self.i <= 1:
time.sleep(1)
w = win32gui
w.GetWindowText(w.GetForegroundWindow())
pid = win32process.GetWindowThreadProcessId(w.GetForegroundWindow())
try:
process_name = psutil.Process(pid[-1]).name()
except psutil.NoSuchProcess as e:
process_name = "Unknown process:{}".format(pid[-1])
try:
time_spent = self.windows[process_name] + 1
except KeyError as e:
# no value in the dictionary
time_spent = 0
self.windows[process_name] = time_spent
total_time_seconds = 0
print("[+] For task {}".format(task_name))
for key, value in self.windows.items():
print("[+]\t\t {} : {}".format(key, self.__print_seconds(value)))
total_time_seconds += value
print("[+] Total time spent :{}".format(self.__print_seconds(total_time_seconds)))
            # dump data every minute, to avoid losing info
if total_time_seconds % 60 == 0:
self.__dump_data()
def __dump_data(self):
print("[+] Saving data in {}".format(self.data_out_json))
with open(self.data_out_json, "w") as out_file:
json.dump(self.windows, out_file, indent=1)
def pause_tracking_ui(self):
pass
def stop_tracking_ui(self):
self.i = 2
self.__dump_data()
def stop_tracking(self):
print("[-] Received keyboard interrupt")
print("[-] Time tracker interrupted")
self.i = 2 # should interrupt the tracking
self.__dump_data()
y = "y"
n = "n"
# Show the task available
# Create a new task
print("[+] Do you want to start track a new task? [y][n]")
user_input = input()
if user_input.lower() == "y":
print("[+] Insert the name of the new task to track:")
user_input = input()
self.i = 0
self.windows = {}
try:
self.start_tracking(user_input)
except KeyboardInterrupt:
self.stop_tracking()
else:
print("[+] Do you want to continue an existing task? [y][n]")
user_input = input()
if user_input.lower() == "y":
print("[-] Task available")
# Create a dictionary of possible task to switch in
available_tasks = {}
for dirpath, dirnames, filenames in os.walk(self.document_path):
count = 0
for json_file in filenames:
if json_file.endswith(".json"):
print("\t[{}] {}".format(count, json_file.replace(".json", "")))
available_tasks[count] = json_file.replace(".json", "")
count += 1
print("[-] Select the number of the task you want to switch back")
user_input = input()
try:
self.i = 0
self.windows = {}
try:
self.start_tracking(available_tasks[int(user_input)])
except KeyboardInterrupt:
self.stop_tracking()
except KeyError:
print("[-] Error: couldnt find any task with the input:{}".format(user_input))
else:
print("[+] Closing time tracker")
exit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Time tracker built by <NAME>")
parser.add_argument('task_name', type=str)
args = parser.parse_args()
t_tracker = TimeTracker()
try:
t_tracker.start_tracking(args.task_name)
except KeyboardInterrupt:
t_tracker.stop_tracking()
|
StarcoderdataPython
|
11316266
|
<reponame>RileyWClarke/flarubin
import numpy as np
import pandas as pd
from .baseSlicer import BaseSlicer
from rubin_sim.maf.plots.moPlotters import MetricVsH, MetricVsOrbit
from .orbits import Orbits
__all__ = ['MoObjSlicer']
class MoObjSlicer(BaseSlicer):
""" Slice moving object _observations_, per object and optionally clone/per H value.
Iteration over the MoObjSlicer will go as:
* iterate over each orbit;
* if Hrange is not None, for each orbit, iterate over Hrange.
Parameters
----------
Hrange : numpy.ndarray or None
The H values to clone the orbital parameters over. If Hrange is None, will not clone orbits.
"""
def __init__(self, Hrange=None, verbose=True, badval=0):
super(MoObjSlicer, self).__init__(verbose=verbose, badval=badval)
self.Hrange = Hrange
self.slicer_init = {'Hrange': Hrange, 'badval': badval}
# Set default plotFuncs.
self.plotFuncs = [MetricVsH(),
MetricVsOrbit(xaxis='q', yaxis='e'),
MetricVsOrbit(xaxis='q', yaxis='inc')]
def setupSlicer(self, orbitFile, delim=None, skiprows=None, obsFile=None):
"""Set up the slicer and read orbitFile and obsFile from disk.
Sets self.orbits (with orbit parameters), self.allObs, and self.obs
self.orbitFile and self.obsFile
Parameters
----------
orbitFile : str
The file containing the orbit information.
This is necessary, in order to be able to generate plots.
obsFile : str, optional
The file containing the observations of each object, optional.
If not provided (default, None), then the slicer will not be able to 'slice', but can still plot.
"""
self.readOrbits(orbitFile, delim=delim, skiprows=skiprows)
if obsFile is not None:
self.readObs(obsFile)
else:
self.obsFile = None
self.allObs = None
self.obs = None
# Add these filenames to the slicer init values, to preserve in output files.
self.slicer_init['orbitFile'] = self.orbitFile
self.slicer_init['obsFile'] = self.obsFile
def readOrbits(self, orbitFile, delim=None, skiprows=None):
# Use sims_movingObjects to read orbit files.
orb = Orbits()
orb.readOrbits(orbitFile, delim=delim, skiprows=skiprows)
self.orbitFile = orbitFile
self.orbits = orb.orbits
# Then go on as previously. Need to refactor this into 'setupSlicer' style.
self.nSso = len(self.orbits)
self.slicePoints = {}
self.slicePoints['orbits'] = self.orbits
# And set the slicer shape/size.
if self.Hrange is not None:
self.shape = [self.nSso, len(self.Hrange)]
self.slicePoints['H'] = self.Hrange
else:
self.shape = [self.nSso, 1]
self.slicePoints['H'] = self.orbits['H']
# Set the rest of the slicePoint information once
self.nslice = self.shape[0] * self.shape[1]
def readObs(self, obsFile):
"""Read observations of the solar system objects (such as created by sims_movingObjects).
Parameters
----------
obsFile: str
The file containing the observation information.
"""
# For now, just read all the observations (should be able to chunk this though).
self.allObs = pd.read_csv(obsFile, delim_whitespace=True, comment='#')
self.obsFile = obsFile
# We may have to rename the first column from '#objId' to 'objId'.
if self.allObs.columns.values[0].startswith('#'):
newcols = self.allObs.columns.values
newcols[0] = newcols[0].replace('#', '')
self.allObs.columns = newcols
if 'velocity' not in self.allObs.columns.values:
self.allObs['velocity'] = np.sqrt(self.allObs['dradt']**2 + self.allObs['ddecdt']**2)
if 'visitExpTime' not in self.allObs.columns.values:
self.allObs['visitExpTime'] = np.zeros(len(self.allObs['objId']), float) + 30.0
# If we created intermediate data products by pandas, we may have an inadvertent 'index'
# column. Since this creates problems later, drop it here.
if 'index' in self.allObs.columns.values:
self.allObs.drop('index', axis=1, inplace=True)
self.subsetObs()
def subsetObs(self, pandasConstraint=None):
"""
Choose a subset of all the observations, such as those in a particular time period.
"""
if pandasConstraint is None:
self.obs = self.allObs
else:
self.obs = self.allObs.query(pandasConstraint)
def _sliceObs(self, idx):
"""Return the observations of a given ssoId.
For now this works for any ssoId; in the future, this might only work as ssoId is
progressively iterated through the series of ssoIds (so we can 'chunk' the reading).
Parameters
----------
idx : integer
The integer index of the particular SSO in the orbits dataframe.
"""
# Find the matching orbit.
orb = self.orbits.iloc[idx]
# Find the matching observations.
if self.obs['objId'].dtype == 'object':
obs = self.obs.query('objId == "%s"' %(orb['objId']))
else:
obs = self.obs.query('objId == %d' %(orb['objId']))
# Return the values for H to consider for metric.
if self.Hrange is not None:
Hvals = self.Hrange
else:
Hvals = np.array([orb['H']], float)
# Note that ssoObs / obs is a recarray not Dataframe!
# But that the orbit IS a Dataframe.
return {'obs': obs.to_records(),
'orbit': orb,
'Hvals': Hvals}
def __iter__(self):
"""
Iterate through each of the ssoIds.
"""
self.idx = 0
return self
def __next__(self):
"""
Returns result of self._getObs when iterating over moSlicer.
"""
if self.idx >= self.nSso:
raise StopIteration
idx = self.idx
self.idx += 1
return self._sliceObs(idx)
def __getitem__(self, idx):
# This may not be guaranteed to work if/when we implement chunking of the obsfile.
return self._sliceObs(idx)
def __eq__(self, otherSlicer):
"""
Evaluate if two slicers are equal.
"""
result = False
if isinstance(otherSlicer, MoObjSlicer):
if otherSlicer.orbitFile == self.orbitFile:
if otherSlicer.obsFile == self.obsFile:
if np.array_equal(otherSlicer.slicePoints['H'], self.slicePoints['H']):
result = True
return result
|
StarcoderdataPython
|
5043371
|
from .loadxml import battleStart, battleWrite
from .interpret import importModule, importFunction
from .utilities import *
from .errors import *
from .instance import Instance
|
StarcoderdataPython
|
6419829
|
<reponame>rbarzic/ALIGN-public
import json
import pathlib
import re
from pprint import pformat
from align.cell_fabric import transformation
from Intel.Intel_P1222p2_PDK.IntelP1222p2Canvas import IntelP1222p2Canvas
if __name__ == "__main__":
with open("comparator.json", "rt") as fp:
d = json.load(fp)
skip_layers = set( ["boundary", "diearea", "cellarea", "ndiff", "pdiff", "nwell", "poly", "gcn"])
layer_tbl = { "diffcon": "Diffcon",
"polycon": "Polycon",
"nwell": "Nwell",
"metal1": "M1",
"metal2": "M2",
"metal3": "M3",
"via0": "V0",
"via1": "V1",
"via2": "V2"}
p = re.compile( "^(.*)_gr$")
def s( r):
assert all( v%10 == 0 for v in r)
return [ v//10 for v in r]
terminals = []
for term in d['terminals']:
ly = term['layer']
if ly in skip_layers:
continue
nm = term['netName'] if 'netName' in term else term['net_name']
if nm is not None and p.match(nm): continue
term['layer'] = layer_tbl.get( ly, ly)
term['rect'] = s(term['rect'])
terminals.append( term)
d['terminals'] = terminals
pdkfile = pathlib.Path('Intel/Intel_P1222p2_PDK/layers.json')
cnv = IntelP1222p2Canvas(pdkfile)
cnv.bbox = transformation.Rect( *s(d['bbox']))
cnv.terminals = d['terminals']
cnv.gen_data(run_pex=False)
if False:
assert len(cnv.rd.different_widths) == 0, pformat(cnv.rd.different_widths)
assert len(cnv.rd.shorts) == 0, pformat(cnv.rd.shorts)
assert len(cnv.rd.opens) == 0, pformat(cnv.rd.opens)
assert len(cnv.drc.errors) == 0, pformat(cnv.drc.errors)
|
StarcoderdataPython
|
9670361
|
'''
Your task is to write a function maskify, which changes all but the last four characters into '#'.
Examples
maskify("4556364607935616") == "############5616"
maskify( "64607935616") == "#######5616"
maskify( "1") == "1"
maskify( "") == ""
# "What was the name of your first pet?"
maskify("Skippy") == "##ippy"
maskify("Nananananananananananananananana Batman!") == "####################################man!"
'''
def maskify(cc):
    if len(cc) == 0:
        return ''
    elif len(cc) < 4:
        return cc
    else:
        return "#" * (len(cc) - 4) + cc[-4:]
|
StarcoderdataPython
|
9712552
|
import datetime
def size_format(size_b):
if(size_b < 1024):
return "%.2f"%(size_b) + 'B'
elif(size_b < 1024 * 1024):
return "%.2f"%(size_b / 1024) + 'KB'
elif(size_b < 1024 * 1024 * 1024):
return "%.2f"%(size_b / 1024 / 1024) + 'MB'
    elif(size_b < 1024 * 1024 * 1024 * 1024):
        return "%.2f"%(size_b / 1024 / 1024 / 1024) + 'GB'
def seconds_to_string(seconds):
minute = 60
hour = minute * 60
day = hour * 24
month = day * 30
result = "剩余时间: "
t = [(month, "月"), (day, "天"), (hour, "小时"), (minute, "分钟")]
for (s, c) in t:
if (seconds >= s):
result += str((seconds // s)) + c
seconds %= s
return result
def time_delta(t):
sp = t.split(' ')
tl = [int(x) for x in sp[0].split('-') + sp[1].split(':')]
delta = datetime.datetime(tl[0], tl[1], tl[2], tl[3], tl[4], 0, 0) - datetime.datetime.now()
return seconds_to_string(delta.days * 24 * 60 * 60 + delta.seconds)
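# Minimal usage sketch (added for illustration): formatting a byte count and a
# duration with the helpers above. Values are placeholders.
def _example_formatting():
    print(size_format(5 * 1024 * 1024))                   # "5.00MB"
    print(seconds_to_string(2 * 24 * 3600 + 3 * 3600))    # e.g. "Time remaining: 2 days 3 hours "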
|
StarcoderdataPython
|
5063413
|
# Module that casts a decimal number to fraction notation
# Input:
#   number: number to cast
# Return:
#   tuple[0]: the error; None when the process has no error,
#             otherwise a string with the error message
#   tuple[1]: the number in fraction notation
from fractions import Fraction
def decimalToFraction(number):
if(isinstance(number,(int,float))):
ans = Fraction(number).limit_denominator()
return (None,str(ans))
else:
return ('error','arg must be number')
if __name__ == '__main__':
print(decimalToFraction(3.45))
|
StarcoderdataPython
|
11359806
|
<filename>flask_qiniustorage.py
# -*- coding: utf-8 -*-
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import qiniu as QiniuClass
class Qiniu(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
self._access_key = app.config.get('QINIU_ACCESS_KEY', '')
self._secret_key = app.config.get('QINIU_SECRET_KEY', '')
self._bucket_name = app.config.get('QINIU_BUCKET_NAME', '')
domain = app.config.get('QINIU_BUCKET_DOMAIN')
if not domain:
self._base_url = 'http://' + self._bucket_name + '.qiniudn.com'
else:
self._base_url = 'http://' + domain
def save(self, data, filename=None):
auth = QiniuClass.Auth(self._access_key, self._secret_key)
token = auth.upload_token(self._bucket_name)
return QiniuClass.put_data(token, filename, data)
def delete(self, filename):
auth = QiniuClass.Auth(self._access_key, self._secret_key)
bucket = QiniuClass.BucketManager(auth)
return bucket.delete(self._bucket_name, filename)
def url(self, filename):
return urljoin(self._base_url, filename)
def private_url(self, filename):
auth = QiniuClass.Auth(self._access_key, self._secret_key)
return auth.private_download_url(urljoin(self._base_url, filename), expires=3600)
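# Minimal usage sketch (added for illustration, not part of the original extension):
# wiring Qiniu into a Flask app. The config values and domain are placeholders.
def _example_qiniu_setup():
    from flask import Flask  # assumed available, since this module is a Flask extension
    app = Flask(__name__)
    app.config.update(
        QINIU_ACCESS_KEY='your-access-key',
        QINIU_SECRET_KEY='your-secret-key',
        QINIU_BUCKET_NAME='your-bucket',
        QINIU_BUCKET_DOMAIN='cdn.example.com')
    storage = Qiniu(app)
    return storage.url('hello.txt')  # 'http://cdn.example.com/hello.txt'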
|
StarcoderdataPython
|
3404314
|
"""empty message
Revision ID: 1c306b9d32
Revises: <PASSWORD>
Create Date: 2015-09-15 12:14:05.356636
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('use_of_force_incidents', sa.Column('received_date', sa.DateTime(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('use_of_force_incidents', 'received_date')
### end Alembic commands ###
|
StarcoderdataPython
|
1923265
|
<filename>rsvqa/grad_cam.py
import argparse
from utils import seed_everything, load_data, LabelSmoothing,encode_text #,Model
import wandb
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torchvision import transforms
from models.mmbert import Model, get_transformer_model
from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
#from torchvision.models import resnet50
from transformers import BertTokenizer
import cv2
import timm
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Pretrain on ROCO with MLM")
parser.add_argument('--data_dir', type = str, required = False, default = "../ImageClef-2019-VQA-Med", help = "path for data")
parser.add_argument('--model_dir', type = str, required = False, default = "../ImageClef-2019-VQA-Med/mmbert/MLM/vqa-sentence_transformers-allmpnet48-2.pt", help = "path to load weights")
parser.add_argument('--method', type=str, default='gradcam',
choices=["gradcam","scorecam","gradcam++","ablationcam","xgradcam","eigencam"], help='gradcam method')
parser.add_argument('--cnn_encoder', type=str, default='tf_efficientnetv2_m', help='name of the cnn encoder')
parser.add_argument('--use_relu', action = 'store_true', default = False, help = "use ReLu")
parser.add_argument('--transformer_model', type=str, default='realformer',choices=['transformer', 'realformer', 'feedback-transformer'], help='name of the transformer model')
parser.add_argument('--dataset', type=str, default='VQA-Med', help='roco or vqamed2019')
parser.add_argument('--num_vis', type = int, default=5, help = "num of visual embeddings")
parser.add_argument('--hidden_size', type=int, default=768, help='embedding size')
parser.add_argument('--hidden_dropout_prob', type=float, default=0.3, help='dropout')
parser.add_argument('--n_layers', type=int, default=4, help='num of heads in multihead attenion')
parser.add_argument('--heads', type=int, default=8, help='num of bertlayers')
parser.add_argument('--vocab_size', type=int, default=30522, help='vocabulary size')
parser.add_argument('--task', type=str, default='MLM',
choices=['MLM', 'distillation'], help='pretrain task for the model to be trained on')
parser.add_argument('--seed', type = int, required = False, default = 42, help = "set seed for reproducibility")
parser.add_argument('--img', type=str, default = '../dog_cat.jfif', help='path to img', required = False)
parser.add_argument('--output', type=str, default = '../grad_cam', help='output img', required = False)
parser.add_argument('--train_pct', type = float, required = False, default = 1.0, help = "fraction of train samples to select")
parser.add_argument('--valid_pct', type = float, required = False, default = 1.0, help = "fraction of validation samples to select")
parser.add_argument('--test_pct', type = float, required = False, default = 1.0, help = "fraction of test samples to select")
parser.add_argument('--max_position_embeddings', type = int, required = False, default = 28, help = "max length of sequence")
parser.add_argument('--vqa_img', type=str, default = 'synpic371.jpg', help="path to vqa img", required = False)
parser.add_argument('--category', type=str, default = 'organ', choices=['organ','modality','plane','abnormality'], help="question category", required = False)
parser.add_argument('--mode', type=str, default = 'Train', choices=['Train', 'Val', 'Test'], help="data split", required = False)
parser.add_argument('--grad_cam', action='store_false', required = False, default = True, help='flag to save model input_tensor')
args = parser.parse_args()
methods = \
{"gradcam": GradCAM,
"scorecam": ScoreCAM,
"gradcam++": GradCAMPlusPlus,
"ablationcam": AblationCAM,
"xgradcam": XGradCAM,
"eigencam": EigenCAM,
}
seed_everything(args.seed)
train_df, val_df, test_df = load_data(args)
df = pd.concat([train_df, val_df, test_df]).reset_index(drop=True)
ans2idx = {ans:idx for idx,ans in enumerate(df['answer'].unique())}
idx2ans = {idx:ans for ans,idx in ans2idx.items()}
df['answer'] = df['answer'].map(ans2idx).astype(int)
train_df = df[df['mode']=='train'].reset_index(drop=True)
val_df = df[df['mode']=='val'].reset_index(drop=True)
test_df = df[df['mode']=='test'].reset_index(drop=True)
num_classes = len(ans2idx)
args.num_classes = num_classes
print('numclasses',num_classes)
img_path = os.path.join(args.data_dir,args.mode,'images',args.vqa_img)
#import IPython; IPython.embed(); import sys; sys.exit(0)
info_df=df.loc[df['img_id'] == img_path]
category_df=info_df.loc[info_df['category'] == args.category]
question = category_df['question'].item()
answer = category_df['answer'].item()
# model = Model(args)
# model.classifier[2] = nn.Linear(args.hidden_size, num_classes)
# target_layers = model.transformer.mains[3].ff[3]#model.fc1#model.transformer.mains[-3]#model.transformer.trans.model.blocks[6][4]#
#model = timm.create_model('tf_efficientnetv2_m', pretrained=True)
#model.classifier = torch.nn.Sequential()
#target_layers = model.blocks[6][4]
model = Model(args)
model.classifier[2] = nn.Linear(args.hidden_size, num_classes)
model.load_state_dict(torch.load(args.model_dir))
effv2 = timm.create_model('tf_efficientnetv2_m', pretrained=True) #model.transformer.trans.model
effv2.classifier = torch.nn.Sequential()
print('Loading weights from', args.model_dir)
effv2_dict = effv2.state_dict()
pretrained_dict = model.transformer.trans.model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in effv2_dict}
# 2. overwrite entries in the existing state dict
effv2_dict.update(pretrained_dict)
# 3. load the new state dict
effv2.load_state_dict(effv2_dict)
target_layers = effv2.blocks[-1][-1]
print( target_layers)
def preprocess_image(img: np.ndarray, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) -> torch.Tensor:
preprocessing = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
return preprocessing(img.copy()).unsqueeze(0)
rgb_img = cv2.imread(img_path, 1)[:, :, ::-1] #dog_cat.jfif - synpic371.jpg'
rgb_img = cv2.resize(rgb_img, (224, 224))
rgb_img = np.float32(rgb_img) / 255
img_tensor = preprocess_image(rgb_img, mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokens, segment_ids, input_mask= encode_text(question, tokenizer, args)
tokens, segment_ids, input_mask = torch.tensor(tokens, dtype = torch.long).unsqueeze(dim=0), torch.tensor(segment_ids, dtype = torch.long).unsqueeze(dim=0), torch.tensor(input_mask, dtype = torch.long).unsqueeze(dim=0)
#h = model.transformer.prepare_input(img_tensor, tokens, segment_ids, input_mask)
#out = model(h)
print('out.shape', model(img_tensor, tokens, segment_ids, input_mask)[0].shape)
# Construct the CAM object once, and then re-use it on many images:
print('Using ' + args.method)
cam = methods[args.method](model=effv2, target_layer=target_layers, use_cuda=False)
# If target_category is None, the highest scoring category
# will be used for every image in the batch.
# target_category can also be an integer, or a list of different integers
# for every image in the batch.
target_category = None#answer#None#281
#import IPython; IPython.embed(); import sys; sys.exit(0)
# You can also pass aug_smooth=True and eigen_smooth=True, to apply smoothing.
grayscale_cam = cam(input_tensor=img_tensor, target_category=target_category)
# In this example grayscale_cam has only one image in the batch:
grayscale_cam = grayscale_cam[0, :]
visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=False)
print('Writing output file to ',args.output)
cv2.imwrite(args.output + '_' + args.vqa_img + '_' + args.model_dir.split('/')[-1] + ".jpg", visualization)
|
StarcoderdataPython
|
3535210
|
import time
import torch
from torch.backends import cudnn
from backbone import HybridNetsBackbone
import cv2
import numpy as np
from glob import glob
from utils.utils import letterbox, scale_coords, postprocess, BBoxTransform, ClipBoxes, restricted_float, boolean_string
from utils.plot import STANDARD_COLORS, standard_to_bgr, get_index_label, plot_one_box
import os
from torchvision import transforms
import argparse
parser = argparse.ArgumentParser('HybridNets: End-to-End Perception Network - DatVu')
parser.add_argument('-c', '--compound_coef', type=int, default=3, help='Coefficient of efficientnet backbone')
parser.add_argument('--source', type=str, default='demo/image', help='The demo image folder')
parser.add_argument('--output', type=str, default='demo_result', help='Output folder')
parser.add_argument('-w', '--load_weights', type=str, default='weights/hybridnets.pth')
parser.add_argument('--nms_thresh', type=restricted_float, default='0.25')
parser.add_argument('--iou_thresh', type=restricted_float, default='0.3')
parser.add_argument('--imshow', type=boolean_string, default=False, help="Show result onscreen (unusable on colab, jupyter...)")
parser.add_argument('--imwrite', type=boolean_string, default=True, help="Write result to output folder")
parser.add_argument('--show_det', type=boolean_string, default=False, help="Output detection result exclusively")
parser.add_argument('--show_seg', type=boolean_string, default=False, help="Output segmentation result exclusively")
parser.add_argument('--cuda', type=boolean_string, default=True)
parser.add_argument('--float16', type=boolean_string, default=True, help="Use float16 for faster inference")
args = parser.parse_args()
compound_coef = args.compound_coef
source = args.source
if source.endswith("/"):
source = source[:-1]
output = args.output
if output.endswith("/"):
output = output[:-1]
weight = args.load_weights
img_path = glob(f'{source}/*.jpg') + glob(f'{source}/*.png')
# img_path = [img_path[0]] # demo with 1 image
input_imgs = []
shapes = []
det_only_imgs = []
# replace this part with your project's anchor config
anchor_ratios = [(0.62, 1.58), (1.0, 1.0), (1.58, 0.62)]
anchor_scales = [2 ** 0, 2 ** 0.70, 2 ** 1.32]
threshold = args.nms_thresh
iou_threshold = args.iou_thresh
imshow = args.imshow
imwrite = args.imwrite
show_det = args.show_det
show_seg = args.show_seg
os.makedirs(output, exist_ok=True)
use_cuda = args.cuda
use_float16 = args.float16
cudnn.fastest = True
cudnn.benchmark = True
obj_list = ['car']
color_list = standard_to_bgr(STANDARD_COLORS)
ori_imgs = [cv2.imread(i, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) for i in img_path]
ori_imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in ori_imgs]
# cv2.imwrite('ori.jpg', ori_imgs[0])
# cv2.imwrite('normalized.jpg', normalized_imgs[0]*255)
resized_shape = 640
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
for ori_img in ori_imgs:
h0, w0 = ori_img.shape[:2] # orig hw
r = resized_shape / max(h0, w0) # resize image to img_size
input_img = cv2.resize(ori_img, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_AREA)
h, w = input_img.shape[:2]
(input_img, _, _), ratio, pad = letterbox((input_img, input_img.copy(), input_img.copy()), resized_shape, auto=True,
scaleup=False)
input_imgs.append(input_img)
# cv2.imwrite('input.jpg', input_img * 255)
shapes.append(((h0, w0), ((h / h0, w / w0), pad))) # for COCO mAP rescaling
if use_cuda:
x = torch.stack([transform(fi).cuda() for fi in input_imgs], 0)
else:
x = torch.stack([transform(fi) for fi in input_imgs], 0)
x = x.to(torch.float32 if not use_float16 else torch.float16)
# print(x.shape)
model = HybridNetsBackbone(compound_coef=compound_coef, num_classes=len(obj_list),
ratios=anchor_ratios, scales=anchor_scales, seg_classes=2)
try:
model.load_state_dict(torch.load(weight, map_location='cuda' if use_cuda else 'cpu'))
except:
model.load_state_dict(torch.load(weight, map_location='cuda' if use_cuda else 'cpu')['model'])
model.requires_grad_(False)
model.eval()
if use_cuda:
model = model.cuda()
if use_float16:
model = model.half()
with torch.no_grad():
features, regression, classification, anchors, seg = model(x)
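    # Assumption: the 384-row segmentation map comes from a 640x384 letterboxed input,
    # so rows 12:372 drop the vertical padding before upsampling back to 720x1280.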
seg = seg[:, :, 12:372, :]
da_seg_mask = torch.nn.functional.interpolate(seg, size=[720, 1280], mode='nearest')
_, da_seg_mask = torch.max(da_seg_mask, 1)
for i in range(da_seg_mask.size(0)):
# print(i)
da_seg_mask_ = da_seg_mask[i].squeeze().cpu().numpy().round()
color_area = np.zeros((da_seg_mask_.shape[0], da_seg_mask_.shape[1], 3), dtype=np.uint8)
color_area[da_seg_mask_ == 1] = [0, 255, 0]
color_area[da_seg_mask_ == 2] = [0, 0, 255]
color_seg = color_area[..., ::-1]
# cv2.imwrite('seg_only_{}.jpg'.format(i), color_seg)
color_mask = np.mean(color_seg, 2)
# prepare to show det on 2 different imgs
# (with and without seg) -> (full and det_only)
det_only_imgs.append(ori_imgs[i].copy())
seg_img = ori_imgs[i]
seg_img[color_mask != 0] = seg_img[color_mask != 0] * 0.5 + color_seg[color_mask != 0] * 0.5
seg_img = seg_img.astype(np.uint8)
if show_seg:
cv2.imwrite(f'{output}/{i}_seg.jpg', cv2.cvtColor(seg_img, cv2.COLOR_RGB2BGR))
regressBoxes = BBoxTransform()
clipBoxes = ClipBoxes()
out = postprocess(x,
anchors, regression, classification,
regressBoxes, clipBoxes,
threshold, iou_threshold)
for i in range(len(ori_imgs)):
out[i]['rois'] = scale_coords(ori_imgs[i][:2], out[i]['rois'], shapes[i][0], shapes[i][1])
for j in range(len(out[i]['rois'])):
x1, y1, x2, y2 = out[i]['rois'][j].astype(int)
obj = obj_list[out[i]['class_ids'][j]]
score = float(out[i]['scores'][j])
plot_one_box(ori_imgs[i], [x1, y1, x2, y2], label=obj, score=score,
color=color_list[get_index_label(obj, obj_list)])
if show_det:
plot_one_box(det_only_imgs[i], [x1, y1, x2, y2], label=obj, score=score,
color=color_list[get_index_label(obj, obj_list)])
if show_det:
cv2.imwrite(f'{output}/{i}_det.jpg', cv2.cvtColor(det_only_imgs[i], cv2.COLOR_RGB2BGR))
if imshow:
cv2.imshow('img', ori_imgs[i])
cv2.waitKey(0)
if imwrite:
cv2.imwrite(f'{output}/{i}.jpg', cv2.cvtColor(ori_imgs[i], cv2.COLOR_RGB2BGR))
# exit()
print('running speed test...')
with torch.no_grad():
print('test1: model inferring and postprocessing')
print('inferring 1 image for 10 times...')
x = x[0, ...]
x.unsqueeze_(0)
t1 = time.time()
for _ in range(10):
_, regression, classification, anchors, segmentation = model(x)
out = postprocess(x,
anchors, regression, classification,
regressBoxes, clipBoxes,
threshold, iou_threshold)
t2 = time.time()
tact_time = (t2 - t1) / 10
print(f'{tact_time} seconds, {1 / tact_time} FPS, @batch_size 1')
    # uncomment this if you want an extreme fps test
print('test2: model inferring only')
print('inferring images for batch_size 32 for 10 times...')
t1 = time.time()
x = torch.cat([x] * 32, 0)
for _ in range(10):
_, regression, classification, anchors, segmentation = model(x)
t2 = time.time()
tact_time = (t2 - t1) / 10
print(f'{tact_time} seconds, {32 / tact_time} FPS, @batch_size 32')
|
StarcoderdataPython
|
1703498
|
#!/router/bin/python
from .trex_general_test import CTRexGeneral_Test, CTRexScenario
from .trex_nbar_test import CTRexNbarBase
from CPlatform import CStaticRouteConfig
from .tests_exceptions import *
#import sys
import time
from nose.tools import nottest
# Testing client cfg ARP resolve. Actually, just need to check that TRex run finished with no errors.
# If resolve will fail, TRex will exit with exit code != 0
class CTRexClientCfg_Test(CTRexNbarBase):
"""This class defines the IMIX testcase of the TRex traffic generator"""
def setUp(self):
if CTRexScenario.setup_name == 'kiwi02':
self.skip("Can't run currently on kiwi02")
super(CTRexClientCfg_Test, self).setUp() # launch super test class setUp process
def test_client_cfg_nbar(self):
if self.is_loopback:
self.skip('No NBAR on loopback')
if not CTRexScenario.router_cfg['no_dut_config']:
self.router.configure_basic_interfaces()
self.router.config_pbr(mode = "config")
self.router.config_nbar_pd()
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
ret = self.trex.start_trex (
c = core,
m = mult,
nc = True,
p = True,
d = 100,
f = 'avl/sfr_delay_10_1g.yaml',
client_cfg = 'automation/regression/cfg/client_cfg.yaml',
l = 1000)
trex_res = self.trex.sample_until_finish()
print("\nLATEST RESULT OBJECT:")
print(trex_res)
self.check_general_scenario_results(trex_res, check_latency = False) # no latency with client config
self.match_classification()
def test_client_cfg_vlan(self):
if self.is_loopback:
self.skip('Not relevant on loopback')
if not CTRexScenario.router_cfg['no_dut_config']:
self.router.configure_basic_interfaces(vlan = True)
self.router.config_pbr(mode = "config", vlan = True)
mult = self.get_benchmark_param('multiplier')
core = self.get_benchmark_param('cores')
ret = self.trex.start_trex (
c = core,
m = mult,
nc = True,
p = True,
d = 60,
f = 'cap2/dns.yaml',
limit_ports = 4,
client_cfg = 'automation/regression/cfg/client_cfg_vlan.yaml')
trex_res = self.trex.sample_until_finish()
print("\nLATEST RESULT OBJECT:")
print(trex_res)
self.check_general_scenario_results(trex_res, check_latency = False) # no latency with client config
def tearDown(self):
CTRexNbarBase.tearDown(self)
pass
if __name__ == "__main__":
pass
#!/usr/bin/env python
# Tested with both Python 2.7.6 and Python 3.4.3
#
# This Python code collects the source code for testing acados
# on microcontrollers, putting all the necessary C files in
# one directory, and header files in the sub-directory include.
#
# The idea is that when compiling the testing code of acados for
# embedded platforms, when "make" does not fully function like
# on standard Linux platform, all the source code available in
# one directory would allow the compiler to process the code
# easier.
#
# To use for ESP32:
#
# Example usage:
# Assume the source directory of acados is: ~/acados
# The target folder to be created is: chen_nmpc_qpoases
# This command should be used:
# python test_nmpc_qpoases.py ~/acados chen_nmpc_qpoases
#
# Author: <NAME>
# Date: 2017.04.03
import sys
import os
import glob
from subprocess import call
from os.path import join
print('Running python script to grab chen_nmpc_qpoases...')
print(sys.version) # get python version, for debugging
if len(sys.argv) != 3:
raise SyntaxError('This script needs exactly 2 arguments: \n \
test_nmpc_qpoases.py <acados_top_dir> <new_target_dir>\n \
Example:\n \
test_nmpc_qpoases.py ~/acados chen_nmpc_qpoases')
# 1. Bring all necessary files to one directory.
top_dir = str(sys.argv[1]).rstrip('/') # no trailing / in top_dir
target_dir = str(sys.argv[2]).rstrip('/') # no trailing / in target_dir
# List of files to collect
# Note: these hard-coded paths do not work on Windows
workingcodefiles = [\
'examples/c/chen_nmpc_qpoases.c', \
'examples/c/Chen_model/chen_model.c', \
'acados/utils/print.c', \
'acados/utils/timing.c', \
'acados/ocp_qp/condensing.c', \
'acados/ocp_qp/condensing_helper_functions.c', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.c', \
'acados/sim/sim_erk_integrator.c', \
'external/hpmpc/auxiliary/d_aux_extern_depend_lib4.c', \
'external/blasfeo/auxiliary/i_aux_extern_depend_lib.c', \
'external/qpOASES/src/Constraints.c', \
'external/qpOASES/src/Bounds.c', \
'external/qpOASES/src/Flipper.c', \
'external/qpOASES/src/Indexlist.c', \
'external/qpOASES/src/Matrices.c', \
'external/qpOASES/src/MessageHandling.c', \
'external/qpOASES/src/Options.c', \
'external/qpOASES/src/QProblem.c', \
'external/qpOASES/src/QProblemB.c', \
'external/qpOASES/src/Utils.c' \
]
workingheaderfiles = [\
'examples/c/Chen_model/chen_model.h', \
'acados/ocp_qp/ocp_qp_common.h', \
'acados/ocp_qp/condensing.h', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.h', \
'acados/sim/sim_common.h', \
'acados/sim/sim_erk_integrator.h', \
'acados/sim/sim_collocation.h', \
'acados/sim/sim_rk_common.h', \
'acados/utils/print.h', \
'acados/utils/types.h', \
'acados/utils/timing.h', \
'external/hpmpc/include/aux_d.h', \
'external/hpmpc/include/block_size.h', \
'external/hpmpc/include/kernel_d_lib4.h', \
'external/blasfeo/include/blasfeo_i_aux.h', \
'external/qpOASES/include/qpOASES_e/Bounds.h', \
'external/qpOASES/include/qpOASES_e/Constants.h', \
'external/qpOASES/include/qpOASES_e/ConstraintProduct.h', \
'external/qpOASES/include/qpOASES_e/Constraints.h', \
'external/qpOASES/include/qpOASES_e/Flipper.h', \
'external/qpOASES/include/qpOASES_e/Indexlist.h', \
'external/qpOASES/include/qpOASES_e/Matrices.h', \
'external/qpOASES/include/qpOASES_e/MessageHandling.h', \
'external/qpOASES/include/qpOASES_e/Options.h', \
'external/qpOASES/include/qpOASES_e/QProblem.h', \
'external/qpOASES/include/qpOASES_e/QProblemB.h', \
'external/qpOASES/include/qpOASES_e/Utils.h' \
]
# Files that should be renamed to avoid conflicts
oldfiles = ['external/qpOASES/include/qpOASES_e/Types.h']
newfiles = ['include/qpOASES_e_Types.h']
# Create directory structure and copy files
if not os.path.exists(target_dir):
os.system('mkdir '+target_dir)
for filename in workingcodefiles:
os.system('cp '+top_dir+'/'+filename+' '+target_dir)
if not os.path.exists(target_dir+'/include'):
os.system('mkdir '+target_dir+'/include')
for filename in workingheaderfiles:
os.system('cp '+top_dir+'/'+filename+' '+target_dir+'/include/')
for kk in range(len(oldfiles)):
os.system('cp '+top_dir+'/'+oldfiles[kk]+' '+target_dir+'/'+newfiles[kk])
print('Step 1: Necessary files copied.')
# 2. Modify .h and .c files to adapt to the new code structure:
# List of texts to be replaced:
old_text = [\
'examples/c/Chen_model/chen_model.h', \
'acados/ocp_qp/condensing.h', \
'acados/ocp_qp/condensing_helper_functions.c', \
'acados/ocp_qp/ocp_qp_common.h', \
'acados/ocp_qp/ocp_qp_condensing_qpoases.h', \
'acados/ocp_qp/ocp_qp_hpmpc.h', \
'acados/sim/sim_common.h', \
'acados/sim/sim_erk_integrator.h', \
'acados/sim/sim_collocation.h', \
'acados/sim/sim_rk_common.h', \
'acados/utils/print.h', \
'acados/utils/timing.h', \
'acados/utils/types.h', \
'hpmpc/include/aux_d.h', \
'../include/block_size.h', \
'../include/kernel_d_lib4.h', \
'blasfeo/include/blasfeo_common.h', \
'blasfeo/include/blasfeo_i_aux.h', \
'qpOASES_e/Bounds.h', \
'qpOASES_e/Constants.h', \
'qpOASES_e/Constraints.h', \
'qpOASES_e/ConstraintProduct.h', \
'qpOASES_e/Flipper.h', \
'qpOASES_e/Indexlist.h', \
'qpOASES_e/Matrices.h', \
'qpOASES_e/MessageHandling.h', \
'qpOASES_e/Options.h', \
'qpOASES_e/QProblem.h', \
'qpOASES_e/QProblemB.h', \
'qpOASES_e/Types.h', \
'qpOASES_e/Utils.h' \
]
# List of new texts to replace old ones,
# in corresponding order to old_text:
new_text = [\
'chen_model.h', \
'condensing.h', \
'condensing_helper_functions.c', \
'ocp_qp_common.h', \
'ocp_qp_condensing_qpoases.h', \
'ocp_qp_hpmpc.h', \
'sim_common.h', \
'sim_erk_integrator.h', \
'sim_collocation.h', \
'sim_rk_common.h', \
'print.h', \
'timing.h', \
'types.h', \
'aux_d.h', \
'block_size.h', \
'kernel_d_lib4.h', \
'blasfeo_common.h', \
'blasfeo_i_aux.h', \
'Bounds.h', \
'Constants.h', \
'Constraints.h', \
'ConstraintProduct.h', \
'Flipper.h', \
'Indexlist.h', \
'Matrices.h', \
'MessageHandling.h', \
'Options.h', \
'QProblem.h', \
'QProblemB.h', \
'qpOASES_e_Types.h', \
'Utils.h' \
]
len_old_text = len(old_text)
len_new_text = len(new_text)
if len_old_text != len_new_text:
    raise ValueError('Number of old and new texts do not match')
files = glob.glob(target_dir+"/*.c")
for file in files:
objFile = open(file, "r")
txtFile = objFile.read()
objFile.close()
for replacetext in range(len_old_text):
txtFile = txtFile.replace(old_text[replacetext],new_text[replacetext])
objFile = open(file, "w")
objFile.write(txtFile)
objFile.close()
files = glob.glob(target_dir+"/include/*.h")
for file in files:
objFile = open(file, "r")
txtFile = objFile.read()
objFile.close()
for replacetext in range(len_old_text):
txtFile = txtFile.replace(old_text[replacetext],new_text[replacetext])
objFile = open(file, "w")
objFile.write(txtFile)
objFile.close()
print('Step 2: Path information in files modified to the new structure.')
# 3. Add specific code to HPMPC and BLASFEO files:
# List of files to be modified:
files = ['include/block_size.h']
# List of lines to be added in the beginning of files,
# in corresponding order with the list files:
lines = ['#include "target.h"\n']
if len(files) != len(lines):
    raise ValueError('Number of files and added lines do not match')
for kk in range(len(files)):
objFile = open(target_dir+'/'+files[kk], "r")
txtFile = objFile.read()
objFile.close()
objFile = open(target_dir+'/'+files[kk], "w")
objFile.write(lines[kk]) # write the line to the beginning
objFile.write(txtFile)
objFile.close()
print('Step 3: Common header file included in specific files.')
# 4. Copy Makefile and specific setting files
os.system('cp '+top_dir+'/experimental/dang/esp32/script_test_nmpc_qpoases/Makefile '+target_dir)
os.system('cp '+top_dir+'/experimental/dang/esp32/script_test_nmpc_qpoases/target.h '+target_dir+'/include/')
print('Step 4: Makefile, and HPMPC target.h replaced.')
# 5. Display further instructions
print('Please do next steps in terminal:')
print(' cd '+target_dir)
print(' make')
print('Then run the binary file in '+target_dir+'/bin')
print('To remove binary objects: make clean\n')
count=1000
total=0
while count > 0:
if count %3==0 or count %5==0:
total=total+count
count=count-1
print(total)
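# Equivalent one-liner (added illustration, not part of the original snippet): the same
# total over 1..1000 computed with a generator expression instead of the countdown loop.
total_alt = sum(n for n in range(1, 1001) if n % 3 == 0 or n % 5 == 0)
print(total_alt)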
import pymongo
import os
from pymongo import MongoClient
from dotenv import load_dotenv
load_dotenv()  # load variables from a local .env file before reading them
CLUSTER = os.getenv('DB_CLUSTER')
DATABASE = os.getenv('DB_NAME')
COLLECTION = os.getenv('DB_COLLECTION')
cluster = MongoClient(CLUSTER)
db = cluster[DATABASE]
collection = db[COLLECTION]
def sort_by_points():
return collection.find().sort("points", pymongo.DESCENDING)
def find_points(id):
member = collection.find_one({'id':id})
return member['points']
def update_points(id, points):
attempt = collection.find_one_and_update({'id': id}, {'$inc':{'points':points}})
if attempt == None:
collection.insert_one({'id': id, 'points': points})
def get_top_users(range):
return sort_by_points().limit(range)
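# Sketch of an alternative (not part of the original module; the function name is
# hypothetical): update_points could use pymongo's upsert option to insert-or-increment
# in a single round trip instead of the find-then-insert fallback above.
def update_points_upsert(id, points):
    collection.update_one({'id': id}, {'$inc': {'points': points}}, upsert=True)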
# Practice XGBoost model for Pima Indians dataset
import pandas as pd
from numpy import loadtxt
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# load data
dataset = loadtxt('pima-indians-diabetes.csv', delimiter=",")
# split data into X and y
X = dataset[:,0:8]
Y = dataset[:,8]
# split data into train and test sets
seed = 7
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
# fit model on training data
model = XGBClassifier(base_score=0.5, colsample_bylevel=1, colsample_bytree=1,
gamma=1.005, learning_rate=0.1, max_delta_step=0, max_depth=3,
min_child_weight=1, missing=None, n_estimators=100, nthread=-1,
objective='binary:logistic', reg_alpha=0, reg_lambda=1,
scale_pos_weight=1, seed=0, silent=True, subsample=1)
model.fit(X_train, y_train)
print(model)
# make predictions for test data
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
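# Optional extension (a sketch, assuming the older xgboost scikit-learn API implied by
# the silent=True flag above; the parameter choices here are illustrative): monitor a
# validation set during fitting and stop early once logloss stops improving.
eval_model = XGBClassifier(n_estimators=100, max_depth=3, learning_rate=0.1)
eval_model.fit(X_train, y_train,
               eval_set=[(X_test, y_test)],
               eval_metric="logloss",
               early_stopping_rounds=10,
               verbose=False)
print("Best iteration found by early stopping:", eval_model.best_iteration)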
# SPDX-License-Identifier: MIT
# Copyright (c) 2022 <NAME> <https://github.com/AndrielFR>
import asyncio
import time
from typing import Dict, List  # required for the List[Dict] annotation used below
import feedparser
from bs4 import BeautifulSoup
from mews.monitor.sources import BaseRSS
from mews.utils import http
from mews.utils.database import exists_post
class AnimeNew(BaseRSS):
def __init__(self):
self.uri = "https://www.animenew.com.br/"
self.rss_uri = "https://animenew.com.br/feed/"
self.new_posts: List[Dict] = []
async def work(self):
response = await http.get(self.rss_uri)
p = feedparser.parse(response.content)
for entrie in p.entries[:10]:
title = entrie.title
author = entrie.author
published_date = int(round(time.mktime(entrie.published_parsed)))
post_link = entrie.link
comments_link = entrie.comments
post_response = await http.get(post_link)
post_soup = BeautifulSoup(post_response.content, "html.parser")
post_content = post_soup.find(
"div", **{"class": "elementor-widget-theme-post-content"}
)
contents = post_content.find_next("div").contents
content = "".join(str(line) for line in contents[1:-2])
if not (
await exists_post(
self.__class__.__name__.lower(), title, content, post_link
)
):
self.new_posts.append(
dict(
source=self.__class__.__name__,
title=title,
author=author,
published_date=published_date,
content=content,
post_link=post_link,
comments_link=comments_link,
)
)
await asyncio.sleep(600)
await self.work()
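        # Note (added commentary): this tail call re-enters work() every 10 minutes via
        # recursion, growing the await chain each cycle; a long-running monitor would
        # typically wrap the body in a `while True:` loop instead.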
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutClasses(Koan):
class Dog:
"Dogs need regular walkies. Never, ever let them drive."
def test_instances_of_classes_can_be_created_adding_parentheses(self):
# NOTE: The .__name__ attribute will convert the class
# into a string value.
fido = self.Dog()
self.assertEqual('Dog', fido.__class__.__name__)
def test_classes_have_docstrings(self):
self.assertRegex(self.Dog.__doc__, "Dogs need regular walkies. Never, ever let them drive.")
# ------------------------------------------------------------------
class Dog2:
def __init__(self):
self._name = 'Paul'
def set_name(self, a_name):
self._name = a_name
def test_init_method_is_the_constructor(self):
dog = self.Dog2()
self.assertEqual('Paul', dog._name)
def test_private_attributes_are_not_really_private(self):
dog = self.Dog2()
dog.set_name("Fido")
self.assertEqual('Fido', dog._name)
# The _ prefix in _name implies private ownership, but nothing is truly
# private in Python.
def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
fido = self.Dog2()
fido.set_name("Fido")
self.assertEqual('Fido', getattr(fido, "_name"))
# getattr(), setattr() and delattr() are a way of accessing attributes
# by method rather than through assignment operators
self.assertEqual('Fido', fido.__dict__["_name"])
# Yes, this works here, but don't rely on the __dict__ object! Some
# class implementations use optimization which result in __dict__ not
# showing everything.
# ------------------------------------------------------------------
class Dog3:
def __init__(self):
self._name = None
def set_name(self, a_name):
self._name = a_name
def get_name(self):
return self._name
name = property(get_name, set_name)
def test_that_name_can_be_read_as_a_property(self):
fido = self.Dog3()
fido.set_name("Fido")
# access as method
self.assertEqual('Fido', fido.get_name())
# access as property
self.assertEqual('Fido', fido.name)
# ------------------------------------------------------------------
class Dog4:
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, a_name):
self._name = a_name
def test_creating_properties_with_decorators_is_slightly_easier(self):
fido = self.Dog4()
fido.name = "Fido"
self.assertEqual("Fido", fido.name)
# ------------------------------------------------------------------
class Dog5:
def __init__(self, initial_name):
self._name = initial_name
@property
def name(self):
return self._name
def test_init_provides_initial_values_for_instance_variables(self):
fido = self.Dog5("Fido")
self.assertEqual("Fido", fido.name)
def test_args_must_match_init(self):
with self.assertRaises(TypeError):
self.Dog5()
# THINK ABOUT IT:
# Why is this so?
def test_different_objects_have_different_instance_variables(self):
fido = self.Dog5("Fido")
rover = self.Dog5("Rover")
self.assertEqual(False, rover.name == fido.name)
# ------------------------------------------------------------------
class Dog6:
def __init__(self, initial_name):
self._name = initial_name
def get_self(self):
return self
def __str__(self):
#
# Implement this!
#
return self._name
def __repr__(self):
return "<Dog named '" + self._name + "'>"
def test_inside_a_method_self_refers_to_the_containing_object(self):
fido = self.Dog6("Fido")
self.assertEqual(fido, fido.get_self()) # Not a string!
def test_str_provides_a_string_version_of_the_object(self):
fido = self.Dog6("Fido")
self.assertEqual("Fido", str(fido))
def test_str_is_used_explicitly_in_string_interpolation(self):
fido = self.Dog6("Fido")
self.assertEqual("My dog is Fido", "My dog is " + str(fido))
def test_repr_provides_a_more_complete_string_version(self):
fido = self.Dog6("Fido")
self.assertEqual("<Dog named 'Fido'>", repr(fido))
def test_all_objects_support_str_and_repr(self):
seq = [1, 2, 3]
self.assertEqual("[1, 2, 3]", str(seq))
self.assertEqual("[1, 2, 3]", repr(seq))
self.assertEqual("STRING", str("STRING"))
self.assertEqual("'STRING'", repr("STRING"))
# uibcdf/MolSysMT: molsysmt/protonation.py
from molsysmt._private_tools.exceptions import *
def has_hydrogens(molecular_system):
from molsysmt.multitool import get
output = False
n_hydrogens = get(molecular_system, target='atom', selection='atom_type=="H"', n_atoms=True)
if n_hydrogens>0:
output = True
return output
def add_missing_hydrogens(molecular_system, pH=7.4, forcefield='AMBER99SB-ILDN', engine='OpenMM', verbose=False):
"""add_missing_hydrogens(item, pH=7.4, forcefield='AMBER99SB-ILDN', engine='OpenMM', verbose=False)
The missing hydrogens of a molecular model are added. This method does not remove any hydrogen
already present.
    Regarding the protonation states of the amino acids, the documentation of the
    chosen engine should be checked for further details.
    - OpenMM: The protonation state is determined according to the frequency of the variant at the specified pH, and the participation of Cysteines in disulfide bonds or Histidines in hydrogen bonds. This engine needs the specification of a forcefield. See the `OpenMM User Manual <http://docs.openmm.org/7.0.0/userguide/application.html#adding-hydrogens>`_ or the `OpenMM Api Guide <http://docs.openmm.org/development/api-python/generated/simtk.openmm.app.modeller.Modeller.html#simtk.openmm.app.modeller.Modeller.addHydrogens>`_.
- PDBFixer: The protonation state is determined according to the frequency of the variant at the specified pH. See the `PDBFixer Manual <http://htmlpreview.github.io/?https://raw.github.com/pandegroup/pdbfixer/master/Manual.html>`_.
Parameters
----------
item: Molecular model in accepted form.
Molecular model in any of the accepted forms by MolSysMT.
pH: float, default: 7.4
        The pH based on which to determine the amino acids' protonation states.
forcefield: str, default: 'AMBER99SB-ILDN'
Name of the forcefield to be used by OpenMM ([check the list of names accepted here]())
engine: str ('OpenMM' or 'PDBFixer'), default: 'OpenMM'
Name of the engine used to add the missing hydrogens. The following options are available:
- 'OpenMM': The method openmm.app.modeller.Modeller.addHydrogens is included in the
workflow. See the `OpenMM User Manual
<http://docs.openmm.org/7.0.0/userguide/application.html#adding-hydrogens>`_ or the
`OpenMM Api Guide
<http://docs.openmm.org/development/api-python/generated/simtk.openmm.app.modeller.Modeller.html#simtk.openmm.app.modeller.Modeller.addHydrogens>`_.
- 'PDBFixer': The method pdbfixer.PDBFixer.addMissingHydrogens() is included in the workflow. See the `PDBFixer Manual <http://htmlpreview.github.io/?https://raw.github.com/pandegroup/pdbfixer/master/Manual.html>`_.
verbose: bool, default: False
The method prints out useful information if verbose=`True`.
Returns
-------
item : Molecular model in the same form as input `item`.
A new molecular model with the missing hydrogens
added is returned. The form will be the same as the input model.
Examples
--------
See Also
--------
Notes
-----
"""
from molsysmt.multitool import convert, get_form
from molsysmt._private_tools._digestion import digest_engine
form = get_form(molecular_system)
engine = digest_engine(engine)
if engine=="OpenMM":
tmp_item = convert(molecular_system, to_form="openmm.Modeller")
log_residues_changed = tmp_item.addHydrogens(pH=pH)
if verbose:
print('Missing hydrogens added.')
ii = 0
            for residue in tmp_item.topology.residues():
if log_residues_changed[ii] is not None:
print('{}-{} to {}-{}'.format(residue.name, residue.index,
log_residues_changed[ii], residue.index))
ii+=1
elif engine=='PDBFixer':
tmp_item = convert(molecular_system, to_form="pdbfixer.PDBFixer")
tmp_item.addMissingHydrogens(pH=pH)
if verbose:
print('Missing hydrogens added (PDBFixer gives no details).')
else:
raise NotImplementedError
tmp_item = convert(tmp_item, to_form=form)
return tmp_item
def convert_distance(miles):
km = miles * 1.6
result = "{} miles equals {:.1f} km".format(miles, km)
return result
print(convert_distance(13.1))
# fashion-mnist/fashion-keras/src/neural_network.py
"""Neural network class."""
import tensorflow as tf
class NeuralNetwork(tf.keras.Model):
"""Neural network that classifies Fashion MNIST-style images."""
def __init__(self):
super().__init__()
self.sequence = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(20, activation='relu'),
tf.keras.layers.Dense(10)
])
def call(self, x: tf.Tensor) -> tf.Tensor:
y_prime = self.sequence(x)
return y_prime
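# Minimal usage sketch (added illustration, not part of the original module; the
# optimizer, epoch count, and batch size below are assumptions): compile and fit the
# model on the Keras Fashion MNIST dataset, which matches the 28x28 input shape above.
if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0  # scale pixels to [0, 1]
    model = NeuralNetwork()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=1, batch_size=64)
    model.evaluate(x_test, y_test)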
#!/usr/local/bin/python3
# markhankins/podannotator
__version__ = '0.1'
from kubernetes import client, config
from kubernetes.client.rest import ApiException
import sys
from prettytable import PrettyTable
config.load_kube_config()
def print_help():
print('I need the namespace(s) as an argument')
if len(sys.argv) <= 1:
print (f'bad arguments: {str(sys.argv[1:])}')
print_help()
exit(1)
namespaces = sys.argv[1:]
table = PrettyTable(['Namespace', 'Pod','Volume-Name','PVC'])
v1 = client.CoreV1Api()
print ('Pods with Persistent Volume Claims to be annotated:', end = '')
for n in namespaces:
print ("")
ret = v1.list_namespaced_pod(n,watch=False)
for i in ret.items:
for v in i.spec.volumes:
if v.persistent_volume_claim:
table.add_row([n,i.metadata.name,v.name,v.persistent_volume_claim.claim_name])
print (table)
confirm = input('proceed?[Y/n]:')
if confirm != 'Y':
exit(0)
for p in table:
p.border=False
p.header=False
pod = p.get_string(fields=["Pod"]).strip()
ns = p.get_string(fields=["Namespace"]).strip()
pvc = p.get_string(fields=["PVC"]).strip()
volume = p.get_string(fields=["Volume-Name"]).strip()
body = {"metadata": {"annotations": {"backup.velero.io/backup-volumes": volume }}}
print(f'Annotating {pod} ', end = '')
try:
v1.patch_namespaced_pod(pod,ns,body)
print (' Done')
except ApiException as e:
        print (f" AAHH something went wrong! {e}")
exit(1)
exit (0)
# todo: if a PVC has more than one pod, the 'for p in table' loop should handle this.
# brad90four/bradbot: src/exts/trail_status.py
import feedparser
from loguru import logger
from nextcord import Embed
from nextcord.ext import commands
class TrailStatus(commands.Cog):
    "Send an embed with the current trail status."
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="trail_status", aliases=("trails", "trailstatus", "ts"))
async def trail_status(self, ctx: commands.Context) -> None:
"""Get the current trail status."""
logger.debug(f"Command `{ctx.invoked_with}` used by {ctx.author}.")
try:
esnp_rss = feedparser.parse(
"https://parks.hamiltontn.gov/RSSFeed.aspx?ModID=63&CID=Enterprise-South-Nature-Park-Bike-Trails-8"
)
except Exception as e:
            logger.debug("Error occurred while fetching ESNP RSS.")
logger.exception(e)
return
updated_date = esnp_rss["entries"][0]["published"]
status = esnp_rss["entries"][0]["title"]
# logger.debug(f"{updated_date = }\n{status = }")
embed = Embed(
title="Trail Status for ESNP :man_mountain_biking:",
description=f"Updated: {updated_date}\nStatus: {status}",
)
await ctx.send(embed=embed)
def setup(bot: commands.Bot) -> None:
"""Load the TrailStatus cog."""
bot.add_cog(TrailStatus(bot))
# Anancha/Programming-Techniques-using-Python
from random import choice
from time import time
my_names = ['Suman','Mohan','Divya','Sugandh']
my_subjects = ['Chemistry','Biology','Maths']
def my_list(num_students):
mylist = []
for loop in range(num_students):
mystudents = {
'myid':loop,
'myname': choice(my_names),
'mysubject':choice(my_subjects)
}
mylist.append(mystudents)
return mylist
def my_generator(num_students):
for loop in range(num_students):
mystudents = {
'myid':loop,
'myname': choice(my_names),
'mysubject':choice(my_subjects)
}
yield mystudents
myt1 = time()
people = my_list(1000000)
myt2 = time()
print('Time taken by list', myt2 - myt1)
myt1 = time()
people = my_generator(1000000)
myt2 = time()
print('Time taken by generator function', myt2 - myt1)
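# Note (added commentary): my_generator() above is only created, never consumed, so its
# timing measures next to nothing. A fairer comparison consumes the generator as well:
myt1 = time()
people = list(my_generator(1000000))
myt2 = time()
print('Time taken to consume the generator', myt2 - myt1)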
from break_ import Break_
class Instruction:
player_actions = iter([action.rstrip('\n') for action in open('C:\\Users\\<NAME>\\Documents\\Synacor Challenge\\auto_player.txt')])
def __init__(self, memory, registers):
self.memory = memory
self.registers = registers
self.stack = []
self.memory_address = 0
self.input_string = ''
self.input_start = 0
self.input_index = 0
def register_value(self, n):
if n in range(32768):
return(n)
else:
return(self.registers[n])
def halt_(self):
raise Break_
def set_(self):
self.registers[self.memory[self.memory_address + 1]] = self.register_value(self.memory[self.memory_address + 2])
self.memory_address += 3
def push_(self):
self.stack.append(self.register_value(self.memory[self.memory_address + 1]))
self.memory_address += 2
def pop_(self):
self.registers[self.memory[self.memory_address + 1]] = self.stack.pop()
self.memory_address += 2
def eq_(self):
if self.register_value(self.memory[self.memory_address + 2]) == self.register_value(self.memory[self.memory_address + 3]):
self.registers[self.memory[self.memory_address + 1]] = 1
else:
self.registers[self.memory[self.memory_address + 1]] = 0
self.memory_address += 4
def gt_(self):
if self.register_value(self.memory[self.memory_address + 2]) > self.register_value(self.memory[self.memory_address + 3]):
self.registers[self.memory[self.memory_address + 1]] = 1
else:
self.registers[self.memory[self.memory_address + 1]] = 0
self.memory_address += 4
def jmp_(self):
self.memory_address = self.register_value(self.memory[self.memory_address + 1])
def jt_(self):
if self.register_value(self.memory[self.memory_address + 1]) != 0:
self.memory_address = self.register_value(self.memory[self.memory_address + 2])
else:
self.memory_address += 3
def jf_(self):
if self.register_value(self.memory[self.memory_address + 1]) == 0:
self.memory_address = self.register_value(self.memory[self.memory_address + 2])
else:
self.memory_address += 3
def add_(self):
self.registers[self.memory[self.memory_address + 1]] = (self.register_value(self.memory[self.memory_address + 2]) + self.register_value(self.memory[self.memory_address + 3])) % 32768
self.memory_address += 4
def mult_(self):
self.registers[self.memory[self.memory_address + 1]] = (self.register_value(self.memory[self.memory_address + 2])*self.register_value(self.memory[self.memory_address + 3])) % 32768
self.memory_address += 4
def mod_(self):
self.registers[self.memory[self.memory_address + 1]] = self.register_value(self.memory[self.memory_address + 2]) % self.register_value(self.memory[self.memory_address + 3])
self.memory_address += 4
def and_(self):
self.registers[self.memory[self.memory_address + 1]] = self.register_value(self.memory[self.memory_address + 2]) & self.register_value(self.memory[self.memory_address + 3])
self.memory_address += 4
def or_(self):
self.registers[self.memory[self.memory_address + 1]] = self.register_value(self.memory[self.memory_address + 2]) | self.register_value(self.memory[self.memory_address + 3])
self.memory_address += 4
def not_(self):
self.registers[self.memory[self.memory_address + 1]] = 32768 + ~self.register_value(self.memory[self.memory_address + 2])
self.memory_address += 3
def rmem_(self):
self.registers[self.memory[self.memory_address + 1]] = self.memory[self.register_value(self.memory[self.memory_address + 2])]
self.memory_address += 3
def wmem_(self):
self.memory[self.register_value(self.memory[self.memory_address + 1])] = self.register_value(self.memory[self.memory_address + 2])
self.memory_address += 3
def call_(self):
self.stack.append(self.memory_address + 2)
self.memory_address = self.register_value(self.memory[self.memory_address + 1])
def ret_(self):
if len(self.stack) > 0:
self.memory_address = self.stack.pop()
else:
raise Break_
def out_(self):
print(chr(self.register_value(self.memory[self.memory_address + 1])), end='')
self.memory_address += 2
def in_(self):
if self.input_start == 0:
self.input_index = 0
self.input_string = next(self.player_actions, None)
if self.input_string == 'fix teleporter':
self.registers[32775] = 25734
if self.input_string == None:
self.input_string = input()
self.registers[self.memory[self.memory_address + 1]] = ord(self.input_string[self.input_index])
self.input_start = 1
else:
self.input_index += 1
if self.input_index < len(self.input_string):
self.registers[self.memory[self.memory_address + 1]] = ord(self.input_string[self.input_index])
else:
self.registers[self.memory[self.memory_address + 1]] = ord('\n')
self.input_start = 0
self.memory_address += 2
def noop_(self):
self.memory_address += 1
operations = [halt_, set_, push_, pop_, eq_, gt_, jmp_, jt_, jf_, add_, mult_, mod_, and_, or_, not_, rmem_, wmem_, call_, ret_, out_, in_, noop_]
# server/galaxyls/services/xml/scanner.py
""" This code is based on the Eclipse/Lemminx XML language server implementation:
https://github.com/eclipse/lemminx/tree/master/org.eclipse.lemminx/src/main/java/org/eclipse/lemminx/dom
Only the minimum subset of the XML dialect used by Galaxy tool wrappers is supported.
"""
from typing import Optional
from .constants import (
_EQS,
_EXL,
_FSL,
_LAN,
_QMA,
_RAN,
_UDS,
CDATA_END_CHAR_SEQ,
CDATA_START_CHAR_SEQ,
COMMENT_END_CHAR_SEQ,
COMMENT_START_CHAR_SEQ,
PI_END_CHAR_SEQ,
QUOTE_CHARS,
)
from .types import ScannerState, TokenType
from .utils import MultiLineStream
ERROR_UNEXPECTED_WHITESPACE = "Unexpected whitespace. Tag name must directly follow the open angle bracket."
class XmlScanner:
"""This class allows to sequentially scan a XML document to find and extract the exact positions of every
token inside the document."""
def __init__(self, source: str, initial_offset: int = 0, initial_state: ScannerState = ScannerState.WithinContent) -> None:
self.stream = MultiLineStream(source, initial_offset)
self.state = initial_state
self.token_offset = 0
self.token_type = TokenType.Unknown
self.token_error: Optional[str] = None
def scan(self) -> TokenType:
"""Scans the document to sequentially find the next token."""
        offset = self.stream.pos()
        token = self._internal_scan()
        if token != TokenType.EOS and offset == self.stream.pos():
            self.stream.advance(1)
            return self._finish_token(offset, TokenType.Unknown)
return token
def get_token_offset(self) -> int:
"""Gets the current token offset inside the document."""
return self.token_offset
def get_token_end(self) -> int:
"""Gets the last position/offset of the token."""
return self.stream.pos()
def get_token_text(self) -> str:
"""Gets the text of this token from its offset to the current position."""
return self.stream.get_source()[self.token_offset : self.stream.pos()]
def get_token_text_from_offset(self, offset: int) -> str:
"""Gets the text of this token from the given offset to the current position."""
return self.stream.get_source()[offset : self.stream.pos()]
def _finish_token(self, offset: int, type: TokenType, error_message: Optional[str] = None) -> TokenType:
self.token_type = type
self.token_offset = offset
self.token_error = error_message
return type
# flake8: noqa: C901
def _internal_scan(self) -> TokenType:
"""Scans the document for the next token.
This method is a bit too complex, but, since it is a Python translation
from the Java Eclipse/Lemminx parser, it could be easier to maintain it this way.
Returns:
TokenType: The token found.
"""
offset = self.stream.pos()
if self.stream.eos():
return self._finish_token(offset, TokenType.EOS)
if self.state == ScannerState.WithinComment:
if self.stream.advance_if_chars(COMMENT_END_CHAR_SEQ):
return self._finish_token(offset, TokenType.EndCommentTag)
self.stream.advance_until_chars(COMMENT_END_CHAR_SEQ)
return self._finish_token(offset, TokenType.Comment)
elif self.state == ScannerState.PrologOrPI:
if self.stream.advance_if_chars(PI_END_CHAR_SEQ):
self.state = ScannerState.WithinContent
return self._finish_token(offset, TokenType.PIEnd)
self.stream.advance_until_chars_or_new_tag(PI_END_CHAR_SEQ)
if self.stream.peek_char() == _LAN:
self.state = ScannerState.WithinContent
return self._internal_scan()
elif self.state == ScannerState.WithinPI:
if self.stream.skip_whitespace():
return self._finish_token(offset, TokenType.Whitespace)
if self.stream.advance_if_chars(PI_END_CHAR_SEQ):
self.state = ScannerState.WithinContent
return self._finish_token(offset, TokenType.PIEnd)
if self.stream.advance_until_chars_or_new_tag(PI_END_CHAR_SEQ):
if self.stream.peek_char() == _LAN:
self.state = ScannerState.WithinContent
if len(self.get_token_text_from_offset(offset)) == 0:
return self._finish_token(offset, TokenType.PIEnd)
return self._finish_token(offset, TokenType.PIContent)
elif self.state == ScannerState.WithinContent:
if self.stream.advance_if_char(_LAN):
if not self.stream.eos() and self.stream.peek_char() == _EXL:
if self.stream.advance_if_chars(CDATA_START_CHAR_SEQ):
self.state = ScannerState.WithinCDATA
return self._finish_token(offset, TokenType.CDATATagOpen)
if self.stream.advance_if_chars(COMMENT_START_CHAR_SEQ):
self.state = ScannerState.WithinComment
return self._finish_token(offset, TokenType.StartCommentTag)
elif not self.stream.eos() and self.stream.peek_char() == _QMA:
self.state = ScannerState.PrologOrPI
return self._finish_token(offset, TokenType.StartPrologOrPI)
if self.stream.advance_if_char(_FSL):
self.state = ScannerState.AfterOpeningEndTag
return self._finish_token(offset, TokenType.EndTagOpen)
self.state = ScannerState.AfterOpeningStartTag
return self._finish_token(offset, TokenType.StartTagOpen)
self.stream.advance_until_char(_LAN)
return self._finish_token(offset, TokenType.Content)
elif self.state == ScannerState.WithinCDATA:
if self.stream.advance_if_chars(CDATA_END_CHAR_SEQ):
self.state = ScannerState.WithinContent
return self._finish_token(offset, TokenType.CDATATagClose)
self.stream.advance_until_chars(CDATA_END_CHAR_SEQ)
return self._finish_token(offset, TokenType.CDATAContent)
elif self.state == ScannerState.AfterOpeningEndTag:
if self._has_next_element_name():
self.state = ScannerState.WithinEndTag
return self._finish_token(offset, TokenType.EndTag)
if self.stream.skip_whitespace():
return self._finish_token(
offset,
TokenType.Whitespace,
ERROR_UNEXPECTED_WHITESPACE,
)
self.state = ScannerState.WithinEndTag
if self.stream.advance_until_char_or_new_tag(_RAN):
if self.stream.peek_char() == _RAN:
self.state = ScannerState.WithinContent
return self._internal_scan()
return self._finish_token(offset, TokenType.Unknown)
elif self.state == ScannerState.WithinEndTag:
if self.stream.skip_whitespace():
return self._finish_token(offset, TokenType.Whitespace)
if self.stream.advance_if_char(_RAN):
self.state = ScannerState.WithinContent
return self._finish_token(offset, TokenType.EndTagClose)
if self.stream.advance_until_char(_LAN):
self.state = ScannerState.WithinContent
return self._internal_scan()
return self._finish_token(offset, TokenType.Whitespace)
elif self.state == ScannerState.AfterOpeningStartTag:
if self._has_next_element_name():
self.state = ScannerState.WithinTag
return self._finish_token(offset, TokenType.StartTag)
if self.stream.skip_whitespace():
return self._finish_token(
offset,
TokenType.Whitespace,
ERROR_UNEXPECTED_WHITESPACE,
)
self.state = ScannerState.WithinTag
if self.stream.advance_until_char_or_new_tag(_RAN):
if self.stream.peek_char() == _LAN:
self.state = ScannerState.WithinContent
return self._internal_scan()
return self._finish_token(offset, TokenType.Unknown)
elif self.state == ScannerState.WithinTag:
if self.stream.skip_whitespace():
return self._finish_token(offset, TokenType.Whitespace)
if self.stream.advance_if_chars(PI_END_CHAR_SEQ):
self.state = ScannerState.WithinContent
return self._finish_token(offset, TokenType.PIEnd)
if self._has_next_attribute_name():
self.state = ScannerState.AfterAttributeName
return self._finish_token(offset, TokenType.AttributeName)
if self.stream.advance_if_char(_FSL):
self.state = ScannerState.WithinTag
if self.stream.advance_if_char(_RAN):
self.state = ScannerState.WithinContent
return self._finish_token(offset, TokenType.StartTagSelfClose)
return self._finish_token(offset, TokenType.Unknown)
ch = self.stream.peek_char()
if ch in QUOTE_CHARS:
self.state = ScannerState.BeforeAttributeValue
return self._internal_scan()
if self.stream.advance_if_char(_RAN):
self.state = ScannerState.WithinContent
return self._finish_token(offset, TokenType.StartTagClose)
if self.stream.advance_until_char(_LAN):
self.state = ScannerState.WithinContent
return self._internal_scan()
return self._finish_token(offset, TokenType.Unknown)
elif self.state == ScannerState.AfterAttributeName:
if self.stream.skip_whitespace():
return self._finish_token(offset, TokenType.Whitespace)
if self.stream.advance_if_char(_EQS):
self.state = ScannerState.BeforeAttributeValue
return self._finish_token(offset, TokenType.DelimiterAssign)
self.state = ScannerState.WithinTag
return self._internal_scan()
elif self.state == ScannerState.BeforeAttributeValue:
if self.stream.skip_whitespace():
return self._finish_token(offset, TokenType.Whitespace)
if self._has_next_attribute_value():
self.state = ScannerState.WithinTag
return self._finish_token(offset, TokenType.AttributeValue)
self.state = ScannerState.WithinTag
return self._internal_scan()
self.stream.advance(1)
self.state = ScannerState.WithinContent
return self._finish_token(offset, TokenType.Unknown)
def _has_next_element_name(self) -> bool:
first = self.stream.peek_char()
if not self._is_valid_start_name_character(chr(first)):
return False
self.stream.advance(1)
self.stream.advance_while_char(self._is_valid_name_character)
return True
def _has_next_attribute_name(self) -> bool:
return self.stream.advance_while_char(self._is_valid_name_character) > 0
def _has_next_attribute_value(self) -> bool:
first = self.stream.peek_char()
if first in QUOTE_CHARS:
self.stream.advance(1)
if self.stream.advance_until_char(first):
self.stream.advance(1)
return True
return False
def _is_valid_name_character(self, ch: str) -> bool:
return ord(ch) == _UDS or ch.isalnum()
def _is_valid_start_name_character(self, ch: str) -> bool:
return ord(ch) == _UDS or ch.isalpha() # No numbers allowed as first character
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# BuildRPMDriver derived classes expose the following methods:
#
# build_package(). This method will perform the actual package build using
# the driver-specific approach.
from dlrn.config import setup_logging
from dlrn.drivers.buildrpm import BuildRPMDriver
import io
import logging
import multiprocessing
import os
import re
import sh
from time import localtime
from time import strftime
from time import time
logger = logging.getLogger("dlrn-build-koji")
class KojiBuildDriver(BuildRPMDriver):
DRIVER_CONFIG = {
'kojibuild_driver': {
'koji_krb_principal': {'name': 'krb_principal'},
'koji_krb_keytab': {'name': 'krb_keytab'},
'koji_scratch_build': {'name': 'scratch_build', 'type': 'boolean',
'default': True},
'koji_build_target': {'name': 'build_target'},
'koji_arch': {'name': 'arch', 'default': 'x86_64'},
'koji_use_rhpkg': {'name': 'use_rhpkg', 'type': 'boolean'},
'koji_exe': {'default': 'koji'},
'fetch_mock_config': {'type': 'boolean'},
'mock_base_packages': {'default': ''},
'mock_package_manager': {'default': ''},
'koji_add_tags': {'name': 'additional_koji_tags', 'type': 'list',
'default': []},
}
}
def __init__(self, *args, **kwargs):
super(KojiBuildDriver, self).__init__(*args, **kwargs)
self.verbose_build = False
self.exe_name = self.config_options.koji_exe
# Check for empty additional_koji_tags value
if self.config_options.koji_add_tags == ['']:
self.config_options.koji_add_tags = []
setup_logging()
# We are using this method to "tee" koji output to a log file and stdout
def _process_koji_output(self, line):
if self.verbose_build:
logger.info(line[:-1])
self.koji_fp.write(line)
def write_mock_config(self, filename):
"""Retrieve mock config from Koji instance
:param filename: output filename to write mock config
"""
target = self.config_options.koji_build_target
arch = self.config_options.koji_arch
try:
worker_id = multiprocessing.current_process()._identity[0]
except IndexError:
# Not in multiprocessing mode
worker_id = 1
run_cmd = [self.exe_name]
run_cmd.extend(['mock-config',
'--arch', arch, '--target', target, '-o', filename])
# FIXME(hguemar): add proper exception management
sh.env(run_cmd,
_env={'PATH': '/usr/bin/'})
lines = []
with open(filename, 'r') as fp:
for line in fp.readlines():
if (line.startswith("config_opts['chroot_setup_cmd']") and
self.config_options.mock_base_packages != ''):
lines.append("config_opts['chroot_setup_cmd'] = "
"'install %s'\n" %
self.config_options.mock_base_packages)
elif line.startswith("config_opts['root']"):
# Append worker id to mock buildroot name
line = line[:-2] + "-" + str(worker_id) + "'\n"
lines.append(line)
else:
lines.append(line)
if self.config_options.mock_package_manager:
lines.append("config_opts['package_manager'] = '%s'\n" %
self.config_options.mock_package_manager)
with open(filename, 'w') as fp:
fp.write(''.join(lines))
def _build_with_rhpkg(self, package_name, output_dir, src_rpm, scratch,
commit):
"""Use rhpkg as build backend
:param package_name: package name to build
:param output_dir: output directory
:param src_rpm: source RPM to build
:param scratch: define if build is scratch or not
"""
distgit_dir = os.path.join(
self.config_options.datadir,
package_name + "_distro")
ds_source_git = os.path.join(
self.config_options.datadir,
package_name + "_downstream")
build_exception = None
# if we are using rhpkg, we need to create a kerberos ticket
krb_principal = self.config_options.koji_krb_principal
keytab_file = self.config_options.koji_krb_keytab
with io.open("%s/kerberos.log" % output_dir, 'a',
encoding='utf-8', errors='replace') as self.koji_fp:
sh.kinit('-k', '-t', keytab_file, krb_principal)
rhpkg = sh.rhpkg.bake(_cwd=distgit_dir, _tty_out=False,
_timeout=3600,
_err=self._process_koji_output,
_out=self._process_koji_output,
_env={'PATH': '/usr/bin/'})
if (self.config_options.pkginfo_driver ==
'dlrn.drivers.downstream.DownstreamInfoDriver' and
self.config_options.use_upstream_spec):
# This is a special situation. We are copying the upstream
# spec over, but then building the srpm and importing. In this
# situation, rhpkg import will complain because there are
# uncommited changes to the repo... and we will commit them
# the srpm. So let's reset the git repo right before that.
git = sh.git.bake(_cwd=distgit_dir, _tty_out=False,
_timeout=3600,
_err=self._process_koji_output,
_out=self._process_koji_output,
_env={'PATH': '/usr/bin/'})
git.checkout('--', '*')
with io.open("%s/rhpkgimport.log" % output_dir, 'a',
encoding='utf-8', errors='replace') as self.koji_fp:
rhpkg('import', '--skip-diff', src_rpm)
# Get build NVR
m = re.search(r'([0-9a-zA-Z._+-]+)\.src\.rpm', src_rpm)
if m and m.group(1):
package_nvr = m.group(1)
else:
package_nvr = 'XXX-XXX'
pkg_date = strftime("%Y-%m-%d-%H%M%S", localtime(time()))
rhpkg('commit', '-p', '-m',
'DLRN build at %s\n\nSource SHA: %s\nDist SHA: %s\n'
'NVR: %s\n' %
(pkg_date, commit.commit_hash, commit.distro_hash,
package_nvr))
# After running rhpkg commit, we have a different commit hash, so
# let's update it
git = sh.git.bake(_cwd=distgit_dir, _tty_out=False, _timeout=3600)
repoinfo = str(git.log("--pretty=format:%H %ct", "-1", ".")).\
strip().split(" ")
if (self.config_options.pkginfo_driver ==
'dlrn.drivers.downstream.DownstreamInfoDriver'):
git = sh.git.bake(_cwd=ds_source_git, _tty_out=False,
_timeout=3600)
# In some cases, a patch rebasing script could update the
# downstream source git, so we ensure we have the latest code
git.pull()
repoinfo_ds_git = str(git.log("--pretty=format:%H %ct",
"-1", ".")).strip().split(" ")
logger.info("Updated git: %s" % repoinfo)
# When using rhpkg with a pkginfo driver other than downstreamdriver,
# we want to overwrite the distro_hash instead of extended_hash.
# Otherwise, The distgit update will trigger yet another build on
# the next run, causing an endless loop
if (self.config_options.pkginfo_driver !=
'dlrn.drivers.downstream.DownstreamInfoDriver'):
commit.distro_hash = repoinfo[0]
commit.dt_distro = repoinfo[1]
else:
commit.extended_hash = '%s_%s' % (repoinfo[0], repoinfo_ds_git[0])
commit.dt_extended = repoinfo[1]
# Since we are changing the extended_hash, we need to rename the
# output directory to match the updated value
datadir = os.path.realpath(self.config_options.datadir)
new_output_dir = os.path.join(datadir, "repos",
commit.getshardedcommitdir())
logger.info("Renaming %s to %s" % (output_dir, new_output_dir))
os.rename(output_dir, new_output_dir)
output_dir = new_output_dir
with io.open("%s/rhpkgbuild.log" % output_dir, 'a',
encoding='utf-8', errors='replace') as self.koji_fp:
try:
rhpkg('build', '--skip-nvr-check', scratch=scratch)
except Exception as e:
build_exception = e
return build_exception, "%s/rhpkgbuild.log" % output_dir
def _build_with_exe(self, package_name, output_dir, src_rpm, scratch,
commit):
"""Build using koji/brew executables (cbs being an aliases)
:param package_name: package name to build
:param output_dir: output directory
:param src_rpm: source RPM to build
:param scratch: define if build is scratch or not
"""
krb_principal = self.config_options.koji_krb_principal
keytab_file = self.config_options.koji_krb_keytab
scratch = self.config_options.koji_scratch_build
target = self.config_options.koji_build_target
# Build package using koji/brew
run_cmd = [self.exe_name]
if krb_principal:
run_cmd.extend(['--principal', krb_principal,
'--keytab', keytab_file])
run_cmd.extend(['build', '--wait',
target, src_rpm])
build_exception = None
with io.open("%s/kojibuild.log" % output_dir, 'a',
encoding='utf-8', errors='replace') as self.koji_fp:
try:
sh.env(run_cmd, _err=self._process_koji_output,
_out=self._process_koji_output,
_cwd=output_dir, scratch=scratch,
_env={'PATH': '/usr/bin/'})
except Exception as e:
build_exception = e
return build_exception, "%s/kojibuild.log" % output_dir
def build_package(self, **kwargs):
"""Valid parameters:
:param output_directory: directory where the SRPM is located,
and the built packages will be.
:param package_name: name of a package to build
"""
output_dir = kwargs.get('output_directory')
package_name = kwargs.get('package_name')
commit = kwargs.get('commit')
self.verbose_build = kwargs.get('verbose')
scratch = self.config_options.koji_scratch_build
build_exception = None
# Find src.rpm
for rpm in os.listdir(output_dir):
if rpm.endswith(".src.rpm"):
src_rpm = os.path.realpath('%s/%s' % (output_dir, rpm))
try:
if self.config_options.koji_use_rhpkg:
build_method = self._build_with_rhpkg
else:
build_method = self._build_with_exe
build_exception, logfile = build_method(
package_name, output_dir, src_rpm, scratch, commit)
if self.config_options.koji_use_rhpkg:
# In this case, we need to re-calculate the output directory
datadir = os.path.realpath(self.config_options.datadir)
output_dir = os.path.join(datadir, "repos",
commit.getshardedcommitdir())
# Find task id to download logs
with open(logfile, 'r') as fp:
log_content = fp.readlines()
task_id = None
for line in log_content:
m = re.search(r'^Created task: (\d+)$', line)
if m:
logger.info("Created task id %s" % m.group(1))
task_id = m.group(1)
break
if not task_id:
raise Exception('Failed to find task id for the koji build')
# Also find package name if we need to add tags
if len(self.config_options.koji_add_tags) > 0:
# Get build name
m = re.search(r'([0-9a-zA-Z._+-]+)\.src\.rpm', src_rpm)
package_nvr = None
if m:
logger.info("Adding tags for %s" % m.group(1))
package_nvr = m.group(1)
if not package_nvr:
raise Exception('Failed to find package nvr when tagging')
for tag in self.config_options.koji_add_tags:
run_cmd = []
run_cmd.extend(
[self.exe_name, 'tag-build', tag, package_nvr])
with io.open("%s/additional_tags.log" % output_dir, 'a',
encoding='utf-8',
errors='replace') as self.koji_fp:
try:
sh.env(run_cmd, _err=self._process_koji_output,
_out=self._process_koji_output,
_cwd=output_dir, _env={'PATH': '/usr/bin/'})
except Exception as e:
raise e
# Download build artifacts and logs
run_cmd = []
run_cmd.extend(
[self.exe_name,
'download-task', '--logs',
task_id])
with io.open("%s/build_download.log" % output_dir, 'a',
encoding='utf-8', errors='replace') as self.koji_fp:
try:
sh.env(run_cmd, _err=self._process_koji_output,
_out=self._process_koji_output,
_cwd=output_dir, _env={'PATH': '/usr/bin/'})
except Exception as e:
raise e
# All went fine, create the $OUTPUT_DIRECTORY/installed file
open('%s/installed' % output_dir, 'a').close()
finally:
# Finally run restorecon
try:
sh.restorecon('-Rv', output_dir)
except Exception as e:
logger.info('restorecon did not run correctly, %s' % e)
# We only want to raise the build exception at the very end, after
# downloading all relevant artifacts
if build_exception:
raise build_exception
# coding: utf-8
import csv # I want to import the csv library.
from pathlib import Path # I want the Path class from the pathlib library.
"""Part 1: Automate the Calculations.
Automate the calculations for the loan portfolio summaries.
First, let's start with some calculations on a list of prices for 5 loans.
1. Use the `len` function to calculate the total number of loans in the list.
2. Use the `sum` function to calculate the total of all loans in the list.
3. Using the sum of all loans and the total number of loans, calculate the average loan price.
4. Print all calculations with descriptive messages.
"""
loan_costs = [500, 600, 200, 1000, 450]
# How many loans are in the list?
# @TODO: Use the `len` function to calculate the total number of loans in the list.
# Print the number of loans from the list
def loan_count(loan_info):
number_of_loans = len(loan_info)
return number_of_loans
def loan_count_print(loan_info_1):
number_of_loans_1 = loan_count(loan_info_1)
print(f"There is a total of {number_of_loans_1} loans.")
loan_count_print(loan_costs)
# What is the total of all loans?
# @TODO: Use the `sum` function to calculate the total of all loans in the list.
# Print the total value of the loans
def loan_total(loan_data):
total_of_loans = sum(loan_data)
return total_of_loans
def loan_total_print(loan_data_1):
total_of_loans_1 = loan_total(loan_data_1)
print(f"The loans sum to a total of ${total_of_loans_1: .2f}.")
loan_total_print(loan_costs)
# What is the average loan amount from the list?
# @TODO: Using the sum of all loans and the total number of loans, calculate the average loan price.
# Print the average loan amount
def loan_average(loan_material):
average_of_loans = loan_total(loan_material)/loan_count(loan_material)
return average_of_loans
def loan_average_print(loan_material_1):
average_of_loans_1 = loan_average(loan_material_1)
print(f"The average loan price is ${average_of_loans_1: .2f}.")
loan_average_print(loan_costs)
"""Part 2: Analyze Loan Data.
Analyze the loan to determine the investment evaluation.
Using more detailed data on one of these loans, follow these steps to calculate a Present Value, or a "fair price" for what this loan would be worth.
1. Use get() on the dictionary of additional information to extract the **Future Value** and **Remaining Months** on the loan.
a. Save these values as variables called `future_value` and `remaining_months`.
b. Print each variable.
@NOTE:
**Future Value**: The amount of money the borrower has to pay back upon maturity of the loan (a.k.a. "Face Value")
**Remaining Months**: The remaining maturity (in months) before the loan needs to be fully repaid.
2. Use the formula for Present Value to calculate a "fair value" of the loan. Use a minimum required return of 20% as the discount rate.
3. Write a conditional statement (an if-else statement) to decide if the present value represents the loan's fair value.
a. If the present value of the loan is greater than or equal to the cost, then print a message that says the loan is worth at least the cost to buy it.
b. Else, the present value of the loan is less than the loan cost, then print a message that says that the loan is too expensive and not worth the price.
@NOTE:
If Present Value represents the loan's fair value (given the required minimum return of 20%), does it make sense to buy the loan at its current cost?
"""
# Given the following loan data, you will need to calculate the present value for the loan
loan = {
"loan_price": 500,
"remaining_months": 9,
"repayment_interval": "bullet",
"future_value": 1000,
}
# @TODO: Use get() on the dictionary of additional information to extract the Future Value and Remaining Months on the loan.
# Print each variable.
def future_value(loan_1):
fv = loan_1.get("future_value")
return fv
def future_value_print(loan_2):
fv_1 = future_value(loan_2)
print(f"The future value of the loan is ${fv_1: .2f}.")
future_value_print(loan)
def remaining_months(loan_3):
rm = loan_3.get("remaining_months")
return rm
def remaining_months_print(loan_4):
rm_1 = remaining_months(loan_4)
print(f"The months remaining on the loan is {rm_1} months.")
remaining_months_print(loan)
# @TODO: Use the formula for Present Value to calculate a "fair value" of the loan.
# Use a minimum required return of 20% as the discount rate.
# You'll want to use the **monthly** version of the present value formula.
# HINT: Present Value = Future Value / (1 + Discount_Rate/12) ** remaining_months
discount_rate = 0.20
def present_value(loan_5):
pv = future_value(loan_5) / ((1 + discount_rate/12) ** remaining_months(loan_5))
return pv
def present_value_print(loan_6):
pv_1 = present_value(loan_6)
print(f"The present value of the loan is ${pv_1: .2f} given a future value of ${future_value(loan_6): .2f}, a discount rate of {discount_rate * 100: .2f}%, and {remaining_months(loan_6)} months remaining.")
present_value_print(loan)
# If Present Value represents what the loan is really worth, does it make sense to buy the loan at its cost?
# @TODO: Write a conditional statement (an if-else statement) to decide if the present value represents the loan's fair value.
# If the present value of the loan is greater than or equal to the cost, then print a message that says the loan is worth at least the cost to buy it.
# Else, the present value of the loan is less than the loan cost, then print a message that says that the loan is too expensive and not worth the price.
def loan_cost(loan_7):
lc = loan_7.get("loan_price")
return lc
def loan_cost_print(loan_8):
lc_1 = loan_cost(loan_8)
print(f"The cost of the loan is ${lc_1: .2f}.")
loan_cost_print(loan)
def buy_nobuy_loan(loan_9):
if present_value(loan_9) >= loan_cost(loan_9):
print(f"The loan is worth at least the cost to buy it.")
#loan_cost_print(loan_9)
#present_value_print(loan_9)
else:
print(f"The loan is too expensive and not worth the price.")
#loan_cost_print(loan_9)
#present_value_print(loan_9)
buy_nobuy_loan(loan)
"""Part 3: Perform Financial Calculations.
Perform financial calculations using functions.
1. Define a new function that will be used to calculate present value.
a. This function should include parameters for `future_value`, `remaining_months`, and the `annual_discount_rate`
b. The function should return the `present_value` for the loan.
2. Use the function to calculate the present value of the new loan given below.
a. Use an `annual_discount_rate` of 0.2 for this new loan calculation.
"""
# Given the following loan data, you will need to calculate the present value for the loan
new_loan = {
"loan_price": 800,
"remaining_months": 12,
"repayment_interval": "bullet",
"future_value": 1000,
}
# I already created the present_value and present_value_print functions.
# I am creating the next function to satisfy the above specifications for the assignment.
# @TODO: Define a new function that will be used to calculate present value.
# This function should include parameters for `future_value`, `remaining_months`, and the `annual_discount_rate`
# The function should return the `present_value` for the loan.
def calculate_present_value(fv_2, rm_2, discount_rate_1):
pv_2 = fv_2 / ((1 + discount_rate_1/12) ** rm_2)
return pv_2 # I already created the present_value function. I am creating this function to satisfy the above specifications for the assignment.
# present_value(new_loan) # accomplishes the same
# print(f"${present_value(new_loan): .2f}") # prints the returned value with a dollar sign and two decimal places
# @TODO: Use the function to calculate the present value of the new loan given below.
# Use an `annual_discount_rate` of 0.2 for this new loan calculation.
annual_discount_rate = 0.20
present_value_1 = calculate_present_value(new_loan["future_value"], new_loan["remaining_months"], annual_discount_rate)
print(f"The present value of the loan is: ${present_value_1: .2f}") # I already created the present_value_print function. I am creating this function to satisfy the above specifications for the assignment.
# present_value_print(new_loan) # performs a similar print statement with additional details
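# Sanity check (illustrative addition): for the new loan, 1000 / (1 + 0.2 / 12) ** 12
# works out to roughly $820.08, so present_value_1 should print close to that amount.
assert abs(present_value_1 - 820.08) < 0.01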
"""Part 4: Conditionally filter lists of loans.
In this section, you will use a loop to iterate through a series of loans and select only the inexpensive loans.
1. Create a new, empty list called `inexpensive_loans`.
2. Use a for loop to select each loan from a list of loans.
a. Inside the for loop, write an if-statement to determine if the loan_price is less than 500
b. If the loan_price is less than 500 then append that loan to the `inexpensive_loans` list.
3. Print the list of inexpensive_loans.
"""
loans = [
{
"loan_price": 700,
"remaining_months": 9,
"repayment_interval": "monthly",
"future_value": 1000,
},
{
"loan_price": 500,
"remaining_months": 13,
"repayment_interval": "bullet",
"future_value": 1000,
},
{
"loan_price": 200,
"remaining_months": 16,
"repayment_interval": "bullet",
"future_value": 1000,
},
{
"loan_price": 900,
"remaining_months": 16,
"repayment_interval": "bullet",
"future_value": 1000,
},
]
# @TODO: Create an empty list called `inexpensive_loans`
inexpensive_loans = []
# @TODO: Loop through all the loans and append any that cost $500 or less to the `inexpensive_loans` list
for loan in loans:
loan_price = loan.get("loan_price")
if loan_price <= 500:
inexpensive_loans.append(loan)
# @TODO: Print the `inexpensive_loans` list
print(inexpensive_loans)
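# Equivalent filter (illustrative alternative): the same selection expressed as a
# list comprehension; `inexpensive_loans_alt` is a new name used only for this check.
inexpensive_loans_alt = [item for item in loans if item.get("loan_price") <= 500]
assert inexpensive_loans_alt == inexpensive_loans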
"""Part 5: Save the results.
Output this list of inexpensive loans to a csv file
1. Use `with open` to open a new CSV file.
a. Create a `csvwriter` using the `csv` library.
b. Use the new csvwriter to write the header variable as the first row.
c. Use a for loop to iterate through each loan in `inexpensive_loans`.
i. Use the csvwriter to write the `loan.values()` to a row in the CSV file.
Hint: Refer to the official documentation for the csv library.
https://docs.python.org/3/library/csv.html#writer-objects
"""
# Set the output header
header = ["loan_price", "remaining_months", "repayment_interval", "future_value"]
# Set the output file path (Path and csv are needed below; they are imported here in
# case they were not already imported at the top of the script -- re-importing is harmless)
from pathlib import Path
import csv
output_path = Path("inexpensive_loans.csv")
# @TODO: Use the csv library and `csv.writer` to write the header row
# and each row of `loan.values()` from the `inexpensive_loans` list.
with open(output_path, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(header) # I am writing the header first.
for loan in inexpensive_loans:
csvwriter.writerow(loan.values()) # I am writing the inexpensive loans rows.
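# Alternative sketch (illustrative): csv.DictWriter writes the same rows keyed by the
# header names instead of relying on dict value order; the output filename below is
# hypothetical and only used for this example.
with open(Path("inexpensive_loans_dictwriter.csv"), 'w', newline='') as csvfile:
    dictwriter = csv.DictWriter(csvfile, fieldnames=header)
    dictwriter.writeheader()
    dictwriter.writerows(inexpensive_loans)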
import numpy as np
import pytest
from pizza_cutter.slice_utils.locate import build_slice_locations
from ..masks import (
in_unique_coadd_tile_region,
get_slice_bounds,
_mask_one_slice_for_gaia_stars,
_mask_one_slice_for_missing_data,
MASK_INTILE,
MASK_GAIA_STAR,
MASK_NOSLICE,
_wrap_ra,
make_mask,
)
def test_wrap_ra():
dra = np.array([-350, -170, 0, 350, 350 + 360*10, -350 - 360*10])
ans = np.array([10, 360-170, 0, 350, 350, 10])
assert np.allclose(_wrap_ra(dra), ans)
def test_wrap_dra_array_nan_inf():
dra = np.array([np.nan, np.inf, -350, -170, 0, 350, 350 + 360*10, -350 - 360*10])
ans = np.array([np.nan, np.inf, 10, 360-170, 0, 350, 350, 10])
msk = np.isfinite(dra)
assert np.allclose(_wrap_ra(dra[msk]), ans[msk])
assert np.isnan(ans[0])
assert np.isinf(ans[1])
def test_in_unique_coadd_tile_region():
ra = np.array([
210,
190,
210,
200,
220,
210,
210,
np.nan,
np.inf,
210,
210,
210 + 360*4,
210 - 360*4,
])
dec = np.array([
0,
0,
20,
0,
0,
-10,
10,
0,
0,
np.nan,
np.inf,
0,
0,
])
truth = np.array([
True,
False,
False,
False,
True,
False,
True,
False,
False,
False,
False,
True,
True,
], dtype=bool)
res = in_unique_coadd_tile_region(
ra=ra,
dec=dec,
crossra0='N',
udecmin=-10,
udecmax=10,
uramin=200,
uramax=220,
)
assert np.array_equal(res, truth)
def test_in_unique_coadd_tile_region_crossra0():
ra = np.array([
0,
30,
0,
340,
20,
0,
0,
np.nan,
np.inf,
0,
0,
360*4,
-360*4,
])
dec = np.array([
0,
0,
20,
0,
0,
-10,
10,
0,
0,
np.nan,
np.inf,
0,
0,
])
truth = np.array([
True,
False,
False,
False,
True,
False,
True,
False,
False,
False,
False,
True,
True,
], dtype=bool)
res = in_unique_coadd_tile_region(
ra=ra,
dec=dec,
crossra0='Y',
udecmin=-10,
udecmax=10,
uramin=360-20,
uramax=20,
)
assert np.array_equal(res, truth)
@pytest.mark.parametrize('col,row,truth', [
(200, 200, {"min_row": 50, "max_row": 150, "min_col": 50, "max_col": 150}),
(0, 200, {"min_row": 50, "max_row": 150, "min_col": 0, "max_col": 150}),
(200, 0, {"min_row": 0, "max_row": 150, "min_col": 50, "max_col": 150}),
(0, 0, {"min_row": 0, "max_row": 150, "min_col": 0, "max_col": 150}),
(800, 200, {"min_row": 50, "max_row": 150, "min_col": 50, "max_col": 200}),
(200, 800, {"min_row": 50, "max_row": 200, "min_col": 50, "max_col": 150}),
(800, 800, {"min_row": 50, "max_row": 200, "min_col": 50, "max_col": 200}),
(0, 800, {"min_row": 50, "max_row": 200, "min_col": 0, "max_col": 150}),
(800, 0, {"min_row": 0, "max_row": 150, "min_col": 50, "max_col": 200}),
])
def test_get_slice_bounds(col, row, truth):
res = get_slice_bounds(
orig_start_col=col,
orig_start_row=row,
central_size=100,
buffer_size=50,
coadd_dims=(1000, 1000),
)
assert res == truth
def test_mask_one_gaia_stars(show=False):
buffer_size = 5
central_size = 10
coadd_dims = (100, 100)
gaia_stars = np.array(
[(20, 10, 5)],
dtype=[('x', 'f4'), ('y', 'f4'), ('radius_pixels', 'f4')],
)
msk_img = np.zeros(coadd_dims, dtype=np.int32)
_mask_one_slice_for_gaia_stars(
buffer_size=buffer_size,
central_size=central_size,
gaia_stars=gaia_stars,
symmetrize=False,
coadd_dims=coadd_dims,
msk_img=msk_img,
scol=15,
srow=0,
)
assert np.any((msk_img & MASK_GAIA_STAR) != 0)
assert np.all((msk_img[10, 20:24] & MASK_GAIA_STAR) != 0)
assert np.all((msk_img[10, 16:20] & MASK_GAIA_STAR) == 0)
assert np.all((msk_img[14, 23:26] & MASK_GAIA_STAR) == 0)
if show:
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(msk_img)
import pdb
pdb.set_trace()
msk_img = np.zeros(coadd_dims, dtype=np.int32)
_mask_one_slice_for_gaia_stars(
buffer_size=buffer_size,
central_size=central_size,
gaia_stars=gaia_stars,
symmetrize=True,
coadd_dims=coadd_dims,
msk_img=msk_img,
scol=15,
srow=0,
)
if show:
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(msk_img)
import pdb
pdb.set_trace()
assert np.any((msk_img & MASK_GAIA_STAR) != 0)
assert np.all((msk_img[10, 20:24] & MASK_GAIA_STAR) != 0)
assert np.all((msk_img[10, 16:20] & MASK_GAIA_STAR) == 0)
assert np.all((msk_img[14, 20:26] & MASK_GAIA_STAR) != 0)
def test_mask_one_slice():
buffer_size = 5
central_size = 10
coadd_dims = (100, 100)
flags = 2**9
msk_img = np.zeros(coadd_dims, dtype=np.int32)
_mask_one_slice_for_missing_data(
buffer_size=buffer_size,
central_size=central_size,
coadd_dims=coadd_dims,
msk_img=msk_img,
scol=15,
srow=0,
flags=flags,
)
for f in [MASK_NOSLICE, flags]:
assert np.all((msk_img[0:15, 20:30] & f) != 0)
assert np.all((msk_img[15:, 30:] & f) == 0)
_mask_one_slice_for_missing_data(
buffer_size=buffer_size,
central_size=central_size,
coadd_dims=coadd_dims,
msk_img=msk_img,
scol=15,
srow=15,
flags=flags,
)
for f in [MASK_NOSLICE, flags]:
assert np.all((msk_img[20:30, 20:30] & f) != 0)
assert np.all((msk_img[30:, 30:] & f) == 0)
@pytest.mark.parametrize("msk_exp_rad", [0, 99])
def test_make_mask(coadd_image_data, msk_exp_rad):
preconfig = {
"gaia_star_masks": {"symmetrize": False, "mask_expand_rad": msk_exp_rad},
}
missing_slice_inds = [100, 768]
missing_slice_flags = [2**9, 2**11]
central_size = 100
buffer_size = 50
wcs = coadd_image_data["eu_wcs"]
position_offset = coadd_image_data["position_offset"]
coadd_dims = (10000, 10000)
info = coadd_image_data
gaia_stars = np.array(
[(275, 275, 100-msk_exp_rad)],
dtype=[("x", "f8"), ("y", "f8"), ("radius_pixels", "f8")],
)
_, _, srow, scol = build_slice_locations(
central_size=central_size,
buffer_size=buffer_size,
image_width=coadd_dims[0],
)
obj_data = np.zeros(
srow.shape[0],
dtype=[('orig_start_row', 'i4', (2,)), ('orig_start_col', 'i4', (2,))]
)
obj_data["orig_start_row"][:, 0] = srow
obj_data["orig_start_col"][:, 0] = scol
msk_img, hs_msk = make_mask(
preconfig=preconfig,
missing_slice_inds=missing_slice_inds,
missing_slice_flags=missing_slice_flags,
obj_data=obj_data,
central_size=central_size,
buffer_size=buffer_size,
wcs=wcs,
position_offset=position_offset,
coadd_dims=coadd_dims,
info=info,
gaia_stars=gaia_stars,
healpix_nside=131072,
)
if False:
import matplotlib.pyplot as plt
fig, axs = plt.subplots()
axs.imshow(msk_img[:500, :500])
import pdb
pdb.set_trace()
hs_vals, hs_ra, hs_dec = hs_msk.valid_pixels_pos(return_pixels=True)
hs_vals = hs_msk[hs_vals]
# basic tests
# we have some bits set
assert np.any((msk_img & MASK_INTILE) != 0)
assert np.any((msk_img & MASK_NOSLICE) != 0)
assert np.any((msk_img & MASK_GAIA_STAR) != 0)
assert np.any((hs_vals & MASK_INTILE) != 0)
assert np.any((hs_vals & MASK_NOSLICE) != 0)
assert np.any((hs_vals & MASK_GAIA_STAR) != 0)
assert np.any((msk_img & 2**9) != 0)
assert np.any((msk_img & 2**11) != 0)
# edges are all zero
assert np.all(msk_img[:, 0] == 0)
assert np.all(msk_img[:, -1] == 0)
assert np.all(msk_img[0, :] == 0)
assert np.all(msk_img[-1, :] == 0)
for x, y in [
(0, np.arange(coadd_dims[0])),
(coadd_dims[1]-1, np.arange(coadd_dims[0])),
(np.arange(coadd_dims[1]), 0),
(np.arange(coadd_dims[1]), coadd_dims[0]-1),
]:
ra, dec = wcs.image2sky(x+position_offset, y+position_offset)
_vals = hs_msk.get_values_pos(ra, dec)
assert np.all(_vals == 0)
# most of the coadd is fine
assert np.mean((msk_img & MASK_INTILE) != 0) > 0.80
assert np.mean((hs_vals & MASK_INTILE) != 0) > 0.80
# slice ind at 1, 1 is fully masked except for edges
assert np.all((msk_img[220:250, 220:250] & MASK_NOSLICE) != 0)
assert np.all((msk_img[220:250, 220:250] & 2**9) != 0)
assert np.all((msk_img[220:250, 220:250] & 2**11) == 0)
x, y = np.meshgrid(np.arange(220, 250), np.arange(220, 250))
x = x.ravel()
y = y.ravel()
ra, dec = wcs.image2sky(x+position_offset, y+position_offset)
_vals = hs_msk.get_values_pos(ra, dec)
assert np.all((_vals & MASK_NOSLICE) != 0)
assert np.all((_vals & 2**9) != 0)
assert np.all((_vals & 2**11) == 0)
# slice ind at 6, 75 is fully masked except for edges
assert np.all((msk_img[750:850, 7550:7650] & MASK_NOSLICE) != 0)
assert np.all((msk_img[750:850, 7550:7650] & 2**9) == 0)
assert np.all((msk_img[750:850, 7550:7650] & 2**11) != 0)
x, y = np.meshgrid(np.arange(7550, 7650), np.arange(750, 850))
x = x.ravel()
y = y.ravel()
ra, dec = wcs.image2sky(x+position_offset, y+position_offset)
_vals = hs_msk.get_values_pos(ra, dec)
assert np.all((_vals & MASK_NOSLICE) != 0)
assert np.all((_vals & 2**9) == 0)
assert np.all((_vals & 2**11) != 0)
# there is a star so let's check that
assert np.all((msk_img[260:290, 260:290] & MASK_GAIA_STAR) != 0)
x, y = np.meshgrid(np.arange(260, 290), np.arange(260, 290))
x = x.ravel()
y = y.ravel()
ra, dec = wcs.image2sky(x+position_offset, y+position_offset)
_vals = hs_msk.get_values_pos(ra, dec)
assert np.all((_vals & MASK_GAIA_STAR) != 0)
# make sure the buffer is ok
assert np.all(msk_img[0:20, 0:20] == 0)
x, y = np.meshgrid(np.arange(20), np.arange(20))
x = x.ravel()
y = y.ravel()
ra, dec = wcs.image2sky(x+position_offset, y+position_offset)
_vals = hs_msk.get_values_pos(ra, dec)
assert np.all(_vals == 0)
"""Module constituting the commandscript
pylint rated 10.0/10
"""
MENU_DICTIONARY = {}
HELP_DICTIONARY = {}
COMMANDSCRIPT = []
HEADERS = ['COMMANDS',
'DEFAULTKEYS',
'NAVIGATION',
'DISPLAY',
'SEARCHING',
'ORGANIZING NOTES',
'DEFAULTS',
'INPUT/OUTPUT',
'ADVANCED',
'HYPERLINKS',
'DEFAULT',
'ADVANCED DISPLAY ONE',
'ADVANCED DISPLAY TWO',
'KNOWLEDGE BASE & SYSTEM']
PERIOD = '.'
BLANK = ' '
EMPTYCHAR = ''
def make_command_dict (text):
""" Creates the HELP DICTIOARY from the text of COMMMANDS
Splits text into lines.
Skips over line if equal to '||'
Splits line into phrases divided by '|'
First phrase = COMMAND
Second phrase = PARAMETER
Third phrase = DESCRIPTION/INSTRUCTION
"""
started = True
command_text = ''
parameter_text = ''
instruction_text = ''
for line in text.split('\n'):
if line != '||':
phrases = line.split('|')+['','','']
command,parameters,instruction = phrases[0].strip(),phrases[1].strip(),phrases[2].strip()
if command:
if not started:
for com_temp in command_text.split(','):
HELP_DICTIONARY[com_temp.strip()] = (parameter_text.replace('\n\n','\n'),
instruction_text.replace('\n\n','\n'))
command_text = command
parameter_text = parameters + ' '
instruction_text = instruction + ' '
started = False
else:
parameter_text += parameters + ' '
instruction_text += instruction + ' '
for com_temp in command_text.split(','):
if com_temp.strip() not in HELP_DICTIONARY:
HELP_DICTIONARY[com_temp.strip()] = (parameter_text,instruction_text)
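# Illustrative round trip (hypothetical one-line excerpt, "demo" is not a real command);
# the temporary entry is deleted so the real COMMANDSCRIPT parse below starts clean.
make_command_dict("demo|/$ verbose|show the demo screen")
assert HELP_DICTIONARY["demo"] == ("/$ verbose ", "show the demo screen ")
del HELP_DICTIONARY["demo"]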
COMMANDS = """
||
COMMAND|PARAMETERS|FUNCTION
||
|BASIC NOTE ENTRY|
||
// divides multiple commands | |
{{int}}| |for feeding back search results
[?] | |for feeding back marked indexes
[/] | |for last entered or displayed note
||
| #yr-mo-dy |To enter dates in ranges, use POUND
ent, enter, + |key(s);text.. {$ to suppress show}.. |enter a new note
|/= suppress default keys
| |Indexes, entered as keys, serve as hyperlinks
|SEQUENCE@INTEGER |For entering a sequence key
|SEQUENCE@#1770-03-20
|SEQUENCE@_INDEX
ent, enter, + |index |enter a note at index
enternext, ++| |Enter a 'next' note 1.1>1.2
enterchild, +++| |Enter a 'child' note 1>1.1
enterback, -| |Enter a note at.. previous level 1.1.1>1.2
conent | |Enter a series of notes
conchild | |Enter a series of children
connext | |Enter a series of nextnotes
;; |To quit connext and conchild modes
delete,del,d|index or indexrange |delete note(s)
/|To quit entering mode and continue |
|cycling through notes |
|MODIFYING NOTES|
editnote|indexrange.. /$ annotate |edit note(s) keys and text
editnotekeys|indexrange |edit note(s) keys
editnotetext|indexrange |edit note(s) text
revise,rev|indexrange;index to merge;break mark|revise a note..
| /$ in back /&.. front and back /*BREAK /?Newu
undo|undo |undo last action
redo|redo |redo last action
tutorial| /$ to reload & Restart tutorial
starttutorial |To resume tutorial
"""
DEFAULTKEYS = """
|MARKING NOTES|
marked | |show all marked keys
[ | |mark current key
] | |unmark current key
addmarks |indexranges |mark notes in range
deletemarks |indexranges|unmark notes in range
clearmarks | |unmark all notes
|DEFAULT KEYS|
addkeys,ak|key,key...;keymacroname /$ to save macro |add new default keys
addkey|key |add one key to default keys
newkeys|keymacro.. /$ keep old|change to new keys from keymacro
changekeys|keys;keymacroname /$ to save macro |changes to new default keys
deletekey,dk| |delete last default key
deletedefaultkeys| |delete default keys
clearkeys| |clear all default keys
grabkeys|indexrange.. /$ no all caps.. /& no first caps |
suspendkey|keys |Suspend queried sequence key
unsuspendkey|keys |Unsuspend queried sequence key
clearsuspended| |Clear all sequence keys
"""
NAVIGATION = """
skip|index |skip to index
hop|int |jump ahead
first| |go to the first index
last| |go to the last index
[.*int]| |hops ahead by int
[,*int]| |hops back by int
>| |direction forward
<| |direction back
'| |move to the next/previousnotes
"| |move to the child/parent notes
=| |return to ordinary mode
||
||"""
DISPLAY = """
||
123[return] |displays the next note |displays note with INDEX 123
* ||Shows notes related to the current note
all, $ |levels to show.. /$ suppress quick mode |show all notes
|/&no children /* no brackets |
| /? suppress short show |
| /= showdates %indexrange |
show, s |indexrange;levels to show |
| /&no children |show some notes
| /* no brackets |
| /? suppress short show |
| /= show dates |
inc | |incremental show
indexes, ind, i | |show indexes
keys, key, k |/$ histogram.. |show keys
|/& all caps.. | =>COMMAND:??
|/* upperkeys.. | to feed back results
|/? lowerkeys |
tags, tag, t | |show tags
text |index range;# of words1;# of words2;gets important words in the text
|/$ for intersection of words
|/& by decreasing frequency in notebase
|/$ by increasing frequency in note itself
keysfortags | |show keys for tags
defaultkeys, dfk | |show default keys
showdel | |show soft-deleted notes
keystags | |show tags and their keys
||"""
SEARCHING = """| |
search, ? |/$search phrase /&indexrange /*daterange |keysearch..
|use a straight slash for OR.. |.. =>COMMAND:?..
|use an ampersand for AND.. |to feed results into
|parentheses are allowed.. |another command
|ALL CAPS for case insensitive.. |
|<keyword>.. |
|#tag.. |
|##metatag.. /$ show.. /&dates.. |
|/? show indexes |
|* wildcard |
| this$is$a$phrase
| $thi$wild$ase$ for a Wildcard phrase
|<SEQUENCE@FROM> | to search for sequences
|<SEQUENCE@TO> | ADD # and _ as appropriate
|<SEQUENCE@FROM/SEQUENCE@TO>
|{NOTEBOOK1,NOTEBOOK2} |To search over another notebook
sort |refed indexes;searchphrase |sorts refer search results
fetch |fetches indexes
reverse |refer indexes |reverse search result
globalsearch |searchphrase;notebooks |To search over other notebooks
|/$ don't query
terms,??? |return foundterms
textsearch, ?? |search phrase %indexrange |textsearch
constdates, |indexrange;f(irst) n(ewest) a(all).. |make date chart
|/$ ym.. /& ymd.. /* show indexes.. |
|/? to query which dates |
constitutedates | |
activedet, | |show active determinants
actdet | |
showdatedict |/$year.. /&month.. /*day.. /&hour.. |display date chart
showdatedictpurge |/$ ym /& ymd /* add hour |
|/? ask for purge parameter |
|determinant;purge parameter |
|SPEC.TERM1-TERM2-TERM3... |
|SPECS = a ALLCAPS |
| u UPPER |
| l LOWER |
cleardatedict |/$year.. /&month.. /*day.. /&hour.. |clear date chart
changedet |determinant |change determinant
showdet | |show determinants
setpurgekeys |/$ allcaps.. /& upper.. /* lower.. |set keys to purge when showing date
|[~][aulns]VERTLINE term1,term2,etc. |
| | Each term can be a searchable term (e.g. *ism)
| | OR #ALLCAPS#,#CAPS#,#LOWER#,#NUMSEQ#,
| | #ALLSEQ#, #NONPS#, #PROJ#,
| | #INDSEQ#, #STRSEQ#, #DATSEQ#
setsuppresskeys | | SETS keywords that are suppressed
clearpurgekeys, clearsuppresskeys | |clear purge keys
addsuppresskeys, deletesuppresskeys |
showpurgekeys, showsuppresskeys | |show purge keys
cleartempsuppresskeys | |
searchlog | |show the search log
clearlog, | |clear the search log
clearsearchlog | |
||"""
ORGANIZING = """||
mergemany, mm |indexrange;indexrange;C-E.. |Combine many formatted notes within a single note
| /$ Conflate..
| /& Embed
conflate |indexrange;e - b - m;destinationindex;BREAKMARK |conflate many notes into a single note
| e(mptychar) |
| b(reak) |
| n(ew note)
| /$ emptychar /& break /* new /? BREAKMARK |
move |indexrange;indexrange;S or M or C;Yes/no.. |move to notes from.. sourcerange to destinationrange
|S=Subordinate /$..|Preserves hierarchical.. structure when subordinating
|M=Make Compact /&..|Collapses hierarchical.. structure
|Children /*|Each note is a child of the last
copy |See Above |copy from source.. to destination
copyto |indexrange|copy notes into buffer
copyfrom |integer.. /$ to copy all |copy notes from.. buffer into notebook
ndel|undel |undelete soft-deleted notes
permdel | {$ to suppress query} |permanently delete soft-deleted notes
clear | {$ to suppress query} |soft-delete all notes
addfield |fieldname;indexrange |define a new field
|/$ for a prerange |
deletefield |fieldname;range |delete a field
compress | |remove gaps between notes
|ADVANCED FORMATTING |
split |index;columns;width;breaking mark |splits a note into columns
sidenote |indexrange;total width.. /$ add counters |side-by-side notes
||
||"""
SETTING = """||
|FLIPBOOK|
flipout, f | |automatically channel search results into flipbook
showflip, | |show flip book
showflipbook | |
flipbook|indexrange or fields or index |define the flipbook ---
|DISPLAY|
shortshow | |display notes in short format
resize, size, sz |integer |set the default size for notes
showtags | |show tags attached to keys when displaying notes
setlongmax |integer |set the maximum number of notes
| |that can be displayed long-form
curtail | |eliminate EOL at beginning and end of note
header |integer |blank lines at top of note
footer |integer |blank lines at foot of note
leftmargin | |
orderkeys | |arrange keys by increasing frequency
rectify | |equalize the width of columns
cpara |keys to purge;(a)allcap (c)aps (l)ower |cluster settings (exclude ALL CAPS;capitalized word;lower case)
|/$ all caps /& caps /* lower case |
|LIMITLIST|
limitlist |indexrange or F or R |define an automatic limiting range;
|F for flipbook; R to reset|
showlimitlist| |show limitlist
resetlimitlist, resetl| |reset limit list
|NOTE ENTRY|
quickenter | |enable quick-entry mode
autobackup | |suspend automatic backup of notes
changeuser |username |changes username saved in metadata
boxconfigs | |show configurations in boxed notes
spelling | |turn on or off spelling correction
enterhelp | |turn on or off note entry helpscript
formathelp | |turn on or off formatting helpscript
keysbefore||ask for keys before entering note
keyafter||ask for keys after entering note
carryoverkeys||carry over keys for child and nextnotes
nameinterpret||Expand keywords with PERIOD to multiple name forms
|| e.g. Martin.Heidegger = <NAME>,<NAME>,Heidegger
carryall||carry over keys from all parents
returnquit| |Exit note entry mode after pressing successive returns
setreturnquit| |Set number of returns for returnquit
usealphabets| |Apply transcription for foreign languages
overrideextract| |Overrides the automatic
| |extraction of subordinate notes
||"""
INPUT = """||
loadtext, lt |filename.. /= suppress default keys |load and parse a NOTESCRIPT file / embedded note
formout |indexrange;filename;include indexes;include metadata |save NOTESCRIPT
|/$ include indexes /& include metadata
loadbyparagraph |/$ don't apply keywords.. /& apply definitions |load text, divide by paragraph, and apply keywords
|/* suppress queries|
splitload |string |Load text, divide by splitterm, and apply keywords
|$ don't apply keywords.. /& apply definitions..|
|/* suppress queries|
||"""
ADVANCED = """||
cluster |int.. /$ turn clusters into iterators |organize notes into clusters. Parameter indicates how many
killclusters | |destroy cluster iterators
descendents |iterate over clusters of note descendants
| |keywords, according to increasing.. frequency, will be used
;(semicolon)|switch to next cluster-iterator|
| |the notes that are cycled through
eliminateblanks| |eliminate blank keys
eliminatekeys|keys |globally removes keys
correctkeys|indexrange.. /$ keys+tags |corrects keys
refresh| |reconstitute word.. concordance used for searching
reconstitutesequences| |reconstitute sequences
reform|range|apply reformatting to a range of notes
saveconfigurations| |save configurations
loadconfigurations| |load configurations
dumpprojects | |save a backup textfile of projects
loadprojects | |load a backup textfile of projects
clearprojects | |clear existing projects
truthtable |string |generates a truth table for a logical expression
calculate | |enter the calculator mode
||
"""
HYPERLINKS = """
|HYPERLINKS|
link |indexrange |links together notes
chain |indexrange |enchains notes
loop |indexrange |loops notes
unlink |indexrange |removes hyperlinks
iteratemode | |toggles between iteratemode and hypermovemode
hyperone | |hypermovemode one --
||randomly jumps between notes with common keys
hypertwo | |hypermovemode two --
||randomly jumps between hyperlinked notes
hyperthree| |hypermovemode three --
||allows you to navigate between hyperlinked notes
startlinking| |Start automatically linking notes
startlooping| |Start automatically looping notes
endlooping,endlinking| |End looping or linking notes
showsequences| |show sequences
showsequence|sequencename |show a single sequence
|/$ correct sequence
invert | |Gives opposite of indexes
"""
DEFAULTS = """||
changecodes| |change abbreviations. Use to define special codes (TO)
changemacros| |change macros. Implemented with _ before macro
changekeymacros| |change key macros. Implemented with $ before macro
changecommandmacros| |change commandmacros
changekeydefinitions| |change definitions
| |Used to automatically assign keys with loadbyparagraph
changeknowledge| |change knowledgebase
changeequivalences| |change equivalences
yieldequivalences| |suspends or unsuspends automatic yielding
| |of equivalences during searches
equivmultiply | |Converts OR to AND for second
|iteration of equivalence substitutions
converttextphrase | |Adds DOLLAR to spaces
|of substituted multi-word text search phrase
changegeneralknowledge| |change general knowledge
||
defaultcodes| |load codes embedded with kw CODES
defaultmacros| |load macros embedded with kw MACROS
defaultkeymacros| |load keymacros embedded with kw KEYMACROS
defaultcommandmacros| |load commandmacros embedded with kw COMMANDMACROS
defaultkeydefinitions| |load keydefinitions embedded with kw KEYDEFINITIONS
defaultknowledge| |load knowledge embedded with kw KNOWLEDGE
||
recordcodes| |record codes into a note
recordmacros | |record macros into a note
recordkeymacros| |record keymacros into a note.
recordcommandmacros| |record commandmacros into a note
recordkeydefinitions| |record keydefinitions into a note.
recordknowledge | |
||
clearcodes | |reset abbreviation object
clearmacros| |reset macros
clearkeymacros| |reset keymacros
cleardefinitions | |reset definition object
clearcommandmacros | |reset commandmacro object
clearkeydefinitions | |reset keydefinition object
clearknowledge | |reset knowledgebase
||
showspelling |language.. /$ en.. /& gr.. /* fr.. /? es |show words added to spelling dictionary
spelldictionary| |call up spelling dictionary console
defaultspelling|language.. /$ en.. /& gr.. /* fr.. /? es |load added words from note
alphabets| |Call up alphabet console
| |NOT RECOMMENDED for large ranges"""
ADVANCEDDISPLAYONE = """||
multi|streamname;width;savename.. |display notes packed into columns, channeled to a stream,
|/$ smallsize /* vary.. /? pause.. /= save |
| |with option to vary
| |width according to length of text
smallsize | |set small size for multidisplay
streams| |shows active display streams
showstream|stream /? pause |display an active display stream
deletestream|streamname {$ to suppress query} |delete a display stream
updatetags||updates the tags
histogram|indexrange;truncatespec=[p][i][d][n][t][a]
/$ for keys /& for tags |generate a histogram of words
|* to create a new dictionary
showuser | |show the user
negativeresults,| |include negative results with searches
negresults,nr | |
fields | |show defined fields
showmeta |index|show metadata
depth |integer |set the number of children to display
deeper | |go one level deeper
shallower | |go one level shallower
allchildren | |show all children
childrentoo | |include children when showing notes
shortshow, ss || show notes in an abridged form
fulltop | |show all descendants of top-level notes
randomon | |turn on random mode
randomoff | |turn off random mode
indentmultiplier ||Adjust the indentation for displaying
||children
||
usesequence | |uses sequences for keeping track of indexes
itshow | |show all indexes rather than greatest and
| |smallest when resetting the iterator
|FLASHCARDS|
noflash | |don't show flashcards as flashcards
flashmode | |flip through flashcards
setsides | | set the number of sides for flashcards
setflipat | | set the side at which the side flips
flexflip| | automatically adjust to
| |number of sides of flashcards
flashforward, ff | |advanced to the next side
flashback, fb | | go back to the previous side
flashreset, fr | | reset to the first side
flashto, ft | | advance to given side
"""
ADVANCEDDISPLAYTWO = """
sortbydate | |display notes sorted by date
showimages | | enable display of embedded images
showtext | | enable display of embedded text
seqintext | | include sequences in the main text
fromtext | | extract keywords and sequences from text
convertbyline | | extract keywords and sequences by line
nodiagnostics | | disable diagnostic tracking
updateuser | | update user over a range of notes
updatesize | | update size over a range of notes
run | |
interpret | |
diagnosticnote | |
dictionaryload ||
seqformone | |define seqformone for seqintext
seqformtwo | |define seqformtwo for seqintext
mainsequences | |define mainsequence for seqintext
convertdefinition | |define parsing information for fromtext
newconvertmode | |add a new convert mode
switchconvertmode | |switch to a new convert mode
showallconvertmodes | |show all convert modes
createworkpad |padname |create a new work pad
addtopad |range,padname |add notes to pad
||add notes to pad, and display
padshow |range,padname |create pad if needed
emptypadstack |padname| clears notes from pad
renewpad | |clears an existing pad
switchpad | padname |switch the current pad
currentpad | |display the current pad
allpads | |show all the pads
sheet|range,display stream,width,save stream, Xmax*Ymax |display as sheet
| /$ query size /* vary
rsheet, resumesheet | resumes existing sheet
closesheetshelf |close the sheet shelf
tosheetshelf|frompad,topad|add a pad to the shelf for storage
selectsheet |resume a pad in the sheetshelf
indexer|/$ suspend query /& tags /* full name |Calls index generator
||
||"""
KNOWLEDGE = """
|SPECIES-GENUS KNOWLEDGE|
learn|string(species);string(genus)|teach the notebook an ontological fact
forget|string(species);string(genus)|unteach the notebook an ontological fact
allknowledge| |show what the notebook knows
|GENERAL KNOWLEDGE|
dumpknowledge,dumpgeneralknowledge |
showknowledge |
loadknowledge,loadgeneralknowledge|filename |load knowledge from textfile
cleargeneralknowledge|
reconstitutegeneralknowledge |
general,generalknowledge,gk|query|interpret a knowledge query
(*)KNOWLEDGE PHRASE(*) |ALTERNATE FORM OF ABOVE
switchgeneralknowledge|
||
|PROJECT MANAGEMENT|
newproject|projectname |Start a new project
saveproject|projectname |Save a project
loadproject,resumeproject |Resume a saved project
showprojects||Show all available projects
currentproject||Show information about current project
flipproject| |Iterate over indexes of current project
|SYSTEM|
||
menu (single space)| |calls up small menu
bigmenu (double space)| |Calls up big menu
help| |
showsettings| |Show settings
switch|notebase |switches to a new notebase without quitting
quit| {$ to suppress query} |save and quit
test| execute test script
"""
COMMANDSCRIPT.append(COMMANDS.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(DEFAULTKEYS.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(NAVIGATION.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(DISPLAY.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(SEARCHING.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(ORGANIZING.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(SETTING.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(INPUT.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(ADVANCED.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(HYPERLINKS.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(DEFAULTS.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(ADVANCEDDISPLAYONE.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(ADVANCEDDISPLAYTWO.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMANDSCRIPT.append(KNOWLEDGE.replace(PERIOD+PERIOD,EMPTYCHAR))
COMMAND_TEXT = COMMANDS+DEFAULTKEYS+NAVIGATION+DISPLAY+SEARCHING\
+ORGANIZING+SETTING+INPUT+ADVANCED+HYPERLINKS\
+DEFAULTS+ADVANCEDDISPLAYONE+ADVANCEDDISPLAYTWO+KNOWLEDGE
make_command_dict(COMMAND_TEXT.replace('\t', ''))
MENU_DICTIONARY[0] = (HEADERS[0], COMMANDS.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[1] = (HEADERS[1], DEFAULTKEYS.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[2] = (HEADERS[2], NAVIGATION.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[3] = (HEADERS[3], DISPLAY.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[4] = (HEADERS[4], SEARCHING.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[5] = (HEADERS[5], ORGANIZING.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[6] = (HEADERS[6], SETTING.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[7] = (HEADERS[7], INPUT.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[8] = (HEADERS[8], ADVANCED.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[9] = (HEADERS[9], HYPERLINKS.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[10] = (HEADERS[10], DEFAULTS.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[11] = (HEADERS[11], ADVANCEDDISPLAYONE.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[12] = (HEADERS[12], ADVANCEDDISPLAYTWO.replace(PERIOD+PERIOD,EMPTYCHAR))
MENU_DICTIONARY[13] = (HEADERS[13], KNOWLEDGE.replace(PERIOD+PERIOD,EMPTYCHAR))
##del COMMAND_DICTIONARY['COMMAND']
##
##for key in HELP_DICTIONARY:
## print(key+' ///' +str(HELP_DICTIONARY[key]))
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# method to plot confusion matrices
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar(fraction=0.05)
tick_marks = np.arange(len(names))
plt.xticks(tick_marks, names, rotation=45)
plt.yticks(tick_marks, names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# load the dataset from sklearn
iris = datasets.load_iris()
# put the iris dataset into a DataFrame
df = pd.DataFrame(data=np.c_[iris['data'],iris['target']],
columns=np.append(iris['feature_names'], ['target']))
# shuffle the data
df = df.reindex(np.random.permutation(df.index))
# print(df.head())
result = []
for x in df.columns:
if x != 'target':
result.append(x)
# Define X and y
X = df[result].values
y = df['target'].values
variety = iris.target_names
# Split the data into training and testing
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
# print(df.columns)
# plot a 3 x 2 figure comparing the feature against each other
plt.figure()
plt.subplot(321)
plt.scatter(df['sepal length (cm)'], df['sepal width (cm)'], c=y, s=50)
plt.subplot(322)
plt.scatter(df['sepal length (cm)'], df['petal length (cm)'], c=y, s=50)
plt.subplot(323)
plt.scatter(df['sepal length (cm)'], df['petal width (cm)'], c=y, s=50)
plt.subplot(324)
plt.scatter(df['sepal width (cm)'], df['petal length (cm)'], c=y, s=50)
plt.subplot(325)
plt.scatter(df['sepal width (cm)'], df['petal width (cm)'], c=y, s=50)
plt.subplot(326)
plt.scatter(df['petal length (cm)'], df['petal width (cm)'], c=y, s=50)
# build a multiclass SVM 'ovo' for one-versus-one, and
# fit the data
multi_svm = SVC(gamma='scale', decision_function_shape='ovo')
multi_svm.fit(X_train,y_train)
# print(X.shape[1])
y_pred = multi_svm.predict(X_test)
# print(y_pred)
# print(y_test)
# put the results into a DataFrame and print side-by-side
output = pd.DataFrame(data=np.c_[y_test,y_pred])
print(output)
# calculate accuracy score and print
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
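# Optional diagnostic (illustrative addition): sklearn's classification_report gives
# per-class precision, recall, and F1 alongside the overall accuracy printed above.
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred, target_names=variety))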
# find the confusion matrix, normalise and print
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
# confusion matrix as a figure
plt.figure()
plot_confusion_matrix(cm_normalized, variety, title='Normalized confusion matrix')
plt.show()
# dataStoreExceptions.py
# data store custom exceptions and error response classes
class dataStoreException(Exception): # base class for other custom exceptions
pass
class FileNotFound(dataStoreException):
def __init__(self, message="File does not exist. Requires valid file path."):
self.message = message
super().__init__(self.message)
class FileNotAccessible(dataStoreException):
def __init__(self, message="File can not be accessed. Requires file accessiblity to read or write."):
self.message = message
super().__init__(self.message)
class IOErrorOccurred(dataStoreException):
def __init__(self, message="Caught IO Exception. File can not be accessed."):
self.message = message
super().__init__(self.message)
class InvalidKey(dataStoreException):
def __init__(self, message="Key must be a string."):
self.message = message
super().__init__(self.message)
class KeyLengthExceeded(dataStoreException):
def __init__(self, message="Requires valid Key not exceeding the maximum size of 32 characters."):
self.message = message
super().__init__(self.message)
class DuplicateKey(dataStoreException):
def __init__(self, key, message=" already exists. Create is invoked for an existing key."):
self.key = key
self.message = message
super().__init__(self.message)
def __str__(self):
return f'{self.key} {self.message}'
class InvalidJSONobject(dataStoreException):
def __init__(self, message="Requires value as a valid JSON object."):
self.message = message
super().__init__(self.message)
class ValueSizeExceeded(dataStoreException):
def __init__(self, message="Requires valid JSON object not exceeding the maximum size of 16KB."):
self.message = message
super().__init__(self.message)
class FileSizeExceeded(dataStoreException):
def __init__(self, message="Reached Maximum file size. New data can not be stored."):
self.message = message
super().__init__(self.message)
class timeToLiveValueError(dataStoreException):
def __init__(self, message="Invalid argument. Requires numerical value defining the number of seconds."):
self.message = message
super().__init__(self.message)
class EmptyFile(dataStoreException):
def __init__(self, message="File does not have any json object."):
self.message = message
super().__init__(self.message)
class KeyNotExist(dataStoreException):
def __init__(self, key, message=" does not exist. Requires Valid Key."):
self.message = message
self.key = key
super().__init__(self.message)
def __str__(self):
return f'{self.key} {self.message}'
class KeyExpired(dataStoreException):
def __init__(self, key, message="Key exceeded Time-To-Live. Can not be accessed for read or delete operation."):
self.message = message
super().__init__(self.message)
class InvalidJSONfile(dataStoreException):
def __init__(self, message="Requires valid JSON file containing JSON object in standard format."):
self.message = message
super().__init__(self.message)
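# Illustrative usage sketch (hypothetical helper, not part of this module): a create
# operation might validate its key before writing and surface these exceptions.
def _example_create(store, key, value):
    if not isinstance(key, str):
        raise InvalidKey()
    if len(key) > 32:
        raise KeyLengthExceeded()
    if key in store:
        raise DuplicateKey(key)
    store[key] = value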
'''
TMC, XTI, tsproj parsing utilities
'''
import collections
import logging
import os
import pathlib
import re
import types
import lxml
import lxml.etree
from .code import (get_pou_call_blocks, program_name_from_declaration,
variables_from_declaration, determine_block_type)
# Registry of all TwincatItem-based classes
TWINCAT_TYPES = {}
USE_NAME_AS_PATH = object()
logger = logging.getLogger(__name__)
SLN_PROJECT_RE = re.compile(
r"^Project.*?=\s*\"(.*?)\",\s*\"(.*?)\"\s*,\s*(.*?)\"\s*$",
re.MULTILINE
)
def parse(fn, *, parent=None):
'''
Parse a given tsproj, xti, or tmc file.
Returns
-------
item : TwincatItem
'''
fn = case_insensitive_path(fn)
with open(fn, 'rb') as f:
tree = lxml.etree.parse(f)
root = tree.getroot()
return TwincatItem.parse(root, filename=fn, parent=parent)
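# Illustrative usage (hypothetical project path, kept as a comment): parse a .tsproj
# and list the virtual PLC projects it contains.
#   project = parse('plc/Example.tsproj')
#   for plc in project.plcs:
#       print(plc.name, plc.port)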
def projects_from_solution(fn, *, exclude=None):
'''
Find project filenames from a solution.
Parameters
----------
fn : str, pathlib.Path
Solution filename
exclude : list or None
Exclude certain extensions. Defaults to excluding .tcmproj
'''
with open(fn, 'rt') as f:
solution_text = f.read()
if exclude is None:
exclude = ('.tcmproj', )
projects = [
pathlib.PureWindowsPath(match[1])
for match in SLN_PROJECT_RE.findall(solution_text)
]
solution_path = pathlib.Path(fn).parent
return [(solution_path / pathlib.Path(project)).absolute()
for project in projects
if project.suffix not in exclude
]
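# Illustrative usage (hypothetical solution path, kept as a comment): resolve the
# project files referenced by a Visual Studio solution, skipping .tcmproj by default.
#   for project_path in projects_from_solution('plc/Example.sln'):
#       print(project_path)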
def element_to_class_name(element, *, parent=None):
'''
Determine the Python class name for an element
Parameters
----------
element : lxml.etree.Element
Returns
-------
class_name : str
base_class : class
'''
tag = strip_namespace(element.tag)
extension = os.path.splitext(element.base)[-1].lower()
if tag == 'Project':
if isinstance(parent, TcSmProject):
return 'TopLevelProject', TwincatItem
if 'File' in element.attrib:
# File to be loaded will contain PrjFilePath
return 'Plc', TwincatItem
if 'PrjFilePath' in element.attrib:
return 'Plc', TwincatItem
if isinstance(parent, (Plc, TcSmItem)):
return 'PlcProject', TwincatItem
return 'Project', TwincatItem
if tag == 'Plc':
return 'TopLevelPlc', TwincatItem
if tag == 'Symbol':
base_type, = element.xpath('BaseType')
return f'{tag}_' + base_type.text, Symbol
if extension == '.tmc':
return tag, _TmcItem
return tag, TwincatItem
def _determine_path(base_path, name, class_hint):
'''
Determine the path to load child XTI items from, given a base path and the
class load path hint.
Parameters
----------
base_path : pathlib.Path
The path from which to start, e.g., the child_load_path of the parent
object
name : str
The name of the parent object, to be used when USE_NAME_AS_PATH is
specified
class_hint : pathlib.Path or USE_NAME_AS_PATH
A hint path as to where to load child objects from
'''
if not class_hint:
return base_path
path = base_path / (name
if class_hint is USE_NAME_AS_PATH
else class_hint)
if path.exists() and path.is_dir():
return path
return base_path # the fallback
class TwincatItem:
_load_path_hint = ''
def __init__(self, element, *, parent=None, name=None, filename=None):
'''
Represents a single TwinCAT project XML Element, for either tsproj,
xti, tmc, etc.
Parameters
----------
element : lxml.etree.Element
parent : TwincatItem, optional
name : str, optional
filename : pathlib.Path, optional
'''
self.child_load_path = _determine_path(
filename.parent, name, self._load_path_hint)
self.attributes = dict(element.attrib)
self._children = []
self.children = None # populated later
self.comments = []
self.element = element
self.filename = filename
self.name = name
self.parent = parent
self.tag = element.tag
self.text = element.text.strip() if element.text else None
self._add_children(element)
self.post_init()
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
TWINCAT_TYPES[cls.__name__] = cls
def post_init(self):
'Hook for subclasses; called after __init__'
...
@property
def root(self):
'The top-level TwincatItem (likely TcSmProject)'
parent = self
while parent.parent is not None:
parent = parent.parent
return parent
@property
def path(self):
'Path of classes required to get to this instance'
hier = [self]
parent = self.parent
while parent:
hier.append(parent)
parent = parent.parent
return '/'.join(item.__class__.__name__ for item in reversed(hier))
def find_ancestor(self, cls):
'''
Find an ancestor of this instance
Parameters
----------
cls : TwincatItem
'''
parent = self.parent
while parent and not isinstance(parent, cls):
parent = parent.parent
return parent
def get_relative_path(self, path):
'''
Get an absolute path relative to this item
Returns
-------
path : pathlib.Path
'''
root = pathlib.Path(self.filename).parent
rel_path = pathlib.PureWindowsPath(path)
return (root / rel_path).resolve()
def find(self, cls, *, recurse=True):
'''
Find any descendents that are instances of cls
Parameters
----------
cls : TwincatItem
'''
for child in self._children:
if isinstance(child, cls):
yield child
if not recurse:
continue
yield from child.find(cls, recurse=recurse)
def _add_children(self, element):
'A hook for adding all children'
for child_element in element.iterchildren():
if isinstance(child_element, lxml.etree._Comment):
self.comments.append(child_element.text)
continue
self._add_child(child_element)
by_tag = separate_by_classname(self._children)
self.children = types.SimpleNamespace(**by_tag)
for key, value in by_tag.items():
if not hasattr(self, key):
setattr(self, key, value)
def _add_child(self, element):
child = self.parse(element, parent=self, filename=self.filename)
if child is None:
return
self._children.append(child)
if not hasattr(child, '_squash_children'):
return
for grandchild in list(child._children):
if any(isinstance(grandchild, squashed_type)
for squashed_type in child._squash_children):
self._children.append(grandchild)
grandchild.container = child
grandchild.parent = self
child._children.remove(grandchild)
@staticmethod
def parse(element, parent=None, filename=None):
'''
Parse an XML element and return a TwincatItem
Parameters
----------
element : lxml.etree.Element
parent : TwincatItem, optional
The parent to assign to the new element
filename : str, optional
The filename the element originates from
Returns
-------
item : TwincatItem
'''
classname, base = element_to_class_name(element, parent=parent)
try:
cls = TWINCAT_TYPES[classname]
except KeyError:
# Dynamically create and register new TwincatItem-based types!
cls = type(classname, (base, ), {})
if 'File' in element.attrib:
# This is defined directly in the file. Instantiate it as-is:
filename = element.attrib['File']
return cls.from_file(filename, parent=parent)
# Two ways for names to come in:
# 1. a child has a tag of 'Name', with its text being our name
names = [child.text for child in element.iterchildren()
if child.tag == 'Name' and child.text]
name = names[0] if names else None
# 2. the child has an attribute key 'Name'
try:
name = element.attrib['Name'].strip()
except KeyError:
...
# A special identifier __FILENAME__ means to replace the name
if name == '__FILENAME__':
name = filename.stem
return cls(element, parent=parent, filename=filename, name=name)
def _repr_info(self):
'__repr__ information'
return {
'name': self.name,
'attributes': self.attributes,
'children': self._children,
'text': self.text,
}
def __repr__(self):
info = ' '.join(f'{key}={value!r}'
for key, value in self._repr_info().items()
if value)
return f'<{self.__class__.__name__} {info}>'
@classmethod
def from_file(cls, filename, parent):
base_path = _determine_path(
base_path=parent.child_load_path,
name=parent.name,
class_hint=cls._load_path_hint
)
return parse(base_path / filename, parent=parent)
class _TwincatProjectSubItem(TwincatItem):
'[XTI/TMC/...] A base class for items that appear in virtual PLC projects'
@property
def plc(self):
'The nested project (virtual PLC project) associated with the item'
return self.find_ancestor(Plc)
class TcModuleClass(_TwincatProjectSubItem):
'[TMC] The top-level TMC file'
DataTypes: list
def get_data_type(self, type_name):
data_types = self.DataTypes[0].types
try:
return data_types[type_name]
except KeyError:
return BuiltinDataType(type_name)
class OwnerA(TwincatItem):
'[XTI] For a Link between VarA and VarB, this is the parent of VarA'
class OwnerB(TwincatItem):
'[XTI] For a Link between VarA and VarB, this is the parent of VarB'
class Link(TwincatItem):
'[XTI] Links between NC/PLC/IO'
def post_init(self):
self.a = (self.find_ancestor(OwnerA).name, self.attributes.get('VarA'))
self.b = (self.find_ancestor(OwnerB).name, self.attributes.get('VarB'))
self.link = [self.a, self.b]
def __repr__(self):
return f'<Link a={self.a} b={self.b}>'
class TopLevelProject(TwincatItem):
'[tsproj] Containing Io, System, Motion, TopLevelPlc, etc.'
@property
def ams_id(self):
'''
The AMS ID of the configured target
'''
return self.attributes.get('TargetNetId', '')
@property
def target_ip(self):
'''
A guess of the target IP, based on the AMS ID
'''
ams_id = self.ams_id
if ams_id.endswith('.1.1'):
return ams_id[:-4]
return ams_id # :(
class PlcProject(TwincatItem):
...
class TcSmProject(TwincatItem):
'[tsproj] A top-level TwinCAT tsproj'
def post_init(self):
self.top_level_plc, = list(self.find(TopLevelPlc, recurse=False))
@property
def plcs(self):
'The virtual PLC projects contained in this TcSmProject'
yield from self.top_level_plc.projects.values()
@property
def plcs_by_name(self):
'The virtual PLC projects in a dictionary keyed by name'
return {plc.name: plc for plc in self.plcs}
@property
def plcs_by_link_name(self):
'The virtual PLC projects in a dictionary keyed by link name'
return {plc.link_name: plc for plc in self.plcs}
class TcSmItem(TwincatItem):
'''
[XTI] Top-level container for XTI files
Visual Studio-level configuration changes the project layout significantly,
with individual XTI files being created for axes, PLCs, etc. instead of
updating the original tsproj file.
The additional, optional, level of indirection here can make walking the
tree frustrating. So, we squash these TcSmItems - skipping over them in the
hierarchy - and pushing its children into its parent.
The original container `TcSmItem` is accessible in those items through the
`.container` attribute.
'''
_squash_children = [TwincatItem]
class TopLevelPlc(TwincatItem):
'[XTI] Top-level PLC, contains one or more projects'
PlcProjectContainer: list
def post_init(self):
# TODO: this appears to cover all bases, but perhaps it could be
# refactored out
if hasattr(self, 'Plc'):
projects = self.Plc
elif hasattr(self, 'TcSmItem'):
projects = self.TcSmItem[0].PlcProject
else:
raise RuntimeError('Unable to find project?')
self.projects = {
project.name: project
for project in projects
}
self.projects_by_link_name = {
project.link_name: project
for project in projects
}
# Fix to squash hack: squashed Mappings belong to the individual
# projects, not this TopLevelPlc
for mapping in getattr(self, 'Mappings', []):
for project in projects:
if project.filename == mapping.filename:
self._children.remove(mapping)
project.Mappings = [mapping]
project._children.append(mapping)
continue
class Plc(TwincatItem):
'[tsproj] A project which contains Plc, Io, Mappings, etc.'
_load_path_hint = pathlib.Path('_Config') / 'PLC'
def post_init(self):
self.link_name = (self.Instance[0].name
if hasattr(self, 'Instance')
else self.name)
self.namespaces = {}
self.project_path = self.get_relative_path(
self.attributes['PrjFilePath'])
self.tmc_path = self.get_relative_path(
self.attributes['TmcFilePath'])
self.project = (parse(self.project_path, parent=self)
if self.project_path.exists()
else None)
self.tmc = (parse(self.tmc_path, parent=self)
if self.tmc_path.exists()
else None)
self.source_filenames = [
self.project.get_relative_path(compile.attributes['Include'])
for compile in self.find(Compile)
if 'Include' in compile.attributes
]
self.source = {
str(fn.relative_to(self.project.filename.parent)):
parse(fn, parent=self)
for fn in self.source_filenames
}
def get_source_items(attr):
for plc_obj in self.source.values():
try:
source_obj = getattr(plc_obj, attr, [None])[0]
except IndexError:
continue
if source_obj and source_obj.name:
yield (source_obj.name, source_obj)
self.pou_by_name = dict(sorted(get_source_items('POU')))
self.gvl_by_name = dict(sorted(get_source_items('GVL')))
self.dut_by_name = dict(sorted(get_source_items('DUT')))
self.namespaces.update(self.pou_by_name)
self.namespaces.update(self.gvl_by_name)
self.namespaces.update(self.dut_by_name)
@property
def links(self):
return [link
for mapping in self.Mappings
for link in mapping.find(Link, recurse=False)
]
@property
def port(self):
'''
The ADS port for the project
'''
return self.attributes.get('AmsPort', '')
@property
def ams_id(self):
'''
The AMS ID of the configured target
'''
        return self.find_ancestor(TopLevelProject).ams_id
@property
def target_ip(self):
'''
A guess of the target IP, based on the AMS ID
'''
return self.find_ancestor(TopLevelProject).target_ip
def find(self, cls, *, recurse=True):
yield from super().find(cls, recurse=recurse)
if self.project is not None:
yield from self.project.find(cls, recurse=recurse)
for _, ns in self.namespaces.items():
if isinstance(ns, cls):
yield ns
if self.tmc is not None:
yield from self.tmc.find(cls, recurse=recurse)
def get_source_code(self):
'Get the full source code, DUTs, GVLs, and then POUs'
source_items = (
list(self.dut_by_name.items()) +
list(self.gvl_by_name.items()) +
list(self.pou_by_name.items())
)
return '\n'.join(
item.get_source_code()
for item in source_items
if hasattr(item, 'get_source_code')
)
class Compile(TwincatItem):
'''
[XTI] A code entry in a nested/virtual PLC project
File to load is marked with 'Include'
May be TcTTO, TcPOU, TcDUT, GVL, etc.
'''
class _TmcItem(_TwincatProjectSubItem):
'[TMC] Any item found in a TMC file'
@property
def tmc(self):
'The TcModuleClass (TMC) associated with the item'
return self.find_ancestor(TcModuleClass)
class DataTypes(_TmcItem):
'[TMC] Container of DataType'
def post_init(self):
self.types = {
dtype.qualified_type: dtype
for dtype in self.find(DataType)
}
self.types['Tc2_System.T_MaxString'] = T_MaxString()
class Type(_TmcItem):
'[TMC] DataTypes/DataType/SubItem/Type'
@property
def qualified_type(self):
'The base type, including the namespace'
namespace = self.attributes.get("Namespace", None)
return f'{namespace}.{self.text}' if namespace else self.text
class EnumInfo(_TmcItem):
'[TMC] Enum values, strings, and associated comments'
Text: list
Enum: list
Comment: list
@property
def enum_text(self):
return self.Text[0].text
@property
def enum_value(self):
try:
return self.Enum[0].text
except AttributeError:
...
logger.warning(
'Encountered a known issue with the TwinCAT-generated TMC file: '
'%s is missing an Enum value in section %s; this may cause '
'database generation errors.', self.parent.name, self.path
)
return ''
@property
def enum_comment(self):
return self.Comment[0].text if hasattr(self, 'Comment') else ''
class ArrayInfo(_TmcItem):
'[TMC] Array information for a DataType or Symbol'
LBound: list
UBound: list
Elements: list
def post_init(self):
lbound = (int(self.LBound[0].text)
if hasattr(self, 'LBound')
else 0)
elements = (int(self.Elements[0].text)
if hasattr(self, 'Elements')
else 1)
ubound = (int(self.UBound[0].text)
if hasattr(self, 'UBound')
else lbound + elements - 1)
self.bounds = (lbound, ubound)
self.elements = elements
class ExtendsType(_TmcItem):
'[TMC] A marker of inheritance / extension, found on DataType'
@property
def qualified_type(self):
if 'Namespace' in self.attributes:
return f'{self.attributes["Namespace"]}.{self.text}'
return self.text
class DataType(_TmcItem):
'[TMC] A DataType with SubItems, likely representing a structure'
Name: list
EnumInfo: list
SubItem: list
@property
def qualified_type(self):
name_attrs = self.Name[0].attributes
if 'Namespace' in name_attrs:
return f'{name_attrs["Namespace"]}.{self.name}'
return self.name
@property
def is_complex_type(self):
return True
def walk(self, condition=None):
if self.is_enum:
# Ensure something is yielded for this type - it doesn't
# appear possible to have SubItems or use ExtendsType
# in this case.
yield []
return
extends_types = [
self.tmc.get_data_type(ext_type.qualified_type)
for ext_type in getattr(self, 'ExtendsType', [])
]
for extend_type in extends_types:
yield from extend_type.walk(condition=condition)
if hasattr(self, 'SubItem'):
for subitem in self.SubItem:
for item in subitem.walk(condition=condition):
yield [subitem] + item
@property
def enum_dict(self):
return {int(item.enum_value): item.enum_text
for item in getattr(self, 'EnumInfo', [])}
@property
def is_enum(self):
return len(getattr(self, 'EnumInfo', [])) > 0
@property
def is_array(self):
return len(getattr(self, 'ArrayInfo', [])) > 0
@property
def is_string(self):
return False
@property
def array_info(self):
try:
return self.ArrayInfo[0]
except (AttributeError, IndexError):
return None
@property
def length(self):
array_info = self.array_info
return array_info.elements if array_info else 1
class SubItem(_TmcItem):
'[TMC] One element of a DataType'
Type: list
@property
def data_type(self):
return self.tmc.get_data_type(self.qualified_type_name)
@property
def array_info(self):
try:
return self.ArrayInfo[0]
except (AttributeError, IndexError):
return None
@property
def type(self):
'The base type'
return self.Type[0].text
@property
def qualified_type_name(self):
'The base type, including the namespace'
type_ = self.Type[0]
namespace = type_.attributes.get("Namespace", None)
return f'{namespace}.{type_.text}' if namespace else type_.text
def walk(self, condition=None):
if condition is None or condition(self):
yield from self.data_type.walk(condition=condition)
class Module(_TmcItem):
'''
[TMC] A Module
Contains generated symbols, data areas, and miscellaneous properties.
'''
@property
def ads_port(self):
'The ADS port assigned to the Virtual PLC'
try:
return self._ads_port
except AttributeError:
app_prop, = [prop for prop in self.find(Property)
if prop.name == 'ApplicationName']
port_text = app_prop.value
self._ads_port = int(port_text.split('Port_')[1])
return self._ads_port
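# Port-parsing sketch (illustrative): the 'ApplicationName' property value for
# the first virtual PLC is typically of the form 'Port_851'; ads_port above
# caches the integer that follows 'Port_'.
def _ads_port_sketch(application_name='Port_851'):
    return int(application_name.split('Port_')[1])  # -> 851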
class Property(_TmcItem):
'''
[TMC] A property containing a key/value pair
Examples of TMC properties::
ApplicationName (used for the ADS port)
ChangeDate
GeneratedCodeSize
GlobalDataSize
'''
Value: list
@property
def key(self):
'The property key name'
return self.name
@property
def value(self):
'The property value text'
return self.Value[0].text if hasattr(self, 'Value') else self.text
def __repr__(self):
return f'<Property {self.key}={self.value!r}>'
class BuiltinDataType:
'[TMC] A built-in data type such as STRING, INT, REAL, etc.'
def __init__(self, typename, *, length=1):
if '(' in typename:
typename, length = typename.split('(')
length = int(length.rstrip(')'))
self.name = typename
self.length = length
@property
def is_complex_type(self):
return False
@property
def enum_dict(self):
return {int(item.enum_value): item.enum_text
for item in getattr(self, 'EnumInfo', [])}
@property
def is_enum(self):
return len(getattr(self, 'EnumInfo', [])) > 0
@property
def is_string(self):
return self.name == 'STRING'
@property
def is_array(self):
# TODO: you can have an array of STRING(80), for example
# the length would be reported as 80 here, and the DataType would have
# ArrayInfo
return self.length > 1 and not self.is_string
def walk(self, condition=None):
yield []
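# Usage sketch: parameterized built-in type names such as 'STRING(80)' are
# split into a base name and a length by BuiltinDataType.__init__ above.
def _builtin_data_type_example():
    s = BuiltinDataType('STRING(80)')
    assert (s.name, s.length, s.is_string) == ('STRING', 80, True)
    i = BuiltinDataType('INT')
    assert i.length == 1 and not i.is_array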
class T_MaxString(BuiltinDataType):
def __init__(self):
super().__init__(typename='STRING', length=255)
class Symbol(_TmcItem):
'''
[TMC] A basic Symbol type
This is dynamically subclassed into new classes for ease of implementation
and searching. For example, a function block defined as `FB_MotionStage`
will become `Symbol_FB_MotionStage`.
'''
BitOffs: list
BitSize: list
BaseType: list
@property
def type_name(self):
'The base type'
return self.BaseType[0].text
@property
def qualified_type_name(self):
'The base type, including the namespace'
type_ = self.BaseType[0]
namespace = type_.attributes.get("Namespace", None)
return f'{namespace}.{type_.text}' if namespace else type_.text
@property
def data_type(self):
return self.tmc.get_data_type(self.qualified_type_name)
@property
def module(self):
'The TMC Module containing the Symbol'
return self.find_ancestor(Module)
@property
def info(self):
return dict(name=self.name,
bit_size=self.BitSize[0].text,
type=self.type_name,
qualified_type_name=self.qualified_type_name,
bit_offs=self.BitOffs[0].text,
module=self.module.name,
is_pointer=self.is_pointer,
array_bounds=self.array_bounds,
summary_type_name=self.summary_type_name,
)
def walk(self, condition=None):
if condition is None or condition(self):
for item in self.data_type.walk(condition=condition):
yield [self] + item
@property
def array_info(self):
try:
return self.ArrayInfo[0]
except (AttributeError, IndexError):
return None
@property
def array_bounds(self):
try:
return self.array_info.bounds
except (AttributeError, IndexError):
return None
def get_links(self, *, strict=False):
sym_name = '^' + self.name.lower()
dotted_name = sym_name + '.'
plc = self.plc
plc_name = plc.link_name
for link in plc.links:
if any(owner == plc_name and
(var.lower().endswith(sym_name) or
not strict and dotted_name in var.lower())
for owner, var in link.link):
yield link
@property
def is_pointer(self):
type_ = self.BaseType[0]
pointer_info = type_.attributes.get("PointerTo", None)
return bool(pointer_info)
@property
def summary_type_name(self):
summary = self.qualified_type_name
if self.is_pointer:
summary = 'POINTER TO ' + summary
array_bounds = self.array_bounds
if array_bounds:
summary = 'ARRAY[{}..{}] OF '.format(*array_bounds) + summary
return summary
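# Summary-name sketch (illustrative, mirroring summary_type_name above): a
# pointer symbol with array bounds (0, 9) and qualified type 'Main.ST_Example'
# would summarize as 'ARRAY[0..9] OF POINTER TO Main.ST_Example'.
def _summary_type_name_sketch(qualified='Main.ST_Example', is_pointer=True,
                              array_bounds=(0, 9)):
    summary = qualified
    if is_pointer:
        summary = 'POINTER TO ' + summary
    if array_bounds:
        summary = 'ARRAY[{}..{}] OF '.format(*array_bounds) + summary
    return summary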
class Symbol_DUT_MotionStage(Symbol):
'[TMC] A customized Symbol, representing only DUT_MotionStage'
def _repr_info(self):
'__repr__ information'
repr_info = super()._repr_info()
# Add on the NC axis name
try:
repr_info['nc_axis'] = self.nc_axis.name
except Exception as ex:
repr_info['nc_axis'] = repr(ex)
return repr_info
@property
def program_name(self):
'`Main` of `Main.M1`'
return self.name.split('.')[0]
@property
def motor_name(self):
'`M1` of `Main.M1`'
return self.name.split('.')[1]
@property
def nc_to_plc_link(self):
'''
The Link for NcToPlc
That is, how the NC axis is connected to the DUT_MotionStage
'''
expected = '^' + self.name.lower() + '.axis.nctoplc'
links = [link
for link in self.plc.find(Link, recurse=False)
if expected in link.a[1].lower()
]
if not links:
raise RuntimeError(f'No NC link to DUT_MotionStage found for '
f'{self.name!r}')
link, = links
return link
@property
def nc_axis(self):
'The NC `Axis` associated with the DUT_MotionStage'
link = self.nc_to_plc_link
parent_name = link.parent.name.split('^')
if parent_name[0] == 'TINC':
parent_name = parent_name[1:]
task_name, axis_section, axis_name = parent_name
nc, = list(nc for nc in self.root.find(NC, recurse=False)
if nc.SafTask[0].name == task_name)
nc_axis = nc.axis_by_name[axis_name]
# link nc_axis and FB_MotionStage?
return nc_axis
class GVL(_TwincatProjectSubItem):
'[TcGVL] A Global Variable List'
@property
def declaration(self):
'The declaration code; i.e., the top portion in visual studio'
return self.Declaration[0].text
def get_source_code(self, *, close_block=True):
'The full source code - declaration only in the case of a GVL'
return self.declaration
class ST(_TwincatProjectSubItem):
'[TcDUT/TcPOU] Structured text'
class Implementation(_TwincatProjectSubItem):
'[TcDUT/TcPOU] Code implementation'
class Declaration(_TwincatProjectSubItem):
'[TcDUT/TcPOU/TcGVL] Code declaration'
class DUT(_TwincatProjectSubItem):
'[TcDUT] Data unit type (DUT)'
@property
def declaration(self):
'The declaration code; i.e., the top portion in visual studio'
return self.Declaration[0].text
def get_source_code(self, *, close_block=True):
'The full source code - declaration only in the case of a DUT'
return self.declaration
class Action(_TwincatProjectSubItem):
'[TcPOU] Code declaration for actions'
@property
def source_code(self):
return f'''\
ACTION {self.name}:
{self.implementation or ''}
END_ACTION'''
@property
def implementation(self):
'The implementation code; i.e., the bottom portion in visual studio'
impl = self.Implementation[0]
if hasattr(impl, 'ST'):
# NOTE: only ST for now
return impl.ST[0].text
class POU(_TwincatProjectSubItem):
'[XTI] A Program Organization Unit'
# TODO: may fail when mixed with ladder logic?
Declaration: list
Implementation: list
def get_fully_qualified_name(self, name):
if '.' in name:
first, rest = name.split('.', 1)
if (first == self.name or first in self.project.namespaces):
return name
return f'{self.name}.{name}'
@property
def declaration(self):
'The declaration code; i.e., the top portion in visual studio'
return self.Declaration[0].text
@property
def implementation(self):
'The implementation code; i.e., the bottom portion in visual studio'
impl = self.Implementation[0]
if hasattr(impl, 'ST'):
return impl.ST[0].text
@property
def actions(self):
'The action implementations (zero or more)'
return list(getattr(self, 'Action', []))
def get_source_code(self, *, close_block=True):
'The full source code - declaration, implementation, and actions'
source_code = [self.declaration or '',
self.implementation or '',
]
if close_block:
source_code.append('')
closing = {
'function_block': 'END_FUNCTION_BLOCK',
'program': 'END_PROGRAM',
'function': 'END_FUNCTION',
'action': 'END_ACTION',
}
source_code.append(
closing.get(determine_block_type(self.declaration),
'# pytmc: unknown block type')
)
# TODO: actions defined outside of the block?
for action in self.actions:
source_code.append(action.source_code)
return '\n'.join(source_code)
@property
def call_blocks(self):
'A dictionary of all implementation call blocks'
return get_pou_call_blocks(self.declaration, self.implementation)
@property
def program_name(self):
'The program name, determined from the declaration'
return program_name_from_declaration(self.declaration)
@property
def variables(self):
'A dictionary of variables defined in the POU'
return variables_from_declaration(self.declaration)
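# Closing-keyword sketch (illustrative): get_source_code above closes the
# declaration block with the END_* keyword matching its type, relying on the
# module-level helper ``determine_block_type``; e.g. a declaration starting
# with 'FUNCTION_BLOCK FB_Example' is closed with 'END_FUNCTION_BLOCK'.
def _closing_keyword_sketch(declaration):
    closing = {
        'function_block': 'END_FUNCTION_BLOCK',
        'program': 'END_PROGRAM',
        'function': 'END_FUNCTION',
        'action': 'END_ACTION',
    }
    return closing.get(determine_block_type(declaration),
                       '# pytmc: unknown block type')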
class AxisPara(TwincatItem):
'''
[XTI] Axis Parameters
Has information on units, acceleration, deadband, etc.
'''
class NC(TwincatItem):
'[tsproj or XTI] Top-level NC'
_load_path_hint = pathlib.Path('_Config') / 'NC'
def post_init(self):
# Axes can be stored directly in the tsproj:
self.axes = getattr(self, 'Axis', [])
self.axis_by_id = {
int(axis.attributes['Id']): axis
for axis in self.axes
}
self.axis_by_name = {
axis.name: axis
for axis in self.axes
}
class Axis(TwincatItem):
'[XTI] A single NC axis'
_load_path_hint = pathlib.Path('Axes')
@property
def axis_number(self):
return int(self.attributes['Id'])
@property
def units(self):
try:
for axis_para in getattr(self, 'AxisPara', []):
for general in getattr(axis_para, 'General', []):
if 'UnitName' in general.attributes:
return general.attributes['UnitName']
except Exception:
logger.exception('Unable to determine EGU for Axis %s', self)
# 'mm' is the default in twincat if unspecified. defaults are not saved
# in the xti files:
return 'mm'
def summarize(self):
yield from self.attributes.items()
for param in self.find(AxisPara, recurse=False):
yield from param.attributes.items()
for child in param._children:
for key, value in child.attributes.items():
yield f'{child.tag}:{key}', value
for encoder in getattr(self, 'Encoder', []):
for key, value in encoder.summarize():
yield f'Enc:{key}', value
class EncPara(TwincatItem):
'''
[XTI] Encoder parameters
Includes such parameters as ScaleFactorNumerator, ScaleFactorDenominator,
and so on.
'''
class Encoder(TwincatItem):
'''
[XTI] Encoder
Contains EncPara, Vars, Mappings, etc.
'''
def summarize(self):
yield 'EncType', self.attributes['EncType']
for param in self.find(EncPara, recurse=False):
yield from param.attributes.items()
for child in param._children:
for key, value in child.attributes.items():
yield f'{child.tag}:{key}', value
class Device(TwincatItem):
'[XTI] Top-level IO device container'
_load_path_hint = pathlib.Path('_Config') / 'IO'
def __init__(self, element, *, parent=None, name=None, filename=None):
super().__init__(element, parent=parent, name=name, filename=filename)
class Box(TwincatItem):
'[XTI] A box / module'
_load_path_hint = USE_NAME_AS_PATH
class RemoteConnections(TwincatItem):
'[StaticRoutes] Routes contained in the TwinCat configuration'
def post_init(self):
def to_dict(child):
return {
item.tag: item.text
for item in child._children
}
def keyed_on(key):
return {
getattr(child, key)[0].text: to_dict(child)
for child in self._children
if hasattr(child, key)
}
self.by_name = keyed_on('Name')
self.by_address = keyed_on('Address')
self.by_ams_id = keyed_on('NetId')
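# Shape sketch (hypothetical route): after post_init, the same per-route dict
# (tag -> text, e.g. {'Name': 'PLC-01', 'Address': '10.0.0.5',
# 'NetId': '5.18.24.37.1.1', ...}) is reachable via by_name['PLC-01'],
# by_address['10.0.0.5'], and by_ams_id['5.18.24.37.1.1'].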
class _ArrayItemProxy:
'''
A TwincatItem proxy that represents a single element of an array value.
Adjusts 'name' such that access from EPICS will refer to the correct index.
Parameters
----------
item : TwincatItem
The item to mirror
index : int
The array index to use
'''
def __init__(self, item, index):
self.__dict__.update(
name=f'{item.name}[{index}]',
item=item,
_index=index,
)
def __getattr__(self, attr):
return getattr(self.__dict__['item'], attr)
def __setattr__(self, attr, value):
return setattr(self.__dict__['item'], attr, value)
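# Usage sketch: the proxy rewrites only ``name``; every other attribute is
# forwarded to the wrapped item (a SimpleNamespace stands in for a
# TwincatItem here).
def _array_item_proxy_example():
    from types import SimpleNamespace
    item = SimpleNamespace(name='Main.fbStage', comment='axis 1')
    proxy = _ArrayItemProxy(item, 3)
    assert proxy.name == 'Main.fbStage[3]'
    assert proxy.comment == 'axis 1'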
def case_insensitive_path(path):
'''
Match a path in a case-insensitive manner, returning the actual filename as
it exists on the host machine
Required on Linux to find files in a case-insensitive way. Not required on
OSX/Windows, but platform checks are not done here.
Parameters
----------
path : pathlib.Path or str
The case-insensitive path
Returns
-------
path : pathlib.Path or str
The case-corrected path
Raises
------
FileNotFoundError
When the file can't be found
'''
path = pathlib.Path(path)
if path.exists():
return path.resolve()
new_path = pathlib.Path(path.parts[0])
for part in path.parts[1:]:
if not (new_path / part).exists():
all_files = {fn.lower(): fn
for fn in os.listdir(new_path)}
try:
part = all_files[part.lower()]
except KeyError:
raise FileNotFoundError(
f'{path} does not exist ({part!r} not in {new_path!r})'
) from None
new_path = new_path / part
return new_path.resolve()
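# Usage sketch (hypothetical path): resolve 'plc/Main.TcPOU' even when it is
# stored as 'PLC/main.tcpou' on a case-sensitive filesystem.
def _case_insensitive_path_example():
    try:
        return case_insensitive_path('plc/Main.TcPOU')
    except FileNotFoundError:
        return None  # nothing to resolve on this machine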
def separate_by_classname(children):
'''
Take in a list of `TwincatItem`, categorize each by their class name (based
on XML tag), and return a dictionary keyed on that.
For example::
<a> <a> <b> <b>
Would become::
{'a': [<a>, <a>],
'b': [<b>, <b>]
}
Parameters
----------
children : list
list of TwincatItem
Returns
-------
dict
Categorized children
'''
d = collections.defaultdict(list)
for child in children:
d[child.__class__.__name__].append(child)
return d
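# Usage sketch with stand-in classes (any TwincatItem subclasses behave the
# same way, since grouping keys on the class name):
def _separate_by_classname_example():
    class Foo: ...
    class Bar: ...
    grouped = separate_by_classname([Foo(), Foo(), Bar()])
    assert sorted(grouped) == ['Bar', 'Foo']
    assert len(grouped['Foo']) == 2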
def strip_namespace(tag):
'Strip off {{namespace}} from: {{namespace}}tag'
return lxml.etree.QName(tag).localname
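# Usage sketch: lxml's QName splits the '{namespace}tag' notation.
def _strip_namespace_example():
    assert strip_namespace('{http://www.example.com/schema}Symbol') == 'Symbol'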
from collections import OrderedDict
TARGETS = OrderedDict([('2.7', (2, 7)),
('3.0', (3, 0)),
('3.1', (3, 1)),
('3.2', (3, 2)),
('3.3', (3, 3)),
('3.4', (3, 4)),
('3.5', (3, 5)),
('3.6', (3, 6))])
SYNTAX_ERROR_OFFSET = 5
TARGET_ALL = (9999, 9999)
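# Illustrative sketch (assumed usage): a target string resolves to a version
# tuple via TARGETS, and TARGET_ALL sorts after every real target so it can
# act as a "no upper bound" marker.
def _target_lookup_example():
    assert TARGETS['3.6'] == (3, 6)
    assert all(TARGET_ALL > target for target in TARGETS.values())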
# euroledger/aries-cloudagent-python
"""Basic message admin routes."""
from aiohttp import web
from aiohttp_apispec import docs, request_schema
from marshmallow import fields, Schema
from ...connections.models.connection_record import ConnectionRecord
from ...storage.error import StorageNotFoundError
from .messages.basicmessage import BasicMessage
class SendMessageSchema(Schema):
"""Request schema for sending a message."""
content = fields.Str(description="Message content", example="Hello")
@docs(tags=["basicmessage"], summary="Send a basic message to a connection")
@request_schema(SendMessageSchema())
async def connections_send_message(request: web.BaseRequest):
"""
Request handler for sending a basic message to a connection.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
connection_id = request.match_info["id"]
outbound_handler = request.app["outbound_message_router"]
params = await request.json()
try:
connection = await ConnectionRecord.retrieve_by_id(context, connection_id)
except StorageNotFoundError:
raise web.HTTPNotFound()
if connection.is_ready:
msg = BasicMessage(content=params["content"])
await outbound_handler(msg, connection_id=connection_id)
return web.json_response({})
async def register(app: web.Application):
"""Register routes."""
app.add_routes(
[web.post("/connections/{id}/send-message", connections_send_message)]
)
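# Minimal wiring sketch (illustrative): the real admin server also populates
# app["request_context"] and app["outbound_message_router"] before serving.
async def _example_app():
    app = web.Application()
    await register(app)
    return app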
import json
import pytest
from verity_sdk.utils import unpack_forward_message
from verity_sdk.utils.Context import Context
from verity_sdk.protocols.Protocol import Protocol
from ..test_utils import get_test_config, cleanup
@pytest.mark.asyncio
async def test_get_message():
message = {'hello': 'world'}
context = await Context.create_with_config(await get_test_config())
packed_message = await Protocol('test-family', '0.1').get_message_bytes(context, message)
unpacked_message = json.dumps(await unpack_forward_message(context, packed_message))
assert json.dumps(message) == unpacked_message
await cleanup(context)
import os
from datetime import datetime, timedelta
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import connection
from app import tests
from error.models import Error
def create_error():
return Error(timestamp=datetime.now(),
timestamp_date=datetime.today())
class StatsTests(TestCase):
# test the view for writing errors
def setUp(self):
settings.ANONYMOUS_ACCESS = True
Error.objects.all().delete()
def testCount(self):
for x in range(0, 10):
create_error().save()
for x in range(0, 5):
err = create_error()
err.priority = 4
err.save()
url = reverse('stats-view', args=['priority'])
res = self.client.get(url)
assert 'data.setValue(0, 1, 10);' in res.content
assert 'data.setValue(0, 2, 5);' in res.content
# tests/components/fronius/test_sensor.py
"""Tests for the Fronius sensor platform."""
from homeassistant.components.fronius.coordinator import (
FroniusInverterUpdateCoordinator,
FroniusMeterUpdateCoordinator,
FroniusPowerFlowUpdateCoordinator,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import STATE_UNKNOWN
from homeassistant.util import dt
from . import enable_all_entities, mock_responses, setup_fronius_integration
from tests.common import async_fire_time_changed
async def test_symo_inverter(hass, aioclient_mock):
"""Test Fronius Symo inverter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state.state == str(expected_state)
# Init at night
mock_responses(aioclient_mock, night=True)
config_entry = await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 23
await enable_all_entities(
hass, config_entry.entry_id, FroniusInverterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 55
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 0)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", 10828)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 44186900)
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", 25507686)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 16)
# Second test at daytime when inverter is producing
mock_responses(aioclient_mock, night=False)
async_fire_time_changed(
hass, dt.utcnow() + FroniusInverterUpdateCoordinator.default_interval
)
await hass.async_block_till_done()
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 57
await enable_all_entities(
hass, config_entry.entry_id, FroniusInverterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 59
# 4 additional AC entities
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 2.19)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", 1113)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 44188000)
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", 25508798)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 518)
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 5.19)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.94)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 1190)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 227.90)
# Third test at nighttime - additional AC entities aren't changed
mock_responses(aioclient_mock, night=True)
async_fire_time_changed(
hass, dt.utcnow() + FroniusInverterUpdateCoordinator.default_interval
)
await hass.async_block_till_done()
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 5.19)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.94)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 1190)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 227.90)
async def test_symo_logger(hass, aioclient_mock):
"""Test Fronius Symo logger entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock)
await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 25
# ignored constant entities:
# hardware_platform, hardware_version, product_type
# software_version, time_zone, time_zone_location
# time_stamp, unique_identifier, utc_offset
#
# states are rounded to 4 decimals
assert_state(
"sensor.cash_factor_fronius_logger_info_0_http_fronius",
0.078,
)
assert_state(
"sensor.co2_factor_fronius_logger_info_0_http_fronius",
0.53,
)
assert_state(
"sensor.delivery_factor_fronius_logger_info_0_http_fronius",
0.15,
)
async def test_symo_meter(hass, aioclient_mock):
"""Test Fronius Symo meter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock)
config_entry = await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 25
await enable_all_entities(
hass, config_entry.entry_id, FroniusMeterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 59
# ignored entities:
# manufacturer, model, serial, enable, timestamp, visible, meter_location
#
# states are rounded to 4 decimals
assert_state("sensor.current_ac_phase_1_fronius_meter_0_http_fronius", 7.755)
assert_state("sensor.current_ac_phase_2_fronius_meter_0_http_fronius", 6.68)
assert_state("sensor.current_ac_phase_3_fronius_meter_0_http_fronius", 10.102)
assert_state(
"sensor.energy_reactive_ac_consumed_fronius_meter_0_http_fronius", 59960790
)
assert_state(
"sensor.energy_reactive_ac_produced_fronius_meter_0_http_fronius", 723160
)
assert_state("sensor.energy_real_ac_minus_fronius_meter_0_http_fronius", 35623065)
assert_state("sensor.energy_real_ac_plus_fronius_meter_0_http_fronius", 15303334)
assert_state("sensor.energy_real_consumed_fronius_meter_0_http_fronius", 15303334)
assert_state("sensor.energy_real_produced_fronius_meter_0_http_fronius", 35623065)
assert_state("sensor.frequency_phase_average_fronius_meter_0_http_fronius", 50)
assert_state("sensor.power_apparent_phase_1_fronius_meter_0_http_fronius", 1772.793)
assert_state("sensor.power_apparent_phase_2_fronius_meter_0_http_fronius", 1527.048)
assert_state("sensor.power_apparent_phase_3_fronius_meter_0_http_fronius", 2333.562)
assert_state("sensor.power_apparent_fronius_meter_0_http_fronius", 5592.57)
assert_state("sensor.power_factor_phase_1_fronius_meter_0_http_fronius", -0.99)
assert_state("sensor.power_factor_phase_2_fronius_meter_0_http_fronius", -0.99)
assert_state("sensor.power_factor_phase_3_fronius_meter_0_http_fronius", 0.99)
assert_state("sensor.power_factor_fronius_meter_0_http_fronius", 1)
assert_state("sensor.power_reactive_phase_1_fronius_meter_0_http_fronius", 51.48)
assert_state("sensor.power_reactive_phase_2_fronius_meter_0_http_fronius", 115.63)
assert_state("sensor.power_reactive_phase_3_fronius_meter_0_http_fronius", -164.24)
assert_state("sensor.power_reactive_fronius_meter_0_http_fronius", 2.87)
assert_state("sensor.power_real_phase_1_fronius_meter_0_http_fronius", 1765.55)
assert_state("sensor.power_real_phase_2_fronius_meter_0_http_fronius", 1515.8)
assert_state("sensor.power_real_phase_3_fronius_meter_0_http_fronius", 2311.22)
assert_state("sensor.power_real_fronius_meter_0_http_fronius", 5592.57)
assert_state("sensor.voltage_ac_phase_1_fronius_meter_0_http_fronius", 228.6)
assert_state("sensor.voltage_ac_phase_2_fronius_meter_0_http_fronius", 228.6)
assert_state("sensor.voltage_ac_phase_3_fronius_meter_0_http_fronius", 231)
assert_state(
"sensor.voltage_ac_phase_to_phase_12_fronius_meter_0_http_fronius", 395.9
)
assert_state(
"sensor.voltage_ac_phase_to_phase_23_fronius_meter_0_http_fronius", 398
)
assert_state(
"sensor.voltage_ac_phase_to_phase_31_fronius_meter_0_http_fronius", 398
)
async def test_symo_power_flow(hass, aioclient_mock):
"""Test Fronius Symo power flow entities."""
async_fire_time_changed(hass, dt.utcnow())
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state.state == str(expected_state)
# First test at night
mock_responses(aioclient_mock, night=True)
config_entry = await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 23
await enable_all_entities(
hass, config_entry.entry_id, FroniusInverterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 55
# ignored: location, mode, timestamp
#
# states are rounded to 4 decimals
assert_state(
"sensor.energy_day_fronius_power_flow_0_http_fronius",
10828,
)
assert_state(
"sensor.energy_total_fronius_power_flow_0_http_fronius",
44186900,
)
assert_state(
"sensor.energy_year_fronius_power_flow_0_http_fronius",
25507686,
)
assert_state(
"sensor.power_battery_fronius_power_flow_0_http_fronius",
STATE_UNKNOWN,
)
assert_state(
"sensor.power_grid_fronius_power_flow_0_http_fronius",
975.31,
)
assert_state(
"sensor.power_load_fronius_power_flow_0_http_fronius",
-975.31,
)
assert_state(
"sensor.power_photovoltaics_fronius_power_flow_0_http_fronius",
STATE_UNKNOWN,
)
assert_state(
"sensor.relative_autonomy_fronius_power_flow_0_http_fronius",
0,
)
assert_state(
"sensor.relative_self_consumption_fronius_power_flow_0_http_fronius",
STATE_UNKNOWN,
)
# Second test at daytime when inverter is producing
mock_responses(aioclient_mock, night=False)
async_fire_time_changed(
hass, dt.utcnow() + FroniusPowerFlowUpdateCoordinator.default_interval
)
await hass.async_block_till_done()
# still 55 because power_flow update interval is shorter than others
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 55
assert_state(
"sensor.energy_day_fronius_power_flow_0_http_fronius",
1101.7001,
)
assert_state(
"sensor.energy_total_fronius_power_flow_0_http_fronius",
44188000,
)
assert_state(
"sensor.energy_year_fronius_power_flow_0_http_fronius",
25508788,
)
assert_state(
"sensor.power_battery_fronius_power_flow_0_http_fronius",
STATE_UNKNOWN,
)
assert_state(
"sensor.power_grid_fronius_power_flow_0_http_fronius",
1703.74,
)
assert_state(
"sensor.power_load_fronius_power_flow_0_http_fronius",
-2814.74,
)
assert_state(
"sensor.power_photovoltaics_fronius_power_flow_0_http_fronius",
1111,
)
assert_state(
"sensor.relative_autonomy_fronius_power_flow_0_http_fronius",
39.4708,
)
assert_state(
"sensor.relative_self_consumption_fronius_power_flow_0_http_fronius",
100,
)
async def test_gen24(hass, aioclient_mock):
"""Test Fronius Gen24 inverter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock, fixture_set="gen24")
config_entry = await setup_fronius_integration(hass, is_logger=False)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 25
await enable_all_entities(
hass, config_entry.entry_id, FroniusMeterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 57
# inverter 1
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", STATE_UNKNOWN)
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 0.1589)
assert_state("sensor.current_dc_2_fronius_inverter_1_http_fronius", 0.0754)
assert_state("sensor.status_code_fronius_inverter_1_http_fronius", 7)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", STATE_UNKNOWN)
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 0.0783)
assert_state("sensor.voltage_dc_2_fronius_inverter_1_http_fronius", 403.4312)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 37.3204)
assert_state("sensor.error_code_fronius_inverter_1_http_fronius", 0)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 411.3811)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 1530193.42)
assert_state("sensor.inverter_state_fronius_inverter_1_http_fronius", "Running")
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 234.9168)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.9917)
# meter
assert_state("sensor.energy_real_produced_fronius_meter_0_http_fronius", 3863340.0)
assert_state("sensor.energy_real_consumed_fronius_meter_0_http_fronius", 2013105.0)
assert_state("sensor.power_real_fronius_meter_0_http_fronius", 653.1)
assert_state("sensor.frequency_phase_average_fronius_meter_0_http_fronius", 49.9)
assert_state("sensor.meter_location_fronius_meter_0_http_fronius", 0.0)
assert_state("sensor.power_factor_fronius_meter_0_http_fronius", 0.828)
assert_state(
"sensor.energy_reactive_ac_consumed_fronius_meter_0_http_fronius", 88221.0
)
assert_state("sensor.energy_real_ac_minus_fronius_meter_0_http_fronius", 3863340.0)
assert_state("sensor.current_ac_phase_2_fronius_meter_0_http_fronius", 2.33)
assert_state("sensor.voltage_ac_phase_1_fronius_meter_0_http_fronius", 235.9)
assert_state(
"sensor.voltage_ac_phase_to_phase_12_fronius_meter_0_http_fronius", 408.7
)
assert_state("sensor.power_real_phase_2_fronius_meter_0_http_fronius", 294.9)
assert_state("sensor.energy_real_ac_plus_fronius_meter_0_http_fronius", 2013105.0)
assert_state("sensor.voltage_ac_phase_2_fronius_meter_0_http_fronius", 236.1)
assert_state(
"sensor.energy_reactive_ac_produced_fronius_meter_0_http_fronius", 1989125.0
)
assert_state("sensor.voltage_ac_phase_3_fronius_meter_0_http_fronius", 236.9)
assert_state("sensor.power_factor_phase_1_fronius_meter_0_http_fronius", 0.441)
assert_state(
"sensor.voltage_ac_phase_to_phase_23_fronius_meter_0_http_fronius", 409.6
)
assert_state("sensor.current_ac_phase_3_fronius_meter_0_http_fronius", 1.825)
assert_state("sensor.power_factor_phase_3_fronius_meter_0_http_fronius", 0.832)
assert_state("sensor.power_apparent_phase_1_fronius_meter_0_http_fronius", 243.3)
assert_state(
"sensor.voltage_ac_phase_to_phase_31_fronius_meter_0_http_fronius", 409.4
)
assert_state("sensor.power_apparent_phase_2_fronius_meter_0_http_fronius", 323.4)
assert_state("sensor.power_apparent_phase_3_fronius_meter_0_http_fronius", 301.2)
assert_state("sensor.power_real_phase_1_fronius_meter_0_http_fronius", 106.8)
assert_state("sensor.power_factor_phase_2_fronius_meter_0_http_fronius", 0.934)
assert_state("sensor.power_real_phase_3_fronius_meter_0_http_fronius", 251.3)
assert_state("sensor.power_reactive_phase_1_fronius_meter_0_http_fronius", -218.6)
assert_state("sensor.power_reactive_phase_2_fronius_meter_0_http_fronius", -132.8)
assert_state("sensor.power_reactive_phase_3_fronius_meter_0_http_fronius", -166.0)
assert_state("sensor.power_apparent_fronius_meter_0_http_fronius", 868.0)
assert_state("sensor.power_reactive_fronius_meter_0_http_fronius", -517.4)
assert_state("sensor.current_ac_phase_1_fronius_meter_0_http_fronius", 1.145)
# power_flow
assert_state("sensor.power_grid_fronius_power_flow_0_http_fronius", 658.4)
assert_state(
"sensor.relative_self_consumption_fronius_power_flow_0_http_fronius", 100.0
)
assert_state(
"sensor.power_photovoltaics_fronius_power_flow_0_http_fronius", 62.9481
)
assert_state("sensor.power_load_fronius_power_flow_0_http_fronius", -695.6827)
assert_state("sensor.meter_mode_fronius_power_flow_0_http_fronius", "meter")
assert_state("sensor.relative_autonomy_fronius_power_flow_0_http_fronius", 5.3592)
assert_state(
"sensor.power_battery_fronius_power_flow_0_http_fronius", STATE_UNKNOWN
)
assert_state("sensor.energy_year_fronius_power_flow_0_http_fronius", STATE_UNKNOWN)
assert_state("sensor.energy_day_fronius_power_flow_0_http_fronius", STATE_UNKNOWN)
assert_state("sensor.energy_total_fronius_power_flow_0_http_fronius", 1530193.42)
async def test_gen24_storage(hass, aioclient_mock):
"""Test Fronius Gen24 inverter with BYD battery and Ohmpilot entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock, fixture_set="gen24_storage")
config_entry = await setup_fronius_integration(hass, is_logger=False)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 31
await enable_all_entities(
hass, config_entry.entry_id, FroniusMeterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 63
# inverter 1
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 0.3952)
assert_state("sensor.voltage_dc_2_fronius_inverter_1_http_fronius", 318.8103)
assert_state("sensor.current_dc_2_fronius_inverter_1_http_fronius", 0.3564)
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", STATE_UNKNOWN)
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 1.1087)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 250.9093)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", STATE_UNKNOWN)
assert_state("sensor.error_code_fronius_inverter_1_http_fronius", 0)
assert_state("sensor.status_code_fronius_inverter_1_http_fronius", 7)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 7512794.0117)
assert_state("sensor.inverter_state_fronius_inverter_1_http_fronius", "Running")
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 419.1009)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 227.354)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.9816)
# meter
assert_state("sensor.energy_real_produced_fronius_meter_0_http_fronius", 1705128.0)
assert_state("sensor.power_real_fronius_meter_0_http_fronius", 487.7)
assert_state("sensor.power_factor_fronius_meter_0_http_fronius", 0.698)
assert_state("sensor.energy_real_consumed_fronius_meter_0_http_fronius", 1247204.0)
assert_state("sensor.frequency_phase_average_fronius_meter_0_http_fronius", 49.9)
assert_state("sensor.meter_location_fronius_meter_0_http_fronius", 0.0)
assert_state("sensor.power_reactive_fronius_meter_0_http_fronius", -501.5)
assert_state(
"sensor.energy_reactive_ac_produced_fronius_meter_0_http_fronius", 3266105.0
)
assert_state("sensor.power_real_phase_3_fronius_meter_0_http_fronius", 19.6)
assert_state("sensor.current_ac_phase_3_fronius_meter_0_http_fronius", 0.645)
assert_state("sensor.energy_real_ac_minus_fronius_meter_0_http_fronius", 1705128.0)
assert_state("sensor.power_apparent_phase_2_fronius_meter_0_http_fronius", 383.9)
assert_state("sensor.current_ac_phase_1_fronius_meter_0_http_fronius", 1.701)
assert_state("sensor.current_ac_phase_2_fronius_meter_0_http_fronius", 1.832)
assert_state("sensor.power_apparent_phase_1_fronius_meter_0_http_fronius", 319.5)
assert_state("sensor.voltage_ac_phase_1_fronius_meter_0_http_fronius", 229.4)
assert_state("sensor.power_real_phase_2_fronius_meter_0_http_fronius", 150.0)
assert_state(
"sensor.voltage_ac_phase_to_phase_31_fronius_meter_0_http_fronius", 394.3
)
assert_state("sensor.voltage_ac_phase_2_fronius_meter_0_http_fronius", 225.6)
assert_state(
"sensor.energy_reactive_ac_consumed_fronius_meter_0_http_fronius", 5482.0
)
assert_state("sensor.energy_real_ac_plus_fronius_meter_0_http_fronius", 1247204.0)
assert_state("sensor.power_factor_phase_1_fronius_meter_0_http_fronius", 0.995)
assert_state("sensor.power_factor_phase_3_fronius_meter_0_http_fronius", 0.163)
assert_state("sensor.power_factor_phase_2_fronius_meter_0_http_fronius", 0.389)
assert_state("sensor.power_reactive_phase_1_fronius_meter_0_http_fronius", -31.3)
assert_state("sensor.power_reactive_phase_3_fronius_meter_0_http_fronius", -116.7)
assert_state(
"sensor.voltage_ac_phase_to_phase_12_fronius_meter_0_http_fronius", 396.0
)
assert_state(
"sensor.voltage_ac_phase_to_phase_23_fronius_meter_0_http_fronius", 393.0
)
assert_state("sensor.power_reactive_phase_2_fronius_meter_0_http_fronius", -353.4)
assert_state("sensor.power_real_phase_1_fronius_meter_0_http_fronius", 317.9)
assert_state("sensor.voltage_ac_phase_3_fronius_meter_0_http_fronius", 228.3)
assert_state("sensor.power_apparent_fronius_meter_0_http_fronius", 821.9)
assert_state("sensor.power_apparent_phase_3_fronius_meter_0_http_fronius", 118.4)
# power_flow
assert_state("sensor.power_grid_fronius_power_flow_0_http_fronius", 2274.9)
assert_state("sensor.power_battery_fronius_power_flow_0_http_fronius", 0.1591)
assert_state("sensor.power_load_fronius_power_flow_0_http_fronius", -2459.3092)
assert_state(
"sensor.relative_self_consumption_fronius_power_flow_0_http_fronius", 100.0
)
assert_state(
"sensor.power_photovoltaics_fronius_power_flow_0_http_fronius", 216.4328
)
assert_state("sensor.relative_autonomy_fronius_power_flow_0_http_fronius", 7.4984)
assert_state("sensor.meter_mode_fronius_power_flow_0_http_fronius", "bidirectional")
assert_state("sensor.energy_year_fronius_power_flow_0_http_fronius", STATE_UNKNOWN)
assert_state("sensor.energy_day_fronius_power_flow_0_http_fronius", STATE_UNKNOWN)
assert_state("sensor.energy_total_fronius_power_flow_0_http_fronius", 7512664.4042)
# storage
assert_state("sensor.current_dc_fronius_storage_0_http_fronius", 0.0)
assert_state("sensor.state_of_charge_fronius_storage_0_http_fronius", 4.6)
assert_state("sensor.capacity_maximum_fronius_storage_0_http_fronius", 16588)
assert_state("sensor.temperature_cell_fronius_storage_0_http_fronius", 21.5)
assert_state("sensor.capacity_designed_fronius_storage_0_http_fronius", 16588)
assert_state("sensor.voltage_dc_fronius_storage_0_http_fronius", 0.0)
from odoo import _, api, fields, models
class Quotations(models.Model):
_inherit = "sale.order"
note_on_customer = fields.Text("Note on Customer",
help="Add Notes on Customers!")
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2014 Numenta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------
"""
Test shellshock and other related bash bugs
"""
import agamotto
import unittest2 as unittest
class TestForBashSecurityBugs(unittest.TestCase):
def testForBashCVE_2014_6271(self):
"""Is bash immune to CVE-2014-6271?
"""
test6271 = ("(env x='() { :;}; echo vulnerable' "
"bash -c \"echo this is a test\") 2>&1")
self.assertFalse(agamotto.process.stdoutContains(test6271, 'vulnerable'),
'Bash is vulnerable to CVE-2014-6271')
def testForBashCVE_2014_6277(self):
"""Is bash immune to CVE-2014-6277?
"""
test6277 = "foo='() { echo still vulnerable; }' bash -c foo 2>&1"
self.assertFalse(agamotto.process.stdoutContains(test6277,
'still vulnerable'),
'Bash is vulnerable to CVE-2014-6277')
def testForBashCVE_2014_6278(self):
"""Is bash immune to CVE-2014-6278?
"""
test6278 = ("shellshocker='() { echo You are vulnerable; }' "
"bash -c shellshocker")
self.assertFalse(agamotto.process.stdoutContains(test6278, 'vulnerable'),
'Bash is vulnerable to CVE-2014-6278')
def testForBashCVE_2014_7169(self):
"""Is bash immune to CVE-2014-7169?
"""
testFor7169 = ("env X='() { (a)=>\' bash -c \"echo echo vuln\";"
" [[ \"$(cat echo)\" == \"vuln\" ]] && "
"echo \"still vulnerable :(\" 2>&1")
self.assertFalse(agamotto.process.stdoutContains(testFor7169,
'still vulnerable'),
'Bash is vulnerable to CVE-2014-7169')
def testForBashCVE_2014_7186_a(self):
"""Is bash immune to CVE-2014-7186 using test from shellshocker.net?"""
test7186shellshocker = ("bash -c 'true <<EOF <<EOF <<EOF <<EOF <<EOF "
"<<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF <<EOF' ||"
" echo 'CVE-2014-7186 vulnerable, redir_stack'")
self.assertFalse(agamotto.process.stdoutContains(test7186shellshocker,
'vulnerable'),
'Vulnerable to CVE-2014-7186, redir_stack')
def testForBashCVE_2014_7186_c(self):
"""Is bash immune to CVE-2014-7186 using <EMAIL>'s test?"""
# Try Eric Blake's test too
blake7186Test = ("""bash -c "export f=1 g='() {'; f() { echo 2;};
export -f f; bash -c 'echo \$f \$g; f; env |
grep ^f='"
""")
safe=("1 () {\n2\nf=1\n")
self.assertTrue(agamotto.process.stdoutContains(blake7186Test, safe),
"Fails Eric Blake's CVE-2014-7186 test")
def testForBashCVE_2014_7187a(self):
"""Is bash immune to CVE-2014-7187 using test from shellshocker.net?"""
test7187 = ("""(for x in {1..200} ; do echo "for x$x in ; do :"; done;
for x in {1..200} ; do echo done ; done) | bash ||
echo "CVE-2014-7187 vulnerable, word_lineno" """)
self.assertFalse(agamotto.process.stdoutContains(test7187, 'vulnerable'),
'CVE-2014-7187 vulnerable, word_lineno')
if __name__ == '__main__':
unittest.main()
# explorecourses/classes.py
"""
This module contains classes representing various academic elements
for use in storing and manipulating information from Explore Courses.
Includes:
- School
- Department
- Course
- Section
- Schedule
- Instructor
- LearningObjective
- Attribute
- Tag
"""
from typing import Tuple
from xml.etree.ElementTree import Element
class Department(object):
"""
This class represents a department within a school.
Attributes:
name (str): The department name.
code (str): The department code used for searching courses by
department.
"""
def __init__(self, elem: Element):
"""
Constructs a new Department from an XML element.
Args:
elem (Element): The department's XML element.
"""
self.name = elem.get("longname")
self.code = elem.get("name")
def __str__(self):
"""
Returns a string representation of the Department that includes both
department name and code.
"""
return f"{self.name} ({self.code})"
class School(object):
"""
This class represents a school within the university.
Attributes:
name (str): The name of the school.
departments (Tuple[Department]): A list of departments within the
school.
"""
def __init__(self, elem: Element):
"""
Constructs a new School from an XML element.
Args:
elem (Element): The school's XML element.
"""
self.name = elem.get("name")
depts = elem.findall("department")
self.departments = tuple(Department(dept) for dept in depts)
def get_department(self, idf: str) -> Department:
"""
Gets a department within the school identified by name or code.
Args:
idf (str): An identifier of the department; either the name or code.
Returns:
Department: The department matched by the given identifier if a
match was found, None otherwise.
"""
idf = idf.lower()
find_code = lambda dept, code: dept.code.lower() == code
find_name = lambda dept, name: dept.name.lower() == name
find_dept = lambda dept, idf: find_name(dept, idf) or find_code(dept,
idf)
idx = [idx for idx, dept in enumerate(self.departments)
if find_dept(dept, idf)]
return self.departments[idx[0]] if idx else None
def __str__(self):
"""
Returns a string representation of the School that is the School's name.
"""
return self.name
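# Usage sketch: departments can be looked up by code or by name
# (case-insensitively) from a hand-built XML element.
def _school_lookup_example():
    from xml.etree.ElementTree import fromstring
    school = School(fromstring(
        '<school name="School of Engineering">'
        '<department longname="Computer Science" name="CS"/>'
        '</school>'
    ))
    assert school.get_department('cs').name == 'Computer Science'
    assert school.get_department('computer science').code == 'CS'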
class Instructor(object):
"""
This class represents an instructor for a section.
Attributes:
name (str): The instructor's name in "LastName, FirstInitial." form.
first_name (str): The instructor's first name.
middle_name (str): The instructor's middle name.
last_name (str): The instructor's last name.
sunet_id (str): The instructor's SUNet ID (as in <EMAIL>).
is_primary_instructor (bool): True if the instructor is the primary
instructor for the course, False otherwise.
"""
def __init__(self, elem: Element):
"""
Constructs a new Instructor from an XML element.
Args:
elem (Element): The instructor's XML element.
"""
self.name = elem.findtext("name")
self.first_name = elem.findtext("firstName")
self.middle_name = elem.findtext("middleName")
self.last_name = elem.findtext("lastName")
self.sunet_id = elem.findtext("sunet")
self.is_primary_instructor = elem.findtext("role") == "PI"
def __str__(self):
"""
Returns a string representation of the Instructor that includes the
instructor's first and last name and SUNet ID.
"""
return f"{self.first_name} {self.last_name} ({self.sunet_id})"
class Attribute(object):
"""
This class represents an attribute of a course.
Attributes:
name (str): The name of the attribute.
value (str): The abbreviation value of the attribute.
description (str): A description of the value of the attribute.
catalog_print (bool): True if the attribute has the catalog print flag,
False otherwise.
schedule_print (bool): True if the attribute has the schedule print
flag, False otherwise.
"""
def __init__(self, elem: Element):
"""
Constructs a new Attribute from an XML element.
Args:
elem (Element): The attribute's XML element.
"""
self.name = elem.findtext("name")
self.value = elem.findtext("value")
self.description = elem.findtext("description")
self.catalog_print = elem.findtext("catalogPrint") == "true"
self.schedule_print = elem.findtext("schedulePrint") == "true"
def __str__(self):
"""
Returns a string representation of the Attribute that includes the
attribute's name and value.
"""
return f"{self.name}::{self.value}"
class Schedule(object):
"""
This class represents the schedule of a section, including instructors.
Attributes:
start_date (str): The start date of the section's schedule.
end_date (str): The end date of the section's schedule.
start_time (str): The start time of each section.
end_time (str): The end time of each section.
location (str): The location of each section.
days (Tuple[str]): The days of the week that the section meets.
instructors (Tuple[Instructor]): The section's instructors.
"""
def __init__(self, elem: Element):
"""
Constructs a new Schedule from an XML element.
Args:
elem (Element): The schedule's XML element.
"""
self.start_date = elem.findtext("startDate")
self.end_date = elem.findtext("endDate")
self.start_time = elem.findtext("startTime")
self.end_time = elem.findtext("endTime")
self.location = elem.findtext("location")
self.days = tuple(elem.findtext("days").split())
self.instructors = tuple(Instructor(instr) for instr
in elem.find("instructors"))
def __str__(self):
"""
Returns a string representation of the Schedule that includes the
days of the week the section meets and its time and location.
"""
return (f"{', '.join(self.days)}, {self.start_time} - {self.end_time} "
f"at {self.location}")
class Section(object):
"""
This class represents a section of a course.
Attributes:
class_id (int): The unique ID of the section.
term (str): The year and quarter during which the section is offered.
units (str): The number of units the section is offered for
section_num (str): The section number which distinguishes between
different sections of the same type.
component (str): The type of section (e.g., LEC)
curr_class_size (int): The current number of students enrolled in the
section.
max_class_size (int): The maximum number of students allowed in the
section.
curr_waitlist_size (int): The current number of students on the
waitlist to enroll in the section.
max_waitlist_size (int): The maximum number of students allowed on the
waitlist for the section.
notes (str): Any notes about the section.
schedules (Tuple[Schedule]): The different schedules of the section.
attributes (Tuple[Attribute]): The section's attributes.
"""
def __init__(self, elem: Element):
"""
Constructs a new Section from an XML element.
Args:
elem (Element): The section's XML element.
"""
self.class_id = int(elem.findtext("classId"))
self.term = elem.findtext("term")
self.units = elem.findtext("units")
self.section_num = elem.findtext("sectionNumber")
self.component = elem.findtext("component")
self.max_class_size = int(elem.findtext("maxClassSize"))
self.curr_class_size = int(elem.findtext("currentClassSize"))
self.curr_waitlist_size = int(elem.findtext("currentWaitlistSize"))
self.max_waitlist_size = int(elem.findtext("maxWaitlistSize"))
self.notes = elem.findtext("notes")
self.schedules = tuple(Schedule(sched) for sched
in elem.find("schedules"))
self.attributes = tuple(Attribute(attr) for attr
in elem.find("attributes"))
def __str__(self):
"""
Returns a string representation of the Section that includes the
section's component and number, and section's ID.
"""
return f"{self.component} {self.section_num} (id: {self.class_id})"
class Tag(object):
"""
This class represents a tag for a course.
Attributes:
organization (str): The organization within the school responsible for
the tag.
name (str): The name of the tag.
"""
def __init__(self, elem: Element):
"""
Constructs a new Tag from an XML element.
Args:
elem (Element): The tag's XML element.
"""
self.organization = elem.findtext("organization")
self.name = elem.findtext("name")
def __str__(self):
"""
Returns a string representation of the Tag that includes the
tag's organization and name.
"""
return f"{self.organization}::{self.name}"
class LearningObjective(object):
"""
This class represents a learning objective for a course.
Attributes:
code (str): The GER that the learning objective is for.
description (str): A description of the learning objective.
"""
def __init__(self, elem: Element):
"""
Constructs a new LearningObjective from an XML element.
Args:
elem (Element): The learning objective's XML element.
"""
self.code = elem.findtext(".//requirementCode")
self.description = elem.findtext(".//description")
def __str__(self):
"""
Returns a string representation of the LearningObjective that includes
the learning objective's code and description.
"""
return f"Learning Objective ({self.code}: {self.description})"
class Course(object):
"""
This class represents a course listed at the university.
Attributes:
year (str): The Academic year that the course is offered.
subject (str): The academic subject of the course (e.g., 'MATH').
code (str): The code listing of the course (e.g., '51').
title (str): The full title of the course.
description (str): A description of the course.
gers (Tuple[str]): The General Education Requirements satisfied
by the course.
repeatable (bool): True if the course is repeatable for credit,
False otherwise.
grading_basis (str): The grading basis options for the course.
units_min (int): The minimum number of units the course can be
taken for.
units_max (int): The maximum number of units the course can be
taken for.
objectives (Tuple[LearningObjective]): The learning objectives of
the course.
final_exam (bool): True if the course has a final exam, False otherwise.
sections (Tuple[Section]): The sections associated with the course.
tags (Tuple[Tag]): The tags associated with the course.
attributes (Tuple[Attributes]): The attributes associated with
the course.
course_id (int): The unique ID of the course.
active (bool): True if the course is currently being taught,
False otherwise.
offer_num (str): The offer number of the course.
academic_group (str): The academic group that the course is a part of.
academic_org (str): The academic organization that the course
is a part of.
academic_career (str): The academic career associated with the course.
max_units_repeat (int): The number of units that the course
can be repeated for.
max_times_repeat (int): The number of times that the course
can be repeated.
"""
def __init__(self, elem: Element):
"""
Constructs a new Course from an XML element.
Args:
elem (Element): The course's XML element.
"""
self.year = elem.findtext("year")
self.subject = elem.findtext("subject")
self.code = elem.findtext("code")
self.title = elem.findtext("title")
self.description = elem.findtext("description")
self.gers = tuple(elem.findtext("gers").split(", "))
self.repeatable = (True if elem.findtext("repeatable") == "true"
else False)
self.grading_basis = elem.findtext("grading")
self.units_min = int(elem.findtext("unitsMin"))
self.units_max = int(elem.findtext("unitsMax"))
self.objectives = tuple(LearningObjective(obj) for obj
in elem.find("learningObjectives"))
self.final_exam = (
True if elem.findtext(".//finalExamFlag") == "Y"
else False if elem.findtext(".//finalExamFlag") == "N"
else None
)
self.sections = tuple(Section(section) for section
in elem.find("sections"))
self.tags = tuple(Tag(tag) for tag in elem.find("tags"))
self.attributes = tuple(Attribute(attr) for attr
in elem.find("attributes"))
self.course_id = int(elem.findtext(".//courseId"))
self.active = (True if elem.findtext(".//effectiveStatus") == "A"
else False if elem.findtext(".//effectiveStatus") == "I"
else None)
self.offer_num = elem.findtext(".//offerNumber")
self.academic_group = elem.findtext(".//academicGroup")
self.academic_org = elem.findtext(".//academicOrganization")
self.academic_career = elem.findtext(".//academicCareer")
self.max_units_repeat = int(elem.findtext(".//maxUnitsRepeat"))
self.max_times_repeat = int(elem.findtext(".//maxTimesRepeat"))
def __str__(self):
"""
Returns a string representation of the Course that includes the
course's subject, code, and full title.
"""
return f"{self.subject}{self.code} {self.title}"
def __eq__(self, other):
"""
Overloads the equality (==) operator for the Course class.
A Course can only be compared to another Course. Course equality is
determined by course ID.
Args:
other: The right operand of the equality operator.
Returns:
bool: True if the object being compared is equal to the Course,
False otherwise.
"""
if type(other) != Course: return False
return self.course_id == other.course_id
def __lt__(self, other):
"""
Overloads the less than (<) operator for Course.
A Course can only be compared to another Course. Courses are compared
first by subject, then by code, and last by year.
Args:
other: The right operand of the less than operator.
Returns:
bool: True if the object being compared is less than the Course,
False otherwise.
"""
if type(other) != Course:
raise TypeError(f"'<' not supported between instances of "
f"'{type(self)}' and '{type(other)}'")
if self.subject != other.subject:
return self.subject < other.subject
if self.code != other.code:
return self.code < other.code
if self.year != other.year:
return self.year < other.year
return False
def __gt__(self, other):
"""
Overloads the greater than (>) operator for Course.
A Course can only be compared to another Course. Courses are compared
first by subject, then by code, and last by year.
Args:
other: The right operand of the greater than operator.
Returns:
bool: True if the object being compared is greater than the Course,
False otherwise.
"""
if type(other) != Course:
raise TypeError(f"'>' not supported between instances of "
f"'{type(self)}' and '{type(other)}'")
return not self.__lt__(other) and not self.__eq__(other)
def __le__(self, other):
"""
Overloads the less than or equal to operator (<=) for Course.
A Course can only be compared to another Course. Courses are compared
first by subject, then by code, and last by year.
Args:
other: The right operand of the less than or equal to operator.
Returns:
bool: True if the object being compared is less than or equal to
the Course, False otherwise.
"""
if type(other) != Course:
raise TypeError(f"'<=' not supported between instances of "
f"'{type(self)}' and '{type(other)}'")
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
"""
Overloads the greater than or equal to operator (>=) for Course.
A Course can only be compared to another Course. Courses are compared
first by subject, then by code, and last by year.
Args:
other: The right operand of the greater than or equal to operator.
Returns:
bool: True if the object being compared is greater than or equal to
the Course, False otherwise.
"""
if type(other) != Course:
raise TypeError(f"'>=' not supported between instances of "
f"'{type(self)}' and '{type(other)}'")
return self.__gt__(other) or self.__eq__(other)
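    # --- Usage sketch (illustrative, not part of the original class) ---------
    # Assuming `courses` is a list of Course objects already parsed from XML
    # elsewhere (the constructor above takes an XML element), the comparison
    # overloads make catalogue-style sorting and identity checks work:
    #
    #     catalogue_order = sorted(courses)    # by subject, then code, then year
    #     courses[0] == courses[1]             # True only if course_id matches
    #     courses[0] < "CSE142"                # raises TypeError: not a Course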
|
StarcoderdataPython
|
1952533
|
"""
Test batch gradient computation of conv2d layer.
The example is taken from
Chellapilla: High Performance Convolutional Neural Networks
for Document Processing (2007).
"""
from random import choice, randint
import pytest
from torch import Tensor, allclose, randn
from torch.nn import Conv2d
import backpack.extensions as new_ext
from backpack import backpack, extend
def ExtConv2d(*args, **kwargs):
return extend(Conv2d(*args, **kwargs))
TEST_ATOL = 1e-4
###
# Problem settings
###
def make_conv_params(
in_channels, out_channels, kernel_size, stride, padding, dilation, bias, kernel
):
return {
"in_channels": in_channels,
"out_channels": out_channels,
"kernel_size": kernel_size,
"stride": stride,
"padding": padding,
"dilation": dilation,
"bias": bias,
"kernel": kernel,
}
def make_conv_layer(LayerClass, conv_params):
layer = LayerClass(
in_channels=conv_params["in_channels"],
out_channels=conv_params["out_channels"],
kernel_size=conv_params["kernel_size"],
stride=conv_params["stride"],
padding=conv_params["padding"],
dilation=conv_params["dilation"],
bias=conv_params["bias"],
)
layer.weight.data = conv_params["kernel"]
return layer
kernel11 = [[1, 1], [2, 2]]
kernel12 = [[1, 1], [1, 1]]
kernel13 = [[0, 1], [1, 0]]
kernel21 = [[1, 0], [0, 1]]
kernel22 = [[2, 1], [2, 1]]
kernel23 = [[1, 2], [2, 0]]
kernel = Tensor(
[[kernel11, kernel12, kernel13], [kernel21, kernel22, kernel23]]
).float()
CONV_PARAMS = make_conv_params(
in_channels=3,
out_channels=2,
kernel_size=(2, 2),
stride=(1, 1),
padding=(0, 0),
dilation=(1, 1),
bias=False,
kernel=kernel,
)
# input (1 sample)
in_feature1 = [[1, 2, 0], [1, 1, 3], [0, 2, 2]]
in_feature2 = [[0, 2, 1], [0, 3, 2], [1, 1, 0]]
in_feature3 = [[1, 2, 1], [0, 1, 3], [3, 3, 2]]
in1 = Tensor([[in_feature1, in_feature2, in_feature3]]).float()
result1 = [[14, 20], [15, 24]]
result2 = [[12, 24], [17, 26]]
out1 = Tensor([[result1, result2]]).float()
conv2d = make_conv_layer(Conv2d, CONV_PARAMS)
g_conv2d = make_conv_layer(ExtConv2d, CONV_PARAMS)
inputs = [in1]
results = [out1]
def loss_function(tensor):
"""Test loss function. Sum over squared entries."""
return ((tensor.contiguous().view(-1)) ** 2).sum()
def test_forward():
"""Compare forward
Handles only single instance batch.
"""
for input, result in zip(inputs, results):
out_conv2d = conv2d(input)
assert allclose(out_conv2d, result)
out_g_conv2d = g_conv2d(input)
assert allclose(out_g_conv2d, result)
def random_convolutions_and_inputs(
in_channels=None,
out_channels=None,
kernel_size=None,
stride=None,
padding=None,
dilation=None,
bias=None,
batch_size=None,
in_size=None,
):
"""Return same torch/exts 2d conv modules and random inputs.
Arguments can be fixed by handing them over.
"""
def __replace_if_none(var, by):
return by if var is None else var
in_channels = __replace_if_none(in_channels, randint(1, 3))
out_channels = __replace_if_none(out_channels, randint(1, 3))
kernel_size = __replace_if_none(kernel_size, (randint(1, 3), randint(1, 3)))
stride = __replace_if_none(stride, (randint(1, 3), randint(1, 3)))
padding = __replace_if_none(padding, (randint(0, 2), randint(0, 2)))
dilation = __replace_if_none(dilation, (randint(1, 3), randint(1, 3)))
bias = __replace_if_none(bias, choice([True, False]))
batch_size = __replace_if_none(batch_size, randint(1, 3))
in_size = __replace_if_none(in_size, (randint(8, 12), randint(8, 12)))
kernel_shape = (out_channels, in_channels) + kernel_size
kernel = randn(kernel_shape)
in_shape = (batch_size, in_channels) + in_size
input = randn(in_shape)
conv_params = make_conv_params(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
kernel=kernel,
)
conv2d = make_conv_layer(Conv2d, conv_params)
g_conv2d = make_conv_layer(ExtConv2d, conv_params)
if bias is True:
bias_vals = randn(out_channels)
conv2d.bias.data = bias_vals
g_conv2d.bias.data = bias_vals
assert allclose(conv2d.bias, g_conv2d.bias)
assert allclose(conv2d.weight, g_conv2d.weight)
return conv2d, g_conv2d, input
def compare_grads(conv2d, g_conv2d, input):
"""Feed input through nn and exts conv2d, compare bias/weight grad."""
loss = loss_function(conv2d(input))
loss.backward()
loss_g = loss_function(g_conv2d(input))
with backpack(new_ext.BatchGrad()):
loss_g.backward()
assert allclose(g_conv2d.bias.grad, conv2d.bias.grad, atol=TEST_ATOL)
assert allclose(g_conv2d.weight.grad, conv2d.weight.grad, atol=TEST_ATOL)
assert allclose(g_conv2d.bias.grad_batch.sum(0), conv2d.bias.grad, atol=TEST_ATOL)
assert allclose(
g_conv2d.weight.grad_batch.sum(0), conv2d.weight.grad, atol=TEST_ATOL
)
@pytest.mark.skip("Test does not consistently fail or pass")
def test_random_grad(random_runs=10):
"""Compare bias gradients for a single sample."""
for _ in range(random_runs):
conv2d, g_conv2d, input = random_convolutions_and_inputs(
bias=True, batch_size=1
)
compare_grads(conv2d, g_conv2d, input)
@pytest.mark.skip("Test does not consistently fail or pass")
def test_random_grad_batch(random_runs=10):
"""Check bias gradients for a batch."""
for _ in range(random_runs):
conv2d, g_conv2d, input = random_convolutions_and_inputs(bias=True)
compare_grads(conv2d, g_conv2d, input)
|
StarcoderdataPython
|
6505396
|
<reponame>1byte2bytes/cpython
"""cmtest - List all components in the system"""
import Cm
import Res
import sys
def getstr255(r):
"""Get string from str255 resource"""
if not r.data: return ''
len = ord(r.data[0])
return r.data[1:1+len]
def getinfo(c):
"""Return (type, subtype, creator, fl1, fl2, name, description) for component"""
h1 = Res.Resource('')
h2 = Res.Resource('')
h3 = Res.Resource('')
type, subtype, creator, fl1, fl2 = c.GetComponentInfo(h1, h2, h3)
name = getstr255(h1)
description = getstr255(h2)
return type, subtype, creator, fl1, fl2, name, description
def getallcomponents():
"""Return list with info for all components, sorted"""
any = ('\0\0\0\0', '\0\0\0\0', '\0\0\0\0', 0, 0)
c = None
rv = []
while 1:
try:
c = Cm.FindNextComponent(c, any)
except Cm.Error:
break
rv.append(getinfo(c))
rv.sort()
return rv
def main():
"""Print info for all components"""
info = getallcomponents()
for type, subtype, creator, f1, f2, name, description in info:
print '%4.4s %4.4s %4.4s %s 0x%x 0x%x'%(type, subtype, creator, name, f1, f2)
print ' ', description
sys.exit(1)
main()
|
StarcoderdataPython
|
337100
|
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) <NAME>
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import ArgumentParser
import re
import sys
def main(args):
f = '{}.{}'.format(args.corpus, args.l1)
e = '{}.{}'.format(args.corpus, args.l2)
fo = '{}.{}'.format(args.clean_corpus, args.l1)
eo = '{}.{}'.format(args.clean_corpus, args.l2)
    pattern = re.compile(r'^\s*$')
cnt = 0
lines = 0
with \
open(f, 'r', encoding='utf-8') as fp, \
open(e, 'r', encoding='utf-8') as ep, \
open(fo, 'w', encoding='utf-8') as fop, \
open(eo, 'w', encoding='utf-8') as eop:
for fl, el in zip(fp, ep):
lines += 1
if pattern.fullmatch(fl) is not None or pattern.fullmatch(el) is not None:
continue
fop.write(fl)
eop.write(el)
cnt += 1
print('input sentences: {}, output sentences: {}'.format(lines, cnt), file=sys.stderr)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('corpus')
parser.add_argument('l1')
parser.add_argument('l2')
parser.add_argument('clean_corpus')
args = parser.parse_args()
main(args)
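# Usage sketch (script and file names are hypothetical, not part of the original
# module): given corpus.en and corpus.de, this writes corpus.clean.en and
# corpus.clean.de, dropping any sentence pair where either side is blank:
#
#     python clean_parallel_corpus.py corpus en de corpus.clean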
|
StarcoderdataPython
|
11239622
|
import json
import mtprof
import numpy as np
import circuits as cir
import ngspice
import optimizers as opt
def test_folded_corners_sim():
process = ["SS", "FF", "SNFP", "FNSP"]
voltage_tempt = ["VDD_MAX_TEMP_MAX", "VDD_MAX_TEMP_MIN", "VDD_MIN_TEMP_MAX", "VDD_MIN_TEMP_MIN"]
corner_set = set(["TT"] + [ p+"_"+vt for p in process for vt in voltage_tempt])
with open("./circuit_examples/ptm130_folded_cascode/sizing_example.json", 'r') as file:
sizing = json.load(file)
parameters = [ k for k,v in sizing.items() ]
values = [ v for k,v in sizing.items() ]
folder = "./circuit_examples/ptm130_folded_cascode/"
meas = ngspice.simulate(cwd = folder, netlist=["tb_ac.cir"], param = parameters, val = np.array([values]))
errors = []
# replace assertions by conditions
if not len(meas) == 1:
errors.append("Failed output size, found {} expected 1".format(len(meas)))
else:
if not len(meas[0]) == 17:
errors.append("Failed number of coners , found {} expected 17".format(len(meas[0])))
if meas[0].keys() != corner_set :
errors.append("Failed to find all corners , found {} expected one of {}".format(sorted(meas.keys()), sorted(corner_set)))
# assert no error message has been registered, else print messages
assert not errors, "errors occured:\n{}".format("\n".join(errors))
def itest_folded_corners_opt():
seed = 42
np.random.seed(seed)
nsga2 = opt.NSGA2()
for pop, pop_obj, pop_cstr, pop_data, evals, front_no in nsga2.minimize(
cir.Circuit("./circuit_examples/ptm130_folded_cascode/"), pop_size=32, evaluations=32*10, mutation=0.3):
print(evals)
print(pop_obj[pop_cstr.argmax()] , pop_cstr[pop_cstr.argmax()])
print(pop_data[pop_cstr.argmax()][1]['TT'])
if __name__ == '__main__':
itest_folded_corners_opt()
|
StarcoderdataPython
|
12838917
|
<reponame>vluk/baymaxBot<gh_stars>0
import discord
from discord.ext import commands
import random
import asyncio
cards = ["villager", "werewolf", "minion", "mason", "seer", "robber", "troublemaker", "drunk", "insomniac", "tanner", "hunter"]
aesthetics = {
"werewolf" : {
"color" : 0x25d0ff,
"thumbnail" : "https://cdn.discordapp.com/attachments/323535193073778689/716453639782137876/unknown.png"
}
}
class Game:
def __init__(self, host, join_message):
self.state = "preparing"
self.players = [1, 2, 3]
self.host = host
self.join_message = join_message
self.initial_roles = []
self.current_roles = []
self.votes = {}
def fetch_player(self, arg):
try:
for player in self.players:
if not isinstance(player, int):
if player.id == int(arg):
return self.players.index(player)
except ValueError:
pass
arg = str(arg)
for player in self.players:
if not isinstance(player, int):
if player.name.lower() == arg.lower():
return self.players.index(player)
for player in self.players:
if not isinstance(player, int):
if player.nick != None and player.nick.lower() == arg.lower():
return self.players.index(player)
return -1
def get_refreshed_embed(self):
embed = self.join_message.embeds[0]
embed.clear_fields()
players = ", ".join(self.get_player_list()) if len(self.players) > 3 else "None"
embed.add_field(name="Players:", value = players)
roles = ", ".join([cards[i] for i in self.initial_roles]) if len(self.initial_roles) > 0 else "None"
embed.add_field(name="Roles:", value = roles)
return embed
def get_player_list(self):
return [i.display_name for i in self.players if not isinstance(i, int)]
def get_debrief(self):
return [(self.players[i].display_name, cards[self.current_roles[i]]) for i in range(len(self.current_roles)) if not isinstance(self.players[i], int)]
def simulate(self, instructions):
for i in instructions:
for j in i:
swap = self.current_roles[j[0]]
self.current_roles[j[0]] = self.current_roles[j[1]]
self.current_roles[j[1]] = swap
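    # Illustrative note (not part of the original code): `instructions` is the
    # list of swap lists returned by the night-action coroutines in the cog
    # below, e.g.
    #     game.simulate([[(0, 4)], [], [(1, 2)]])
    # applies the robber swap (indices 0 and 4) and the troublemaker swap
    # (indices 1 and 2) in order, mutating game.current_roles in place.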
class Werewolf(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.games = {}
    async def do_villager(self, user, game):
        # Villagers have no night action.
        pass
async def do_werewolf(self, user, game):
werewolves = []
for i in range(len(game.players)):
if not isinstance(game.players[i], int) and game.initial_roles[i] == 1:
werewolves.append(game.players[i])
if len(werewolves) == 1:
embed = discord.Embed(
title = "You are a werewolf!",
description = " ".join([
"You are a werewolf, the very embodiment of evil itself.",
"As a werewolf, your goal is to stay alive by deceiving the other players.",
"If all of the werewolves manage to stay alive, then their team wins.",
"Since it looks like you're the only werewolf, you get to look at a card from the center.",
"Click on one of the reactions on this message to reveal a card."
]),
color = aesthetics["werewolf"]["color"]
)
embed.set_thumbnail(url=aesthetics["werewolf"]["thumbnail"])
embed.add_field(
name="Werewolves",
value = "Just you!"
)
message = await user.send(embed=embed)
key = {"1️⃣" : 1, "2️⃣" : 2, "3️⃣" : 3}
for i in key:
await message.add_reaction(i)
def check(r, u):
return u.id == user.id and r.message.id == message.id and str(r.emoji) in key
reaction, user = await self.bot.wait_for("reaction_add", check=check)
selection = key[str(reaction.emoji)]
revealed = cards[game.initial_roles[game.players.index(selection)]].capitalize()
embed.add_field(
name="Revealed Card",
value=revealed,
)
await message.edit(embed=embed)
elif len(werewolves) > 1:
embed = discord.Embed(
title = "You are a werewolf!",
description = " ".join([
"As a werewolf, your goal is to stay alive by deceiving the other players.",
"If all of the werewolves manage to stay alive, then their team wins."
]),
color = aesthetics["werewolf"]["color"]
)
embed.set_thumbnail(url=aesthetics["werewolf"]["thumbnail"])
embed.add_field(
name="Werewolves:",
value = ", ".join([werewolf.display_name for werewolf in werewolves])
)
await user.send(embed=embed)
return []
async def do_minion(self, user, game):
werewolves = []
for i in range(len(game.players)):
if not isinstance(game.players[i], int) and game.initial_roles[i] == 1:
werewolves.append(game.players[i])
if len(werewolves) == 0:
embed = discord.Embed(
title = "You are a minion!",
description = " ".join([
"You are a dastardly minion, only barely tolerated by the werewolves.",
"Try to draw the fire of the other players, or divert suspicion towards one of the villagers.",
"If all of the werewolves manage to stay alive, then you win.",
])
)
embed.add_field(
name="Werewolves:",
value = "None"
)
await user.send(embed=embed)
else:
embed = discord.Embed(
title = "You are a minion!",
description = " ".join([
"You are a minion, dashing but with a heart of coal.",
"Try to draw the fire of the other players, or divert suspicion towards one of the villagers.",
"If all of the werewolves manage to stay alive, then you win.",
])
)
embed.add_field(
name="Werewolves:",
value = ", ".join([werewolf.display_name for werewolf in werewolves])
)
await user.send(embed=embed)
return []
async def do_mason(self, user, game):
masons = []
for i in range(len(game.players)):
if not isinstance(game.players[i], int) and game.initial_roles[i] == 3:
masons.append(game.players[i])
embed = discord.Embed(
title = "You are a mason!",
description = " ".join([
"Your sublime bond with your partner is unbreakable.",
"Leverage your maybe-platonic love to narrow down the suspects.",
"If you manage to kill a werewolf, then you win.",
])
)
embed.add_field(
name="Masons",
value = ", ".join([mason.display_name for mason in masons])
)
message = await user.send(embed=embed)
return []
async def do_seer(self, user, game):
embed = discord.Embed(
title = "You are a seer!",
description = " ".join([
"You are one with the very fabric of reality itself.",
"Use your eldritch knowledge to gain insights into the game.",
"If you manage to kill a werewolf, then you win.",
"You can either look at either another player's card or two cards in the center. "
"React with either 🇵 or 🇨 to choose."
])
)
message = await user.send(embed=embed)
key = {"🇵" : "player", "🇨" : "center"}
for i in key:
await message.add_reaction(i)
def check(r, u):
return u.id == user.id and r.message.id == message.id and str(r.emoji) in key
reaction, user = await self.bot.wait_for("reaction_add", check=check)
selection = key[str(reaction.emoji)]
if selection == "player":
await user.send("Choose which player, using either their full username or nickname.")
def user_check(m):
if m.author.id != self.bot.user.id:
if m.channel.id == user.dm_channel.id:
if game.fetch_player(m.content) != -1:
self.bot.loop.create_task(m.add_reaction("✅"))
return True
else:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
player = game.fetch_player((await self.bot.wait_for("message", check=user_check)).content)
await user.send(cards[game.initial_roles[player]])
elif selection == "center":
await user.send("Choose which two using two numbers (1, 2, 3) seperated with a space.")
def card_check(m):
if m.channel.id != user.dm_channel.id:
return False
split = m.content.split()
if len(split) != 2:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
try:
valid = int(split[0]) in [1, 2, 3] and int(split[1]) in [1, 2, 3] and split[0] != split[1]
if valid:
self.bot.loop.create_task(m.add_reaction("✅"))
return True
self.bot.loop.create_task(m.add_reaction("❌"))
return False
except ValueError:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
centers = [int(i) for i in (await self.bot.wait_for("message", check=card_check)).content.split()]
await user.send(cards[game.initial_roles[game.players.index(centers[0])]])
await user.send(cards[game.initial_roles[game.players.index(centers[1])]])
await user.send("You're good to go!")
return []
async def do_robber(self, user, game):
embed = discord.Embed(
title = "You are a robber!",
description = " ".join([
"Your morals are flexible, and so is your identity.",
"Choose another player to swap your card with.",
"Whoever ends up with your card will be on the villager team.",
"(Send a message containing their full username or nickname.)"
])
)
message = await user.send(embed=embed)
initial = game.fetch_player(user.id)
def check(m):
if m.channel.id == user.dm_channel.id:
if (game.fetch_player(m.content) != -1
and game.fetch_player(m.content) != initial):
self.bot.loop.create_task(m.add_reaction("✅"))
return True
else:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
target = game.fetch_player((await self.bot.wait_for("message", check=check)).content)
await user.send("you are now the " + cards[game.initial_roles[target]])
await user.send("You're good to go!")
return [(initial, target)]
async def do_troublemaker(self, user, game):
await user.send("choose two players to swap (seperate messages)")
initial = game.fetch_player(user.id)
first = None
def check(m):
if m.channel.id == user.dm_channel.id:
if (game.fetch_player(m.content) != -1
and game.fetch_player(m.content) != first
and game.fetch_player(m.content) != initial):
self.bot.loop.create_task(m.add_reaction("✅"))
return True
else:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
first_message = await self.bot.wait_for("message", check=check)
first = game.fetch_player(first_message.content)
second_message = await self.bot.wait_for("message", check=check)
second = game.fetch_player(second_message.content)
await user.send("You're good to go!")
return [(first, second)]
async def do_drunk(self, user, game):
embed = discord.Embed(
title = "You are a drunk!",
description = " ".join([
"You like the happy juice a biiiit more than is probably healthy.",
"Choose a card in the center to swap with."
])
)
message = await user.send(embed=embed)
key = {"1️⃣" : 1, "2️⃣" : 2, "3️⃣" : 3}
for i in key:
await message.add_reaction(i)
def check(r, u):
return u.id == user.id and r.message.id == message.id and str(r.emoji) in key
reaction, user = await self.bot.wait_for("reaction_add", check=check)
selection = key[str(reaction.emoji)]
current = game.fetch_player(user.id)
middle = game.players.index(key[str(reaction.emoji)])
await user.send("You're good to go!")
return [(current, middle)]
async def do_insomniac(self, user, game):
current = game.fetch_player(user.id)
await user.send(cards[game.current_roles[current]])
@commands.group(aliases=["ww"], invoke_without_command=True)
async def werewolf(self, ctx):
host = ctx.message.author
embed = discord.Embed(
title = "Werewolf",
description = " ".join([
"A classic social deduction game where two sides face off against each other: the **Villagers** and **Werewolves**.",
"Uncover who the werewolves are, or use deception to stay hidden until the end.",
"But be careful: if you kill the Tanner, then both teams lose.",
"\n\n**Instructions:**\n",
"**React to this message with 🐺** to join the game, "
"then **add cards** using `%ww add` followed by a list of the roles you want to add, seperated by spaces.",
"For example, you might do something like this to add multiple roles:",
"`%ww add werewolf minion seer tanner troublemaker mason mason`.",
"Additionally, you can get the order of the roles using %ww roleOrder."
]),
color = 0x7289da
)
embed.set_footer(text=f"{host.display_name} is the host", icon_url=host.avatar_url)
embed.add_field(name="Players", value="None")
embed.add_field(name="Roles", value="None")
if not ctx.channel.id in self.games:
message = await ctx.send(embed=embed)
await message.add_reaction("🐺")
self.games[ctx.channel.id] = Game(ctx.message.author, message)
else:
await ctx.send("There's already a game running here!")
@werewolf.command()
async def join(self, ctx):
        if not ctx.channel.id in self.games:
            await ctx.send("theres no game here dummy")
            return
        game = self.games[ctx.channel.id]
if game.state == "preparing":
if game.fetch_player(ctx.message.author.id) == -1:
game.players.append(ctx.message.author)
await ctx.send("yeah sure")
else:
await ctx.send("already in the game")
else:
await ctx.send("nah you cant join in the middle of a round")
@werewolf.command(aliases=["addcard"])
async def add(self, ctx, *, names):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
if game.host.id != ctx.message.author.id:
await ctx.send("Only the host can add roles.")
return
if all([name.lower() in cards for name in names.split()]) and game.state == "preparing":
for name in names.split():
game.initial_roles.append(cards.index(name.lower()))
await game.join_message.edit(embed=game.get_refreshed_embed())
@werewolf.command()
async def set(self, ctx, *, names):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
if game.host.id != ctx.message.author.id:
await ctx.send("Only the host can add roles.")
return
game.initial_roles = []
if all([name.lower() in cards for name in names.split()]) and game.state == "preparing":
for name in names.split():
game.initial_roles.append(cards.index(name.lower()))
await game.join_message.edit(embed=game.get_refreshed_embed())
    @werewolf.command(aliases=["removecard"])
async def remove(self, ctx, name):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
        if game.host.id != ctx.message.author.id:
            await ctx.send("Only the host can remove roles.")
            return
if game.state == "preparing" and cards.index(name.lower()) in game.initial_roles:
game.initial_roles.remove(cards.index(name.lower()))
await game.join_message.edit(embed=game.get_refreshed_embed())
@werewolf.command()
async def vote(self, ctx, *, accused : str):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
if game.state != "voting":
await ctx.send("cant vote yet")
return
author = ctx.message.author
if game.fetch_player(author.id) != -1:
if game.fetch_player(accused) != -1:
game.votes[author.id] = game.players[game.fetch_player(accused)].id
if len(game.votes) == len(game.players) - 3:
tally = {}
for i in game.votes:
if not game.votes[i] in tally:
tally[game.votes[i]] = 0
tally[game.votes[i]] += 1
top = sorted([(tally[i], i) for i in tally])
if len(top) > 1 and top[-1][0] == top[-2][0]:
await ctx.send("no decisive winner")
else:
killed_id = top[-1][1]
index = game.fetch_player(killed_id)
killed = game.players[index]
await ctx.send("killing " + killed.mention)
if cards[game.current_roles[index]] == "hunter":
def user_check(m):
if m.channel.id == ctx.message.channel.id:
if game.fetch_player(m.content) != -1:
self.bot.loop.create_task(m.add_reaction("✅"))
return True
else:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
player = game.fetch_player((await self.bot.wait_for("message", check=user_check)).content)
await ctx.send(killed.display_name + " was " + cards[game.current_roles[index]])
paired_roles = [" was ".join(i) for i in game.get_debrief()]
await ctx.send(", ".join(paired_roles))
del self.games[ctx.channel.id]
else:
await ctx.send("vote registered")
else:
await ctx.send("cant find")
else:
await ctx.send("you're not playing")
@werewolf.command()
async def roleOrder(self, ctx):
await ctx.send(", ".join(cards))
@werewolf.command()
async def start(self, ctx):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
if game.host.id != ctx.message.author.id:
await ctx.send("Only the host can start the game.")
return
if len(game.players) > len(game.initial_roles):
await ctx.send("You need more roles to play!")
return
if len(game.players) < len(game.initial_roles):
await ctx.send("You need less roles to play!")
return
game.state = "running"
game.initial_roles = sorted(game.initial_roles)
random.shuffle(game.players)
game.current_roles = [i for i in game.initial_roles]
await ctx.send("game starting")
for i in range(len(game.players)):
if not isinstance(game.players[i], int):
await game.players[i].send("you're the " + cards[game.initial_roles[i]])
tasks = []
for i in range(len(game.initial_roles)):
if not isinstance(game.players[i], int):
if game.initial_roles[i] == 1:
tasks.append(self.do_werewolf(game.players[i], game))
if game.initial_roles[i] == 2:
tasks.append(self.do_minion(game.players[i], game))
if game.initial_roles[i] == 3:
tasks.append(self.do_mason(game.players[i], game))
if game.initial_roles[i] == 4:
tasks.append(self.do_seer(game.players[i], game))
if game.initial_roles[i] == 5:
tasks.append(self.do_robber(game.players[i], game))
if game.initial_roles[i] == 6:
tasks.append(self.do_troublemaker(game.players[i], game))
if game.initial_roles[i] == 7:
tasks.append(self.do_drunk(game.players[i], game))
instructions = await asyncio.gather(*tasks)
game.simulate(instructions)
for i in range(len(game.players)):
if not isinstance(game.players[i], int):
if game.initial_roles[i] == 8:
await self.do_insomniac(game.players[i], game)
await ctx.send("the night's now over, do ur stuff then do %ww vote")
game.state = "voting"
@commands.Cog.listener()
async def on_reaction_add(self, reaction, user):
if user.id == self.bot.user.id:
return
message = reaction.message
if message.channel.id in self.games and self.games[message.channel.id].state == "preparing":
game = self.games[message.channel.id]
if game.join_message.id == message.id:
if game.fetch_player(user.id) == -1:
game.players.append(user)
await message.edit(embed=game.get_refreshed_embed())
@commands.Cog.listener()
async def on_reaction_remove(self, reaction, user):
message = reaction.message
if message.channel.id in self.games and self.games[message.channel.id].state == "preparing":
game = self.games[message.channel.id]
if game.join_message.id == message.id:
if game.fetch_player(user.id) != -1:
for player in range(len(game.players)):
if not isinstance(game.players[player], int) and game.players[player].id == user.id:
del game.players[player]
await message.edit(embed=game.get_refreshed_embed())
@werewolf.command()
async def cancel(self, ctx):
if ctx.message.channel.id in self.games:
game = self.games[ctx.channel.id]
if game.host.id == ctx.message.author.id:
del self.games[ctx.channel.id]
await ctx.send("game cancelled")
def setup(bot):
    bot.add_cog(Werewolf(bot))
|
StarcoderdataPython
|
11226556
|
from simpleai.search import (
SearchProblem,
breadth_first,
depth_first,
uniform_cost,
greedy,
astar
)
from simpleai.search.viewers import WebViewer, BaseViewer, ConsoleViewer
from itertools import combinations
GRID = [(r,c) for r in range(7) for c in range(6)]
ROADS = []
RACKS = [(0,0),(1,0),(2,0),(4,0),(5,0),(6,0),(0,2),(1,2),(2,2),(4,2),(5,2),(6,2),(0,4),(1,4),(2,4),(4,4),(5,4),(6,4)]
IO = [(3,5)]
for cell in GRID:
if cell not in RACKS:
ROADS.append(cell)
MOVEMENTS = [
(0,1),
(1,0),
(0,-1),
(-1,0)
]
INITIAL_STATE = ((3,5), (), (('C1', (0, 1)), ('C2', (1, 1)), ('C3', (2, 5)), ('C4', (5,3)), ('C5', (0,3))))
# pos robot, list of loaded boxes, list of pending boxes
class AmagonProblem(SearchProblem):
def is_goal(self, state):
_, loaded_boxes, pending_boxes = state
return len(loaded_boxes) + len(pending_boxes) == 0
def actions(self, state):
possible_actions = []
deposit_coordinates = []
bot_pos, loaded_boxes, pending_boxes = state
bot_row, bot_col = bot_pos
        if len(loaded_boxes) > 0:
            for loaded_box in loaded_boxes:
                deposit_coordinates.append(loaded_box[1])
if bot_pos == (3,5) and len(loaded_boxes) < 2 and len(pending_boxes) > 0:
# If bot is in I/O, has space to load at least 1 and there is at least 1 box to load
boxes_to_load = []
if len(loaded_boxes) == 1: # If there is space for only one
boxes_to_load.append(pending_boxes[0])
else: #len(loaded_boxes) == 2 # If there is space for two
if len(pending_boxes) == 1: # If there is one to load
boxes_to_load.append(pending_boxes[0])
else: # If there are two or more to load
boxes_to_load.append(pending_boxes[0])
boxes_to_load.append(pending_boxes[1])
possible_actions.append(('Load', boxes_to_load))
elif bot_pos in deposit_coordinates:
possible_actions.append(('Unload', bot_pos))
else:
for move in MOVEMENTS:
move_row, move_col = move
new_row = bot_row + move_row
new_col = bot_col + move_col
new_pos = tuple((new_row, new_col))
if (0 <= new_row <= 6) and (0 <= new_col <= 5):
possible_actions.append(('Move', new_pos))
return possible_actions
def result(self, state, action):
action_type, data = action
bot_pos, loaded_boxes, pending_boxes = state
state = list(state)
if action_type == 'Move':
bot_pos = list(bot_pos)
bot_pos = data
bot_pos = tuple(bot_pos)
state[0] = bot_pos
elif action_type == 'Unload':
loaded_boxes = list(loaded_boxes)
for loaded_box in loaded_boxes:
if loaded_box[1] == bot_pos:
loaded_boxes.remove(loaded_box)
loaded_boxes = tuple(loaded_boxes)
state[1] = loaded_boxes
else:
loaded_boxes = list(loaded_boxes)
pending_boxes = list(pending_boxes)
if len(data) == 1: #Just one box to load
pending_boxes.remove(data[0])
loaded_boxes.append(data[0])
else:
for box in data:
pending_boxes.remove(box)
loaded_boxes.append(box)
loaded_boxes = tuple(loaded_boxes)
pending_boxes = tuple(pending_boxes)
state[1] = loaded_boxes
state[2] = pending_boxes
state = tuple(state)
return state
def cost(self, state, action, state2):
return 1
def heuristic(self, state):
# Without heuristic
# {'max_fringe_size': 39, 'visited_nodes': 635, 'iterations': 635}
# return super().heuristic(state)
# {'max_fringe_size': 42, 'visited_nodes': 578, 'iterations': 578}
return len(state[1]) + len(state[2])
METHODS = (
breadth_first,
depth_first,
uniform_cost,
greedy,
astar
)
for search_algorithm in METHODS:
print()
print('=' * 50)
print("Running:", search_algorithm)
visor = BaseViewer()
problem = AmagonProblem(INITIAL_STATE)
result = search_algorithm(problem, graph_search = True, viewer = visor)
#result = astar(problem, graph_search = True, viewer = visor)
print ('Final State:', result.state)
print('=' * 50)
print(' - Statistics:')
print(' - Amount of actions until goal:', len(result.path()))
print(' - Raw data:', visor.stats)
'''
for action, state in result.path():
print(" - Action:", action)
print(" - Resulting State:", state)
'''
|
StarcoderdataPython
|
11300199
|
import serial
import numpy as np
from time import sleep
import sys
import json
class EIS():
adc_np_type_map = {1:np.int16,2:np.uint16,3:np.uint16}
def __init__(self, COM, BAUD = 115200, timeout = .1):
self.serial = serial.Serial(COM, BAUD, timeout = .1)
def get_and_print_responses(self, print_response = False):
'''
Retrieve messages written by the teensy to the
serial connection under the assumption that the output is
utf-8 encoded. If this assumption is violated the resulting
exception is printed and an empty string is returned.
'''
response = 'No response'
ret = ''
while response != '':
response = self.serial.readline()
try:
response = response.decode('utf-8')
ret += response
except Exception as e:
print(f'Exception: {e} \nResponse: {response}')
ret = ''
if print_response:
print(response)
return ret
def get_data(self, stimulus_parameters, adc_type):
'''
This function can be used to get all the measured data for a single subtype.
Parameters:
stimulus_parameters: contains metadata on the stimulus.
adc_type: 1. V1
2. V2
3. DAC stimulus
Output:
data: a numpy array of the correct type (see adc_np_type_map) containing
the full measurement sequence for a single type.
'''
start_pos = 0
dtype = self.adc_np_type_map[adc_type]
length = stimulus_parameters['length']
# Allocate memory to store the result
data = np.zeros((length,), dtype=dtype)
while True:
data_slice = self.get_data_slice(adc_type, start_pos)
            if data_slice['end'] <= data_slice['start']:  # This signals that we try to read beyond the end of the available data
break
data[data_slice['start']:data_slice['end']]=np.array(data_slice['data'],dtype=dtype)
start_pos = data_slice['end']
return data
def get_data_slice(self, adc_type, start_pos):
'''
This function can be used to get a slice of the measured data.
Parameters:
adc_type: 1. V1
2. V2
3. DAC stimulus
start_pos: sample at which to start the data request.
Output:
dataslice: A dictionary containing three key-value pairs: 'start', 'end'
and 'data'. The value of 'start' is the value of the parameter
start_pos, the value of 'end' is the index of the first sample
that is not returned (to be used as start_pos in the next call),
the value of the 'data' property contains the measured data
as a list.
'''
commandstring = f'D{adc_type:2}_{start_pos}\n'
self.serial.write(commandstring.encode('utf8'))
json_data = self.get_and_print_responses( print_response = False)
data_slice = json.loads(json_data)
return data_slice
def get_stimulus_parameters(self):
'''
This function can be used to get all the stimulus parameters.
Output:
stimulus_parameters:
dictionary with metadata on the stimulus, available keys:
'stimulus_parameters_valid':
0 (False) or 1 (True) indicates whether
data in buffers are from measurement for these
stimulus settings
'length':
length of the stimulus and the measured data
'digital_amplitude':
Amplitude of DAC input
'f_stimulus':
stimulus frequency DAC
'f_sampling':
sampling frequency DAC and ADC
'stimulus_duration':
                        stimulus duration in seconds
'ADC_averaging_number':
averaging over samples in ADC
'''
jsonStimulusParameters = None
stimulus_parameters = None
try:
# Get the metadata
self.serial.write("D00\n".encode('utf8'))
jsonStimulusParameters = self.get_and_print_responses()
stimulus_parameters = json.loads(jsonStimulusParameters)
except Exception as e:
print("jsonStimulusParameters: ", jsonStimulusParameters)
print("stimulus_parameters: ",stimulus_parameters)
raise(e)
if not stimulus_parameters['stimulus_parameters_valid']:
raise RuntimeError(f"Stimulus parameters where changed after the last measurement: {stimulus_parameters}")
return stimulus_parameters
def set_stimulus_parameters(self,
f_stimulus,
DC_offset = 2048,
A_stimulus = 0.6,
f_sampling = 10000,
print_response = False):
''' Method to set the stimulus parameters used in a single measurement.
Parameters:
f_stimulus: frequency in Hz of the stimulus used
DC_offset = 2048:
output range of teensy is positive, so we need to offset a sinusoid by a
fixed value to keep it in range.
A_stimulus = 0.6:
amplitude specified as part of the maximum allowed offset
f_sampling = 10000:
sampling frequency in samples per second
print_response = False: if True responses retrieved from teensy are printed'''
# "A" Change the output amplitude used for the measurement
self.serial.write(f"A{A_stimulus}\n".encode('utf8'))
response = self.get_and_print_responses()
if print_response:
print(response)
# "G<samplefreq>\n" Acquire data at <samplefreq>
self.serial.write(f"G{f_sampling}\n".encode('utf8'))
response = self.get_and_print_responses()
if print_response:
print(response)
# "Y<DC_Offset>\n" set average value stimulus (has to be between positive)
self.serial.write(f"Y{DC_offset}\n".encode('utf8'))
response = self.get_and_print_responses()
if print_response:
print(response)
# Set the stimulus frequency
measurement_string = f"F{f_stimulus}\n"
self.serial.write(measurement_string.encode('utf8'))
response = self.get_and_print_responses()
if print_response:
print(response)
def measure_spectrum(self,
f_range,
DC_offset = 2048,
A_stimulus = 0.6,
f_sampling = 10000,
Rs= 1000,
process_measurement=None ):
''' This function can be called to measure a complete impedance spectrum.
Parameters:
f_range:
                list or numpy array with frequencies at which to probe the system
DC_offset = 2048:
output range of teensy is positive, so we need to offset a sinusoid by a
fixed value to keep it in range.
A_stimulus = 0.6:
amplitude specified as part of the maximum allowed offset
f_sampling = 10000:
sampling frequency in samples per second
Rs= 1000:
value of the shunt resistor, see wiring folder for proposed circuits
process_measurement=estimate_impedance:
function to calculate impedance based on the frequency response
Output:
spectrum: a list containing for each frequency in f_range the
output of estimate_impedance.'''
if process_measurement is None:
process_measurement = self.estimate_impedance
spectrum = [None]*len(f_range)
for f_index, f in enumerate(f_range):
print(f"\n--------------------------------------------")
self.set_stimulus_parameters(f,
DC_offset = DC_offset,
A_stimulus = A_stimulus,
f_sampling = f_sampling,
print_response = True)
print(f"Measure at frequency {f}")
# Execute Measurement
self.serial.write("M\n".encode('utf8'))
sleep(0.1)
response = self.get_and_print_responses()
while True:
sleep(0.1)
# Get the metadata
self.serial.write("D00\n".encode('utf8'))
jsonStimulusParameters = self.get_and_print_responses()
try:
stimulus_parameters = json.loads(jsonStimulusParameters)
except Exception as e:
print('JSON STRING:', jsonStimulusParameters)
raise e
print('.',end='')
# Check if measurement is finished
if stimulus_parameters['stimulus_parameters_valid']:
break
V1 = self.get_data(stimulus_parameters, 1)
V2 = self.get_data(stimulus_parameters, 2)
DAC = self.get_data(stimulus_parameters, 3)
spectrum[f_index] = process_measurement(Rs, f, f_sampling, V1, V2, DAC)
# sleep(0.1)
print(stimulus_parameters)
print(f"\n--------------------------------------------")
return spectrum
@staticmethod
def estimate_impedance(Rs, f_stim, f_samp, V1, V2, DAC):
''' Estimate the frequency response at the stimulus frequency.
Parameters:
Rs: shunt resistance
f_stim: stimulus frequency
f_samp: sampling frequency
V1: measured V1 data
V2: measured V2 data
DAC: signal provided to DAC
Output:
Z: impedance at frequency f_stim
V1_1, V1_2, V1_0: fit parameters for equation 6 from referenced paper
V2_1, V2_2, V2_0: fit parameters for equation 7 from referenced paper
'Electrochemical Impedance Spectroscopy System Based on a Teensy Board',
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
IEEE TRANSACTIONS ON INSTRUMENTATION AND MEASUREMENT, VOL. 70, 2021
Here we implement equations 6, 7 and 8 of this paper.
Still needs a resistor and stimulus amplitude correction.'''
if not len(V1) == len(V2) or not len(V1) == len(DAC):
raise ValueError(f'Incompatible input lengths: len V1: {len(V1)}, len V2: {len(V2)}, len Dac: {len(DAC)}')
tstamps = np.arange(0, len(DAC), 1)/f_samp
sine = np.sin(2*np.pi*f_stim*tstamps)
cosine = np.cos(2*np.pi*f_stim*tstamps)
A = np.vstack([cosine, sine, np.ones(len(DAC))]).T
V1_1, V1_2, V1_0 = np.linalg.lstsq(A, V1, rcond=None)[0] # Fit according equation 6
V2_1, V2_2, V2_0 = np.linalg.lstsq(A, V2, rcond=None)[0] # Fit according equation 7
Z = (V2_2 + 1j* V2_1)/(V1_2 + 1j* V1_1)*Rs/2 # Apply equation 8, factor is difference between differential and direct on teensy
return { 'Z': Z, 'Rs':Rs, 'f_stim': f_stim, 'f_samp': f_samp,
'V1_1': V1_1, 'V1_2': V1_2, 'V1_0': V1_0,
'V2_1': V2_1, 'V2_2': V2_2, 'V2_0': V2_0 }
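# Usage sketch (illustrative; the serial port and frequency grid below are
# assumptions, not part of the original module). Runs a small impedance sweep
# with the EIS class above and prints |Z| at each stimulus frequency:
#
#     eis = EIS("COM3")                        # e.g. "/dev/ttyACM0" on Linux
#     freqs = np.logspace(1, 3, 5)             # 10 Hz .. 1 kHz, 5 points
#     spectrum = eis.measure_spectrum(freqs, Rs=1000, f_sampling=10000)
#     for point in spectrum:
#         print(point['f_stim'], abs(point['Z']))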
|
StarcoderdataPython
|
1756484
|
<reponame>pauliyobo/mapJSON
import json
import os
class MapObj:
def __init__(self, minx=0, maxx=0, miny=0, maxy=0, minz=0, maxz=0, type=None, value=None):
self.minx = minx
self.maxx = maxx
self.miny = miny
self.maxy = maxy
self.minz = minz
self.maxz = maxz
self.type = type
self.value = value
def covers(self, x, y, z):
if (
self.minx <= x
and self.maxx >= x
and self.miny <= y
and self.maxy >= y
and self.minz <= z
and self.maxz >= z
):
return True
return False
class tile(MapObj):
"""The tile class."""
def __init__(self, minx=0, maxx=0, miny=0, maxy=0, minz=0, maxz=0, tiletype=""):
super(tile, self).__init__(minx, maxx, miny, maxy, minz, maxz, "tile")
self.tiletype = tiletype
class zone(MapObj):
def __init__(self, minx, maxx, miny, maxy, minz, maxz, name=""):
super(zone, self).__init__(minx, maxx, miny, maxy, minz, maxz, "zone")
self.name = name
class Map:
def __init__(self, maxx=0, maxy=0, maxz=0, name=""):
self.set_borders((maxx, maxy, maxz))
self.set_name(name)
self.tiles = []
self.zones = []
def add_tile(self, minx=0, maxx=0, miny=0, maxy=0, minz=0, maxz=0, type="tile"):
self.tiles.append(tile(minx, maxx, miny, maxy, minz, maxz, type))
def add_zone(self, minx=0, maxx=0, miny=0, maxy=0, minz=0, maxz=0, name="zone"):
self.zones.append(zone(minx, maxx, miny, maxy, minz, maxz, name))
def set_name(self, name):
self.name = name
def set_borders(self, coords):
x, y, z = coords
self.maxx, self.maxy, self.maxz = x, y, z
def get_tile(self, x=0, y=0, z=0):
try:
t = self.get_tile_obj(x, y, z)
if t:
return t.type
else:
return ""
except Exception as e:
pass
def get_tile_obj(self, x=0, y=0, z=0):
filter = [i for i in self.tiles if i.covers(x, y, z)]
try:
if filter[-1]:
return filter[-1]
except Exception as e:
pass
def get_zone(self, x=0, y=0, z=0):
try:
z = self.get_zone_obj(x, y, z)
if z:
return z.name
else:
return ""
except Exception as e:
pass
def get_zone_obj(self, x=0, y=0, z=0):
filter = [i for i in self.zones if i.covers(x, y, z)]
try:
if filter[-1]:
return filter[-1]
except Exception as e:
pass
def delete_tile_at(self, x=0, y=0, z=0):
try:
t = self.get_tile_obj(x, y, z)
if t:
self.tiles.remove(t)
except Exception as e:
pass
def delete_zone_at(self, x=0, y=0, z=0):
try:
z = self.get_zone_obj(x, y, z)
if z:
self.zones.remove(z)
except Exception as e:
pass
def dump(self, in_file=False, file=""):
try:
self.tiles = [i.__dict__ for i in self.tiles]
self.zones = [i.__dict__ for i in self.zones]
data2 = {
"name": self.name,
"borders": [self.maxx, self.maxy, self.maxz],
"objects": self.tiles + self.zones,
}
if in_file == True and file != "":
with open(file, "w") as f:
f.write(json.dumps(data2, indent=1))
else:
return json.dumps(data2, indent=1)
except Exception as e:
pass
def load(self, target=""):
try:
if target and os.path.isfile(target):
with open(target, "r") as f:
data = json.loads(f.read())
else:
                data = json.loads(target)
self.name = data["name"]
self.maxx, self.maxy, self.maxz = data["borders"]
self.tiles = [
tile(
t["minx"],
t["maxx"],
t["miny"],
t["maxy"],
t["minz"],
t["maxz"],
t["tiletype"],
)
for t in data["objects"]
if t["type"] == "tile"
]
self.zones = [
zone(
t["minx"],
t["maxx"],
t["miny"],
t["maxy"],
t["minz"],
t["maxz"],
t["name"],
)
for t in data["objects"]
if t["type"] == "zone"
]
except Exception as e:
print(e.args)
def __repr__(self):
return "Map(maxx=%d, maxy=%d, maxz=%d, name=%s)" % (
self.maxx,
self.maxy,
self.maxz,
repr(self.name),
)
def __getitem__(self, item):
if isinstance(item, int):
return self.get_tile(item)
elif isinstance(item, tuple):
return self.get_tile(*item)
raise TypeError("Must send tuple or int, not %r" % item)
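# Usage sketch (illustrative; values are made up). Round-trips a small map
# through JSON with the Map class above:
#
#     m = Map(maxx=10, maxy=10, maxz=2, name="warehouse")
#     m.add_tile(0, 4, 0, 4, 0, 0, "concrete")
#     m.add_zone(0, 9, 0, 9, 0, 1, "ground floor")
#     m.get_tile_obj(2, 2, 0).tiletype             # -> "concrete"
#     m.get_zone(5, 5, 0)                          # -> "ground floor"
#     m.dump(in_file=True, file="warehouse.json")  # note: converts tiles/zones to dicts
#     m2 = Map(); m2.load("warehouse.json")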
|
StarcoderdataPython
|
6666900
|
# Author: <NAME>
# email: <EMAIL>
from PIL import Image
import numpy as np
import init_paths
from image_processing import hwc2chw
from xinshuo_visualization import visualize_image
def test_hwc2chw():
print('test same channel image, should be the same')
image_path = '../lena.jpg'
img = np.array(Image.open(image_path).convert('RGB'))
visualize_image(img[:, :, 0], vis=True)
img_shape = img.shape
chw_img = hwc2chw(img)
visualize_image(chw_img[0, :, :], vis=True)
print(chw_img.shape)
print(img_shape)
assert chw_img.shape[0] == img_shape[2] and chw_img.shape[1] == img_shape[0] and chw_img.shape[2] == img_shape[1]
print('test different channel image, should not be the same')
image_path = '../lena.jpg'
img = np.array(Image.open(image_path).convert('RGB'))
visualize_image(img[:, :, 0], vis=True)
img_shape = img.shape
chw_img = hwc2chw(img)
visualize_image(chw_img[1, :, :], vis=True)
print(chw_img.shape)
print(img_shape)
assert chw_img.shape[0] == img_shape[2] and chw_img.shape[1] == img_shape[0] and chw_img.shape[2] == img_shape[1]
print('\n\nDONE! SUCCESSFUL!!\n')
if __name__ == '__main__':
test_hwc2chw()
|
StarcoderdataPython
|
6476017
|
<filename>building_clean_tweets.py
import pandas as pd
from clean_text import get_clean
import os
import json
path = 'Data/Tweets/'
users = os.listdir(path)
cnt = 1
x = pd.DataFrame(columns=['users', 'locations', 'tweets'])
x.to_csv('Data/twitter_data.csv', index=False)
for user in users:
print(cnt, " => ", user)
files = os.listdir(path + user + '/')
tweet = ''
location = set()
for file in files:
with open(path + user + '/' + file, 'r', encoding='utf-8') as f:
data = json.load(f)
try:
tweet += ' ' + get_clean(data['text'])
except Exception as e:
print(e)
tweet += ' '
try:
location.add(data['user']['location'])
except Exception as e:
print(e)
df = pd.DataFrame([[user, list(location), tweet]])
df.to_csv('Data/twitter_data.csv', mode='a', header=False, index=False)
cnt += 1
print('Done...')
|
StarcoderdataPython
|
6493728
|
<gh_stars>0
from setuptools import setup, find_packages
from pip.req import parse_requirements
import sys, os
def run_setup():
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'Readme.md')).read()
version = '0.1'
install_reqs = parse_requirements(os.path.join(here, 'requirements.txt'), session=False)
reqs = [str(ir.req) for ir in install_reqs]
setup(name='sql_kernel',
version=version,
description="SQL Kernel for Jupyter",
long_description=README,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2',
],
keywords='database jupyter sqlalchemy ipython dbms',
author='<NAME>',
author_email='<EMAIL>',
url='bitbucket.org/alyr/sql_kernel',
license='MIT',
packages=find_packages('src'),
package_dir = {'': 'src'},
include_package_data=True,
zip_safe=False,
install_requires=reqs,
)
def register_kernel():
from jupyter_client.kernelspec import install_kernel_spec
from IPython.utils.tempdir import TemporaryDirectory
import json
import os
import sys
kernel_json = {
"argv":[sys.executable,"-m","sql_kernel", "-f", "{connection_file}"],
"display_name":"SQL Kernel",
"language":"sql",
"codemirror_mode":"sql"
}
def install_my_kernel_spec(user=True):
with TemporaryDirectory() as td:
os.chmod(td, 0o755) # Starts off as 700, not user readable
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(kernel_json, f, sort_keys=True)
install_kernel_spec(td, 'sql_kernel', user=user, replace=True)
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms
argv = sys.argv
user = '--user' in argv or not _is_root()
install_my_kernel_spec(user=user)
run_setup()
register_kernel()
|
StarcoderdataPython
|
3523329
|
# -*- coding: utf-8 -*-
from ._common import *
class YinYueTai(Extractor):
name = '音悦台 (YinYueTai)'
def prepare(self):
info = MediaInfo(self.name)
info.extra.referer = 'https://www.yinyuetai.com/'
if not self.vid:
            self.vid = match1(self.url, r'\Wid=(\d+)')
data = get_response('https://data.yinyuetai.com/video/getVideoInfo',
params={'id': self.vid}).json()
assert not data['delFlag'], 'MTV has been deleted!'
info.title = data['videoName']
info.artist = data['artistName']
url = data['videoUrl']
info.streams['current'] = {
'container': url_info(url)[1],
'video_profile': 'current',
'src' : [url]
}
return info
site = YinYueTai()
|
StarcoderdataPython
|
8179859
|
import os
from tqdm import tqdm
import pandas as pd
import numpy as np
if __name__ == "__main__":
df = pd.read_csv('./IMDB Dataset.csv').values
train_df = df[:int(len(df) * 0.9)]
val_df = df[int(len(df) * 0.9):]
with open('imdb-train.txt', 'w') as f:
for i in tqdm(range(len(train_df))):
f.write(f'{train_df[i][0]}\n\n')
with open('imdb-val.txt', 'w') as f:
for i in tqdm(range(len(val_df))):
f.write(f'{val_df[i][0]}\n\n')
# kaggle datasets download lakshmi25npathi/imdb-dataset-of-50k-movie-reviews
|
StarcoderdataPython
|
1746251
|
from pydc import DDC
def main():
ddc = DDC("example_ddc_hmm.pl", 500)
prob_s0 = ddc.query(
"current(weather(brussels))~=sunny")
ddc.step(observations="observation(activity(tintin))~=clean")
prob_s1 = ddc.query("(current(temperature(brussels))~=X, X>20)")
# prob_s1 = ddc.query("(current(weather(brussels))~=sunny)")
print(prob_s0)
print(prob_s1)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3482100
|
<gh_stars>1-10
"""
For organizing the post data.
It's basically a Django project with some modifications, so you can use it to
save your texts and then use the gateway to post them on different social
networks.
"""
|
StarcoderdataPython
|
12843783
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Update : 2020-09-05 09:46:01
# @Author : <NAME> (<EMAIL>)
"""Sequence taggers."""
from old_fashioned_nlp.tagging.token import CharTfidfTagger
__all__ = ['CharTfidfTagger']
|
StarcoderdataPython
|
4956697
|
#!/usr/bin/python
# coding=utf-8
import sys, time #Allows for controlling system functions (interrupt) and sleep times.
import os
import RPi.GPIO as GPIO #Tells python to use GPIO libraries
from lifxlan import LifxLAN, Light #Make sure 'pip install lifxlan' was run to enable lifx controls
from signal import pause #Not sure if this is needed after testing, but I'm 15 beers in and don't feel like playing with it.
from datetime import datetime #Used for timestamping the log output. There's probably a better way to do this but I don't know how to do it.
#Instantiate lifxlan
lifxlan = LifxLAN()
#Setup for master light for power state polling
lightmac1="YOU_LIGHT_MAC_ADDRESS_GOES_HERE (AA:AA:AA, etc.)"
lightIP1="YOUR_LIGHT_IP_ADDRESS_GOES_HERE"
#Setup GPIO pins as BCM (Broadcom). Physical pin for PIR sensor input is 16.
GPIO.setmode(GPIO.BCM)
GPIO.setup(23,GPIO.IN,GPIO.PUD_DOWN)
#Set the master light variables required for polling. Called as 'doorlight'.
doorlight = Light(lightmac1,lightIP1)
#Set the group of lights to control. The other option is to send to all lights, but that affects every room on the network.
officelights=lifxlan.get_devices_by_group("Office")
#This entire thing was hacked together. Ideally I would like it to not check for power so often, to minimize network traffic to the lights. I'm still thinking about how to do this. Something maybe like 'Check for motion, if no motion then loop. If motion then turn lights on.'
#But then how would the lights get turned off when there's no motion?
#Also, I'm not sure if the 'now' and 'dt_string' variables are needed every time before a print. I have a feeling that if they're not then it would read once globally and that would be the value. A possible option is to look into turning the date/time variables into a function and calling that every time?
def Main():
while True:
i = GPIO.input(23)
if i==0: #When output from motion sensor is LOW
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "No Motion Detected. Pin reads", i
while True:
try:
powerstatus=Light.get_power(doorlight)
except:
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "Couldn't get powerstatus while no motion is detected. Return is", powerstatus
continue
break
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "Before no motion power check", powerstatus
if powerstatus != 0: #Check if lights are currently turned on
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "After power check inside no motion", powerstatus
officelights.set_power(0) #Turn lights off
time.sleep(10)
elif i==1: #When output from motion sensor is HIGH
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "Motion Detected. Pin reads", i
while True:
try:
powerstatus=Light.get_power(doorlight)
except:
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "Couldn't get powerstatus while motion is detected. Return is", powerstatus
continue
break
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "Motion detected powerstatus", powerstatus
if powerstatus == 0: #Check if lights are currently turned off
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "After power check inside motion detected", powerstatus
officelights.set_power(65535) #Turn lights on full
time.sleep(600)
#If this is installed as a service then I'm not entirely sure this is necessary. However, I need to look into any cleanup requirements (GPIO.cleanup) when failure occurs while running as a service.
if __name__ == '__main__':
try:
Main()
except KeyboardInterrupt:
now = datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")
print dt_string, "CTRL+C entered. Running cleanup!"
try:
GPIO.cleanup()
sys.exit(0)
except SystemExit:
os._exit(0)
|
StarcoderdataPython
|
3486361
|
import asyncio
import sys
import pytest
from HABApp.rule import Rule
from ..rule_runner import SimpleRuleRunner
class ProcRule(Rule):
def __init__(self):
super().__init__()
self.ret = None
def set_ret(self, value):
self.ret = value
@pytest.fixture(scope="function")
def rule():
runner = SimpleRuleRunner()
runner.set_up()
rule = ProcRule()
yield rule
runner.tear_down()
@pytest.mark.asyncio
async def test_run_func(rule):
rule.execute_subprocess(
rule.set_ret, sys.executable, '-c', 'import datetime; print("OK", end="")', capture_output=True
)
await asyncio.sleep(0.5)
assert rule.ret.returncode == 0
assert rule.ret.stdout == 'OK'
assert rule.ret.stderr == ''
@pytest.mark.asyncio
async def test_run_func_no_cap(rule):
rule.execute_subprocess(
rule.set_ret, sys.executable, '-c', 'import datetime; print("OK", end="")', capture_output=False
)
await asyncio.sleep(0.5)
assert rule.ret.returncode == 0
assert rule.ret.stdout is None
assert rule.ret.stderr is None
@pytest.mark.asyncio
async def test_invalid_program(rule):
rule.execute_subprocess(rule.set_ret, 'ProgramThatDoesNotExist', capture_output=True)
await asyncio.sleep(0.5)
assert rule.ret.returncode == -1
assert rule.ret.stdout is None
assert isinstance(rule.ret.stderr, str)
|
StarcoderdataPython
|
3341301
|
<reponame>thermokarst-forks/library
from django.apps import AppConfig
class PluginsConfig(AppConfig):
name = 'library.plugins'
def ready(self):
# register the decorated signals
from . import signals # noqa: F401
|
StarcoderdataPython
|
6632865
|
<reponame>ryoma-jp/009_Benchmark<filename>benchmark_tensorflow.py
#! -*- coding: utf-8 -*-
"""
[tensorflow]
python benchmark_tensorflow.py --help
python benchmark_tensorflow.py --param_csv benchmark.csv
"""
#---------------------------------
# Import modules
#---------------------------------
import os
import sys
import time
import tqdm
import argparse
from argparse import RawTextHelpFormatter
import numpy as np
import pandas as pd
from common import GetParams, DataLoader
import cv2
import tensorflow as tf
#---------------------------------
# Constant definitions
#---------------------------------
#---------------------------------
# Functions
#---------------------------------
"""
Function: ArgParser
Description: parse the command-line arguments and return their values
"""
def ArgParser():
	parser = argparse.ArgumentParser(description='Measure benchmark scores with TensorFlow', formatter_class=RawTextHelpFormatter)
	# --- add arguments ---
	parser.add_argument('--param_csv', dest='param_csv', type=str, required=True, help='Parameter file describing the benchmark conditions\n'
				'[Format] type, model_dir, data_dir\n'
				'    type: classification, ...[T.B.D]\n'
				'    model_dir: directory containing the trained model\n'
				'    model_name: base name of the model files\n'
				'    data_dir: directory containing the test data')
args = parser.parse_args()
return args
#---------------------------------
# Main
#---------------------------------
if __name__ == '__main__':
	# --- parse arguments ---
args = ArgParser()
	# --- load benchmark parameters ---
type, model_dir, model_name, data_dir = GetParams(args.param_csv)
f_log = open('log.csv', 'w')
f_log.write('iter,elapsed_time[sec],inference_time[sec/100iter]\n')
for _type, _model_dir, _model_name, _data_dir in zip(type, model_dir, model_name, data_dir):
		# --- create DataLoader ---
data_loader = DataLoader(_data_dir)
print(data_loader.GetData())
data = data_loader.GetData()
		# --- load model ---
gd = tf.compat.v1.GraphDef.FromString(open(os.path.join(_model_dir, _model_name+'_frozen.pb'), 'rb').read())
inp, predictions = tf.compat.v1.import_graph_def(gd, return_elements = ['input:0', 'MobilenetV2/Predictions/Reshape_1:0'])
        # --- Inference ---
img = None
prediction_header = ['prediction', 'label_id', 'filename']
start_time = time.time()
for cnt, (label_id, filename) in enumerate(zip(data['label_id'], data['filename'])):
if (img is None):
img = np.array([cv2.imread(os.path.join(_data_dir, label_id, filename))]) / 128 - 1
else:
try:
img = np.vstack((img, np.array([cv2.imread(os.path.join(_data_dir, label_id, filename)) / 128 - 1])))
except:
print(_data_dir)
print(label_id)
print(filename)
quit()
if ((cnt+1) % 10 == 0):
print(str(time.time()-start_time) + ' : ' + str(cnt+1) + ' of ' + str(len(data)))
with tf.compat.v1.Session(graph=inp.graph):
pre_inference = time.time()
prediction_val = predictions.eval(feed_dict={inp: img})
after_inference = time.time()
if ((cnt+1) == 10):
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-10:cnt+1], data['filename'].values[cnt+1-10:cnt+1])).T).to_csv('predictions.csv', \
header=prediction_header, index=False)
else:
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-10:cnt+1], data['filename'].values[cnt+1-10:cnt+1])).T).to_csv('predictions.csv', \
mode='a', \
header=False, index=False)
img = None
f_log.write(str(cnt+1)+','+str(time.time()-start_time)+','+str(after_inference-pre_inference)+'\n')
if ((cnt+1) % 10 > 0):
with tf.compat.v1.Session(graph=inp.graph):
pre_inference = time.time()
prediction_val = predictions.eval(feed_dict={inp: img})
after_inference = time.time()
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-((cnt+1)%10):], data['filename'].values[cnt+1-((cnt+1)%10):])).T).to_csv('predictions.csv', \
mode='a', \
header=False, index=False)
img = None
f_log.write(str(cnt+1)+','+str(time.time()-start_time)+','+str(after_inference-pre_inference)+'\n')
f_log.close()
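# --- Illustrative parameter file (a minimal sketch, not part of the original script) ---
# GetParams() is assumed here to read one benchmark per row with the four columns
# unpacked above (type, model_dir, model_name, data_dir); the header and paths
# below are hypothetical.
#
# type,model_dir,model_name,data_dir
# classification,./models/mobilenet_v2,mobilenet_v2,./data/imagenet_val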
|
StarcoderdataPython
|
5098646
|
<filename>dag_bakery/callbacks/slack_callback.py
from datetime import datetime
from typing import Callable, Optional, List
from airflow.hooks.base_hook import BaseHook
from airflow.models import TaskInstance
from airflow.operators.slack_operator import SlackAPIPostOperator
from dag_bakery.callbacks.context_callback import ContextCallback
def task_fail_slack_msg(_: dict) -> List[dict]:
attachments = [
{
"mrkdwn_in": ["text"],
"color": "#ED553B",
"author_name": "{{ task_instance.dag_id }}",
"author_link": "{{ task_instance.log_url }}",
"author_icon": "https://airflow.apache.org/docs/stable/_images/pin_large.png",
"fields": [
{
"title": "Task ID",
"value": "{{ task_instance.task_id }}",
"short": False,
},
{
"title": "Exception",
"value": "{{ exception }}",
"short": False,
},
],
"footer": "{{ env_var('ENV', 'local') }} | <{{ task_instance.log_url }}|Check Logs>",
"footer_icon": "https://image.flaticon.com/icons/png/512/391/391116.png",
"ts": datetime.now().timestamp(),
},
]
return attachments
class SlackAlertCallback(ContextCallback):
def __init__(
self, slack_conn_id: str = "SLACK", message_gen: Callable = task_fail_slack_msg, channel: Optional[str] = None
):
self.slack_conn_id = slack_conn_id
self.message_gen = message_gen
self.channel = channel
def callback(self, context: dict):
attachments = self.message_gen(context)
slack_conn = BaseHook.get_connection(self.slack_conn_id)
slack_channel = self.channel or slack_conn.login
slack_alert_operator = SlackAPIPostOperator(
slack_conn_id=self.slack_conn_id,
task_id="slack_alert",
channel=slack_channel,
text=None,
attachments=attachments,
username="Airflow {{ env_var('ENV', 'local') }}",
)
task_instance: TaskInstance = context["task_instance"]
template_env = task_instance.task.get_template_env()
slack_alert_operator.render_template_fields(
context=context,
jinja_env=template_env,
)
return slack_alert_operator.execute(context=context)
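# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# Wiring the callback into a DAG, assuming the usual Airflow pattern of passing
# a callable as on_failure_callback and a Slack connection named "SLACK" already
# configured; the DAG id, schedule and channel below are hypothetical.
#
# from datetime import datetime
# from airflow import DAG
# from dag_bakery.callbacks.slack_callback import SlackAlertCallback
#
# with DAG(
#     dag_id="example_dag",
#     start_date=datetime(2021, 1, 1),
#     schedule_interval="@daily",
#     default_args={"on_failure_callback": SlackAlertCallback(channel="#alerts").callback},
# ) as dag:
#     ...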
|
StarcoderdataPython
|
6686942
|
class ListSecrets():
pass
|
StarcoderdataPython
|
3386440
|
# -*- coding: utf-8 -*- #
#
# oeis/generators/__init__.py
#
#
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Interface to the OEIS.
"""
# -------------- Standard Library -------------- #
import itertools
# ---------------- oeis Library ---------------- #
__all__ = ("g27", "i27", "g40", "i40")
def g27():
"""A000027: The Natural Numbers."""
yield from itertools.count(1)
def i27(index):
"""A000027: The Natural Numbers."""
return index
def g40():
    """A000040: The Prime Numbers."""
    # simple trial division, enough for an illustrative generator
    for n in itertools.count(2):
        if all(n % d for d in range(2, int(n ** 0.5) + 1)):
            yield n
def i40(index):
    """A000040: The Prime Numbers."""
    # 1-indexed to match the OEIS offset: i40(1) == 2, i40(2) == 3, ...
    return next(itertools.islice(g40(), index - 1, None))
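# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# Assuming g27/i27 and g40/i40 enumerate A000027 and A000040 as their docstrings
# state, the generator and index forms agree on the first terms of each sequence:
#
# >>> list(itertools.islice(g27(), 5))
# [1, 2, 3, 4, 5]
# >>> list(itertools.islice(g40(), 5))
# [2, 3, 5, 7, 11]
# >>> i40(4)
# 7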
|
StarcoderdataPython
|
5188999
|
<reponame>cdunn6754/cdunnSite
from django.db import models
from django.utils import timezone
from django.urls.base import reverse
# Create your models here.
class ContentTopic(models.Model):
name = models.CharField(max_length = 100)
description = models.CharField(max_length = 500)
def __str__(self):
return "ContentTopic: {}".format(self.name)
class ContentPost(models.Model):
title = models.CharField(max_length = 250)
description = models.CharField(max_length = 500)
heading_image = models.ImageField(upload_to="heading_images")
template_file = models.FileField(upload_to="template_files")
creation_date = models.DateField(default=timezone.now)
date_modified = models.DateField(auto_now=True)
contentTopics = models.ManyToManyField(ContentTopic)
slug = models.SlugField(max_length=200, unique=True)
def get_absolute_url(self):
return reverse('content:post_detail', kwargs={'slug': self.slug})
def __str__(self):
return "ContentPost: {}".format(self.title)
|
StarcoderdataPython
|
9681612
|
# These classes represent the query expressions from section 2.6 of the paper
# Dichotomy of Probabilistic Inference for Unions of Conjunctive Queries
# by <NAME> 2012
from collections import defaultdict
import itertools
import nltk
from symbolic_slimshot import algorithm
class Graph(object):
def __init__(self, adjacencyList={}):
self.adjacencyList = adjacencyList
def connectedComponents(self):
vertices = self.adjacencyList.keys()
explored = set()
components = []
for v in vertices:
if v not in explored:
explored.add(v)
component = set([v])
neighbors = set(self.adjacencyList[v])
[component.add(n) for n in self.adjacencyList[v]]
while neighbors:
next = neighbors.pop()
if next not in explored:
explored.add(next)
[neighbors.add(n) for n in self.adjacencyList[next]]
[component.add(n) for n in self.adjacencyList[next]]
components.append(component)
return components
# Connected conjunctive query
class Component(object):
def __init__(self, relations=[]):
self.relations = relations
def copy(self):
return Component([r.copy() for r in self.getRelations()])
def copyWithDeterminism(self, relsToMakeDeterministic):
newRelations = []
for r in self.relations:
newRel = r.copy()
relName = r.getName()
if relName in relsToMakeDeterministic:
newRel.deterministic = True
newRel.sampled = True
newRelations.append(newRel)
return Component(newRelations)
def getRelations(self):
return self.relations
def getProbabilisticRelations(self):
return list(filter(lambda x: not x.isDeterministic(), self.relations))
def getDeterministicRelations(self):
return list(filter(lambda x: x.isDeterministic(), self.relations))
def getProbabilisticRelationSymbols(self):
return list(
map(
lambda x: x.getName(),
list(filter(lambda x: not x.isDeterministic(), self.relations)),
)
)
def getRelationSymbols(self):
return [rel.getName() for rel in self.getRelations()]
def hasNegatedRelations(self):
return any([rel.isNegated() for rel in self.getRelations()])
def getVariables(self):
return set([x for rel in self.relations for x in rel.getVariables()])
    # getVarPositions() below returns a dictionary: {Var: {RelName: set([varPos1, varPos2, ...])}}
def hasVariables(self):
return len(self.getVariables()) > 0
def getVarPositions(self):
varsToPositions = {}
for rel in self.relations:
if rel.hasVariables():
varPos = rel.getVariablePositions()
for var in varPos:
if var in varsToPositions:
varsToPositions[var][rel] = varPos[var]
else:
varsToPositions[var] = {rel: varPos[var]}
return varsToPositions
def containedIn(self, com2):
# TODO(ericgribkoff) Implement minimization for queries with
# negation
c1Vars = self.getVariables()
c2Vars = com2.getVariables()
byVarMaps = []
for var in c2Vars:
byVarMaps.append([pair for pair in itertools.product([var], c1Vars)])
mappings = [h for h in itertools.product(*byVarMaps)]
for mapping in mappings:
h = dict((x, y) for x, y in list(mapping))
if self.isHomomorphism(com2.applyH(h)):
return True
return False
def applyH(self, h):
mappedRels = []
for rel in self.relations:
newRel = rel.copy()
newRel.applyH(h)
mappedRels.append(newRel)
return Component(mappedRels)
def isHomomorphism(self, com2):
relDict = {}
for rel in self.getRelations():
relStr = "%s:%s" % (
rel.getNameWithEqualityConstraints(),
",".join(rel.getVariablesForHomomorphism()),
)
relDict[relStr] = True
for rel in com2.getRelations():
relStr = "%s:%s" % (
rel.getNameWithEqualityConstraints(),
",".join(rel.getVariablesForHomomorphism()),
)
if relStr not in relDict:
return False
return True
def minimize(self):
for x in algorithm.powerset(self.getRelations()):
if len(x) and not len(x) == len(self.getRelations()):
c = Component(list(x))
if c.containedIn(self) and self.containedIn(c):
return c
return self.copy()
def usesSeparator(self, subId):
return any(r.usesSeparator(subId) for r in self.getRelations())
def getUsedSeparators(self):
seps = set()
for r in self.getRelations():
seps.update(r.getUsedSeparators())
return seps
def applySeparator(self, separator, replacement):
for rel in self.relations:
rel.applySeparator(separator, replacement)
def hasRelation(self, rel):
return rel.getName() in [r.getName() for r in self.relations]
def isSuperset(self, c):
return all([c.hasRelation(rel) for rel in self.relations])
def getAdjacencyList(self):
varsToRels = defaultdict(set)
adjList = defaultdict(set)
for rel in self.relations:
if rel.hasVariables():
[varsToRels[var].add(rel) for var in rel.getVariables()]
else:
# add any relations without vars as singleton components
adjList[rel].add(rel)
for var in varsToRels.keys():
[
adjList[rel1].add(rel2)
for rel1 in varsToRels[var]
for rel2 in varsToRels[var]
]
return adjList
def quantifiersProver9(self, formula):
quantifiedFormula = []
for v in self.getVariables():
quantifiedFormula.append("exists %s.(" % v.getVar())
quantifiedFormula.append(formula)
quantifiedFormula.append(")" * len(self.getVariables()))
return "".join(quantifiedFormula)
def toProver9(self):
return self.quantifiersProver9(
" and ".join(["(%s)" % x.toProver9() for x in self.relations])
)
def prettyPrint(self):
return "%s" % ", ".join([x.__repr__() for x in self.relations])
def prettyPrintCNF(self):
return "%s" % " v ".join([x.__repr__() for x in self.relations])
def __repr__(self):
return "(%s)" % ", ".join([x.__repr__() for x in self.relations])
# Conjunction of components
class ConjunctiveQuery(object):
def __init__(self, components=[]):
self.components = components
def copy(self):
return ConjunctiveQuery([c.copy() for c in self.components])
def copyWithDeterminism(self, relsToMakeDeterministic):
return ConjunctiveQuery(
[c.copyWithDeterminism(relsToMakeDeterministic) for c in self.components]
)
def getComponents(self):
return self.components
def getComponent(self, index):
return self.components[index]
def getRelations(self):
return [rel for c in self.components for rel in c.getRelations()]
def getRelationSymbols(self):
return [rel.getName() for rel in self.getRelations()]
def getVariables(self):
vars = set()
for rel in self.getRelations():
vars.update(set(rel.getVariables()))
return vars
def hasVariables(self):
return len(self.getVariables()) > 0
def usesSeparator(self, subId):
return any(c.usesSeparator(subId) for c in self.components)
def getUsedSeparators(self):
seps = set()
for c in self.components:
seps.update(c.getUsedSeparators())
return seps
def applySeparator(self, separator, replacement):
componentsWithVars = [c for c in self.components if c.hasVariables()]
for i in range(len(componentsWithVars)):
componentsWithVars[i].applySeparator(separator[i], replacement)
def getSeparator(self):
componentsWithVars = [c for c in self.components if c.hasVariables()]
varPositions = [c.getVarPositions() for c in componentsWithVars]
componentVars = [c.keys() for c in varPositions] # used to fix an ordering
for potentialSep in itertools.product(*componentVars):
potentialMap = {}
validMap = True
for ind, var in enumerate(potentialSep):
relationsWithThisVarInThisComponent = set(varPositions[ind][var].keys())
probabilisticRelationsInThisComponent = set(
componentsWithVars[ind].getProbabilisticRelations()
)
if len(
probabilisticRelationsInThisComponent.difference(
relationsWithThisVarInThisComponent
)
):
validMap = False
break
deterministicRelationsInThisComponent = set(
self.components[ind].getDeterministicRelations()
)
for detR in deterministicRelationsInThisComponent.difference(
relationsWithThisVarInThisComponent
):
if detR.getName() in potentialMap:
del potentialMap[detR.getName()]
# look at all relations in the component with this variable
for rel in varPositions[ind][var]:
if rel.getName() not in potentialMap:
# haven't seen this relation before, add it to potential separator
# mapping
potentialMap[rel.getName()] = varPositions[ind][var][rel]
# we have seen this relation before, see if the potential separator
# positions intersect with those seen before
elif (
len(
potentialMap[rel.getName()].intersection(
varPositions[ind][var][rel]
)
)
== 0
):
if not rel.isDeterministic():
validMap = False
break
elif rel.getName() in potentialMap:
del potentialMap[rel.getName()]
else:
potentialMap[rel.getName()] = potentialMap[
rel.getName()
].intersection(varPositions[ind][var][rel])
if not validMap:
break
if validMap:
return potentialSep
def minimize(self):
minCom = [c.minimize() for c in self.getComponents()]
redundant = [False] * len(minCom)
for i in range(len(minCom)):
for j in range(len(minCom)):
if i == j or redundant[i]:
continue
else:
if minCom[i].containedIn(minCom[j]):
redundant[j] = True
finalCom = []
for i in range(len(minCom)):
if not redundant[i]:
finalCom.append(minCom[i])
return ConjunctiveQuery(finalCom)
def containedIn(self, con2):
for com in con2.getComponents():
if not any([c.containedIn(com) for c in self.getComponents()]):
return False
return True
def toProver9(self):
return " and ".join(["(%s)" % x.toProver9() for x in self.components])
def prettyPrint(self):
return "(%s)" % " ^ ".join([x.prettyPrint() for x in self.components])
def prettyPrintCNF(self):
return "(%s)" % " v ".join([x.prettyPrintCNF() for x in self.components])
def __repr__(self):
return "c(%s)" % " ^ ".join([x.__repr__() for x in self.components])
# Disjunction of components
class DisjunctiveQuery(object):
def __init__(self, components=[]):
self.components = components
def copy(self):
return DisjunctiveQuery([c.copy() for c in self.components])
def getComponents(self):
return self.components
def getRelations(self):
return [rel for c in self.components for rel in c.getRelations()]
def getRelationSymbols(self):
return [rel.getName() for rel in self.getRelations()]
def hasVariables(self):
return any([len(c.getVariables()) > 0 for c in self.components])
def containedIn(self, dis2):
for com in self.getComponents():
# forall i, does there exist a j s.t. c_i => c_j
if not any([com.containedIn(c) for c in dis2.getComponents()]):
return False
return True
def minimize(self):
minCom = [c.minimize() for c in self.getComponents()]
redundant = [False] * len(minCom)
for i in range(len(minCom)):
for j in range(len(minCom)):
if i == j or redundant[j]:
continue
else:
if minCom[i].containedIn(minCom[j]):
redundant[i] = True
finalCom = []
for i in range(len(minCom)):
if not redundant[i]:
finalCom.append(minCom[i])
return DisjunctiveQuery(finalCom)
def usesSeparator(self, subId):
return any(c.usesSeparator(subId) for c in self.components)
def getUsedSeparators(self):
seps = set()
for c in self.components:
seps.update(c.getUsedSeparators())
return seps
def applySeparator(self, separator, replacement):
componentsWithVars = [c for c in self.components if c.hasVariables()]
for i in range(len(componentsWithVars)):
componentsWithVars[i].applySeparator(separator[i], replacement)
# TODO this function and getAdjacencyList should be using
# getNameWithEqualityConstraints()
def getSeparator(self):
componentsWithVars = [c for c in self.components if c.hasVariables()]
varPositions = [c.getVarPositions() for c in componentsWithVars]
componentVars = [c.keys() for c in varPositions] # used to fix an ordering
for potentialSep in itertools.product(*componentVars):
potentialMap = {}
validMap = True
for ind, var in enumerate(potentialSep):
relationsWithThisVarInThisComponent = set(varPositions[ind][var].keys())
probabilisticRelationsInThisComponent = set(
componentsWithVars[ind].getProbabilisticRelations()
)
if len(
probabilisticRelationsInThisComponent.difference(
relationsWithThisVarInThisComponent
)
):
validMap = False
break
deterministicRelationsInThisComponent = set(
self.components[ind].getDeterministicRelations()
)
for detR in deterministicRelationsInThisComponent.difference(
relationsWithThisVarInThisComponent
):
if detR.getName() in potentialMap:
del potentialMap[detR.getName()]
# look at all relations in the component with this variable
for rel in varPositions[ind][var]:
if rel.getName() not in potentialMap:
# haven't seen this relation before, add it to potential separator
# mapping
potentialMap[rel.getName()] = varPositions[ind][var][rel]
# we have seen this relation before, see if the potential separator
# positions intersect with those seen before
elif (
len(
potentialMap[rel.getName()].intersection(
varPositions[ind][var][rel]
)
)
== 0
):
if not rel.isDeterministic():
validMap = False
break
elif rel.getName() in potentialMap:
del potentialMap[rel.getName()]
else:
potentialMap[rel.getName()] = potentialMap[
rel.getName()
].intersection(varPositions[ind][var][rel])
if not validMap:
break
if validMap:
return potentialSep
# TODO(ericgribkoff) using getNameWithEqualityConstraints() for adjacencyList computation is not
# sufficient, as components are independent iff their equality constraints don't overlap
# although, depending on how "complete" the shattering is, and if only one set of constants
# were introduced with equality, this might be sufficient
def getAdjacencyList(self):
relsToComponents = defaultdict(set)
adjList = defaultdict(set)
for c in self.components:
if all(r.isDeterministic() for r in c.getRelations()):
pass
else:
[
relsToComponents[rel.getRelationNameForAdjacency()].add(c)
for rel in c.getRelations()
if not rel.isDeterministic()
]
for rel in relsToComponents.keys():
[
adjList[c1].add(c2)
for c1 in relsToComponents[rel]
for c2 in relsToComponents[rel]
]
# TODO(ericgribkoff) Fix this hack
for c in self.components:
if c not in adjList:
adjList[c].add(c)
return adjList
def toProver9(self):
return " or ".join(["(%s)" % x.toProver9() for x in self.components])
def prettyPrint(self):
return "(%s)" % " v ".join([x.prettyPrint() for x in self.components])
def prettyPrintCNF(self):
return "(%s)" % " ^ ".join([x.prettyPrintCNF() for x in self.components])
def __repr__(self):
return "d(\n%s)" % " v \n".join([x.__repr__() for x in self.components])
# Disjunction of conjuctive queries
class DNF(object):
def __init__(self, conjuncts=[]):
self.conjuncts = conjuncts
def copy(self):
return DNF([c.copy() for c in self.conjuncts])
def copyWithDeterminism(self, relsToMakeDeterministic):
return DNF(
[c.copyWithDeterminism(relsToMakeDeterministic) for c in self.conjuncts]
)
def containedIn(self, dnf2):
for con in self.getConjuncts():
if not any([c.containedIn(con) for c in dnf2.getConjuncts()]):
return False
return True
def getUsedSeparators(self):
seps = set()
for c in self.conjuncts:
seps.update(c.getUsedSeparators())
return seps
def applySeparator(self, separator, replacement):
for c in self.conjuncts:
c.applySeparator(separator, replacement)
def getConjuncts(self):
return self.conjuncts
def getRelations(self):
rels = set()
for d in self.conjuncts:
rels.update(set(d.getRelations()))
return rels
def getRelationSymbols(self):
rels = set()
for d in self.conjuncts:
rels.update(set(d.getRelationSymbols()))
return rels
def minimize(self):
minCom = [c.minimize() for c in self.getConjuncts()]
redundant = [False] * len(minCom)
for i in range(len(minCom)):
for j in range(len(minCom)):
if i == j or redundant[j]:
continue
else:
if minCom[i].containedIn(minCom[j]):
redundant[i] = True
finalCom = []
for i in range(len(minCom)):
if not redundant[i]:
finalCom.append(minCom[i])
return DNF(finalCom)
def getAdjacencyList(self):
relsToConjuncts = defaultdict(set)
adjList = defaultdict(set)
for d in self.conjuncts:
# don't include deterministic relations without vars
if all(r.isDeterministic() for r in d.getRelations()):
pass
else:
[
relsToConjuncts[rel.getRelationNameForAdjacency()].add(d)
for rel in d.getRelations()
]
for rel in relsToConjuncts.keys():
[
adjList[d1].add(d2)
for d1 in relsToConjuncts[rel]
for d2 in relsToConjuncts[rel]
]
# TODO(ericgribkoff) Fix this hack
for d in self.conjuncts:
if d not in adjList:
adjList[d].add(d)
return adjList
def toCNF(self):
conjI = 0
comI = 0
stack = [(conjI, comI)]
disjuncts = []
# Applying distributivity
while True:
if len(stack) < len(self.conjuncts):
stack.append((conjI + 1, 0))
conjI = conjI + 1
else:
disjuncts.append(
DisjunctiveQuery(
[self.conjuncts[i].getComponent(j) for (i, j) in stack]
)
)
(lastConj, lastCom) = stack.pop()
if lastCom + 1 < len(self.conjuncts[lastConj].getComponents()):
stack.append((lastConj, lastCom + 1))
else:
while stack:
(lastConj, lastCom) = stack.pop()
conjI = conjI - 1
if lastCom + 1 < len(self.conjuncts[lastConj].getComponents()):
stack.append((lastConj, lastCom + 1))
break
if not stack:
break
return CNF(disjuncts)
def prettyPrint(self):
return "(%s)" % " v ".join([x.prettyPrint() for x in self.conjuncts])
def prettyPrintCNF(self):
return "(%s)" % " ^ ".join([x.prettyPrintCNF() for x in self.conjuncts])
def __repr__(self):
return "dnf(\n%s\n)" % "\n v ".join([x.__repr__() for x in self.conjuncts])
# Conjunction of disjunctive queries
class CNF(object):
def __init__(self, disjuncts=[]):
self.disjuncts = [d.minimize() for d in disjuncts]
def copy(self):
return CNF([d.copy() for d in self.disjuncts])
def getDisjuncts(self):
return self.disjuncts
def getRelations(self):
rels = set()
for d in self.disjuncts:
rels.update(set(d.getRelations()))
return rels
def getRelationSymbols(self):
rels = set()
for d in self.disjuncts:
rels.update(set(d.getRelationSymbols()))
return rels
def minimize(self):
minCom = [c.minimize() for c in self.getDisjuncts()]
redundant = [False] * len(minCom)
for i in range(len(minCom)):
for j in range(len(minCom)):
if i == j or redundant[i]:
continue
else:
if minCom[i].containedIn(minCom[j]):
redundant[j] = True
lexpr = nltk.Expression.fromstring
finalCom = []
for i in range(len(minCom)):
if not redundant[i]:
p9Component = lexpr(minCom[i].toProver9())
prover = nltk.Prover9Command(p9Component)
if not prover.prove(): # component is not always true
finalCom.append(minCom[i])
return CNF(finalCom)
def containedIn(self, cnf2):
for con in cnf2.getDisjuncts():
if not any([c.containedIn(con) for c in self.getDisjuncts()]):
return False
return True
def usesSeparator(self, subId):
return any(d.usesSeparator(subId) for d in self.disjuncts)
def getUsedSeparators(self):
seps = set()
for d in self.disjuncts:
seps.update(d.getUsedSeparators())
return seps
def getAdjacencyList(self):
relsToDisjuncts = defaultdict(set)
adjList = defaultdict(set)
for d in self.disjuncts:
# don't include deterministic relations without vars
if all(r.isDeterministic() for r in d.getRelations()):
pass
else:
[
relsToDisjuncts[rel.getRelationNameForAdjacency()].add(d)
for rel in d.getRelations()
]
for rel in relsToDisjuncts.keys():
[
adjList[d1].add(d2)
for d1 in relsToDisjuncts[rel]
for d2 in relsToDisjuncts[rel]
]
# TODO(ericgribkoff) Fix this hack
for d in self.disjuncts:
if d not in adjList:
adjList[d].add(d)
return adjList
def toProver9(self):
return " and ".join(["(%s)" % x.toProver9() for x in self.disjuncts])
def prettyPrint(self):
return "(%s)" % " ^ ".join([x.prettyPrint() for x in self.disjuncts])
def prettyPrintCNF(self):
return "(%s)" % " v ".join([x.prettyPrintCNF() for x in self.disjuncts])
def __repr__(self):
return "cnf(\n%s\n)" % " ^ \n".join([x.__repr__() for x in self.disjuncts])
def decomposeComponent(orig):
connectedComponents = Graph(orig.getAdjacencyList()).connectedComponents()
if len(connectedComponents) == 1:
return [orig]
else:
return [Component(list(c)) for c in connectedComponents]
def computeSymbolComponentsDNF(dnf):
connectedComponents = Graph(dnf.getAdjacencyList()).connectedComponents()
return connectedComponents
def computeSymbolComponentsCNF(cnf):
connectedComponents = Graph(cnf.getAdjacencyList()).connectedComponents()
return connectedComponents
def computeSymbolComponentsDisjunct(d):
connectedComponents = Graph(d.getAdjacencyList()).connectedComponents()
return connectedComponents
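# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# The Graph helper above only needs a plain adjacency-list dict, so its
# connected-component search can be exercised on its own; the vertex labels
# here are hypothetical.
#
# >>> g = Graph({1: [2], 2: [1], 3: [3], 4: [5], 5: [4]})
# >>> sorted(sorted(c) for c in g.connectedComponents())
# [[1, 2], [3], [4, 5]]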
|
StarcoderdataPython
|
4980082
|
<reponame>Muflhi01/zorya<filename>gcp/gae.py<gh_stars>100-1000
"""Interactions with compute engine."""
import logging
import backoff
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from util import utils
CREDENTIALS = None
class Gae(object):
"""App Engine actions."""
def __init__(self, project):
self.app = discovery.build(
'appengine', 'v1', credentials=CREDENTIALS, cache_discovery=False
)
self.project = project
def change_status(self, to_status, service_id, version_id):
"""
Stop/start version based on tags
Args:
to_status: 0 stop 1 start
service_id: The App Engine service id
version_id: The App Engine version id
Returns:
"""
try:
if int(to_status) == 1:
logging.info(
"Starting App Engine service: %s version: %s on project: %s",
service_id,
version_id,
self.project
)
self.start_version(service_id, version_id)
else:
logging.info(
"Stopping App Engine service: %s version: %s on project: %s",
service_id,
version_id,
self.project
)
self.stop_version(service_id, version_id)
except HttpError as http_error:
logging.error(http_error)
return "Error", 500
return "ok", 200
@backoff.on_exception(backoff.expo, HttpError, max_tries=8, giveup=utils.fatal_code)
def stop_version(self, service_id, version_id):
"""
        Stop an App Engine version.
Args:
service_id: The App Engine service id
version_id: The App Engine version id
Returns:
"""
# TODO add requestId
return (
self.app.apps().services().versions().patch(servicesId=service_id, appsId=self.project,
versionsId=version_id, updateMask='servingStatus',
body={"servingStatus": "STOPPED"}).execute()
)
@backoff.on_exception(backoff.expo, HttpError, max_tries=8, giveup=utils.fatal_code)
def start_version(self, service_id, version_id):
"""
        Start an App Engine version.
Args:
service_id: The App Engine service id
version_id: The App Engine version id
Returns:
"""
# TODO add requestId
return (
self.app.apps().services().versions().patch(servicesId=service_id, appsId=self.project,
versionsId=version_id, updateMask='servingStatus',
body={"servingStatus": "SERVING"}).execute()
)
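# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# Stopping and restarting one App Engine version; the project, service and
# version ids below are hypothetical, and real calls need valid credentials
# for the appengine discovery client.
#
# gae = Gae("my-project")
# gae.change_status(0, "default", "20210101t120000")  # stop the version
# gae.change_status(1, "default", "20210101t120000")  # start it again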
|
StarcoderdataPython
|
1670084
|
# -*- coding: utf-8 -*-
u"""
Test of cymel.utils.operation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
import maya.cmds as cmds
from cymel.utils.operation import (
docmd, undoChunk, undoTransaction, nonUndoable,
PreserveSelection,
)
#------------------------------------------------------------------------------
class TestOperation(unittest.TestCase):
u"""
Test of cymel.utils.operation
"""
def assertSuccessful(self, proc, *args, **kwargs):
try:
return proc(*args, **kwargs)
except Exception as e:
self.fail('%s() raised %r' % (proc.__name__, e))
def test_docmd(self):
r = [0, False]
def do():
r[0] += 1
def undo():
r[0] -= 1
r[1] = False
def redo():
r[0] += 1
r[1] = True
self.assertSuccessful(docmd, do, undo)
self.assertEqual(r[0], 1)
cmds.undo()
self.assertEqual(r[0], 0)
cmds.redo()
self.assertEqual(r[0], 1)
cmds.undo()
self.assertEqual(r[0], 0)
self.assertSuccessful(docmd, do, undo, redo)
self.assertEqual(r[0], 1)
cmds.undo()
self.assertEqual(r[0], 0)
cmds.redo()
self.assertEqual(r[0], 1)
self.assertTrue(r[1])
cmds.undo()
self.assertEqual(r[0], 0)
self.assertFalse(r[1])
def test_UndoChunk(self):
cmds.file(f=True, new=True)
cmds.createNode('transform')
with undoChunk:
cmds.createNode('transform')
cmds.createNode('transform')
cmds.undo()
self.assertEqual(cmds.ls('transform*'), ['transform1'])
def test_UndoTransaction(self):
num = len(cmds.ls(type='transform'))
try:
with undoTransaction():
for i in range(5):
cmds.createNode('transform')
raise RuntimeError()
except:
pass
self.assertEqual(num, len(cmds.ls(type='transform')))
def test_NonUndoable(self):
num = len(cmds.ls(type='transform'))
with undoChunk:
cmds.createNode('transform')
with nonUndoable:
cmds.createNode('transform')
cmds.createNode('transform')
cmds.createNode('transform')
self.assertEqual(num + 4, len(cmds.ls(type='transform')))
cmds.undo()
self.assertEqual(num + 2, len(cmds.ls(type='transform')))
def test_PreserveSelection(self):
cmds.select('persp')
with PreserveSelection():
cmds.select('side')
with PreserveSelection():
cmds.select('top')
self.assertEqual(cmds.ls(sl=True), ['side'])
self.assertEqual(cmds.ls(sl=True), ['persp'])
with PreserveSelection(True):
cmds.select('side')
with PreserveSelection(True):
cmds.select('top')
self.assertEqual(cmds.ls(sl=True), ['side'])
self.assertEqual(cmds.ls(sl=True), ['persp'])
#------------------------------------------------------------------------------
def suite():
return unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
def run(**kwargs):
unittest.TextTestRunner(**kwargs).run(suite())
if __name__ == '__main__':
run(verbosity=2)
|
StarcoderdataPython
|
1607595
|
"""isort:skip_file"""
from docs_snippets.concepts.partitions_schedules_sensors.partitioned_job import (
do_stuff_partitioned,
)
# start
def test_do_stuff_partitioned():
assert do_stuff_partitioned.execute_in_process(partition_key="2020-01-01").success
# end
|
StarcoderdataPython
|
6557513
|
#!/usr/bin/env python3
# Import the required libraries
import rospy
from std_msgs.msg import Float64
import pyaudio
import audioop
import numpy as np
import math
class OtoNode():
def __init__(self):
        # Define the publisher
self.sound_pub = rospy.Publisher("/sound", Float64, queue_size=1)
        # Variables for storing data
self.sound_val = None
self.decibel = 0
        # Chunk: amount of data read from the audio source in one call
self.chunk = 512
        # Sampling rate
sampling_rate = 44100
self.p = pyaudio.PyAudio()
self.p.stream = self.p.open(format=self.p.get_format_from_width(2), channels=1, rate=sampling_rate, frames_per_buffer=self.chunk, input=True, output=False)
self.stream = self.p.stream
def get_sound_val(self):
"""
        Process the sound picked up by the sound sensor (microphone).
"""
        # Get the audio data
        # Read one chunk's worth of data (digital values representing the sound level measured by the sensor)
input = self.stream.read(self.chunk, exception_on_overflow=False)
        # Convert the sensor values to an int16 NumPy array
self.sound_val = np.frombuffer(input, dtype='int16')
        # Convert to decibels (RMS is taken over the whole chunk rather than a single sample)
        rms = audioop.rms(input, 2)
self.decibel = 20 * math.log10(rms) if rms > 0 else 0
        # Publish the converted decibel value
print("音センサの値[db]: {}".format(self.decibel))
self.sound_pub.publish(self.decibel)
def stop(self):
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
if __name__ == '__main__':
    # Declare the node
rospy.init_node("mic")
    # Create an instance of the class
oto = OtoNode()
    # Start the main loop
while not rospy.is_shutdown():
        # Run the processing
oto.get_sound_val()
oto.stop()
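# --- Illustrative check of the dB conversion (a minimal sketch, not part of the original node) ---
# The same formula as get_sound_val(), applied to a synthetic 16-bit sine chunk
# without ROS or a microphone; the frequency, amplitude and resulting value are
# only for illustration.
#
# t = np.arange(512) / 44100.0
# samples = (0.5 * 32767 * np.sin(2 * np.pi * 440 * t)).astype('int16')
# rms = audioop.rms(samples.tobytes(), 2)            # RMS of the 16-bit chunk
# decibel = 20 * math.log10(rms) if rms > 0 else 0   # about 81 for this amplitude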
|
StarcoderdataPython
|
8053346
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Dataverse data-types data model."""
from __future__ import absolute_import
from pyDataverse.utils import dict_to_json
from pyDataverse.utils import read_file_json
from pyDataverse.utils import write_file_json
"""
Data-structure to work with data and metadata of Dataverses, Datasets and
Datafiles - coming from different sources.
"""
class Dataverse(object):
"""Base class for Dataverse data model."""
"""Attributes required for Dataverse metadata json."""
__attr_required_metadata = [
'alias',
'name',
'dataverseContacts'
]
"""Attributes valid for Dataverse metadata json."""
__attr_valid_metadata = [
'alias',
'name',
'affiliation',
'description',
'dataverseContacts',
'dataverseType'
]
"""Attributes valid for Dataverse class."""
__attr_valid_class = [
# 'datasets',
# 'dataverses',
'pid'
] + __attr_valid_metadata
def __init__(self):
"""Init a Dataverse() class.
Examples
-------
Create a Dataverse::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
"""
"""Misc"""
self.datasets = []
self.dataverses = []
self.pid = None
"""Metadata"""
self.name = None
self.alias = None
self.dataverseContacts = []
self.affiliation = None
self.description = None
self.dataverseType = None
def __str__(self):
"""Return name of Dataverse() class for users."""
return 'pyDataverse Dataverse() model class.'
def set(self, data):
"""Set class attributes with a flat dict.
Parameters
----------
data : dict
Flat dict with data. Key's must be name the same as the class
attribute, the data should be mapped to.
Examples
-------
Set Dataverse attributes via flat dict::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> dv.name
'Test pyDataverse'
"""
for key, val in data.items():
if key in self.__attr_valid_class:
self.__setattr__(key, val)
else:
# TODO: Raise Exception
print('Key {0} not valid.'.format(key))
def import_metadata(self, filename, format='dv_up'):
"""Import Dataverse metadata from file.
This simply parses in data with valid attribute naming as keys.
Data must not be complete, and also attributes required for the
metadata json export can be missing.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format.
Examples
-------
Import metadata coming from json file::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> dv.import_metadata('tests/data/dataverse_min.json')
>>> dv.name
'Test pyDataverse'
"""
data = {}
if format == 'dv_up':
metadata = read_file_json(filename)
# get first level metadata and parse it automatically
for attr in self.__attr_valid_metadata:
if attr in metadata:
data[attr] = metadata[attr]
self.set(data)
elif format == 'dv_down':
metadata = read_file_json(filename)
self.set(data)
else:
# TODO: Exception
print('Data-format not right.')
def is_valid(self):
"""Check if set attributes are valid for Dataverse api metadata creation.
The attributes required are listed in `__attr_required_metadata`.
Returns
-------
bool
True, if creation of metadata json is possible. False, if not.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': '<NAME>',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> dv.is_valid
True
>>> dv.name = None
>>> dv.is_valid
False
"""
is_valid = True
for attr in self.__attr_required_metadata:
if not self.__getattribute__(attr):
is_valid = False
print('attribute \'{0}\' missing.'.format(attr))
return is_valid
def dict(self, format='dv_up'):
"""Create dicts in different data formats.
`dv_up`: Checks if data is valid for the different dict formats.
Parameters
----------
format : string
Data format for dict creation. Available formats are: `dv_up` with
all metadata for Dataverse api upload, and `all` with all attributes
set.
Returns
-------
dict
Data as dict.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> data = dv.dict()
>>> data['name']
'Test pyDataverse'
Todo
-------
Validate standards.
"""
data = {}
if format == 'dv_up':
if self.is_valid():
for attr in self.__attr_valid_metadata:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
                # TODO: check whether the required attributes are set; raise an Exception if not
return data
else:
print('dict can not be created. Data is not valid for format')
return None
elif format == 'all':
for attr in self.__attr_valid_class:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
# TODO: Exception
print('Format not right for dict.')
return None
def json(self, format='dv_up'):
r"""Create json from attributes.
Parameters
----------
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format and `all` with all attributes named in
`__attr_valid_class`.
Returns
-------
string
json-formatted string of Dataverse metadata for api upload.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> data = dv.json()
>>> data
'{\n "name": "Test pyDataverse",\n "dataverseContacts": [\n {\n "contactEmail": "<EMAIL>"\n }\n ],\n "alias": "test-pyDataverse"\n}'
Todo
-------
Validate standards.
"""
if format == 'dv_up':
data = self.dict('dv_up')
if data:
return dict_to_json(data)
else:
return None
elif format == 'all':
data = self.dict('all')
if data:
return dict_to_json(data)
else:
return None
else:
# TODO Exception
print('data format not valid.')
def export_metadata(self, filename, format='dv_up'):
"""Export Dataverse metadata to Dataverse api upload json.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format for export. Available format is: `dv_up` with all
metadata for Dataverse api upload.
Examples
-------
Export Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '<EMAIL>'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> dv.export_metadata('tests/data/dataverse_export.json')
"""
if format == 'dv_up':
return write_file_json(filename, self.dict())
else:
# TODO: Exception
print('Data-format not right.')
class Dataset(object):
"""Base class for the Dataset data model."""
"""Attributes required for Dataset metadata json."""
__attr_required_metadata = [
'title',
'author',
'datasetContact',
'dsDescription',
'subject'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'].
"""
__attr_valid_metadata_datasetVersion = [
'license',
'termsOfUse',
'termsOfAccess'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'citation\'].
"""
__attr_valid_metadata_citation_dicts = [
'title',
'subtitle',
'alternativeTitle',
'alternativeURL',
'subject',
'notesText',
'productionDate',
'productionPlace',
'distributionDate',
'depositor',
'dateOfDeposit',
'kindOfData',
'seriesName',
'seriesInformation',
'relatedMaterial',
'relatedDatasets',
'otherReferences',
'dataSources',
'originOfSources',
'characteristicOfSources',
'accessToSources',
'kindOfData'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'citation\'][\'fields\'].
"""
__attr_valid_metadata_citation_arrays = {
'otherId': ['otherIdAgency', 'otherIdValue'],
'author': ['authorName', 'authorAffiliation', 'authorIdentifierScheme',
'authorIdentifier'],
'datasetContact': ['datasetContactName', 'datasetContactAffiliation',
'datasetContactEmail'],
'dsDescription': ['dsDescriptionValue', 'dsDescriptionDate'],
'keyword': ['keywordValue', 'keywordVocabulary',
'keywordVocabularyURI'],
'producer': ['producerName', 'producerAffiliation',
'producerAbbreviation', 'producerURL', 'producerLogoURL'],
'contributor': ['contributorType', 'contributorName'],
'grantNumber': ['grantNumberAgency', 'grantNumberValue'],
'topicClassification': ['topicClassValue', 'topicClassVocab'],
'publication': ['publicationCitation', 'publicationIDType',
'publicationIDNumber', 'publicationURL'],
'distributor': ['distributorName', 'distributorAffiliation',
'distributorAbbreviation', 'distributorURL',
'distributorLogoURL'],
'timePeriodCovered': ['timePeriodCoveredStart',
'timePeriodCoveredEnd'],
'dateOfCollection': ['dateOfCollectionStart', 'dateOfCollectionEnd'],
'software': ['softwareName', 'softwareVersion']
}
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'geospatial\'].
"""
__attr_valid_metadata_geospatial_dicts = [
'geographicUnit'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'geospatial\'][\'fields\'].
"""
__attr_valid_metadata_geospatial_arrays = {
'geographicCoverage': ['country', 'state', 'city',
'otherGeographicCoverage'],
'geographicBoundingBox': ['westLongitude', 'eastLongitude',
'northLongitude', 'southLongitude']
}
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'socialscience\'].
"""
__attr_valid_metadata_socialscience_dicts = [
'unitOfAnalysis',
'universe',
'timeMethod',
'dataCollector',
'collectorTraining',
'frequencyOfDataCollection',
'samplingProcedure',
'deviationsFromSampleDesign',
'collectionMode',
'researchInstrument',
'dataCollectionSituation',
'actionsToMinimizeLoss',
'controlOperations',
'weighting',
'cleaningOperations',
'datasetLevelErrorNotes',
'responseRate',
'samplingErrorEstimates',
'otherDataAppraisal',
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'journal\'].
"""
__attr_valid_metadata_journal_dicts = [
'journalArticleType'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'journal\'][\'fields\'].
"""
__attr_valid_metadata_journal_arrays = {
'journalVolumeIssue': ['journalVolume', 'journalIssue',
'journalPubDate']
}
"""Attributes valid for Dataset class."""
__attr_valid_class = [
'datafiles'
] + __attr_valid_metadata_datasetVersion \
+ __attr_valid_metadata_citation_dicts \
+ list(__attr_valid_metadata_citation_arrays.keys()) \
+ __attr_valid_metadata_geospatial_dicts \
+ list(__attr_valid_metadata_geospatial_arrays.keys()) \
+ __attr_valid_metadata_socialscience_dicts \
+ __attr_valid_metadata_journal_dicts \
        + list(__attr_valid_metadata_journal_arrays.keys())
def __init__(self):
"""Init a Dataset() class.
Examples
-------
Create a Dataverse::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
"""
"""Misc"""
self.datafiles = []
"""Metadata: dataset"""
self.license = None
self.termsOfUse = None
self.termsOfAccess = None
"""Metadata: citation"""
self.citation_displayName = None
self.title = None
self.subtitle = None
self.alternativeTitle = None
self.alternativeURL = None
self.otherId = []
self.author = []
self.datasetContact = []
self.dsDescription = []
self.subject = []
self.keyword = []
self.topicClassification = []
self.publication = []
self.notesText = None
self.producer = []
self.productionDate = None
self.productionPlace = None
self.contributor = []
self.grantNumber = []
self.distributor = []
self.distributionDate = None
self.depositor = None
self.dateOfDeposit = None
self.timePeriodCovered = []
self.dateOfCollection = []
self.kindOfData = []
self.seriesName = None
self.seriesInformation = None
self.software = []
self.relatedMaterial = []
self.relatedDatasets = []
self.otherReferences = []
self.dataSources = []
self.originOfSources = None
self.characteristicOfSources = None
self.accessToSources = None
"""Metadata: geospatial"""
self.geospatial_displayName = None
self.geographicCoverage = []
self.geographicUnit = None
self.geographicBoundingBox = []
"""Metadata: socialscience"""
self.socialscience_displayName = None
self.unitOfAnalysis = []
self.universe = []
self.timeMethod = None
self.dataCollector = None
self.collectorTraining = None
self.frequencyOfDataCollection = None
self.samplingProcedure = None
self.targetSampleActualSize = None
self.targetSampleSizeFormula = None
self.socialScienceNotesType = None
self.socialScienceNotesSubject = None
self.socialScienceNotesText = None
self.deviationsFromSampleDesign = None
self.collectionMode = None
self.researchInstrument = None
self.dataCollectionSituation = None
self.actionsToMinimizeLoss = None
self.controlOperations = None
self.weighting = None
self.cleaningOperations = None
self.datasetLevelErrorNotes = None
self.responseRate = None
self.samplingErrorEstimates = None
self.otherDataAppraisal = None
"""Metadata: journal"""
self.journal_displayName = None
self.journalVolumeIssue = []
self.journalArticleType = None
def __str__(self):
"""Return name of Dataset() class for users."""
return 'pyDataverse Dataset() model class.'
def set(self, data):
"""Set class attributes with a flat dict as input.
Parameters
----------
data : dict
Flat dict with data. Key's must be name the same as the class
attribute, the data should be mapped to.
Examples
-------
Set Dataverse attributes via flat dict::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
>>> 'dsDescription': 'New study about pyDataverse usage in 2019'
>>> }
>>> ds.set(data)
>>> ds.title
'pyDataverse study 2019'
"""
for key, val in data.items():
if key in self.__attr_valid_class or key == 'citation_displayName' or key == 'geospatial_displayName' or key == 'socialscience_displayName' or key == 'journal_displayName' or key == 'targetSampleActualSize' or key == 'targetSampleSizeFormula' or key == 'socialScienceNotesType' or key == 'socialScienceNotesText' or key == 'socialScienceNotesSubject':
self.__setattr__(key, val)
else:
# TODO: Raise Exception
print('Key {0} not valid.'.format(key))
def import_metadata(self, filename, format='dv_up'):
"""Import Dataset metadata from file.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
api upload compatible format.
Examples
-------
Set Dataverse attributes via flat dict::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> ds.import_metadata('tests/data/dataset_full.json')
>>> ds.title
'Replication Data for: Title'
"""
data = {}
if format == 'dv_up':
metadata = read_file_json(filename)
"""dataset"""
# get first level metadata and parse it automatically
for key, val in metadata['datasetVersion'].items():
if key in self.__attr_valid_metadata_datasetVersion:
data[key] = val
# get nested metadata and parse it manually
if 'dataverseContacts' in metadata:
data['contactEmail'] = []
for contact in metadata['dataverseContacts']:
for key, val in contact.items():
if key == 'contactEmail':
data['contactEmail'].append(val)
"""citation"""
if 'citation' in metadata['datasetVersion']['metadataBlocks']:
citation = metadata['datasetVersion']['metadataBlocks']['citation']
if 'displayName' in citation:
data['citation_displayName'] = citation['displayName']
for field in citation['fields']:
if field['typeName'] in self.__attr_valid_metadata_citation_dicts:
data[field['typeName']] = field['value']
if field['typeName'] in self.__attr_valid_metadata_citation_arrays:
data[field['typeName']] = self.__parse_dicts(
field['value'],
self.__attr_valid_metadata_citation_arrays[field['typeName']])
if field['typeName'] == 'series':
if 'seriesName' in field['value']:
data['seriesName'] = field['value']['seriesName']['value']
if 'seriesInformation' in field['value']:
data['seriesInformation'] = field['value']['seriesInformation']['value']
else:
# TODO: Exception
print('citation not in json')
"""geospatial"""
if 'geospatial' in metadata['datasetVersion']['metadataBlocks']:
geospatial = metadata['datasetVersion']['metadataBlocks']['geospatial']
if 'displayName' in geospatial:
self.__setattr__('geospatial_displayName',
geospatial['displayName'])
for field in geospatial['fields']:
if field['typeName'] in self.__attr_valid_metadata_geospatial_dicts:
data[field['typeName']] = field['value']
if field['typeName'] in self.__attr_valid_metadata_geospatial_arrays:
data[field['typeName']] = self.__parse_dicts(
field['value'],
self.__attr_valid_metadata_geospatial_arrays[field['typeName']])
else:
# TODO: Exception
print('geospatial not in json')
"""socialscience"""
if 'socialscience' in metadata['datasetVersion']['metadataBlocks']:
socialscience = metadata['datasetVersion']['metadataBlocks']['socialscience']
if 'displayName' in socialscience:
self.__setattr__('socialscience_displayName',
socialscience['displayName'])
for field in socialscience['fields']:
if field['typeName'] in self.__attr_valid_metadata_socialscience_dicts:
data[field['typeName']] = field['value']
if field['typeName'] == 'targetSampleSize':
if 'targetSampleActualSize' in field['value']:
data['targetSampleActualSize'] = field['value']['targetSampleActualSize']['value']
if 'targetSampleSizeFormula' in field['value']:
data['targetSampleSizeFormula'] = field['value']['targetSampleSizeFormula']['value']
if field['typeName'] == 'socialScienceNotes':
if 'socialScienceNotesType' in field['value']:
data['socialScienceNotesType'] = field['value']['socialScienceNotesType']['value']
if 'socialScienceNotesSubject' in field['value']:
data['socialScienceNotesSubject'] = field['value']['socialScienceNotesSubject']['value']
if 'socialScienceNotesText' in field['value']:
data['socialScienceNotesText'] = field['value']['socialScienceNotesText']['value']
else:
# TODO: Exception
print('socialscience not in json')
"""journal"""
if 'journal' in metadata['datasetVersion']['metadataBlocks']:
journal = metadata['datasetVersion']['metadataBlocks']['journal']
if 'displayName' in journal:
self.__setattr__('journal_displayName',
journal['displayName'])
for field in journal['fields']:
if field['typeName'] in self.__attr_valid_metadata_journal_dicts:
data[field['typeName']] = field['value']
if field['typeName'] in self.__attr_valid_metadata_journal_arrays:
data[field['typeName']] = self.__parse_dicts(
field['value'],
self.__attr_valid_metadata_journal_arrays[field['typeName']])
else:
# TODO: Exception
print('journal not in json')
self.set(data)
elif format == 'dv_down':
metadata = read_file_json(filename)
self.set(data)
else:
# TODO: Exception
print('Data-format not right')
def __parse_dicts(self, data, attr_list):
"""Parse out Dataverse api metadata dicts.
Parameters
----------
data : list
List of Dataverse api metadata fields.
attr_list : list
List of attributes to be parsed.
Returns
-------
list
List of dicts with parsed out key-value pairs.
"""
data_tmp = []
for d in data:
tmp_dict = {}
for key, val in d.items():
if key in attr_list:
tmp_dict[key] = val['value']
else:
print('Key \'{0}\' not in attribute list'.format(key))
data_tmp.append(tmp_dict)
return data_tmp
def is_valid(self):
"""Check if attributes available are valid for Dataverse api metadata creation.
The attributes required are listed in `__attr_required_metadata`.
Returns
-------
bool
True, if creation of metadata json is possible. False, if not.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
>>> 'dsDescription': 'New study about pyDataverse usage in 2019'
>>> }
>>> ds.set(data)
>>> ds.is_valid()
False
>>> ds.author = [{'authorName': 'LastAuthor1, FirstAuthor1'}]
>>> ds.datasetContact = [{'datasetContactName': 'LastContact1, FirstContact1'}]
>>> ds.subject = ['Engineering']
>>> ds.is_valid()
True
Todo
-------
Test out required fields or ask Harvard.
"""
is_valid = True
# check if all required attributes are set
for attr in self.__attr_required_metadata:
if not self.__getattribute__(attr):
is_valid = False
print('Metadata not valid: attribute \'{0}\' missing.'.format(attr))
# check if attribute sets are complete where necessary
tp_cov = self.__getattribute__('timePeriodCovered')
if tp_cov:
for tp in tp_cov:
if 'timePeriodCoveredStart' in tp or 'timePeriodCoveredEnd' in tp:
if not ('timePeriodCoveredStart' in tp and 'timePeriodCoveredEnd' in tp):
is_valid = False
d_coll = self.__getattribute__('dateOfCollection')
if d_coll:
for d in d_coll:
if 'dateOfCollectionStart' in d or 'dateOfCollectionEnd' in d:
if not ('dateOfCollectionStart' in d and 'dateOfCollectionEnd' in d):
is_valid = False
authors = self.__getattribute__('author')
if authors:
for a in authors:
if 'authorAffiliation' in a or 'authorIdentifierScheme' in a or 'authorIdentifier' in a:
if 'authorName' not in a:
is_valid = False
ds_contac = self.__getattribute__('datasetContact')
if ds_contac:
for c in ds_contac:
if 'datasetContactAffiliation' in c or 'datasetContactEmail' in c:
if 'datasetContactName' not in c:
is_valid = False
producer = self.__getattribute__('producer')
if producer:
for p in producer:
if 'producerAffiliation' in p or 'producerAbbreviation' in p or 'producerURL' in p or 'producerLogoURL' in p:
if not p['producerName']:
is_valid = False
contributor = self.__getattribute__('contributor')
if contributor:
for c in contributor:
if 'contributorType' in c:
if 'contributorName' not in c:
is_valid = False
distributor = self.__getattribute__('distributor')
if distributor:
for d in distributor:
if 'distributorAffiliation' in d or 'distributorAbbreviation' in d or 'distributorURL' in d or 'distributorLogoURL' in d:
if 'distributorName' not in d:
is_valid = False
bbox = self.__getattribute__('geographicBoundingBox')
if bbox:
for b in bbox:
if b:
if not ('westLongitude' in b and 'eastLongitude' in b and 'northLongitude' in b and 'southLongitude' in b):
is_valid = False
return is_valid
def dict(self, format='dv_up'):
"""Create dicts in different data formats.
Parameters
----------
format : string
Data format for dict creation. Available formats are: `dv_up` with
all metadata for Dataverse api upload, and `all` with all attributes
set.
Returns
-------
dict
Data as dict.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
>>> 'dsDescription': 'New study about pyDataverse usage in 2019'
>>> }
>>> ds.set(data)
>>> data = dv.dict()
>>> data['title']
'pyDataverse study 2019'
Todo
-------
Validate standard
"""
if format == 'dv_up':
if self.is_valid():
data = {}
data['datasetVersion'] = {}
data['datasetVersion']['metadataBlocks'] = {}
citation = {}
citation['fields'] = []
geospatial = {}
geospatial['fields'] = []
socialscience = {}
socialscience['fields'] = []
journal = {}
journal['fields'] = []
"""dataset"""
# Generate first level attributes
for attr in self.__attr_valid_metadata_datasetVersion:
if self.__getattribute__(attr) is not None:
data['datasetVersion'][attr] = self.__getattribute__(attr)
"""citation"""
if self.citation_displayName:
citation['displayName'] = self.citation_displayName
# Generate first level attributes
for attr in self.__attr_valid_metadata_citation_dicts:
if self.__getattribute__(attr) is not None:
citation['fields'].append({
'typeName': attr,
'value': self.__getattribute__(attr)
})
# Generate fields attributes
for key, val in self.__attr_valid_metadata_citation_arrays.items():
if self.__getattribute__(key) is not None:
citation['fields'].append({
'typeName': key,
'value': self.__generate_dicts(key, val)
})
# Generate series attributes
if self.__getattribute__('seriesName') is not None or self.__getattribute__('seriesInformation') is not None:
tmp_dict = {}
tmp_dict['value'] = {}
if self.__getattribute__('seriesName') is not None:
tmp_dict['value']['seriesName'] = {}
tmp_dict['value']['seriesName']['typeName'] = 'seriesName'
tmp_dict['value']['seriesName']['value'] = self.__getattribute__('seriesName')
if self.__getattribute__('seriesInformation') is not None:
tmp_dict['value']['seriesInformation'] = {}
tmp_dict['value']['seriesInformation']['typeName'] = 'seriesInformation'
tmp_dict['value']['seriesInformation']['value'] = self.__getattribute__('seriesInformation')
citation['fields'].append({
'typeName': 'series',
'value': tmp_dict
})
"""geospatial"""
# Generate first level attributes
for attr in self.__attr_valid_metadata_geospatial_dicts:
if self.__getattribute__(attr) is not None:
geospatial['fields'].append({
'typeName': attr,
'value': self.__getattribute__(attr)
})
# Generate fields attributes
for key, val in self.__attr_valid_metadata_geospatial_arrays.items():
# check if attribute exists
if self.__getattribute__(key) is not None:
geospatial['fields'].append({
'typeName': key,
'value': self.__generate_dicts(key, val)
})
"""socialscience"""
# Generate first level attributes
for attr in self.__attr_valid_metadata_socialscience_dicts:
if self.__getattribute__(attr) is not None:
socialscience['fields'].append({
'typeName': attr,
'value': self.__getattribute__(attr)
})
# Generate targetSampleSize attributes
if self.__getattribute__('targetSampleActualSize') is not None or self.__getattribute__('targetSampleSizeFormula') is not None:
tmp_dict = {}
tmp_dict['value'] = {}
if 'targetSampleActualSize' in self.__getattribute__('targetSampleSize'):
if self.__getattribute__('targetSampleActualSize') is not None:
tmp_dict['value']['targetSampleActualSize'] = {}
tmp_dict['value']['targetSampleActualSize']['typeName'] = 'targetSampleActualSize'
tmp_dict['value']['targetSampleActualSize']['value'] = self.__getattribute__('targetSampleActualSize')
if 'targetSampleSizeFormula' in self.__getattribute__('targetSampleSize'):
if self.__getattribute__('targetSampleSizeFormula') is not None:
tmp_dict['value']['targetSampleSizeFormula'] = {}
tmp_dict['value']['targetSampleSizeFormula']['typeName'] = 'targetSampleSizeFormula'
tmp_dict['value']['targetSampleSizeFormula']['value'] = self.__getattribute__('targetSampleSizeFormula')
socialscience['fields'].append({
'typeName': 'targetSampleSize',
'value': tmp_dict
})
# Generate socialScienceNotes attributes
if self.__getattribute__('socialScienceNotesType') is not None or self.__getattribute__('socialScienceNotesSubject') is not None or self.__getattribute__('socialScienceNotesText') is not None:
tmp_dict = {}
tmp_dict['value'] = {}
if self.__getattribute__('socialScienceNotesType') is not None:
tmp_dict['value']['socialScienceNotesType'] = {}
tmp_dict['value']['socialScienceNotesType']['typeName'] = 'socialScienceNotesType'
tmp_dict['value']['socialScienceNotesType']['value'] = self.__getattribute__('socialScienceNotesType')
if self.__getattribute__('socialScienceNotesSubject') is not None:
tmp_dict['value']['socialScienceNotesSubject'] = {}
tmp_dict['value']['socialScienceNotesSubject']['typeName'] = 'socialScienceNotesSubject'
tmp_dict['value']['socialScienceNotesSubject']['value'] = self.__getattribute__('socialScienceNotesSubject')
if self.__getattribute__('socialScienceNotesText') is not None:
tmp_dict['value']['socialScienceNotesText'] = {}
tmp_dict['value']['socialScienceNotesText']['typeName'] = 'socialScienceNotesText'
tmp_dict['value']['socialScienceNotesText']['value'] = self.__getattribute__('socialScienceNotesText')
socialscience['fields'].append({
'typeName': 'socialScienceNotes',
'value': tmp_dict
})
"""journal"""
# Generate first level attributes
for attr in self.__attr_valid_metadata_journal_dicts:
if self.__getattribute__(attr) is not None:
journal['fields'].append({
'typeName': attr,
'value': self.__getattribute__(attr)
})
# Generate fields attributes
for key, val in self.__attr_valid_metadata_journal_arrays.items():
if self.__getattribute__(key) is not None:
journal['fields'].append({
'typeName': key,
'value': self.__generate_dicts(key, val)
})
# TODO: check whether the required attributes are set; if not, raise an Exception!
data['datasetVersion']['metadataBlocks']['citation'] = citation
data['datasetVersion']['metadataBlocks']['socialscience'] = socialscience
data['datasetVersion']['metadataBlocks']['geospatial'] = geospatial
data['datasetVersion']['metadataBlocks']['journal'] = journal
return data
else:
print('dict can not be created. Data is not valid for format')
return None
elif format == 'all':
data = {}
for attr in self.__attr_valid_class:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
print('dict can not be created. Format is not valid')
return None
def __generate_dicts(self, key, val):
"""Generate dicts for array attributes of Dataverse api metadata upload.
Parameters
----------
key : string
Name of attribute
val : string
Value of attribute.
Returns
-------
list
List of filled dicts of metadata for Dataverse api upload.
"""
# check if attribute exists
tmp_list = []
if self.__getattribute__(key):
# loop over list of attribute dicts()
for d in self.__getattribute__(key):
tmp_dict = {}
# iterate over key-value pairs
for k, v in d.items():
# check if key is in attribute list
if k in val:
tmp_dict[k] = {}
tmp_dict[k]['typeName'] = k
tmp_dict[k]['value'] = v
tmp_list.append(tmp_dict)
return tmp_list
def json(self, format='dv_up'):
"""Create Dataset json from attributes.
Parameters
----------
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format and `all` with all attributes named in
`__attr_valid_class`.
Returns
-------
string
json-formatted string of Dataverse metadata for api upload.
Examples
-------
Get json of Dataverse api upload::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
>>> 'dsDescription': 'New study about pyDataverse usage in 2019',
>>> 'author': [{'authorName': 'LastAuthor1, FirstAuthor1'}],
>>> 'datasetContact': [{'datasetContactName': 'LastContact1, FirstContact1'}],
>>> 'subject': ['Engineering'],
>>> }
>>> ds.set(data)
>>> data = ds.json()
Todo
-------
TODO: Validate standard
TODO: Link to default json file
"""
if format == 'dv_up':
return dict_to_json(self.dict())
elif format == 'all':
return dict_to_json(self.dict('all'))
else:
# TODO Exception
print('data format not valid.')
def export_metadata(self, filename, format='dv_up'):
"""Export Dataset metadata to Dataverse api upload json.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format for export. Available format is: `dv_up` with all
metadata for Dataverse api upload.
Examples
-------
Export metadata to json file::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
>>> 'dsDescription': 'New study about pyDataverse usage in 2019',
>>> 'author': [{'authorName': 'LastAuthor1, FirstAuthor1'}],
>>> 'datasetContact': [{'datasetContactName': 'LastContact1, FirstContact1'}],
>>> 'subject': ['Engineering'],
>>> }
>>> ds.export_metadata('tests/data/export_dataset.json')
"""
if format == 'dv_up':
return write_file_json(filename, self.dict())
else:
# TODO: Exception
print('Data-format not right.')
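# Illustrative end-to-end usage (not part of the original module; it mirrors the
# docstring examples above and assumes the `dict_to_json`/`write_file_json`
# helpers imported elsewhere in this module):
#
#     ds = Dataset()
#     ds.set({
#         'title': 'pyDataverse study 2019',
#         'dsDescription': 'New study about pyDataverse usage in 2019',
#         'author': [{'authorName': 'LastAuthor1, FirstAuthor1'}],
#         'datasetContact': [{'datasetContactName': 'LastContact1, FirstContact1'}],
#         'subject': ['Engineering'],
#     })
#     if ds.is_valid():
#         upload_dict = ds.dict()                      # nested Dataverse upload structure
#         ds.export_metadata('tests/data/export_dataset.json')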
class Datafile(object):
"""Base class for the Datafile model.
Parameters
----------
filename : string
Filename with full path.
pid : type
Description of parameter `pid` (the default is None).
Attributes
----------
description : string
Description of datafile
restrict : bool
Whether access to the Datafile is restricted.
__attr_required_metadata : list
List with required metadata.
__attr_valid_metadata : list
List with valid metadata for Dataverse api upload.
__attr_valid_class : list
List of all attributes.
pid
filename
"""
"""Attributes required for Datafile metadata json."""
__attr_required_metadata = [
'filename',
'pid'
]
"""Attributes on first level of Datafile metadata json."""
__attr_valid_metadata = [
'description',
'pid',
'restrict'
]
"""Attributes on first level of Datafile metadata json."""
__attr_valid_class = [
'filename'
] + __attr_valid_metadata
def __init__(self, filename=None, pid=None):
"""Init a Datafile() class.
Parameters
----------
filename : string
Filename with full path.
pid : string
Persistent identifier, e.g. a DOI.
Examples
-------
Create a Datafile::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> df
<pyDataverse.models.Datafile at 0x7f4dfc0466a0>
"""
"""Misc"""
self.pid = pid
self.filename = filename
"""Metadata"""
self.description = None
self.restrict = None
def __str__(self):
"""Return name of Datafile() class for users."""
return 'pyDataverse Datafile() model class.'
def set(self, data):
"""Set class attributes with a flat dict.
Parameters
----------
data : dict
Flat dict with data. Key's must be name the same as the class
attribute, the data should be mapped to.
Examples
-------
Set Datafile attributes via flat dict::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> data = {
>>> 'pid': 'doi:10.11587/EVMUHP',
>>> 'description': 'Test file',
>>> 'filename': 'tests/data/datafile.txt'
>>> }
>>> df.set(data)
>>> df.pid
'doi:10.11587/EVMUHP'
"""
for key, val in data.items():
if key in self.__attr_valid_class:
self.__setattr__(key, val)
else:
# TODO: Raise Exception
print('Key {0} not valid.'.format(key))
def is_valid(self):
"""Check if set attributes are valid for Dataverse api metadata creation.
Returns
-------
bool
True, if creation of metadata json is possible. False, if not.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> data = {
>>> 'pid': 'doi:10.11587/EVMUHP',
>>> 'description': 'Test file',
>>> 'filename': 'tests/data/datafile.txt'
>>> }
>>> df.set(data)
>>> df.is_valid()
True
>>> df.filename = None
>>> df.is_valid()
False
"""
is_valid = True
for attr in self.__attr_required_metadata:
if self.__getattribute__(attr) is None:
is_valid = False
print('attribute \'{0}\' missing.'.format(attr))
return is_valid
def dict(self, format='dv_up'):
"""Create dict in different data formats.
Parameters
----------
format : string
Data format for dict creation. Available formats are: `dv_up` with
all metadata for Dataverse api upload, and `all` with all attributes
set.
Returns
-------
dict
Data as dict.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> data = {
>>> 'pid': 'doi:10.11587/EVMUHP',
>>> 'description': 'Test file',
>>> 'filename': 'tests/data/datafile.txt'
>>> }
>>> df.set(data)
>>> data = df.dict()
>>> data['description']
'Test file'
Todo
-------
Validate standards.
"""
data = {}
if format == 'dv_up':
if self.is_valid():
for attr in self.__attr_valid_metadata:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
print('dict can not be created. Data is not valid')
return None
elif format == 'all':
for attr in self.__attr_valid_class:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
# TODO: Exception
print('Format not right for dict.')
return None
def json(self, format='dv_up'):
r"""Create json from attributes.
Parameters
----------
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format and `all` with all attributes named in
`__attr_valid_class`.
Returns
-------
string
json-formatted string of Dataverse metadata for api upload.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> data = {
>>> 'pid': 'doi:10.11587/EVMUHP',
>>> 'description': 'Test file',
>>> 'filename': 'tests/data/datafile.txt'
>>> }
>>> df.set(data)
>>> df.dict()
{'description': 'Test file',
'pid': 'doi:10.11587/EVMUHP'}
Todo
-------
Validate standards.
Link to default json file
"""
if format == 'dv_up':
data = self.dict('dv_up')
if data:
return dict_to_json(data)
else:
print('Dict can not be created')
return None
elif format == 'all':
data = self.dict('all')
if data:
return dict_to_json(data)
else:
print('Dict can not be created')
return None
else:
# TODO Exception
print('data format not valid.')
return None
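# Illustrative round-trip (not part of the original module): for the `dv_up`
# format, json() serialises exactly those attributes from __attr_valid_metadata
# that have been set, so a minimal Datafile yields:
#
#     import json
#     df = Datafile(filename='tests/data/datafile.txt', pid='doi:10.11587/EVMUHP')
#     df.description = 'Test file'
#     json.loads(df.json())
#     # -> {'description': 'Test file', 'pid': 'doi:10.11587/EVMUHP'}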
|
StarcoderdataPython
|
11365409
|
C = raw_input()
C = C.upper()
vowels = ['A','E','I','O','U']
if C in vowels:
print("Vowel")
else:
print("Consonant")
|
StarcoderdataPython
|
6490016
|
from random import random
# ---------------------- Observable --------------------------
class IObservable:
def __init__(self):
self.__observers = []
def attach(self, observer):
self.__observers.append(observer)
def detach(self, observer):
self.__observers = [obs for obs in self.__observers if obs is not observer]
def notify(self):
for obs in self.__observers:
obs.update()
# --- Concrete Observables ---
class TempObservable(IObservable):
temp = 0
def test_run(self):
for _ in range(3):
self.temp = random() + 25
self.notify()
def get_temp(self):
return self.temp
class HumidObservable(IObservable):
humid = 0
def test_run(self):
for _ in range(4):
self.humid = random() + 74
self.notify()
def get_humid(self):
return self.humid
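# --- Concrete Observer (illustrative sketch, not part of the original snippet) ---
# The observables above only publish notifications; this hypothetical observer
# shows the pull-style counterpart: it registers itself with a subject and reads
# the current value whenever update() is called by notify().
class TempDisplay:
    def __init__(self, subject):
        self._subject = subject
        subject.attach(self)

    def update(self):
        print("Temperature is now {:.2f}".format(self._subject.get_temp()))


if __name__ == "__main__":
    sensor = TempObservable()
    TempDisplay(sensor)
    sensor.test_run()  # prints three readings via the attached observer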
|
StarcoderdataPython
|
5198256
|
<reponame>yakky/microservice-talk<filename>book_search/models/response.py
from typing import List
from pydantic import BaseModel
class Author(BaseModel):
name: str
class Tag(BaseModel):
title: str
slug: str
class Book(BaseModel):
book_id: int
title: str
isbn13: str
authors: List[Author]
tags: List[Tag]
original_publication_year: int
class BookList(BaseModel):
results: List[Book]
count: int
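# Illustrative construction (not part of the original module): pydantic
# validates and coerces the nested models, so a response payload can be built
# from plain values and serialised back to JSON.
#
#     book = Book(
#         book_id=1,
#         title="Example Book",
#         isbn13="9780000000000",
#         authors=[Author(name="Jane Doe")],
#         tags=[Tag(title="Python", slug="python")],
#         original_publication_year=2020,
#     )
#     BookList(results=[book], count=1).json()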
|
StarcoderdataPython
|
8129558
|
<reponame>fga-gpp-mds/2018.1-Cris-Down<filename>drdown/forum/views/view_post.py
from ..models.model_post import Post
from ..models.model_category import Category
from .view_base import BaseViewTemplate
from django.views.generic import ListView
from django.views.generic import CreateView
from django.views.generic import DeleteView
from django.views.generic import UpdateView
from django.urls import reverse_lazy
from django.utils import timezone
from drdown.forum.form.forum_forms import PostForm
class PostListView(BaseViewTemplate, ListView):
model = Post
paginate_by = 20
def get_context_data(self, **kwargs):
context = super(PostListView, self).get_context_data(**kwargs)
pk = self.kwargs.get('pk')
context['post_category'] = Category.objects.get(pk=pk)
return context
def get_queryset(self):
pk = self.kwargs.get('pk')
queryset = Post.objects.filter(
category=Category.objects.get(pk=pk)
).order_by('-created_at')
return queryset
class PostCreateView(BaseViewTemplate, CreateView):
model = Post
form_class = PostForm
template_name = 'forum/form_post.html'
def get_success_url(self, **kwargs):
success_create_url = reverse_lazy(
viewname='forum:list_posts',
kwargs={
'slug': self.kwargs.get('slug'),
'pk': self.kwargs.get('pk'),
}
)
return success_create_url
def get_context_data(self, **kwargs):
context = super(PostCreateView, self).get_context_data(**kwargs)
pk = self.kwargs.get('pk')
context['post_category'] = Category.objects.get(pk=pk)
return context
def form_valid(self, form):
# Get category that post belongs to
form.instance.category = Category.objects.get(pk=self.kwargs.get('pk'))
form.instance.created_by = self.request.user
form.save()
return super(PostCreateView, self).form_valid(form)
class PostDeleteView (BaseViewTemplate, DeleteView):
model = Post
def get_success_url(self, **kwargs):
success_delete_url = reverse_lazy(
viewname='forum:list_posts',
kwargs={
'pk': self.kwargs.get('pk'),
'slug': self.kwargs.get('slug'),
}
)
return success_delete_url
def get_object(self):
post = Post.objects.get(
pk=self.kwargs.get('post_pk')
)
return post
class PostUpdateView(BaseViewTemplate, UpdateView):
model = Post
form_class = PostForm
template_name = 'forum/form_post.html'
def get_success_url(self, **kwargs):
success_update_url = reverse_lazy(
viewname='forum:list_commentary',
kwargs={
'pk': self.kwargs.get('pk'),
'post_pk': self.kwargs.get('post_pk'),
'slug': self.kwargs.get('slug'),
}
)
return success_update_url
def get_context_data(self, **kwargs):
context = super(PostUpdateView, self).get_context_data(**kwargs)
pk = self.kwargs.get('pk')
context['post_category'] = Category.objects.get(pk=pk)
return context
def get_object(self):
post = Post.objects.get(
pk=self.kwargs.get('post_pk')
)
return post
def form_valid(self, form):
# Get updated_at datetime
form.instance.updated_at = timezone.now()
form.save()
return super(PostUpdateView, self).form_valid(form)
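# Illustrative only: a hypothetical `urls.py` consistent with the view names and
# URL kwargs referenced above (`forum:list_posts`, `forum:list_commentary`,
# `slug`, `pk`, `post_pk`). The project's real routing may differ.
#
#     from django.urls import path
#
#     app_name = 'forum'
#     urlpatterns = [
#         path('<slug:slug>/<int:pk>/posts/', PostListView.as_view(), name='list_posts'),
#         path('<slug:slug>/<int:pk>/posts/new/', PostCreateView.as_view(), name='create_post'),
#         path('<slug:slug>/<int:pk>/posts/<int:post_pk>/edit/', PostUpdateView.as_view(), name='update_post'),
#         path('<slug:slug>/<int:pk>/posts/<int:post_pk>/delete/', PostDeleteView.as_view(), name='delete_post'),
#     ]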
|
StarcoderdataPython
|
5078157
|
import os
import pathlib
from dotenv import load_dotenv
load_dotenv()
# env vars
PREFIX = os.getenv("PREFIX") or "!"
TOKEN = os.getenv("TOKEN")
# paths
EXTENSIONS = pathlib.Path("bot/exts/")
|
StarcoderdataPython
|
6669328
|
<reponame>LihaoR/tensorflow-rl<gh_stars>100-1000
# -*- encoding: utf-8 -*-
import time
import cPickle
import numpy as np
import utils.logger
import tensorflow as tf
from skimage.transform import resize
from collections import deque
from utils import checkpoint_utils
from actor_learner import ONE_LIFE_GAMES
from utils.decorators import Experimental
from utils.fast_cts import CTSDensityModel
from utils.replay_memory import ReplayMemory
from policy_based_actor_learner import A3CLearner, A3CLSTMLearner
from value_based_actor_learner import ValueBasedLearner
logger = utils.logger.getLogger('intrinsic_motivation_actor_learner')
class PixelCNNDensityModel(object):
pass
class PerPixelDensityModel(object):
"""
Calculates image probability according to per-pixel counts: P(X) = ∏ p(x_ij)
Mostly here for debugging purposes as CTSDensityModel is much more expressive
"""
def __init__(self, height=42, width=42, num_bins=8, beta=0.05):
self.counts = np.zeros((width, height, num_bins))
self.height = height
self.width = width
self.beta = beta
self.num_bins = num_bins
def update(self, obs):
obs = resize(obs, (self.height, self.width), preserve_range=True)
obs = np.floor((obs*self.num_bins)).astype(np.int32)
log_prob, log_recoding_prob = self._update(obs)
return self.exploration_bonus(log_prob, log_recoding_prob)
def _update(self, obs):
log_prob = 0.0
log_recoding_prob = 0.0
for i in range(self.height):
for j in range(self.height):
self.counts[i, j, obs[i, j]] += 1
bin_count = self.counts[i, j, obs[i, j]]
pixel_mass = self.counts[i, j].sum()
log_prob += np.log(bin_count / pixel_mass)
log_recoding_prob += np.log((bin_count + 1) / (pixel_mass + 1))
return log_prob, log_recoding_prob
def exploration_bonus(self, log_prob, log_recoding_prob):
recoding_prob = np.exp(log_recoding_prob)
prob_ratio = np.exp(log_recoding_prob - log_prob)
pseudocount = (1 - recoding_prob) / np.maximum(prob_ratio - 1, 1e-10)
return self.beta / np.sqrt(pseudocount + .01)
def get_state(self):
return self.num_bins, self.height, self.width, self.beta, self.counts
def set_state(self, state):
self.num_bins, self.height, self.width, self.beta, self.counts = state
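# Illustrative usage (not part of the original module): update() folds one
# grayscale frame into the per-pixel counts and returns the exploration bonus
# beta / sqrt(N_hat + 0.01), where the pseudocount
# N_hat = (1 - p') / (p'/p - 1) is derived from the frame probability p before
# and p' after the update (see exploration_bonus above).
#
#     model = PerPixelDensityModel(height=42, width=42, num_bins=8, beta=0.05)
#     frame = np.random.rand(210, 160)     # raw observation scaled to [0, 1)
#     bonus = model.update(frame)          # novelty bonus for this frame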
class DensityModelMixin(object):
"""
Mixin to provide initialization and synchronization methods for density models
"""
def _init_density_model(self, args):
self.density_model_update_steps = 20*args.q_target_update_steps
self.density_model_update_flags = args.density_model_update_flags
model_args = {
'height': args.cts_rescale_dim,
'width': args.cts_rescale_dim,
'num_bins': args.cts_bins,
'beta': args.cts_beta
}
if args.density_model == 'cts':
self.density_model = CTSDensityModel(**model_args)
else:
self.density_model = PerPixelDensityModel(**model_args)
def write_density_model(self):
logger.info('T{} Writing Pickled Density Model to File...'.format(self.actor_id))
raw_data = cPickle.dumps(self.density_model.get_state(), protocol=2)
with self.barrier.counter.lock, open('/tmp/density_model.pkl', 'wb') as f:
f.write(raw_data)
for i in xrange(len(self.density_model_update_flags.updated)):
self.density_model_update_flags.updated[i] = 1
def read_density_model(self):
logger.info('T{} Synchronizing Density Model...'.format(self.actor_id))
with self.barrier.counter.lock, open('/tmp/density_model.pkl', 'rb') as f:
raw_data = f.read()
self.density_model.set_state(cPickle.loads(raw_data))
class A3CDensityModelMixin(DensityModelMixin):
"""
Mixin to share _train method between A3C and A3C-LSTM models
"""
def _train(self):
""" Main actor learner loop for advantage actor critic learning. """
logger.debug("Actor {} resuming at Step {}".format(self.actor_id,
self.global_step.value()))
bonuses = deque(maxlen=100)
while (self.global_step.value() < self.max_global_steps):
# Sync local learning net with shared mem
s = self.emulator.get_initial_state()
self.reset_hidden_state()
self.local_episode += 1
episode_over = False
total_episode_reward = 0.0
episode_start_step = self.local_step
while not episode_over:
self.sync_net_with_shared_memory(self.local_network, self.learning_vars)
self.save_vars()
rewards = list()
states = list()
actions = list()
values = list()
local_step_start = self.local_step
self.set_local_lstm_state()
while self.local_step - local_step_start < self.max_local_steps and not episode_over:
# Choose next action and execute it
a, readout_v_t, readout_pi_t = self.choose_next_action(s)
new_s, reward, episode_over = self.emulator.next(a)
total_episode_reward += reward
# Update density model
current_frame = new_s[...,-1]
bonus = self.density_model.update(current_frame)
bonuses.append(bonus)
if self.is_master() and (self.local_step % 400 == 0):
bonus_array = np.array(bonuses)
logger.debug('π_a={:.4f} / V={:.4f} / Mean Bonus={:.4f} / Max Bonus={:.4f}'.format(
readout_pi_t[a.argmax()], readout_v_t, bonus_array.mean(), bonus_array.max()))
# Rescale or clip immediate reward
reward = self.rescale_reward(self.rescale_reward(reward) + bonus)
rewards.append(reward)
states.append(s)
actions.append(a)
values.append(readout_v_t)
s = new_s
self.local_step += 1
global_step, _ = self.global_step.increment()
if global_step % self.density_model_update_steps == 0:
self.write_density_model()
if self.density_model_update_flags.updated[self.actor_id] == 1:
self.read_density_model()
self.density_model_update_flags.updated[self.actor_id] = 0
next_val = self.bootstrap_value(new_s, episode_over)
advantages = self.compute_gae(rewards, values, next_val)
targets = self.compute_targets(rewards, next_val)
# Compute gradients on the local policy/V network and apply them to shared memory
entropy = self.apply_update(states, actions, targets, advantages)
elapsed_time = time.time() - self.start_time
steps_per_sec = self.global_step.value() / elapsed_time
perf = "{:.0f}".format(steps_per_sec)
logger.info("T{} / EPISODE {} / STEP {}k / REWARD {} / {} STEPS/s".format(
self.actor_id,
self.local_episode,
self.global_step.value()/1000,
total_episode_reward,
perf))
self.log_summary(total_episode_reward, np.array(values).mean(), entropy)
@Experimental
class PseudoCountA3CLearner(A3CLearner, A3CDensityModelMixin):
"""
Attempt at replicating the A3C+ model from the paper 'Unifying Count-Based Exploration and Intrinsic Motivation' (https://arxiv.org/abs/1606.01868)
"""
def __init__(self, args):
super(PseudoCountA3CLearner, self).__init__(args)
self._init_density_model(args)
def train(self):
self._train()
@Experimental
class PseudoCountA3CLSTMLearner(A3CLSTMLearner, A3CDensityModelMixin):
def __init__(self, args):
super(PseudoCountA3CLSTMLearner, self).__init__(args)
self._init_density_model(args)
def train(self):
self._train()
class PseudoCountQLearner(ValueBasedLearner, DensityModelMixin):
"""
Based on DQN+CTS model from the paper 'Unifying Count-Based Exploration and Intrinsic Motivation' (https://arxiv.org/abs/1606.01868)
Presently the implementation differs from the paper in that the novelty bonuses are computed online rather than by computing the
prediction gains after the model has been updated with all frames from the episode. Async training with different final epsilon values
tends to produce better results than just using a single actor-learner.
"""
def __init__(self, args):
self.args = args
super(PseudoCountQLearner, self).__init__(args)
self.cts_eta = args.cts_eta
self.cts_beta = args.cts_beta
self.batch_size = args.batch_update_size
self.replay_memory = ReplayMemory(
args.replay_size,
self.local_network.get_input_shape(),
self.num_actions)
self._init_density_model(args)
self._double_dqn_op()
def generate_final_epsilon(self):
if self.num_actor_learners == 1:
return self.args.final_epsilon
else:
return super(PseudoCountQLearner, self).generate_final_epsilon()
def _get_summary_vars(self):
q_vars = super(PseudoCountQLearner, self)._get_summary_vars()
bonus_q05 = tf.Variable(0., name='novelty_bonus_q05')
s1 = tf.summary.scalar('Novelty_Bonus_q05_{}'.format(self.actor_id), bonus_q05)
bonus_q50 = tf.Variable(0., name='novelty_bonus_q50')
s2 = tf.summary.scalar('Novelty_Bonus_q50_{}'.format(self.actor_id), bonus_q50)
bonus_q95 = tf.Variable(0., name='novelty_bonus_q95')
s3 = tf.summary.scalar('Novelty_Bonus_q95_{}'.format(self.actor_id), bonus_q95)
augmented_reward = tf.Variable(0., name='augmented_episode_reward')
s4 = tf.summary.scalar('Augmented_Episode_Reward_{}'.format(self.actor_id), augmented_reward)
return q_vars + [bonus_q05, bonus_q50, bonus_q95, augmented_reward]
#TODO: refactor to make this cleaner
def prepare_state(self, state, total_episode_reward, steps_at_last_reward,
ep_t, episode_ave_max_q, episode_over, bonuses, total_augmented_reward):
# Start a new game on reaching terminal state
if episode_over:
T = self.global_step.value() * self.max_local_steps
t = self.local_step
e_prog = float(t)/self.epsilon_annealing_steps
episode_ave_max_q = episode_ave_max_q/float(ep_t)
s1 = "Q_MAX {0:.4f}".format(episode_ave_max_q)
s2 = "EPS {0:.4f}".format(self.epsilon)
self.scores.insert(0, total_episode_reward)
if len(self.scores) > 100:
self.scores.pop()
logger.info('T{0} / STEP {1} / REWARD {2} / {3} / {4}'.format(
self.actor_id, T, total_episode_reward, s1, s2))
logger.info('ID: {0} -- RUNNING AVG: {1:.0f} ± {2:.0f} -- BEST: {3:.0f}'.format(
self.actor_id,
np.array(self.scores).mean(),
2*np.array(self.scores).std(),
max(self.scores),
))
self.log_summary(
total_episode_reward,
episode_ave_max_q,
self.epsilon,
np.percentile(bonuses, 5),
np.percentile(bonuses, 50),
np.percentile(bonuses, 95),
total_augmented_reward,
)
state = self.emulator.get_initial_state()
ep_t = 0
total_episode_reward = 0
episode_ave_max_q = 0
episode_over = False
return (
state,
total_episode_reward,
steps_at_last_reward,
ep_t,
episode_ave_max_q,
episode_over
)
def _double_dqn_op(self):
q_local_action = tf.cast(tf.argmax(
self.local_network.output_layer, axis=1), tf.int32)
q_target_max = utils.ops.slice_2d(
self.target_network.output_layer,
tf.range(0, self.batch_size),
q_local_action,
)
self.one_step_reward = tf.placeholder(tf.float32, self.batch_size, name='one_step_reward')
self.is_terminal = tf.placeholder(tf.bool, self.batch_size, name='is_terminal')
self.y_target = self.one_step_reward + self.cts_eta*self.gamma*q_target_max \
* (1 - tf.cast(self.is_terminal, tf.float32))
self.double_dqn_loss = self.local_network._value_function_loss(
self.local_network.q_selected_action
- tf.stop_gradient(self.y_target))
self.double_dqn_grads = tf.gradients(self.double_dqn_loss, self.local_network.params)
# def batch_update(self):
# if len(self.replay_memory) < self.replay_memory.maxlen//10:
# return
# s_i, a_i, r_i, s_f, is_terminal = self.replay_memory.sample_batch(self.batch_size)
# feed_dict={
# self.one_step_reward: r_i,
# self.target_network.input_ph: s_f,
# self.local_network.input_ph: np.vstack([s_i, s_f]),
# self.local_network.selected_action_ph: np.vstack([a_i, a_i]),
# self.is_terminal: is_terminal
# }
# grads = self.session.run(self.double_dqn_grads, feed_dict=feed_dict)
# self.apply_gradients_to_shared_memory_vars(grads)
def batch_update(self):
if len(self.replay_memory) < self.replay_memory.maxlen//10:
return
s_i, a_i, r_i, s_f, is_terminal = self.replay_memory.sample_batch(self.batch_size)
feed_dict={
self.local_network.input_ph: s_f,
self.target_network.input_ph: s_f,
self.is_terminal: is_terminal,
self.one_step_reward: r_i,
}
y_target = self.session.run(self.y_target, feed_dict=feed_dict)
feed_dict={
self.local_network.input_ph: s_i,
self.local_network.target_ph: y_target,
self.local_network.selected_action_ph: a_i
}
grads = self.session.run(self.local_network.get_gradients,
feed_dict=feed_dict)
self.apply_gradients_to_shared_memory_vars(grads)
def train(self):
""" Main actor learner loop for n-step Q learning. """
logger.debug("Actor {} resuming at Step {}, {}".format(self.actor_id,
self.global_step.value(), time.ctime()))
s = self.emulator.get_initial_state()
s_batch = list()
a_batch = list()
y_batch = list()
bonuses = deque(maxlen=1000)
episode_over = False
t0 = time.time()
global_steps_at_last_record = self.global_step.value()
while (self.global_step.value() < self.max_global_steps):
# # Sync local learning net with shared mem
# self.sync_net_with_shared_memory(self.local_network, self.learning_vars)
# self.save_vars()
rewards = list()
states = list()
actions = list()
max_q_values = list()
local_step_start = self.local_step
total_episode_reward = 0
total_augmented_reward = 0
episode_ave_max_q = 0
ep_t = 0
while not episode_over:
# Sync local learning net with shared mem
self.sync_net_with_shared_memory(self.local_network, self.learning_vars)
self.save_vars()
# Choose next action and execute it
a, q_values = self.choose_next_action(s)
new_s, reward, episode_over = self.emulator.next(a)
total_episode_reward += reward
max_q = np.max(q_values)
current_frame = new_s[...,-1]
bonus = self.density_model.update(current_frame)
bonuses.append(bonus)
# Rescale or clip immediate reward
reward = self.rescale_reward(self.rescale_reward(reward) + bonus)
total_augmented_reward += reward
ep_t += 1
rewards.append(reward)
states.append(s)
actions.append(a)
max_q_values.append(max_q)
s = new_s
self.local_step += 1
episode_ave_max_q += max_q
global_step, _ = self.global_step.increment()
if global_step % self.q_target_update_steps == 0:
self.update_target()
if global_step % self.density_model_update_steps == 0:
self.write_density_model()
# Sync local tensorflow target network params with shared target network params
if self.target_update_flags.updated[self.actor_id] == 1:
self.sync_net_with_shared_memory(self.target_network, self.target_vars)
self.target_update_flags.updated[self.actor_id] = 0
if self.density_model_update_flags.updated[self.actor_id] == 1:
self.read_density_model()
self.density_model_update_flags.updated[self.actor_id] = 0
if self.local_step % self.q_update_interval == 0:
self.batch_update()
if self.is_master() and (self.local_step % 500 == 0):
bonus_array = np.array(bonuses)
steps = global_step - global_steps_at_last_record
global_steps_at_last_record = global_step
logger.debug('Mean Bonus={:.4f} / Max Bonus={:.4f} / STEPS/s={}'.format(
bonus_array.mean(), bonus_array.max(), steps/float(time.time()-t0)))
t0 = time.time()
else:
#compute monte carlo return
mc_returns = np.zeros((len(rewards),), dtype=np.float32)
running_total = 0.0
for i, r in enumerate(reversed(rewards)):
running_total = r + self.gamma*running_total
mc_returns[len(rewards)-i-1] = running_total
mixed_returns = self.cts_eta*np.asarray(rewards) + (1-self.cts_eta)*mc_returns
#update replay memory
states.append(new_s)
episode_length = len(rewards)
for i in range(episode_length):
self.replay_memory.append(
states[i],
actions[i],
mixed_returns[i],
i+1 == episode_length)
s, total_episode_reward, _, ep_t, episode_ave_max_q, episode_over = \
self.prepare_state(s, total_episode_reward, self.local_step, ep_t, episode_ave_max_q, episode_over, bonuses, total_augmented_reward)
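# Illustrative note (not part of the original learner): the replay targets above
# mix the one-step augmented reward with the full Monte Carlo return using
# cts_eta, i.e. R_mixed[t] = eta * r[t] + (1 - eta) * (r[t] + gamma * r[t+1] + ...),
# exactly as computed in `mixed_returns`. A tiny standalone check of that loop:
#
#     rewards, gamma, eta = [1.0, 0.0, 1.0], 0.99, 0.9
#     mc = np.zeros(len(rewards), dtype=np.float32)
#     running = 0.0
#     for i, r in enumerate(reversed(rewards)):
#         running = r + gamma * running
#         mc[len(rewards) - i - 1] = running       # -> [1.9801, 0.99, 1.0]
#     mixed = eta * np.asarray(rewards) + (1 - eta) * mc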
|
StarcoderdataPython
|
3276679
|
import re
from django.contrib.postgres.aggregates import StringAgg
from django.forms import CheckboxSelectMultiple
from django.urls import reverse_lazy
from additional_codes import models
from common.filters import ActiveStateMixin
from common.filters import LazyMultipleChoiceFilter
from common.filters import StartYearMixin
from common.filters import TamatoFilter
from common.filters import TamatoFilterBackend
from common.filters import TamatoFilterMixin
from common.filters import type_choices
COMBINED_ADDITIONAL_CODE_AND_TYPE_ID = re.compile(
r"(?P<type__sid>[A-Z0-9])(?P<code>[A-Z0-9]{3})",
)
class AdditionalCodeFilterMixin(TamatoFilterMixin):
"""
Filter mixin to allow custom filtering on type__sid, sid, code and
description.
Also provides a regex to split combined type__sid and code. e.g. "8001" ->
"8", "001"
"""
search_fields = (
StringAgg("type__sid", delimiter=" "),
"code",
"sid",
StringAgg("descriptions__description", delimiter=" "),
) # XXX order is significant
search_regex = COMBINED_ADDITIONAL_CODE_AND_TYPE_ID
class AdditionalCodeFilterBackend(TamatoFilterBackend, AdditionalCodeFilterMixin):
pass
class AdditionalCodeFilter(
TamatoFilter,
AdditionalCodeFilterMixin,
StartYearMixin,
ActiveStateMixin,
):
"""
FilterSet for Additional Codes.
Provides multiple choice widgets for Type SIDs, the start year of the
additional code as well as filters for the code and SID
"""
additional_code_type = LazyMultipleChoiceFilter(
choices=type_choices(models.AdditionalCodeType.objects.latest_approved()),
widget=CheckboxSelectMultiple,
field_name="type__sid",
label="Additional Code Type",
help_text="Select all that apply",
required=False,
)
clear_url = reverse_lazy("additional_code-ui-list")
class Meta:
model = models.AdditionalCode
# Defines the order shown in the form.
fields = ["search", "additional_code_type", "start_year", "active_state"]
|
StarcoderdataPython
|
237083
|
<gh_stars>1-10
import random
class RandomList(list):
def get_random_element(self):
"""
Returns and removes a random element from the list
"""
element_to_remove = random.choice(self)
self.remove(element_to_remove)
return element_to_remove
ll = RandomList([1, 2, 3, 4, 5, 6, 7])
print(ll)
print(ll.get_random_element())
print(ll)
|
StarcoderdataPython
|
6607767
|
#!/usr/bin/env python3
import sys
from testrunner import run
def testfunc(child):
child.sendline("ifconfig")
child.expect(r"Iface\s+(\d+)\s+HWaddr:")
if __name__ == "__main__":
sys.exit(run(testfunc, timeout=1, echo=False))
|
StarcoderdataPython
|
8106648
|
<reponame>ashimgiyanani/ProjectTemplate_python<filename>src/s_MetMast_Gill_plots.py
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 15:11:00 2020
@author: papalk
"""
import sys
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.dates import DateFormatter
from matplotlib.ticker import StrMethodFormatter
import pandas as pd
import numpy as np
import xarray as xr
from mpl_toolkits.mplot3d import Axes3D
plt.style.use('seaborn-whitegrid')
SMALL_SIZE = 17
MEDIUM_SIZE = 22
BIGGER_SIZE = 22
plt.rc('font', size=SMALL_SIZE,weight = 'bold') # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rc('figure', figsize = (8, 8))
def addtimedecimal(l):
for i,x in enumerate(l):
if '.' not in x:
l[i] = x+'.00'
else:
l[i] = x
return(l)
#%% Import data gill
# Set path
path = r'Z:\Projekte\109797-TestfeldBHV\30_Technical_execution_Confidential\TP3\AP2_Aufbau_Infrastruktur\Infrastruktur_Windmessung\02_Equipment\01_Wartung Messmast GE-NET_DWG_20190226\Maintenance\MetMast Upgrade\Data\ASCII'
# Select device
device = 'gill'
# Select start and end date
dt_start = '2021-01-29 00:00:00'  # Start date in the form 'YYYY-MM-DD HH:MM:SS'
dt_end = dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S') + dt.timedelta(days=10)  # End date = start date + 10 days
dt_end = dt_end.strftime('%Y-%m-%d %H:%M:%S')
#import sys
# insert at 1, 0 is the script path (or '' in REPL)
#sys.path.insert(0, r'C:\Users\papalk\Desktop\04_Python\Testfeld')
# import data
from f_import_gill_thies_data import f_import_gill_thies_data
df = f_import_gill_thies_data(path,device,dt_start,dt_end)
#Correct time lag
df.index = [date+dt.timedelta(seconds=0) for date in df.index] # Logger time unit: UTC+0 after 14/01/2021 11:00, if before seconds=-7200
#%%
# Resample files gill
dfr = df.resample('1H').mean() # Resample in hourly averages mean(skipna = True)
dfs = df.resample('1H').count()/72000*100 # Resample and count in hourly averages
df10 = df.resample('600S').mean() # Resample in 10 min averages mean(skipna = True)
df10s = df.resample('600S').count()/12000*100 # Resample and count in 10min averages
#%% Calculate Vhor & Dir
#55m
Vhor55 = np.sqrt(df.gill_55_u**2+df.gill_55_v**2).resample('600S').mean()
Vhor55_std = np.sqrt(df.gill_55_u**2+df.gill_55_v**2).resample('600S').std()
Dir55 = np.mod(90-np.mod(np.arctan2(df.gill_55_v,df.gill_55_u)*180/np.pi,360).resample('600S').mean()+30.5,360)
Dir55_std = np.mod(90-np.mod(np.arctan2(df.gill_55_v,df.gill_55_u)*180/np.pi,360).resample('600S').std()+30.5,360)
#110m
Vhor110 = np.sqrt(df.gill_115_u**2+df.gill_115_v**2).resample('600S').mean()
Vhor110_std = np.sqrt(df.gill_115_u**2+df.gill_115_v**2).resample('600S').std()
Dir110 = np.mod(90-np.mod(np.arctan2(df.gill_115_v,df.gill_115_u)*180/np.pi,360).resample('600S').mean()+30.6,360)
Dir110_std = np.mod(90-np.mod(np.arctan2(df.gill_115_v,df.gill_115_u)*180/np.pi,360).resample('600S').std()+30.6,360)
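# A compact helper expressing the same computation (sketch only; the inline
# version above is what actually runs). Horizontal speed is the vector
# magnitude sqrt(u^2 + v^2); direction converts the mathematical angle
# atan2(v, u) to the meteorological convention (clockwise from north) and adds
# the per-height offset (30.5 deg at 55 m, 30.6 deg at 115 m, presumably the
# boom alignment).
#
# def horizontal_wind(u, v, offset_deg, period='600S'):
#     vhor = np.sqrt(u**2 + v**2).resample(period).mean()
#     direction = np.mod(90 - np.mod(np.arctan2(v, u)*180/np.pi, 360).resample(period).mean() + offset_deg, 360)
#     return vhor, direction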
#%% Plots
# # raw gill
# for i in [1,3,4,5,7,8,11,12,13,15,16]:
# # date-ws_free_avg
# fig = plt.figure(figsize = (20, 8))
# ax = fig.add_subplot(111)
# # ax.plot(df.ix[df['LOS index']==LOS,i],'.',label = 'Valid data');
# ax.plot(df.TIMESTAMP[(df.gill_55_SpeedOfSound >100)&(df.gill_115_SpeedOfSound>100)],df.ix[(df.gill_55_SpeedOfSound >100)&(df.gill_115_SpeedOfSound>100),i],'.');
# ax.set_xlabel('time ',labelpad=40,weight= 'bold')
# ax.set_ylabel(df.columns[i],labelpad=40,weight= 'bold')
# date_form = DateFormatter("%H:%M:%S")
# ax.xaxis.set_major_formatter(date_form)
# legend = ax.legend(frameon = 1)
# frame = legend.get_frame()
# frame.set_color('white')
# plt.savefig(r'Z:\Projekte\109797-TestfeldBHV\30_Technical_execution_Confidential\TP3\AP2_Aufbau_Infrastruktur\Infrastruktur_Windmessung\02_Equipment\01_Wartung Messmast GE-NET_DWG_20190226\MetMast Upgrade\Data\plots\TS' + path[-20:-4]+'_'+df.columns[i]+'.png',bbox_inches='tight')
# # Hourly status
# for i in np.arange(len(dfr.columns)):
# # date-ws_free_avg
# fig = plt.figure(figsize = (20, 8))
# ax = fig.add_subplot(111)
# ax.plot(dfr.index[(dfr.gill_55_SpeedOfSound >100)&(dfr.gill_115_SpeedOfSound>100)],dfr.ix[(dfr.gill_55_SpeedOfSound >100)&(dfr.gill_115_SpeedOfSound>100),i],'.');
# ax.set_xlabel('time ',labelpad=40,weight= 'bold')
# ax.set_ylabel(dfr.columns[i],labelpad=40,weight= 'bold')
# date_form = DateFormatter("%H:%M:%S")
# ax.xaxis.set_major_formatter(date_form)
# legend = ax.legend(frameon = 1)
# frame = legend.get_frame()
# frame.set_color('white')
# plt.savefig(r'Z:\Projekte\109797-TestfeldBHV\30_Technical_execution_Confidential\TP3\AP2_Aufbau_Infrastruktur\Infrastruktur_Windmessung\02_Equipment\01_Wartung Messmast GE-NET_DWG_20190226\MetMast Upgrade\Data\plots\TS_1H_2020-09-06_'+df.columns[i]+'.png',bbox_inches='tight')
#%% QM plots
fig,ax = plt.subplots(5,1, figsize = (10, 15),sharex=True)
ax[0].plot(df10.index[(df10.index>dt_start)&(df10.index<dt_end)],df10['gill_115_u'][(df10.index>dt_start)&(df10.index<dt_end)].values,'.');
ax[0].set_xlabel('date',labelpad=40,weight= 'bold')
ax[0].set_ylabel("u [m/s]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[0].xaxis.set_major_formatter(date_form)
ax[0].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[0].set_ylim([-25,25])
ax[1].plot(df10.index[(df10.index>dt_start)&(df10.index<dt_end)],df10['gill_115_v'][(df10.index>dt_start)&(df10.index<dt_end)].values,'.');
ax[1].set_xlabel('date',labelpad=40,weight= 'bold')
ax[1].set_ylabel("v [m/s]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[1].xaxis.set_major_formatter(date_form)
ax[1].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[1].set_ylim([-25,25])
ax[2].plot(df10.index[(df10.index>dt_start)&(df10.index<dt_end)],df10['gill_115_w'][(df10.index>dt_start)&(df10.index<dt_end)].values,'.');
ax[2].set_xlabel('date',labelpad=40,weight= 'bold')
ax[2].set_ylabel("w [m/s]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[2].xaxis.set_major_formatter(date_form)
ax[2].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[2].set_ylim([-5,5])
ax[3].plot(df10.index[(df10.index>dt_start)&(df10.index<dt_end)],df10['gill_115_SonicTempC'][(df10.index>dt_start)&(df10.index<dt_end)].values,'.');
ax[3].set_xlabel('date',labelpad=40,weight= 'bold')
ax[3].set_ylabel("Temp [$^o$C]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[3].xaxis.set_major_formatter(date_form)
ax[3].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[3].set_ylim([-10,30])
ax[4].plot(df10s.index[(df10s.index>dt_start)&(df10s.index<dt_end)],df10s['RECORD'][(df10s.index>dt_start)&(df10s.index<dt_end)].values,'.',color = 'limegreen');
ax[4].plot(df10s.index[(df10s.index>dt_start)&(df10s.index<dt_end)&(df10s['RECORD']<100)],df10s['RECORD'][(df10s.index>dt_start)&(df10s.index<dt_end)&(df10s['RECORD']<100)].values,'.',color = 'red');
ax[4].set_xlabel('date',labelpad=40,weight= 'bold')
ax[4].set_ylabel("Availability [%]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[4].xaxis.set_major_formatter(date_form)
ax[4].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[4].set_ylim([0,100])
plt.savefig(r'Z:\Projekte\109797-TestfeldBHV\30_Technical_execution_Confidential\TP3\AP2_Aufbau_Infrastruktur\Infrastruktur_Windmessung\02_Equipment\01_Wartung Messmast GE-NET_DWG_20190226\Maintenance\MetMast Upgrade\Data\\'+dt_start[0:10]+'_'+dt_end[0:10]+'_115_gill.png',
bbox_inches='tight',dpi = 100)
# QM plots
fig,ax = plt.subplots(5,1, figsize = (10, 15),sharex=True)
ax[0].plot(df10.index[(df10.index>dt_start)&(df10.index<dt_end)],df10['gill_55_u'][(df10.index>dt_start)&(df10.index<dt_end)].values,'.');
ax[0].set_xlabel('date',labelpad=40,weight= 'bold')
ax[0].set_ylabel("u [m/s]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[0].xaxis.set_major_formatter(date_form)
ax[0].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[0].set_ylim([-25,25])
ax[1].plot(df10.index[(df10.index>dt_start)&(df10.index<dt_end)],df10['gill_55_v'][(df10.index>dt_start)&(df10.index<dt_end)].values,'.');
ax[1].set_xlabel('date',labelpad=40,weight= 'bold')
ax[1].set_ylabel("v [m/s]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[1].xaxis.set_major_formatter(date_form)
ax[1].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[1].set_ylim([-25,25])
ax[2].plot(df10.index[(df10.index>dt_start)&(df10.index<dt_end)],df10['gill_55_w'][(df10.index>dt_start)&(df10.index<dt_end)].values,'.');
ax[2].set_xlabel('date',labelpad=40,weight= 'bold')
ax[2].set_ylabel("w [m/s]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[2].xaxis.set_major_formatter(date_form)
ax[2].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[2].set_ylim([-5,5])
ax[3].plot(df10.index[(df10.index>dt_start)&(df10.index<dt_end)],df10['gill_55_SonicTempC'][(df10.index>dt_start)&(df10.index<dt_end)].values,'.');
ax[3].set_xlabel('date',labelpad=40,weight= 'bold')
ax[3].set_ylabel("Temp [$^o$C]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[3].xaxis.set_major_formatter(date_form)
ax[3].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[3].set_ylim([-10,30])
ax[4].plot(df10s.index[(df10s.index>dt_start)&(df10s.index<dt_end)],df10s['RECORD'][(df10s.index>dt_start)&(df10s.index<dt_end)].values,'.',color = 'limegreen');
ax[4].plot(df10s.index[(df10s.index>dt_start)&(df10s.index<dt_end)&(df10s['RECORD']<100)],df10s['RECORD'][(df10s.index>dt_start)&(df10s.index<dt_end)&(df10s['RECORD']<100)].values,'.',color = 'red');
ax[4].set_xlabel('date',labelpad=40,weight= 'bold')
ax[4].set_ylabel("Availability [%]",labelpad=40,weight= 'bold')
date_form = DateFormatter("%d/%m")
ax[4].xaxis.set_major_formatter(date_form)
ax[4].set_xlim([ dt.datetime.strptime(dt_start, '%Y-%m-%d %H:%M:%S'), dt.datetime.strptime(dt_end, '%Y-%m-%d %H:%M:%S')])
ax[4].set_ylim([0,100])
plt.savefig(r'Z:\Projekte\109797-TestfeldBHV\30_Technical_execution_Confidential\TP3\AP2_Aufbau_Infrastruktur\Infrastruktur_Windmessung\02_Equipment\01_Wartung Messmast GE-NET_DWG_20190226\Maintenance\MetMast Upgrade\Data\\'+dt_start[0:10]+'_'+dt_end[0:10]+'_55_gill.png',
bbox_inches='tight',dpi = 100)
#%% Compare MM
# from class_mm import C_MMprocess
# start = C_MMprocess(r'E:\113166_Testfeld\01_Instruments\03_MetMast\OneDAS_2021-01-04T00-00_600_s_063b14a8.zip','600S')
# data_mm = start.loadzip()
# # plot Vhor
# fig = plt.figure(figsize = (20, 8))
# plt.plot(Vhor110,label = 'USA3D Gill 110m')
# plt.plot(Vhor55,label = 'USA3D Gill 55m')
# plt.plot(Vhor,label = 'USA3D Thies 25m')
# plt.plot(data_mm.M0000_V1,label = 'Cup Thies 115m')
# plt.plot(data_mm.M0030_V4,label = 'Cup Thies 55m')
# plt.plot(data_mm.M0040_V5,label = 'Cup Thies 25m')
# plt.ylim(0,20)
# plt.xlabel('date',labelpad=10,weight= 'bold')
# plt.ylabel('$V_{hor}$ [m/s]',labelpad=10,weight= 'bold')
# plt.legend()
# # plot Dir
# fig = plt.figure(figsize = (20, 8))
# plt.plot(Dir110,label = 'USA3D Gill 110m')
# plt.plot(Dir55,label = 'USA3D Gill 55m')
# plt.plot(Dir,label = 'USA3D Thies 25m')
# plt.plot(data_mm.M0070_D1,label = 'Vane Thies 110m')
# plt.plot(data_mm.M0100_D4,label = 'Vane Thies 23.5m')
# plt.ylim(0,360)
# plt.xlabel('date',labelpad=10,weight= 'bold')
# plt.ylabel('$V_{hor}$ [m/s]',labelpad=10,weight= 'bold')
# plt.legend()
# # # date-ws_free_avg
# # fig = plt.figure(figsize = (20, 8))
# # ax = fig.add_subplot(111)
# # ax.plot(df10.index[(df10.gill_55_SpeedOfSound >100)&(df10.gill_115_SpeedOfSound>100)],df10.ix[(df10.gill_55_SpeedOfSound >100)&(df10.gill_115_SpeedOfSound>100),1],'.',label = 'Gill110');
# # ax.plot(data_mm.index,data_mm.M0000_V1, '.',label = 'Cup115');
# # ax.set_xlabel('time ',labelpad=40,weight= 'bold')
# # ax.set_ylabel(["m/s"],labelpad=40,weight= 'bold')
# # date_form = DateFormatter("%H:%M:%S")
# # ax.xaxis.set_major_formatter(date_form)
# # legend = ax.legend(frameon = 1)
# # frame = legend.get_frame()
# # frame.set_color('white')
|
StarcoderdataPython
|
6536988
|
<filename>misc/explore.py
import json
import os
import subprocess
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
simulator_path = "/home/jm1417/Simulator/cmake-build-release/bin/simulator"
config_path = "/home/jm1417/Simulator/examples/multiplex/config.json"
program_path = "/home/jm1417/Simulator/examples/scamp5/motion.txt"
output_path = str(pathlib.Path(__file__).parent.absolute())
original_config = None
results = []
def read_json(file):
with open(file, 'r') as f:
data = json.load(f)
return data
def read_config_file():
data = read_json(config_path)
global original_config
original_config = data
return data
def write_config_file(config):
with open(config_path, 'w') as f:
json.dump(config, f, indent=4)
def execute(config):
write_config_file(config)
process = subprocess.Popen([simulator_path, config_path, program_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
def mutate(config):
row_strides = [8, 16, 64, 256]
col_strides = [8, 16, 64, 256]
rows = [64, 128, 256]
cols = [64, 128, 256]
clock_rates = [10000000]
array_rows = [256, 350, 500]
array_cols = [8, 16, 32]
possibilities = list(itertools.product(row_strides,col_strides,rows, cols, clock_rates, array_rows, array_cols))
print("Combinations:", len(possibilities))
count = 0
for row_stride, col_stride, rows, cols, clock_rate, array_row, array_col in possibilities:
if (row_stride > rows or col_stride > cols):
continue
config['SCAMP5M']['rows'] = rows
config['SCAMP5M']['cols'] = cols
config['SCAMP5M']['row_stride'] = row_stride
config['SCAMP5M']['col_stride'] = col_stride
config['SCAMP5M']['config']['clock_rate'] = clock_rate
config['SCAMP5M']['components'][1]['array_rows'] = array_row
config['SCAMP5M']['components'][1]['array_cols'] = array_col
config['output_filename'] = str(count)
print("Executing with row_stride=", row_stride, "col_stride=", col_stride, "rows=", rows, "cols=", cols, "clock=", clock_rate)
execute(config)
try:
res = read_json(output_path + "/" + str(count) + ".json")
results.append((res["Cycle count"], res["Architecture total power"]))
except:
print("Could not add")
count+=1
# execute the simulator with a new config
def run():
config = read_config_file()
mutate(config)
write_config_file(original_config)
for r in results:
print(r)
def is_outlier(points, thresh=6):
"""
Returns a boolean array with True if points are outliers and False
otherwise.
Parameters:
-----------
points : A numobservations-by-numdimensions array of observations
thresh : The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns:
--------
mask : A numobservations-length boolean array.
References:
----------
<NAME> and <NAME> (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, <NAME>, Ph.D., Editor.
"""
if len(points.shape) == 1:
points = points[:,None]
median = np.median(points, axis=0)
diff = np.sum((points - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
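# Illustrative use of is_outlier (not part of the original script): with the
# default threshold, only the extreme value is flagged.
#
#     data = np.array([1.0, 1.1, 0.9, 1.05, 50.0])
#     is_outlier(data)        # -> array([False, False, False, False,  True])
#     filtered = data[~is_outlier(data)]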
'''
Method to take two equally-sized lists and return just the elements which lie
on the Pareto frontier, sorted into order.
Default behaviour is to find the maximum for both X and Y, but the option is
available to specify maxX = False or maxY = False to find the minimum for either
or both of the parameters.
'''
def pareto_frontier(Xs, Ys, maxX = True, maxY = True):
# Sort the list in either ascending or descending order of X
myList = sorted([[Xs[i], Ys[i]] for i in range(len(Xs))], reverse=maxX)
# Start the Pareto frontier with the first value in the sorted list
p_front = [myList[0]]
# Loop through the sorted list
for pair in myList[1:]:
if maxY:
if pair[1] >= p_front[-1][1]: # Look for higher values of Y…
p_front.append(pair) # … and add them to the Pareto frontier
else:
if pair[1] <= p_front[-1][1]: # Look for lower values of Y…
p_front.append(pair) # … and add them to the Pareto frontier
# Turn resulting pairs back into a list of Xs and Ys
p_frontX = [pair[0] for pair in p_front]
p_frontY = [pair[1] for pair in p_front]
return p_frontX, p_frontY
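# Small illustrative check (not part of the original script), minimising both
# axes as plot_results() does below; the dominated point (2.5, 2.0) is dropped:
#
#     xs = [1.0, 2.0, 3.0, 1.5, 2.5]
#     ys = [4.0, 1.0, 0.5, 3.0, 2.0]
#     pareto_frontier(xs, ys, maxX=False, maxY=False)
#     # -> ([1.0, 1.5, 2.0, 3.0], [4.0, 3.0, 1.0, 0.5])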
def plot_results():
res = np.array(results)
cycles = res[:, 0]
powers = res[:, 1]
pareto_front = pareto_frontier(powers, cycles, False, False)
plt.scatter(powers, cycles)
plt.plot(pareto_front[0], pareto_front[1], color='r')
plt.xlim(0, 0.5)
plt.ylim(0, 0.4e7)
plt.xlabel('Total Power (W)')
plt.ylabel('Total Cycles')
plt.savefig("pareto.png")
plt.show()
run()
plot_results()
|
StarcoderdataPython
|
5065592
|
<gh_stars>0
import unittest
from pygraph.prim import min_span_tree
import pygraph
class TestPrimMST(unittest.TestCase):
def test_fixed(self):
g = pygraph.Graph()
# --- vertices
g.AddVertex('a')
g.AddVertex('b')
g.AddVertex('c')
g.AddVertex('d')
g.AddVertex('e')
g.AddVertex('f')
g.AddVertex('g')
# --- edges
g.AddEdge('a', 'b', 1)
g.AddEdge('a', 'c', 8)
g.AddEdge('a', 'e', 2)
g.AddEdge('b', 'd', 6)
g.AddEdge('c', 'd', 4)
g.AddEdge('c', 'e', 3)
g.AddEdge('d', 'f', 5)
g.AddEdge('e', 'f', 9)
g.AddEdge('e', 'g', 7)
expected_g = pygraph.Graph()
# --- vertices
expected_g.AddVertex('a')
expected_g.AddVertex('b')
expected_g.AddVertex('c')
expected_g.AddVertex('d')
expected_g.AddVertex('e')
expected_g.AddVertex('f')
expected_g.AddVertex('g')
# --- edges
expected_g.AddEdge('a', 'b', 1)
expected_g.AddEdge('a', 'e', 2)
expected_g.AddEdge('c', 'e', 3)
expected_g.AddEdge('c', 'd', 4)
expected_g.AddEdge('d', 'f', 5)
expected_g.AddEdge('e', 'g', 7)
mst_prim = min_span_tree(g)
mst_kruskal = pygraph.min_span_tree(g)
#print(mst)
self.assertEqual(expected_g, mst_prim)
self.assertEqual(mst_kruskal, mst_prim)
return
def test_fixed_2(self):
g = pygraph.Graph()
# --- vertices
g.AddVertex('a')
g.AddVertex('b')
g.AddVertex('c')
g.AddVertex('d')
g.AddVertex('e')
g.AddVertex('f')
g.AddVertex('g')
# --- edges
g.AddEdge('a', 'b', 12)
g.AddEdge('a', 'c', 8)
g.AddEdge('a', 'd', 13)
g.AddEdge('b', 'c', 21)
g.AddEdge('b', 'e', 32)
g.AddEdge('b', 'f', 7)
g.AddEdge('c', 'f', 2)
g.AddEdge('d', 'g', 9)
g_expected = pygraph.Graph()
# --- vertices
g_expected.AddVertex('a')
g_expected.AddVertex('b')
g_expected.AddVertex('c')
g_expected.AddVertex('d')
g_expected.AddVertex('e')
g_expected.AddVertex('f')
g_expected.AddVertex('g')
# --- edges
g_expected.AddEdge('a', 'c', 8)
g_expected.AddEdge('a', 'd', 13)
g_expected.AddEdge('b', 'e', 32)
g_expected.AddEdge('b', 'f', 7)
g_expected.AddEdge('c', 'f', 2)
g_expected.AddEdge('d', 'g', 9)
mst_prim = min_span_tree(g)
mst_kruskal = pygraph.min_span_tree(g)
self.assertEqual(mst_prim, g_expected)
self.assertEqual(mst_prim, mst_kruskal)
return
def test_auto_gen(self):
g = pygraph.gen_graph(4096)
mst_kruskal = pygraph.min_span_tree(g)
mst_prim = min_span_tree(g)
self.assertEqual(len(g.vertices), len(mst_kruskal.vertices))
self.assertEqual(len(mst_kruskal.vertices) - 1, len(mst_kruskal.edges))
self.assertEqual(len(g.vertices), len(mst_prim.vertices))
self.assertEqual(len(mst_prim.vertices) - 1, len(mst_prim.edges))
prim_sum = 0
for v in mst_prim.vertices.values():
for e in v.edges.values():
prim_sum += e.weight
pass
kruskal_sum = 0
for v in mst_kruskal.vertices.values():
for e in v.edges.values():
kruskal_sum += e.weight
pass
# The graph might be slighly different with edges have the same weight,
# but the sum of weight must be the same.
self.assertEqual(kruskal_sum, prim_sum)
return
pass
|
StarcoderdataPython
|
178275
|
"""Definitions for the base Cranelift language."""
|
StarcoderdataPython
|
6479257
|
"""Module used to build consistent UI for Colorium's tools using maya.cmds."""
import maya.cmds as cmds
import colorium.data_binding as data_binding
class CUI(object):
"""Base class for a Colorium UI."""
@property
def name(self):
"""The name of the UI."""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def controller(self):
"""The controller associated with the UI."""
return self._controller
@controller.setter
def controller(self, value):
self._controller = value
@property
def main_window(self):
"""The name of the UI's main window."""
return self._main_window
@main_window.setter
def main_window(self, value):
self._main_window = value
@property
def main_frame(self):
"""The name of the UI's main frame."""
return self._main_frame
@main_frame.setter
def main_frame(self, value):
self._main_frame = value
@property
def main_layout(self):
"""The name of the UI's main layout."""
return self._main_layout
@main_layout.setter
def main_layout(self, value):
self._main_layout = value
def __init__(self, name, controller):
self._controls = []
self._name = name
self._controller = None
self._main_window = ''
self._main_frame = ''
self._main_layout = ''
if not self._controller or self._controller != controller:
self._controller = controller
self._controller.ui = self
self.display_ui(self._controller.display_ui_callback)
def display_ui(self, callback):
"""
Displays the tool's UI.
"""
self.build_main_window()
self.build_main_layout()
self.build_ui()
for control in self._controls:
control.build_ui()
callback()
def build_ui(self):
"""
Builds the tool's UI.
"""
pass
def add_control(self, control):
"""Adds a control to the UI's controls."""
if control not in self._controls:
self._controls.append(control)
def remove_control(self, control):
"""Removes a control from the UI's controls."""
if control in self._controls:
self._controls.remove(control)
def get_control_by_name(self, name):
"""Returns a control from the UI's controls by it's name."""
for control in self._controls:
if control.name == name:
return control
def build_main_window(self):
"""Builds the main window."""
self._main_window = self.build_window("win_main", self._name)
def build_window(self, name="untitled", title="untitled"):
"""Helper function that builds a window using maya.cmds."""
window_exists = cmds.window(name, q=True, ex=True)
if window_exists:
cmds.deleteUI(name, window=True)
window = cmds.window(name, t=title, rtf=True, tlb=True)
cmds.showWindow(window)
return window
def build_main_layout(self):
"""Builds the main layout."""
self._main_frame = cmds.frameLayout("frm_main", p=self._main_window, lv=False, mh=5, mw=5)
self._main_layout = cmds.columnLayout("lay_main", p=self._main_frame, adj=True)
class CController(object):
"""Base class for a Colorium UI Controller."""
@property
def ui(self):
"""The UI associated with the controller."""
return self._ui
@ui.setter
def ui(self, value):
self._ui = value
def __init__(self):
self._ui = None
def display_ui_callback(self):
"""Called at the end of the method Display of the associated UI instance. Used for post-display operations."""
pass
class CToggleable():
"""\"Interface\" used to define toggleable controls."""
def toggle(self, value):
"""Method used to enable/disable the control."""
pass
class CUIElement(object):
"""Base class for a Colorium UI Element."""
@property
def name(self):
"""The name of the CUIElement."""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def title(self):
"""The title of the CUIElement."""
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def parent(self):
"""The name of the CUIElement's parent."""
return self._parent
@parent.setter
def parent(self, value):
self._parent = value
def __init__(self, name, title, parent):
self._name = name
self._title = title
self._parent = parent
class CLayout(CUIElement):
"""Base class for a Colorium UI Layout."""
def __init__(self, name, title, parent, childrens=None):
super(CLayout, self).__init__(name, title, parent)
self._childrens = childrens if childrens else []
def add_children(self, children):
"""Add a given children to the CLayout's childrens."""
if children not in self._childrens:
self._childrens.append(children)
def remove_children(self, children):
"""Remove a given children from the CLayout's childrens."""
if children in self._childrens:
self._childrens.remove(children)
class CControl(CUIElement, data_binding.CBindable):
"""Base class for a Colorium UI Control."""
def __init__(self, name, title, parent):
super(CControl, self).__init__(name, title, parent)
data_binding.CBindable.__init__(self)
def build_ui(self):
"""Method used to build the control's UI."""
pass
class CInlineLayout(CLayout):
"""Class representing an Inline Layout. It's childrens are placed side by side, aligned on the left or on the right."""
def __init__(self, name, title, parent, childrens=None, align="left"):
super(CInlineLayout, self).__init__(name, title, parent)
self._childrens = childrens if childrens else []
self._align = align
def build_ui(self):
"""Method used to build the control's UI."""
align_offset = 0
align_adjustement_column = len(self._childrens) + 1
if self._align == "right":
align_offset = 1
align_adjustement_column = 1
column_attachments = []
for children in range(1 + align_offset, len(self._childrens) + 1 + align_offset):
if children == 1 + align_offset:
column_attachments.append((children, "right", 2.5))
elif children == len(self._childrens) + align_offset:
column_attachments.append((children, "left", 2.5))
else:
column_attachments.append((children, "both", 2.5))
layout = cmds.rowLayout("lay_{}".format(self._name), p=self._parent, nc=len(self._childrens) + 1, cat=column_attachments, adj=align_adjustement_column)
if self._align == "right":
cmds.separator("sep_{}".format(self._name), vis=False)
if self._childrens:
for children in self._childrens:
children.parent = layout
children.build_ui()
if self._align == "left":
cmds.separator("sep_{}".format(self._name), vis=False)
class CTextInput(CControl, CToggleable):
"""Class that represents a Text Input. Used to write text."""
@property
def text(self):
"""The text written in the Text Input."""
return self.__text
@text.setter
def text(self, value):
self.__text = value
cmds.textField("txt_{}".format(self._name), e=True, tx=value)
self.notify_property_changed("text", value)
print('\'text\' property of CTextInput set to \'{}\''.format(value))
@property
def enabled(self):
"""The state of the Text Input."""
return self.__enabled
@enabled.setter
def enabled(self, value):
self.__enabled = value
cmds.checkBox("chk_{}".format(self._name), e=True, v=value)
cmds.control("txt_{}".format(self._name), e=True, en=value)
self.notify_property_changed("enabled", value)
print('\'enabled\' property of CTextInput set to \'{}\''.format(value))
def __init__(self, name, title, parent, enabled=True, changed_command=None, toggle_command=None, toggleable=False, default_value=""):
super(CTextInput, self).__init__(name, title, parent)
self.__text = default_value
self.__enabled = enabled
self.__toggleable = toggleable
self.__changed_command = changed_command if changed_command else lambda value: NotImplemented
self.__toggle_command = toggle_command if toggle_command else lambda value: NotImplemented
def build_ui(self):
"""Method used to build the control's UI."""
layout = cmds.rowLayout("lay_{}".format(self._name), p=self._parent, nc=3, cat=[(2, "right", 5)], cw=[(1, 25), (2, 100)], adj=3)
if self.__toggleable:
cmds.checkBox("chk_{}".format(self._name), p=layout, l="", v=self.__enabled, cc=self.toggle)
else:
cmds.separator("sep_{}".format(self._name), vis=False)
cmds.text("lbl_{}".format(self._name), p=layout, l=self._title, al="right")
cmds.textField("txt_{}".format(self._name), p=layout, tx=self.__text, en=self.__enabled, cc=self.text_changed)
def text_changed(self, value):
"""Method called when the Text Input's text changed."""
self.__changed_command(value)
self.text = value
def toggle(self, value):
"""Method called when the Text Input's state changed."""
self.__toggle_command(value)
self.enabled = value
cmds.control("txt_{}".format(self._name), e=True, en=value)
class CIntInput(CControl, CToggleable):
"""Class that represents a Int Input. Used to write numbers."""
@property
def value(self):
"""The number written in the Int Input."""
return self.__value
@value.setter
def value(self, value):
self.__value = value
cmds.intSliderGrp("int_{}".format(self._name), e=True, v=value)
self.notify_property_changed("value", value)
print('\'value\' property of CIntInput set to \'{}\''.format(value))
@property
def enabled(self):
"""The state of the Int Input."""
return self.__enabled
@enabled.setter
def enabled(self, value):
self.__enabled = value
cmds.checkBox("chk_{}".format(self._name), e=True, v=value)
cmds.control("int_{}".format(self._name), e=True, en=value)
self.notify_property_changed("enabled", value)
print('\'enabled\' property of CIntInput set to \'{}\''.format(value))
def __init__(self, name, title, parent, enabled=True, min_value=1, max_value=10, changed_command=None, toggle_command=None, toggleable=False, default_value=1):
super(CIntInput, self).__init__(name, title, parent)
self.__value = default_value
self.__enabled = enabled
self.__toggleable = toggleable
self.__min = min_value
self.__max = max_value
self.__changed_command = changed_command if changed_command else lambda value: NotImplemented
self.__toggle_command = toggle_command if toggle_command else lambda value: NotImplemented
def build_ui(self):
"""Method used to build the control's UI."""
layout = cmds.rowLayout("lay_{}".format(self._name), p=self._parent, nc=3, cat=[(2, "right", 5)], cw=[(1, 25), (2, 100)], adj=3)
if self.__toggleable:
cmds.checkBox("chk_{}".format(self._name), p=layout, l="", v=self.__enabled, cc=self.toggle)
else:
cmds.separator("sep_{}".format(self._name), vis=False)
cmds.text("lbl_{}".format(self._name), p=layout, l=self._title, al="right")
cmds.intSliderGrp("int_{}".format(self._name), p=layout, v=self.__value, f=True, min=self.__min, max=self.__max, en=self.__enabled, cc=self.value_changed)
def value_changed(self, value):
"""Method called when the Int Input's value changed."""
self.__changed_command(value)
self.value = value
def toggle(self, value):
"""Method called when the Int Input's state changed."""
self.__toggle_command(value)
self.enabled = value
cmds.control("int_{}".format(self._name), e=True, en=value)
class CComboInput(CControl, CToggleable):
"""Class that represents a Combo Input. Used to choose a value from a list of values."""
@property
def value(self):
"""The chosen value of the Combo Input."""
return self.__value
@value.setter
def value(self, value):
self.__value = value
cmds.optionMenu("cmb_{}".format(self._name), e=True, v=value)
self.notify_property_changed("value", value)
print('\'value\' property of CComboInput set to \'{}\''.format(value))
@property
def enabled(self):
"""The state of the Combo Input."""
return self.__enabled
@enabled.setter
def enabled(self, value):
self.__enabled = value
cmds.checkBox("chk_{}".format(self._name), e=True, v=value)
cmds.control("cmb_{}".format(self._name), e=True, en=value)
self.notify_property_changed("enabled", value)
print('\'enabled\' property of CComboInput set to \'{}\''.format(value))
def __init__(self, name, title, parent, enabled=True, items=None, changed_command=None, toggle_command=None, toggleable=False, default_value=""):
super(CComboInput, self).__init__(name, title, parent)
self.__value = default_value
self.__enabled = enabled
self.__toggleable = toggleable
self.__items = items if items else []
self.__changed_command = changed_command if changed_command else lambda value: NotImplemented
self.__toggle_command = toggle_command if toggle_command else lambda value: NotImplemented
def build_ui(self):
"""Method used to build the control's UI."""
layout = cmds.rowLayout("lay_{}".format(self._name), p=self._parent, nc=3, cat=[(2, "right", 5)], cw=[(1, 25), (2, 100)], adj=3)
if self.__toggleable:
cmds.checkBox("chk_{}".format(self._name), p=layout, l="", v=self.__enabled, cc=self.toggle)
else:
cmds.separator("sep_{}".format(self._name), vis=False)
cmds.text("lbl_{}".format(self._name), p=layout, l=self._title, al="right")
cmds.optionMenu("cmb_{}".format(self._name), p=layout, en=self.__enabled, cc=self.value_changed)
for item in self.__items:
cmds.menuItem("itm_{}_{}".format(self._name, item), l=item)
if self.__value in self.__items:
cmds.optionMenu("cmb_{}".format(self._name), e=True, v=self.__value)
def value_changed(self, value):
"""Method called when the Combo Input's value changed."""
self.__changed_command(value)
self.value = value
def toggle(self, value):
"""Method called when the Combo Input's state changed."""
self.__toggle_command(value)
self.enabled = value
cmds.control("cmb_{}".format(self._name), e=True, en=value)
class CFilePathInput(CControl, CToggleable):
"""Class that represents a File Path Input. Used to write a file path and open it in the explorer."""
@property
def text(self):
"""The file path written in the File Path Input."""
return self.__text
@text.setter
def text(self, value):
self.__text = value
cmds.textField("txt_{}".format(self._name), e=True, tx=value)
self.notify_property_changed("text", value)
print('\'text\' property of CFilePathInput set to \'{}\''.format(value))
@property
def enabled(self):
"""The state of the File Path Input."""
return self.__enabled
@enabled.setter
def enabled(self, value):
self.__enabled = value
cmds.checkBox("chk_{}".format(self._name), e=True, v=value)
cmds.control("txt_{}".format(self._name), e=True, en=value)
self.notify_property_changed("enabled", value)
print('\'enabled\' property of CFilePathInput set to \'{}\''.format(value))
def __init__(self, name, title, parent, enabled=True, changed_command=None, open_command=None, toggle_command=None, toggleable=False, default_value=""):
super(CFilePathInput, self).__init__(name, title, parent)
self.__text = default_value
self.__enabled = enabled
self.__toggleable = toggleable
self.__changed_command = changed_command if changed_command else lambda value: NotImplemented
self.__open_command = open_command if open_command else lambda value: NotImplemented
self.__toggle_command = toggle_command if toggle_command else lambda value: NotImplemented
def build_ui(self):
"""Method used to build the control's UI."""
layout = cmds.rowLayout("lay_{}".format(self._name), p=self._parent, nc=4, cat=[(2, "right", 5), (4, "left", 5)], cw=[(1, 25), (2, 100), (4, 100)], adj=3)
if self.__toggleable:
cmds.checkBox("chk_{}".format(self._name), p=layout, l="", v=self.__enabled, cc=self.toggle)
else:
cmds.separator("sep_{}".format(self._name), vis=False)
cmds.text("lbl_{}".format(self._name), p=layout, l=self._title, al="right")
cmds.textField("txt_{}".format(self._name), p=layout, tx=self.__text, en=self.__enabled, cc=self.text_changed)
cmds.button("btn_open_{}".format(self._name), p=layout, l="Open", w=60, c=self.__open_command)
def text_changed(self, value):
"""Method called when the File Path Input's value changed."""
self.__changed_command(value)
self.text = value
def toggle(self, value):
"""Method called when the File Path Input's state changed."""
self.__toggle_command(value)
self.enabled = value
cmds.control("txt_{}".format(self._name), e=True, en=value)
class CButtonControl(CControl):
"""Class that represents a Button Control. Used to execute a command."""
def __init__(self, name, title, parent, command=None):
super(CButtonControl, self).__init__(name, title, parent)
self._command = command if command else lambda value: NotImplemented
def build_ui(self):
"""Method used to build the control's UI."""
cmds.button("btn_{}".format(self._name), l=self._title, p=self._parent, w=60, c=self._command)
class CCheckControl(CControl):
"""Class that represents a Check Controle. Used to be turned on or off."""
@property
def value(self):
"""The value of the Check Control."""
return self.__value
@value.setter
def value(self, value):
self.__value = value
cmds.checkBox("chk_{}".format(self._name), e=True, v=value)
self.notify_property_changed("value", value)
print('\'value\' property of CCheckControl set to \'{}\''.format(value))
def __init__(self, name, title, parent, changed_command=None, default_value=False):
super(CCheckControl, self).__init__(name, title, parent)
self.__value = default_value
self.__changed_command = changed_command if changed_command else lambda value: NotImplemented
def build_ui(self):
"""Method used to build the control's UI."""
layout = cmds.rowLayout("lay_{}".format(self._name), p=self._parent, nc=3, cat=[(2, "right", 5)], cw=[(1, 25), (2, 100)], adj=3)
cmds.separator("sep_{}".format(self._name), vis=False)
cmds.text("lbl_{}".format(self._name), p=layout, l=self._title, al="right")
cmds.checkBox("chk_{}".format(self._name), p=layout, l="", v=self.__value, cc=self.value_changed)
def value_changed(self, value):
"""Method called when the Check Control's value changed."""
self.__changed_command(value)
self.value = value
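As a usage note, here is a minimal, hypothetical sketch (not part of Colorium; the tool and control names are illustrative) of how a tool could subclass CUI and CController and register a control. It assumes it runs inside Maya, where maya.cmds is importable.
# Hypothetical example tool built on the classes above.
class MyToolController(CController):
    def display_ui_callback(self):
        # Post-display hook; the base implementation is a no-op.
        print('UI displayed: {}'.format(self.ui.name))

class MyToolUI(CUI):
    def build_ui(self):
        # Controls added here are built by display_ui after the main layout exists.
        name_input = CTextInput(
            'asset_name',        # control name, used to derive widget names
            'Asset name',        # label shown next to the field
            self.main_layout,    # parent layout created by build_main_layout
            changed_command=lambda value: None,
        )
        self.add_control(name_input)

# MyToolUI('My Tool', MyToolController())  # builds the window and shows it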
|
StarcoderdataPython
|
1726135
|
#!/usr/bin/env python
import boto3
import datetime
import dateutil.parser as dp
import sys
import argparse
ec2 = boto3.resource('ec2')
def image_deregister(imageid):
try:
image = ec2.Image(imageid)
image_date = image.creation_date
except:
print "ImageId not found, or you do not have permission to deregister that ImageId"
exit()
try:
block_list = image.block_device_mappings
response = image.deregister()
print "Image: " + image.image_id + " Deleted"
for items in block_list:
if 'Ebs' in items:
snapshot = ec2.Snapshot(items['Ebs']['SnapshotId'])
response = snapshot.delete()
print "SnapshotID: " + items['Ebs']['SnapshotId'] + " Deleted"
except:
print "no EBS snapshots associated with imageid %s" % imageid
print "Deregistered ImageID: %s" % imageid
exit()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="Filename containing the ImageIds", default="NONE")
parser.add_argument("-t", "--time", default=1, help="Number of Days to keep, Images in Filename older than this will be deleted, default is to keep 1 day.", type=int)
parser.add_argument("-i", "--imageid", help="Imageid, or list of ImageIds to delete", default="NONE")
args = parser.parse_args()
if (args.imageid == "NONE" and args.file == "NONE"):
print "Filename or ImageId required, please see usage / help -h"
exit()
elif (args.imageid != "NONE"):
image_deregister(args.imageid)
exit()
#need to do some time adjustment to get this in seconds since epoch
#isoformat is needed because AWS uses ISO-formatted datetimes
today = datetime.datetime.now().isoformat()
#Parse the datetime to get it to seconds since epoch.
seconds_to_keep = args.time * 86400
today_parsed = dp.parse(today)
today_seconds = today_parsed.strftime('%s')
try:
backup_log = open(args.file, "r")
except:
print "Error: Unable to open file " + args.file
exit()
for line in backup_log.readlines():
if "ImageId" in line:
tag, imageidraw = line.split(':')
imageid = imageidraw[2:-2]
image = ec2.Image(imageid)
# I don't clean up my backup_log files (mainly so I have a historical reference)
# Because of that I just skip the ImageId if it doesn't exist.
try:
image_date_parsed = dp.parse(image.creation_date)
except:
continue #skip if it doesn't exist.
image_seconds = image_date_parsed.strftime('%s')
if (int(today_seconds) - seconds_to_keep) > int(image_seconds):
image_deregister(imageid)
backup_log.close()
if __name__ == "__main__":
main()
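For reference, the retention check above boils down to comparing the image's creation time, converted to seconds since the epoch, against "now minus the keep window". Below is a small stand-alone sketch of that comparison; the helper name and sample date are illustrative, not part of the script.
# Stand-alone sketch of the age test: an image qualifies for deregistration
# when its creation time is older than the keep window.
import calendar
import time
import dateutil.parser as dp

def image_is_expired(creation_date_iso, days_to_keep, now_epoch):
    # AWS reports creation_date as an ISO-8601 string, e.g. '2020-01-01T00:00:00.000Z'
    created_epoch = calendar.timegm(dp.parse(creation_date_iso).utctimetuple())
    return (now_epoch - days_to_keep * 86400) > created_epoch

print(image_is_expired('2020-01-01T00:00:00.000Z', 30, time.time()))  # True today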
|
StarcoderdataPython
|
283537
|
#!/usr/bin/env python3
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)
from http.server import BaseHTTPRequestHandler, HTTPServer
import socketserver
import sys
import dill
sys.path.append('../')
from SimulationEnviroment.SimulatorEnviroment import StreamingEnviroment
import os
import json
import random
os.environ['CUDA_VISIBLE_DEVICES']=''
import argparse
import numpy as np
import itertools
import datetime
import logging
################## ROBUST MPC ###################
S_INFO = 5 # bit_rate, buffer_size, rebuffering_time, bandwidth_measurement, chunk_til_video_end
S_LEN = 8 # take how many frames in the past
MPC_FUTURE_CHUNK_COUNT = 5
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BITRATE_REWARD = [1, 2, 3, 12, 15, 20]
BITRATE_REWARD_MAP = {0: 0, 300: 1, 750: 2, 1200: 3, 1850: 12, 2850: 15, 4300: 20}
M_IN_K = 1000.0
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
TOTAL_VIDEO_CHUNKS = 48
DEFAULT_QUALITY = 0 # default video quality without agent
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> this number of Mbps
SMOOTH_PENALTY = 1
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
RANDOM_SEED = 42
RAND_RANGE = 1000
SUMMARY_DIR = './results'
LOG_FILE = './results/log'
MILLISECONDS_IN_SEC = 1000
MAX_LOOKAHEAD = 3
MAX_LOOKBACK = 10
MAX_SWITCHES = 2
TREE_FOLDER = './src/video_server/trees/'
VIDEO_CSV_PATH = '../Data/Video_Info/Pensieve_Info/PenieveVideo_video_info'
TRACE_DUMMY = os.path.join(TREE_FOLDER, 'dummy_trace')
# in format of time_stamp bit_rate buffer_size rebuffer_time video_chunk_size download_time reward
NN_MODEL = None
CHUNK_COMBO_OPTIONS = []
# past errors in bandwidth
past_errors = []
past_bandwidth_ests = []
class OnlineStreaming(StreamingEnviroment):
def __init__(self, video_information_csv_path, max_lookback: int,
max_lookahead: int, max_switch_allowed: int):
logger.info("Initializing supeclass")
super().__init__(TRACE_DUMMY, video_information_csv_path, None, max_lookback, max_lookahead,
max_switch_allowed, 60*MILLISECONDS_IN_SEC)
"""
Pad the history arrays with zeros up to max_lookback.
"""
self.timestamp_arr = [0] * self.max_lookback
self.data_used_relative = [0] * self.max_lookback
self.quality_arr = [0] * self.max_lookback
self.downloadtime_arr = [0] * self.max_lookback
self.sleep_time_arr = [0] * self.max_lookback
self.buffer_size_arr = [0] * self.max_lookback
self.rebuffer_time_arr = [0] * self.max_lookback
self.video_chunk_remain_arr_relative = [0] * self.max_lookback
self.video_chunk_size_arr = [0] * self.max_lookback
self.rate_played_relative_arr = [0] * self.max_lookback
self.segment_length_arr = [0] * self.max_lookback
self.encoded_mbitrate_arr = [0] * self.max_lookback
self.single_mbitrate_arr = [0] * self.max_lookback
self.vmaf_arr = [0] * self.max_lookback
def copy(self):
return OnlineStreaming( self.video_information_csv_path,
self.max_lookback,
self.max_lookahead, self.max_switch_allowed)
def set_state(self, state):
self.video_chunk_counter = state['video_chunk_counter']
self.mahimahi_ptr = state['mahimahi_ptr']
self.buffer_size_ms = state['buffer_size_ms']
self.last_mahimahi_time = state['last_mahimahi_time']
self.last_quality = state['last_quality']
self.timestamp_s = state['timestamp_s']
self.data_used_bytes = state['data_used_bytes_relative']
self.timestamp_arr = self.timestamp_arr[:state['timestamp_s_arr_ptr']]
self.data_used_relative = self.data_used_relative[:state['data_used_bytes_arr_ptr']]
self.quality_arr = self.quality_arr[:state['quality_arr_ptr']]
self.downloadtime_arr = self.downloadtime_arr[:state['downloadtime_arr_ptr']]
self.sleep_time_arr = self.sleep_time_arr[:state['sleep_time_arr_ptr']]
self.buffer_size_arr = self.buffer_size_arr[:state['buffer_size_arr_ptr']]
self.rebuffer_time_arr = self.rebuffer_time_arr[:state['rebuffer_time_arr_ptr']]
self.video_chunk_remain_arr_relative = self.video_chunk_remain_arr_relative[:state['video_chunk_ptr']]
self.video_chunk_size_arr = self.video_chunk_size_arr[:state['video_chunk_size_ptr']]
self.rate_played_relative_arr = self.rate_played_relative_arr[:state['rate_played_relative_ptr']]
self.segment_length_arr = self.segment_length_arr[:state['segment_length_ptr']]
self.encoded_mbitrate_arr = self.encoded_mbitrate_arr[:state['encoded_mbitrate_ptr']]
self.single_mbitrate_arr = self.single_mbitrate_arr[:state['single_mbitrate_ptr']]
self.vmaf_arr = self.vmaf_arr[:state['vmaf_ptr']]
assert len(self.logging_file) >= state['logging_file_ptr'], 'We somehow lost logging data on the way'
self.logging_file = self.logging_file[:state['logging_file_ptr']]
def save_state(self):
return {'mahimahi_ptr': self.mahimahi_ptr,
'buffer_size_ms': self.buffer_size_ms,
'last_mahimahi_time': self.last_mahimahi_time,
'last_quality': self.last_quality,
'video_chunk_counter': self.video_chunk_counter,
'timestamp_s': self.timestamp_s,
'data_used_bytes_relative': self.data_used_bytes,
'video_chunk_size_ptr': len(self.video_chunk_size_arr),
'timestamp_s_arr_ptr': len(self.timestamp_arr),
'data_used_bytes_arr_ptr': len(self.data_used_relative),
'quality_arr_ptr': len(self.quality_arr),
'downloadtime_arr_ptr': len(self.downloadtime_arr),
'sleep_time_arr_ptr': len(self.sleep_time_arr),
'buffer_size_arr_ptr': len(self.buffer_size_arr),
'rebuffer_time_arr_ptr': len(self.rebuffer_time_arr),
'video_chunk_ptr': len(self.video_chunk_remain_arr_relative),
'logging_file_ptr': len(self.logging_file),
'rate_played_relative_ptr': len(self.rate_played_relative_arr),
'segment_length_ptr': len(self.segment_length_arr),
'encoded_mbitrate_ptr': len(self.encoded_mbitrate_arr),
'single_mbitrate_ptr': len(self.single_mbitrate_arr),
'vmaf_ptr': len(self.vmaf_arr)}
def get_video_chunk(self, quality, download_time_ms, rebuffer_time_ms, buffer_size_ms, timestamp_s, activate_logging=True):
assert quality >= 0
video_chunk_size = self.byte_size_match.iloc[self.video_chunk_counter, quality]
relative_encoded_bitrate = self.get_encoded_bitrate(quality) / self.get_encoded_bitrate(-1)
segment_length_ms = self.video_information_csv.iloc[
self.video_chunk_counter].seg_len_s * 1000.
encoded_mbitrate = self.get_encoded_bitrate(quality) * 1e-6
current_mbitrate = self.bitrate_match.iloc[self.video_chunk_counter, quality] * 1e-6
vmaf = self.vmaf_match.iloc[self.video_chunk_counter, quality]
# add in the new chunk
self.buffer_size_ms = buffer_size_ms # buffer size is in ms
# sleep if the buffer gets too large -> in a real environment it never gets bigger than the actual one
sleep_time_ms = 0
# the "last buffer size" return to the controller
# Note: in old version of dash the lowest buffer is 0.
# In the new version the buffer always have at least
# one chunk of video
self.video_chunk_counter += 1
video_chunk_remain = self.n_video_chunk - self.video_chunk_counter
end_of_video = False
if self.video_chunk_counter >= self.n_video_chunk:
end_of_video = True
self.data_used_bytes += video_chunk_size
self.timestamp_s = timestamp_s
self.timestamp_arr.append(self.timestamp_s)
self.data_used_relative.append(self.data_used_bytes / self.max_data_used)
self.quality_arr.append(quality)
self.downloadtime_arr.append(download_time_ms / MILLISECONDS_IN_SEC)
self.sleep_time_arr.append(sleep_time_ms / MILLISECONDS_IN_SEC)
self.buffer_size_arr.append(self.buffer_size_ms / MILLISECONDS_IN_SEC)
self.rebuffer_time_arr.append(rebuffer_time_ms / MILLISECONDS_IN_SEC)
self.video_chunk_size_arr.append(video_chunk_size)
self.video_chunk_remain_arr_relative.append(video_chunk_remain / float(self.n_video_chunk))
self.rate_played_relative_arr.append(relative_encoded_bitrate)
self.segment_length_arr.append(segment_length_ms / 1000.)
self.encoded_mbitrate_arr.append(encoded_mbitrate)
self.single_mbitrate_arr.append(current_mbitrate)
self.vmaf_arr.append(vmaf)
observation = self.generate_observation_dictionary()
self.last_quality = quality
return observation, end_of_video
def generate_observation_dictionary(self):
quality = self.quality_arr[-1]
observation = []
observation.append(self.timestamp_arr[-self.max_lookback:])
observation.append(self.data_used_relative[-self.max_lookback:])
observation.append(self.quality_arr[-self.max_lookback:])
observation.append(self.downloadtime_arr[-self.max_lookback:])
observation.append(self.sleep_time_arr[-self.max_lookback:])
observation.append(self.buffer_size_arr[-self.max_lookback:])
observation.append(self.rebuffer_time_arr[-self.max_lookback:])
observation.append(self.video_chunk_size_arr[-self.max_lookback:])
observation.append(self.video_chunk_remain_arr_relative[-self.max_lookback:])
observation.append(self.rate_played_relative_arr[-self.max_lookback:])
observation.append(self.segment_length_arr[-self.max_lookback:])
observation.append(self.encoded_mbitrate_arr[-self.max_lookback:])
observation.append(self.single_mbitrate_arr[-self.max_lookback:])
observation.append(self.vmaf_arr[-self.max_lookback:])
for switch in np.arange(quality - self.max_switch_allowed, quality + self.max_switch_allowed + 1):
switch = np.clip(switch, a_min=0, a_max=self.max_quality_level)
switch = int(switch)
future_chunk_size_arr = []
for lookahead in range(0, self.max_lookahead):
if self.video_chunk_counter + lookahead < self.n_video_chunk:
future_chunk_size = self.byte_size_match.iloc[self.video_chunk_counter + lookahead, switch]
else:
future_chunk_size = 0
future_chunk_size_arr.append(future_chunk_size)
observation.append(future_chunk_size_arr)
for switch in np.arange(quality - self.max_switch_allowed, quality + self.max_switch_allowed + 1):
switch = np.clip(switch, a_min=0, a_max=self.max_quality_level)
switch = int(switch)
future_chunk_size_arr = []
for lookahead in range(0, self.max_lookahead):
if self.video_chunk_counter + lookahead < self.n_video_chunk:
future_chunk_size = self.bitrate_match.iloc[self.video_chunk_counter + lookahead, switch]
else:
future_chunk_size = 0
future_chunk_size_arr.append(future_chunk_size)
observation.append(future_chunk_size_arr)
for switch in np.arange(quality - self.max_switch_allowed, quality + self.max_switch_allowed + 1):
switch = np.clip(switch, a_min=0, a_max=self.max_quality_level)
switch = int(switch)
future_chunk_size_arr = []
for lookahead in range(0, self.max_lookahead):
if self.video_chunk_counter + lookahead < self.n_video_chunk:
future_chunk_size = self.vmaf_match.iloc[self.video_chunk_counter + lookahead, switch]
else:
future_chunk_size = 0
future_chunk_size_arr.append(future_chunk_size)
observation.append(future_chunk_size_arr)
observation.append(self)
observation = {obs_key: obs_value for obs_key, obs_value in zip(self.get_obs_names(), observation)}
return observation
def get_past_dims(self):
return len([v for v in self.get_obs_names() if 'future' not in v]) - 1
def get_future_dims(self):
return len([v for v in self.get_obs_names() if 'future' in v])
def get_obs_names(self):
column_switches = ['future_chunk_size_byte_switch_%d' % switch for switch in np.arange(
-self.max_switch_allowed, self.max_switch_allowed + 1)]
column_switches += ['future_chunk_bitrate_switch_%d' % switch for switch in np.arange(
-self.max_switch_allowed, self.max_switch_allowed + 1)]
column_switches += ['future_chunk_vmaf_switch_%d' % switch for switch in np.arange(
-self.max_switch_allowed, self.max_switch_allowed + 1)]
return ['timestamp_s', 'data_used_bytes_relative', 'current_level', 'download_time_s', 'sleep_time_s',
'buffer_size_s', 'rebuffer_time_s', 'video_chunk_size_byte',
'relative_chunk_remain', 'relative_rate_played', 'segment_length_s', 'encoded_mbitrate',
'single_mbitrate',
'vmaf'] + column_switches + [
'streaming_environment']
def reset(self):
self.video_chunk_counter = 0
self.buffer_size_ms = 0
self.mahimahi_ptr = 1
self.last_quality = 0
self.logging_file = []
self.timestamp_s = 0
self.data_used_bytes = 0
"""
Pad the history arrays with zeros up to max_lookback.
"""
self.timestamp_arr = [0] * self.max_lookback
self.data_used_relative = [0] * self.max_lookback
self.quality_arr = [0] * self.max_lookback
self.downloadtime_arr = [0] * self.max_lookback
self.sleep_time_arr = [0] * self.max_lookback
self.buffer_size_arr = [0] * self.max_lookback
self.rebuffer_time_arr = [0] * self.max_lookback
self.video_chunk_remain_arr_relative = [0] * self.max_lookback
self.video_chunk_size_arr = [0] * self.max_lookback
self.rate_played_relative_arr = [0] * self.max_lookback
self.segment_length_arr = [0] * self.max_lookback
self.encoded_mbitrate_arr = [0] * self.max_lookback
self.single_mbitrate_arr = [0] * self.max_lookback
self.vmaf_arr = [0] * self.max_lookback
def make_request_handler(input_dict):
class Request_Handler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.input_dict = input_dict
self.log_file = input_dict['log_file']
#self.saver = input_dict['saver']
self.s_batch = input_dict['s_batch']
#self.a_batch = input_dict['a_batch']
#self.r_batch = input_dict['r_batch']
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_POST(self):
content_length = int(self.headers['Content-Length'])
post_data = json.loads(self.rfile.read(content_length))
if len(post_data) == 1: # message comes from the controller that suggests the optimal bitrate
self.send_response(200)
send_data = ''
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(send_data))
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(send_data.encode('utf-8'))
return
if ( 'pastThroughput' in post_data ):
# @Hongzi: this is just the summary of throughput/quality at the end of the load
# so we don't want to use this information to send back a new quality
print("Summary: {} ".format(post_data))
else:
if not 'streaming_env' in input_dict.keys():
logger.info("Creating streaming enviroment")
input_dict['streaming_env'] = OnlineStreaming(VIDEO_CSV_PATH, MAX_LOOKBACK, MAX_LOOKAHEAD, MAX_SWITCHES)
if not 'classifier' in input_dict.keys():
with open(TREE_FILENAME, 'rb') as fin:
logger.info("Loading classifier")
clas = dill.load(fin)
logger.info("Creating ABR Policy Classifier")
input_dict['classifier'] = clas
if not 'time_start' in input_dict.keys() :
input_dict['time_start'] = datetime.datetime.now()
streaming_env = input_dict['streaming_env']
classifier = input_dict['classifier']
time_start = input_dict['time_start']
rebuffer_time = float(post_data['RebufferTime'] -self.input_dict['last_total_rebuf'])
reward = VIDEO_BIT_RATE[post_data['lastquality']] / M_IN_K \
- REBUF_PENALTY * rebuffer_time / M_IN_K \
- SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[post_data['lastquality']] -
self.input_dict['last_bit_rate']) / M_IN_K
self.input_dict['last_bit_rate'] = VIDEO_BIT_RATE[post_data['lastquality']]
self.input_dict['last_total_rebuf'] = post_data['RebufferTime']
# compute bandwidth measurement
video_chunk_fetch_time = post_data['lastChunkFinishTime'] - post_data['lastChunkStartTime']
video_chunk_size = post_data['lastChunkSize']
# compute number of video chunks left
video_chunk_remain = TOTAL_VIDEO_CHUNKS - self.input_dict['video_chunk_coount']
self.input_dict['video_chunk_coount'] += 1
time = datetime.datetime.now()
time_str = datetime.time(time.hour, time.minute, time.second, time.microsecond)
# log wall_time, bit_rate, buffer_size, rebuffer_time, video_chunk_size, download_time, reward
self.log_file.write(str(time_str) + '\t' +
str(VIDEO_BIT_RATE[post_data['lastquality']]) + '\t' +
str(post_data['buffer']) + '\t' +
str(rebuffer_time / M_IN_K) + '\t' +
str(video_chunk_size) + '\t' +
str(video_chunk_fetch_time) + '\t' +
str(reward) + '\n')
self.log_file.flush()
time_relative = (time - time_start).total_seconds()
logger.info("Retrieving Observation Dictionary")
logger.info("Input parameters: last quality (INDEX) {},\n\
video_chunk_fetch_time (MS) {},\n\
rebuffer_time {} (MS),\n\
buffer (MS) {},\n\
time elapsed since playback started (S) {}".format( post_data['lastquality'],
video_chunk_fetch_time,
rebuffer_time,
post_data['buffer']*MILLISECONDS_IN_SEC,
time_relative) )
observation_dictionary, end_of_video = streaming_env.get_video_chunk(post_data['lastquality'],
video_chunk_fetch_time,
rebuffer_time,
post_data['buffer']*MILLISECONDS_IN_SEC,
time_relative)
import pprint
obs_dict_string = pprint.pformat(observation_dictionary)
logger.debug(obs_dict_string)
send_data = str(classifier.next_quality(observation_dictionary, reward))
logger.info("Selected quality {}".format(send_data))
end_of_video = False
if ( post_data['lastRequest'] == TOTAL_VIDEO_CHUNKS ):
send_data = "" # send_data = "REFRESH" we don't want the video to restart
end_of_video = True
self.input_dict['last_total_rebuf'] = 0
self.input_dict['last_bit_rate'] = DEFAULT_QUALITY
self.input_dict['video_chunk_coount'] = 0
self.log_file.write('\n') # so that in the log we know where video ends
print("done_successful") # signal player finished a video
sys.stdout.flush()
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(send_data))
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(send_data.encode('utf-8'))
def do_GET(self):
self.send_response(200)
#self.send_header('Cache-Control', 'Cache-Control: no-cache, no-store, must-revalidate max-age=0')
self.send_header('Cache-Control', 'max-age=3000')
self.send_header('Content-Length', 20)
self.end_headers()
self.wfile.write("console.log('here');")
def log_message(self, format, *args):
return
return Request_Handler
def run(server_class=HTTPServer, port=8333, log_file_path=LOG_FILE):
np.random.seed(RANDOM_SEED)
if not os.path.exists(SUMMARY_DIR):
os.makedirs(SUMMARY_DIR)
# make chunk combination options
for combo in itertools.product([0,1,2,3,4,5], repeat=5):
CHUNK_COMBO_OPTIONS.append(combo)
with open(log_file_path, 'w') as log_file:
s_batch = [np.zeros((S_INFO, S_LEN))]
last_bit_rate = DEFAULT_QUALITY
last_total_rebuf = 0
# need this storage, because the observation only contains the total rebuffering time;
# we compute the difference to get the rebuffering time of the last chunk
video_chunk_count = 0
input_dict = {'log_file': log_file,
'last_bit_rate': last_bit_rate,
'last_total_rebuf': last_total_rebuf,
'video_chunk_coount': video_chunk_count,
's_batch': s_batch}
# interface to abr_rl server
handler_class = make_request_handler(input_dict=input_dict)
# serve on the given port, falling back to a random port if it is already in use
while True:
try:
server_address = ('localhost', port)
httpd = server_class(server_address, handler_class)
except socketserver.socket.error:
port = random.randint(2000,65535)
else:
break
print('Listening on port ' + str(port))
sys.stdout.flush()
httpd.serve_forever()
def main():
if len(sys.argv) > 6:
parser = argparse.ArgumentParser()
parser.add_argument('port', help='port of the server', type=int)
parser.add_argument('abr', help='abr under test', type=str)
parser.add_argument('trace_file', help='traces file folder', type=str)
parser.add_argument('timeout', help='timeout', type=int)
parser.add_argument("result_dir", help='results directory', type=str)
parser.add_argument('stream_id', help='stream id', type=int)
parser.add_argument("--debug", action="store_true", help='If selected, logging also to debug level')
parser.add_argument("--display", action="store_true", help='If selected, logging also to stderr')
global args
args = parser.parse_args()
global TREE_FILENAME
TREE_FILENAME = os.path.join(TREE_FOLDER, args.abr, 'classifier')
form = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logfile = os.path.join(args.result_dir, "log/{}_abr_server_{}.log".format(args.abr,args.stream_id))
llevel = logging.INFO
if args.debug:
llevel = logging.DEBUG
global logger
logger = logging.getLogger("{} ABR SERVER {}".format(args.abr, args.stream_id))
logger.setLevel(llevel)
fo = logging.FileHandler(logfile, mode = 'a')
formatter = logging.Formatter(form)
fo.setLevel(llevel)
fo.setFormatter(formatter)
logger.addHandler(fo)
if args.display:
so = logging.StreamHandler(sys.stderr)
so.setLevel(llevel)
so.setFormatter(formatter)
logger.addHandler(so)
log_file_p = os.path.join(args.result_dir, 'result')
if not os.path.exists(log_file_p):
os.makedirs(log_file_p, 0o777)
log_file_abr_server = os.path.join(log_file_p, '{}_rewards_{}.log'.format(args.abr,args.stream_id))
logger.info('Running with arguments passed {}'.format(args.trace_file))
run(port=args.port, log_file_path=log_file_abr_server)
logger.info('Listening on port ' + str(args.port))
else:
run()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Keyboard interrupted.")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
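For clarity, the linear QoE reward computed in do_POST can be checked by hand. The constants below are copied from the top of the file; the helper function and the worked numbers are only an illustration, not part of the server.
# reward = bitrate/M_IN_K - REBUF_PENALTY * rebuffer_ms/M_IN_K
#          - SMOOTH_PENALTY * |bitrate - last_bitrate| / M_IN_K
VIDEO_BIT_RATE = [300, 750, 1200, 1850, 2850, 4300]  # Kbps
M_IN_K = 1000.0
REBUF_PENALTY = 4.3   # 1 s of rebuffering costs as much as 4.3 Mbps of quality
SMOOTH_PENALTY = 1

def qoe_reward(last_quality, prev_bitrate_kbps, rebuffer_ms):
    bitrate = VIDEO_BIT_RATE[last_quality]
    return (bitrate / M_IN_K
            - REBUF_PENALTY * rebuffer_ms / M_IN_K
            - SMOOTH_PENALTY * abs(bitrate - prev_bitrate_kbps) / M_IN_K)

# Switching from 1200 to 1850 Kbps with 200 ms of rebuffering:
# 1.85 - 4.3 * 0.2 - 0.65 = 0.34
print(qoe_reward(3, 1200, 200))  # -> ~0.34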
|
StarcoderdataPython
|