id | text | dataset_id
---|---|---|
3588290
|
<filename>src/psiopic2/app/baseApp.py
import sys
from datetime import datetime
from docopt import docopt
from psiopic2.app.ui.prompt import ask, prompt
import logging
import textwrap
from psiopic2.app.tasks import TaskException
import traceback
BASE_OPTIONS = """Options:
--no-widgets Turn off all widgets, useful for logfiles
--no-colors Turn off any coloring
-d Enable debugging
-h --help Show this help information
-v --version Version information
"""
class Arg(object):
def __init__(self, **kwargs):
self.title = kwargs.get('title') or kwargs.get('name')
self.name = kwargs.get('name')
self.description = kwargs.get('description')
self.defaultValue = kwargs.get('defaultValue')
self.valueDesc = kwargs.get('valueDesc', '')
self.prompt = kwargs.get('prompt', True)
self.smallName = kwargs.get('smallName', '')
self.startTime = None
self.endTime = None
BASE_ARGS = [
Arg(
title="Force",
name='force',
description='Do not prompt user for missing settings. Use defaults',
defaultValue=False,
prompt=False
),
Arg(
title='No Widgets',
name='no-widgets',
description='Turn off all widgets, useful for logfiles',
defaultValue=False,
prompt=False
),
Arg(
title='No Colours',
name='no-colors',
description='Turn off all colours',
defaultValue=False,
prompt=False
),
Arg(
title='Debug',
name='debug',
smallName='d',
description='Turn on debugging',
defaultValue=False,
prompt=False
),
Arg(
title='Help',
name='help',
smallName='h',
description='Display this help',
defaultValue=False,
prompt=False
),
Arg(
title='Version',
name='version',
description='Display product version',
defaultValue=False,
prompt=False
)
]
class BaseApp(object):
def __init__(self, name, argv, version=None, title=None, description=None):
self._argv = argv
self._name = name
self._docString = None
self._version = version
self._availArgs = None
self._args = None
self._cliArgs = None
self._workQueue = []
self._log = None
self.description = description
self.title = title or name
def help(self):
sys.stdout.write(self.getDocString())
def version(self):
sys.stdout.write(self._version)
sys.stdout.write("\n")
def getLogger(self):
if self._log is None:
self._log = logging.getLogger('psiopic.' + self._name)
return self._log
def addArg(self, name, title=None, description=None, defaultValue=None, prompt=True):
if self._availArgs is None:
self._availArgs = []
if isinstance(name, Arg):
self._availArgs.append(name)
else:
self._availArgs.append(Arg(
name=name,
title=title,
description=description,
defaultValue=defaultValue,
prompt=prompt
))
def getDocString(self):
if self._docString is None:
docString = ""
if self.title:
docString += self.title + "\n"
if self.description:
docString += "\n" + self.description
docString += "\nUsage: psiopic2 %s [options]\n" % self._name
docString += "\n"
docString += "Options (%s):\n" % self._name
# find longest argument name
longestLen = max(len(arg.name) + len(arg.valueDesc) + len(arg.smallName) for arg in self._availArgs)
longestLen = longestLen + 12 # allow for additional formatting
descriptionWidth = 80 - longestLen
for arg in self._availArgs:
descriptionLines = textwrap.wrap(arg.description, descriptionWidth)
optString = " "
if arg.smallName:
optString += "-%s " % (arg.smallName)
if arg.name and arg.name != arg.smallName:
optString += "--%s" % (arg.name)
if arg.valueDesc:
optString += '=%s' % (arg.valueDesc)
optString += ' '
if len(optString) < longestLen:
optString += " " * (longestLen - len(optString))
docString += optString + '%s\n' % (descriptionLines.pop(0))
if len(descriptionLines) > 0:
for line in descriptionLines:
docString += " " * longestLen
docString += line + "\n"
docString += "\n"
# docString += BASE_OPTIONS
self._docString = docString
return self._docString
def getCliArgs(self):
if self._cliArgs is None:
self._cliArgs = docopt(self.getDocString(), self._argv[1:], False)
return self._cliArgs
def getArg(self, argName):
return self._args[argName]
def addTask(self, task, taskOpts, taskTitle=None):
if taskTitle is None:
taskTitle = task.__name__
self._workQueue.append((task, taskOpts, taskTitle))
def initConfiguration(self):
if self._args is None:
self._args = {}
cliArgs = self.getCliArgs()
argValue = None
for arg in self._availArgs:
if '--' + arg.name in cliArgs and cliArgs['--' + arg.name] != None:
argValue = cliArgs['--' + arg.name]
elif arg.prompt and cliArgs['--force'] != True:
argValue = ask(arg.title, arg.defaultValue)
else:
argValue = arg.defaultValue
self._args[arg.name] = argValue
def run(self):
ret = 0
log = self.getLogger()
self.startTime = datetime.now()
self.initConfiguration()
if len(self._workQueue) == 0:
log.warning("There are no tasks in the work queue, is this app setup properly?")
else:
try:
for taskClass, taskOpts, taskTitle in self._workQueue:
task = taskClass(self._args)
log.info(taskTitle or task.name)
task.run(**taskOpts)
except TaskException as e:
log.error('%s failed' % task.name)
log.exception(e)
ret = 1
except Exception as e:
log.critical('Unhandled exception from %s' % taskTitle)
log.critical(traceback.format_exc())
ret = 1
self.endTime = datetime.now() - self.startTime
log.info('Completed in: %s' % self.endTime)
return ret
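
# --- Hedged usage sketch, not part of the original module ---
# It only illustrates the flow BaseApp is built for: register arguments with
# addArg(), queue work with addTask(), then call run(). The _GreetTask class,
# the _GreetApp subclass and the 'name' argument are hypothetical and exist
# purely for illustration; prompting is disabled so defaults are used.
class _GreetTask(object):
    name = 'greet'

    def __init__(self, args):
        # BaseApp.run() hands the resolved argument dict to the task constructor
        self.args = args

    def run(self, **opts):
        sys.stdout.write('Hello %s\n' % self.args.get('name'))


class _GreetApp(BaseApp):
    def __init__(self, argv):
        BaseApp.__init__(self, 'greet', argv, version='0.0.1', title='Greeter')
        for arg in BASE_ARGS:
            self.addArg(arg)
        self.addArg('name', title='Name', description='Who to greet',
                    defaultValue='world', prompt=False)
        self.addTask(_GreetTask, {}, 'Greeting')


if __name__ == '__main__':
    # in the real package, argv would come from the psiopic2 entry point
    sys.exit(_GreetApp(['psiopic2', 'greet']).run())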
|
StarcoderdataPython
|
1639003
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from jamendo.models import Artist, Album, Track, License, Language,\
Country, State, City, Playlist, Radio, JamendoUser, Genre, Review
admin.site.register(Artist)
admin.site.register(Album)
admin.site.register(Track)
admin.site.register(License)
admin.site.register(Language)
admin.site.register(Country)
admin.site.register(State)
admin.site.register(City)
admin.site.register(Playlist)
admin.site.register(Radio)
admin.site.register(JamendoUser)
admin.site.register(Genre)
admin.site.register(Review)
|
StarcoderdataPython
|
3388119
|
from twittertail.get_twitter_values import GetTwitterValues
from twittertail.exceptions import (
FailedToGetTwitterValueException,
FailedToGetTweetsException
)
from html import unescape
import requests
import re
class GetTweetsAPI:
'''
GetTweetsAPI mimics the process a browser uses when accessing a Twitter
user's tweet timeline in an unauthenticated session. To do this, it:
1. Gets the "guest token" from the Twitter markup, and uses it in the
"x-guest-token" request header in the API call.
2. Gets the bearer token from the Twitter main.js, and uses it in the
"authorization" request header in the API call.
3. Gets the user id for the supplied username from a GraphQL query.
4. Queries the Twitter API at /2/timeline/profile/X.json, where X is the
user id.
Arguments:
user (str): the username of the Twitter user to query against.
retweets (bool): toggles whether to return retweets
Raises:
FailedToGetTweetsException: if the username is invalid by Twitter
standards.
'''
def __init__(self, user, retweets=False):
if len(user) > 15 or re.search('[^a-zA-Z0-9_]+', user):
raise FailedToGetTweetsException(
'Invalid username - Twitter usernames must be 15 or fewer '
'characters in length, and must be alphanumeric only '
'(with underscores).'
)
self.user = user
self.retweets = retweets
self.__refresh_twitter_values()
self.s_twitter = requests.session()
self.headers = {
'x-guest-token': self.gt,
'authorization': 'Bearer %s' % (self.bearer_token),
'content-type': 'application/json'
}
self.user_id = self.__get_user_id()
def __refresh_twitter_values(self):
(
self.gt,
self.bearer_token,
self.query_id
) = self.__get_new_twitter_values()
def __get_new_twitter_values(self):
'''
Collect the values needed to make an unauthenticated request to the
Twitter API for a user's timeline, using GetTwitterValues.
Raises:
FailedToGetTweetsException: if values could not be retrieved.
Return:
gt, bearer_token, query_id (tuple): the API values.
'''
twitter_values = GetTwitterValues()
try:
gt = twitter_values.get_guest_token()
bearer_token = twitter_values.get_bearer_token()
query_id = twitter_values.get_query_id()
except FailedToGetTwitterValueException as e:
raise FailedToGetTweetsException(e)
return (gt, bearer_token, query_id)
def get_twitter_values(self):
'''
Returns the current tokens retrieved from Twitter.
Returns:
tuple: the guest token, bearer token, and query id
'''
return (self.gt, self.bearer_token, self.query_id)
def __get_user_id(self):
'''
Gets the id of the Twitter username supplied, by querying the GraphQL
API's "UserByScreenName" operation.
Raises:
FailedToGetTweetsException: if the user id could not be retrieved.
Returns:
user_id (str): the user id.
'''
user_id = None
url = (
'https://api.twitter.com/graphql/%s/UserByScreenName'
% (self.query_id)
)
params = {
'variables': (
'{"screen_name":"%s","withHighlightedLabel":true}'
% (self.user)
)
}
try:
r = self.s_twitter.get(
url,
params=params,
headers=self.headers
)
graph_ql_json = r.json()
except Exception as e:
raise FailedToGetTweetsException(
'Failed to get the user id, request excepted with: %s'
% (str(e))
)
try:
user_id = graph_ql_json['data']['user']['rest_id']
except KeyError:
raise FailedToGetTweetsException(
'Failed to get the user id, could not find user rest_id in '
'GraphQL response.'
)
return user_id
def get_tweets(
self,
count=None,
refresh_tokens=False,
last_id=None
):
'''
Queries the Twitter API using a guest token and authorization bearer
token retrieved from GetTwitterValues().
Arguments:
count (int): the number of tweets to get.
refresh_tokens (bool): get new tokens before checking for tweets.
last_id (int): the id of the latest seen tweet.
Raises:
FailedToGetTweetsException: if tweets could not be retrieved.
Returns:
tweets (list): a list of (timestamp, text, id) tuples for the
user, sorted ascending in time, limited by the 'count'
argument.
'''
if refresh_tokens:
self.__refresh_twitter_values()
tweets = list()
try:
url = (
'https://api.twitter.com/2/timeline/profile/%s.json'
% self.user_id
)
# the 'count' param in this query is actually a maximum,
# where deleted or suspended tweets are removed after the
# count is applied, so we don't supply a count param in
# the API query and instead apply it to the response data
# later.
params = {
'tweet_mode': 'extended'
}
r = self.s_twitter.get(url, headers=self.headers, params=params)
timeline_json = r.json()
except Exception as e:
raise FailedToGetTweetsException(
'Failed to get tweets, request excepted with: %s'
% (str(e))
)
try:
tweets_json = timeline_json['globalObjects']['tweets']
if self.retweets:
tweet_ids = list(int(x) for x in tweets_json.keys())
else:
tweet_ids = list(
int(x) for x in tweets_json.keys()
if 'retweeted_status_id_str' not in tweets_json[x]
)
# if a last id value is supplied, only return tweets with an id
# greater than it
if last_id:
tweet_ids = list(x for x in tweet_ids if x > last_id)
if len(tweet_ids) > 0:
# an assumption here is the display should be oldest to newest,
# i.e. opposite to Twitter's UI, as it makes more sense in a
# cli environment.
tweet_ids.sort(reverse=True)
tweet_ids_culled = tweet_ids[:count]
tweet_ids_culled.sort()
tweets = (
list(
(
tweets_json[str(x)]['created_at'],
unescape(tweets_json[str(x)]['full_text']),
tweets_json[str(x)]['id_str']
)
for x in tweet_ids_culled
)
)
except KeyError:
raise FailedToGetTweetsException('Failed to get tweets.')
return tweets
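
# --- Hedged usage sketch, not part of the original module ---
# It only exercises the public flow described in the docstrings above: build a
# GetTweetsAPI for a username, then iterate over the (created_at, text, id_str)
# tuples that get_tweets() returns. The username is a placeholder and live
# network access to Twitter's unauthenticated endpoints is assumed.
if __name__ == '__main__':
    try:
        api = GetTweetsAPI('twitter', retweets=False)
        for created_at, text, tweet_id in api.get_tweets(count=5):
            print('%s  %s' % (created_at, text))
    except FailedToGetTweetsException as e:
        print('Could not fetch tweets: %s' % e)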
|
StarcoderdataPython
|
4914791
|
import pytask
from src.config import BLD
from src.config import SRC
import numpy as np
import json
from itertools import product
import pandas as pd
from statsmodels.nonparametric.kernel_regression import KernelReg
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from src.conformal_methods.utils import flatten, generate_X_fixed_positions, generate_y_fixed_positions, x_scale, construc_cond_metric_df_simulation
from src.conformal_methods.split_conformal_inference import SplitConformalRegressor
from src.conformal_methods.r_objects import QuantregForest
def conc_tuples(index_prep, method, i):
conc = (method, i)
return index_prep + conc
def run_simulation(specs, produces):
methods = ["mean-based", "weighted-mean-based", "quantile-based", "cdf-based"]
simulation_ids = np.arange(specs["n_sims"])
index_prep = [(
specs["n"],
specs["p"],
specs["X_dist"],
specs["X_correlation"],
specs["eps_dist"],
specs["error_type"],
specs["functional_form"],
specs["non_zero_beta_count"],
specs["uniform_upper"],
bool(int(specs["standardized_X"])),
)
]
index = product(index_prep, methods, simulation_ids)
index = flatten(l=list(index))
index = pd.MultiIndex.from_tuples(
index,
names=(
"n",
"p",
"X_dist",
"X_correlation",
"eps_dist",
"error_type",
"functional_form",
"non_zero_beta_count",
"uniform_upper",
"standardized_X",
"method",
"simulation_id",
),
)
df = pd.DataFrame(
columns=[
"mean_interval_length",
"mean_coverage",
"conditional_metrics_df",
],
index=index,
)
same_case_as_previous_round = False
for index in df.index:
if index[11] != 0:
print("Previous index is: " + str(previous_index))
print("Current index is: " + str(index))
if index[11] != 0:
same_case_as_previous_round = True
for i in range(len(previous_index) - 1):
if previous_index[i] != index[i]:
same_case_as_previous_round = False
total_sample = index[0] + specs["pred_samples"]
# draw samples:
X = generate_X_fixed_positions(
n=total_sample,
p=index[1],
X_dist=index[2],
cor=index[3],
standardize=index[9],
uniform_upper=index[8],
)
if index[6] == "stochastic_poisson":
y = generate_y_fixed_positions(
X_mat=X,
eps_dist=index[4],
error_type=index[5],
functional_form=index[6],
non_zero_beta_count=index[7],
)
else:
y, eps, sigma_vec, mu, beta = generate_y_fixed_positions(
X_mat=X,
eps_dist=index[4],
error_type=index[5],
functional_form=index[6],
non_zero_beta_count=index[7],
)
X_predict, X_split_again, y_predict, y_split_again = train_test_split(
X, y, train_size=specs["pred_samples"]
)
# X_train, X_conf, y_train, y_conf = train_test_split(
# X_split_again, y_split_again, train_size=specs["train_size"]
# )
if X_split_again.shape[1] > 1:
max_features = round(X_split_again.shape[1] / 3)
elif X_split_again.shape[1] == 1:
max_features = 1
else:
raise ValueError("X has a dimensionality problem, missing regressors.")
if (index[10] == "mean-based"):
reg = SplitConformalRegressor(RandomForestRegressor, method="mean-based", conf_size=1-specs["train_size"], quantiles_to_fit=np.array([0.05,0.95]))
reg = reg.fit(X=X_split_again, y=y_split_again, params={"min_samples_leaf": specs["nodesize"],
"max_features": max_features,
"n_estimators": specs["n_estimators"]})
res = reg.predict_intervals(X_pred=X_predict, alpha=0.1)
elif (index[10] == "weighted-mean-based"):
reg = SplitConformalRegressor(RandomForestRegressor, method="weighted-mean-based", conf_size=1-specs["train_size"], quantiles_to_fit=np.array([0.05,0.95]))
reg = reg.fit(X=X_split_again, y=y_split_again, params={"min_samples_leaf": specs["nodesize"],
"max_features": max_features,
"n_estimators": specs["n_estimators"]})
res = reg.predict_intervals(X_pred=X_predict, alpha=0.1)
elif (index[10] == "quantile-based"):
reg = SplitConformalRegressor(QuantregForest, method="quantile-based", conf_size=1-specs["train_size"], quantiles_to_fit=np.array([0.05,0.95]))
reg = reg.fit(X=X_split_again, y=y_split_again, params={"nodesize": specs["nodesize"], "mtry": max_features})
res = reg.predict_intervals(X_pred=X_predict, alpha=0.1)
elif (index[10] == "cdf-based"):
reg = SplitConformalRegressor(QuantregForest, method="cdf-based", conf_size=1-specs["train_size"])
reg = reg.fit(X=X_split_again, y=y_split_again, params={"nodesize": specs["nodesize"], "mtry": max_features})
res = reg.predict_intervals(X_pred=X_predict, alpha=0.1)
else:
raise ValueError("Method misspecified.")
# determine metrics:
length_bands = res[:, 1] - res[:, 0]
mean_interval_length = np.mean(length_bands)
in_the_range = np.sum((y_predict.flatten() >= res[:, 0]) & (y_predict.flatten() <= res[:, 1]))
mean_coverage = in_the_range / len(y_predict)
# this determines which x-scale should be used for the later plots (X in univariate, or X*beta in multivariate case)
if index[5] == "simple_linear": # these are process_types 3 and 4 (univariate)
x_scale_diag = x_scale(X_mat=X_predict, error_type=index[5])
else:
linear_part = X_predict @ beta
x_scale_diag = x_scale(X_mat=X_predict, error_type=index[5], linear_part=linear_part)
cond_metrics_df = construc_cond_metric_df_simulation(x_scale=x_scale_diag, result_pred_bands=res, y_predict=y_predict)
df.at[index, "mean_interval_length"] = mean_interval_length
df.at[index, "mean_coverage"] = mean_coverage
df.at[index, "conditional_metrics_df"] = cond_metrics_df
previous_index = index
# after for loop and calculation, write average metrics into file:
result = (df[["mean_interval_length", "mean_coverage"]].groupby(by=["method"]).sum() / specs["n_sims"])
result.to_csv(produces["average_metrics_df"])
# the following generates the kernel regression estimates for the four methods:
for i in range(specs["n_sims"]):
if i == 0:
res_mean_based = df.at[conc_tuples(index_prep=index_prep[0], method="mean-based", i=i), "conditional_metrics_df",]
# res_mean_based = df.at[(200, 10, "mixture", "auto", "t", "varying_third_moment_mu", "linear", 5, 1, True, "pred_band_mean_based", i), "conditional_metrics_df"]
else:
tmp = df.at[conc_tuples(index_prep=index_prep[0], method="mean-based", i=i), "conditional_metrics_df",]
res_mean_based = np.concatenate((res_mean_based, tmp), axis=0)
for i in range(specs["n_sims"]):
if i == 0:
res_weighted_mean_based = df.at[
conc_tuples(
index_prep=index_prep[0], method="weighted-mean-based", i=i
),
"conditional_metrics_df",
]
else:
tmp = df.at[
conc_tuples(
index_prep=index_prep[0], method="weighted-mean-based", i=i
),
"conditional_metrics_df",
]
res_weighted_mean_based = np.concatenate((res_weighted_mean_based, tmp), axis=0)
for i in range(specs["n_sims"]):
if i == 0:
res_quantile_based = df.at[
conc_tuples(
index_prep=index_prep[0], method="quantile-based", i=i
),
"conditional_metrics_df",
]
else:
tmp = df.at[
conc_tuples(
index_prep=index_prep[0], method="quantile-based", i=i
),
"conditional_metrics_df",
]
res_quantile_based = np.concatenate((res_quantile_based, tmp), axis=0)
for i in range(specs["n_sims"]):
if i == 0:
res_cdf_based = df.at[
conc_tuples(index_prep=index_prep[0], method="cdf-based", i=i),
"conditional_metrics_df",
]
else:
tmp = df.at[
conc_tuples(index_prep=index_prep[0], method="cdf-based", i=i),
"conditional_metrics_df",
]
res_cdf_based = np.concatenate((res_cdf_based, tmp), axis=0)
upper = 0.999
lower = 0.001
df_mean_based = pd.DataFrame(
{
"x_scale": res_mean_based[:, 0],
"length": res_mean_based[:, 1],
"coverage": res_mean_based[:, 2],
}
)
df_w_mean_based = pd.DataFrame(
{
"x_scale": res_weighted_mean_based[:, 0],
"length": res_weighted_mean_based[:, 1],
"coverage": res_weighted_mean_based[:, 2],
}
)
df_quantile_based = pd.DataFrame(
{
"x_scale": res_quantile_based[:, 0],
"length": res_quantile_based[:, 1],
"coverage": res_quantile_based[:, 2],
}
)
df_cdf_based = pd.DataFrame(
{
"x_scale": res_cdf_based[:, 0],
"length": res_cdf_based[:, 1],
"coverage": res_cdf_based[:, 2],
}
)
Q3 = df_mean_based.x_scale.quantile(upper)
Q1 = df_mean_based.x_scale.quantile(lower)
df_mean_based_cleaned = df_mean_based[
(df_mean_based.x_scale < Q3) & (df_mean_based.x_scale > Q1)
]
Q3 = df_w_mean_based.x_scale.quantile(upper)
Q1 = df_w_mean_based.x_scale.quantile(lower)
df_w_mean_based_cleaned = df_w_mean_based[
(df_w_mean_based.x_scale < Q3) & (df_w_mean_based.x_scale > Q1)
]
Q3 = df_quantile_based.x_scale.quantile(upper)
Q1 = df_quantile_based.x_scale.quantile(lower)
df_quantile_based_cleaned = df_quantile_based[
(df_quantile_based.x_scale < Q3) & (df_quantile_based.x_scale > Q1)
]
Q3 = df_cdf_based.x_scale.quantile(upper)
Q1 = df_cdf_based.x_scale.quantile(lower)
df_cdf_based_cleaned = df_cdf_based[
(df_cdf_based.x_scale < Q3) & (df_cdf_based.x_scale > Q1)
]
x_scales_merged = np.concatenate(
(
np.array(df_mean_based_cleaned["x_scale"]),
np.array(df_w_mean_based_cleaned["x_scale"]),
np.array(df_quantile_based_cleaned["x_scale"]),
np.array(df_cdf_based_cleaned["x_scale"]),
)
)
minimum = np.min(x_scales_merged)
maximum = np.max(x_scales_merged)
grid = np.linspace(minimum, maximum, 1000)
print("Start.")
df_mean_based_cleaned.to_csv(produces["conditional_res_mean_based"])
df_w_mean_based_cleaned.to_csv(produces["conditional_res_w_mean_based"])
df_quantile_based_cleaned.to_csv(produces["conditional_res_quantile_based"])
df_cdf_based_cleaned.to_csv(produces["conditional_res_cdf_based"])
# generate kernel estimates:
for mode in ["coverage", "length"]:
if mode == "coverage":
print("Coverage stage.")
kde_cov_mean_based = KernelReg(
endog=df_mean_based_cleaned["coverage"],
exog=df_mean_based_cleaned["x_scale"],
var_type="o",
)
kernel_fit_cov_mean_based, marginal_cov_mean_based = kde_cov_mean_based.fit(
data_predict=grid
)
##
print("Fitted mean based.")
kde_cov_weighted_mean_based = KernelReg(
endog=df_w_mean_based_cleaned["coverage"],
exog=df_w_mean_based_cleaned["x_scale"],
var_type="o",
)
(
kernel_fit_cov_weighted_mean_based,
marginal_cov_weighted_mean_based,
) = kde_cov_weighted_mean_based.fit(data_predict=grid)
##
print("Fitted w. mean based.")
#
kde_cov_quantile_based = KernelReg(
endog=df_quantile_based_cleaned["coverage"],
exog=df_quantile_based_cleaned["x_scale"],
var_type="o",
)
(
kernel_fit_cov_quantile_based,
marginal_cov_quantile_based,
) = kde_cov_quantile_based.fit(data_predict=grid)
##
print("Fitted quantile_based.")
kde_cov_cdf_based = KernelReg(
endog=df_cdf_based_cleaned["coverage"],
exog=df_cdf_based_cleaned["x_scale"],
var_type="o",
)
kernel_fit_cov_cdf_based, marginal_cov_cdf_based = kde_cov_cdf_based.fit(
data_predict=grid
)
###
print("Fitted cdf_based.")
dataset = pd.DataFrame(
{
"cond_variance_y_grid": grid,
"mean_based_cond_coverage": kernel_fit_cov_mean_based,
"w_mean_based_cond_coverage": kernel_fit_cov_weigthed_mean_based,
"quantile_based_cond_coverage": kernel_fit_cov_quantile_based,
"cdf_based_cond_coverage": kernel_fit_cov_cdf_based,
}
)
dataset.to_csv(produces["final_kernel_estimated_coverage"])
elif mode == "length":
print("Length stage.")
kde_cov_mean_based = KernelReg(
endog=df_mean_based_cleaned["length"],
exog=df_mean_based_cleaned["x_scale"],
var_type="c",
reg_type="lc",
)
kernel_fit_cov_mean_based, marginal_cov_mean_based = kde_cov_mean_based.fit(
data_predict=grid
)
##
print("Fitted mean based.")
kde_cov_weighted_mean_based = KernelReg(
endog=df_w_mean_based_cleaned["length"],
exog=df_w_mean_based_cleaned["x_scale"],
var_type="c",
)
(
kernel_fit_cov_weighted_mean_based,
marginal_cov_weighted_mean_based,
) = kde_cov_weighted_mean_based.fit(data_predict=grid)
##
print("Fitted w. mean based.")
#
kde_cov_quantile_based = KernelReg(
endog=df_quantile_based_cleaned["length"],
exog=df_quantile_based_cleaned["x_scale"],
var_type="c",
)
(
kernel_fit_cov_quantile_based,
marginal_cov_quantile_based,
) = kde_cov_quantile_based.fit(data_predict=grid)
##
print("Fitted quantile_based.")
kde_cov_cdf_based = KernelReg(
endog=df_cdf_based_cleaned["length"],
exog=df_cdf_based_cleaned["x_scale"],
var_type="c",
)
kernel_fit_cov_cdf_based, marginal_cov_cdf_based = kde_cov_cdf_based.fit(
data_predict=grid
)
###
print("Fitted cdf_based.")
dataset = pd.DataFrame(
{
"cond_variance_y_grid": grid,
"mean_based_cond_length": kernel_fit_cov_mean_based,
"w_mean_based_cond_length": kernel_fit_cov_weigthed_mean_based,
"quantile_based_cond_length": kernel_fit_cov_quantile_based,
"cdf_based_cond_length": kernel_fit_cov_cdf_based,
}
)
dataset.to_csv(produces["final_kernel_estimated_length"])
else:
print("Mode not specified.")
@pytask.mark.parametrize("depends_on, produces",
[
(
{
"type": SRC / "simulations" / "specs" / f"cond_sim_type_{type}.json",
},
{
"average_metrics_df": BLD / "simulations" / "cond_perf_simulations" / f"average_results_{type}.csv",
"conditional_res_mean_based": BLD / "simulations" / "cond_perf_simulations" / "cond_detailed_dfs" / f"cond_res_mean_based_{type}.csv",
"conditional_res_w_mean_based": BLD / "simulations" / "cond_perf_simulations" / "cond_detailed_dfs" / f"cond_res_w_mean_based_{type}.csv",
"conditional_res_quantile_based": BLD / "simulations" / "cond_perf_simulations" / "cond_detailed_dfs" / f"cond_res_quantile_based_{type}.csv",
"conditional_res_cdf_based": BLD / "simulations" / "cond_perf_simulations" / "cond_detailed_dfs" / f"cond_res_cdf_based_{type}.csv",
"final_kernel_estimated_coverage": BLD / "simulations" / "cond_perf_simulations" / f"kernel_coverage_results_{type}.csv",
"final_kernel_estimated_length": BLD / "simulations" / "cond_perf_simulations" / f"kernel_length_results_{type}.csv",
}
)
for type in [1,2,3,4]
],
)
def task_cond_perf_simulations(depends_on, produces):
# dictionary imported into "specs":
specs = json.loads(depends_on["type"].read_text(encoding="utf-8"))
run_simulation(specs, produces)
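
# --- Hedged illustration, not part of the original task file ---
# run_simulation() only touches the spec dictionary through the keys listed
# below, so a cond_sim_type_*.json file presumably has roughly this shape. All
# concrete values are placeholders (the first eight mirror the commented-out
# index example inside run_simulation) and may differ from the real spec files.
EXAMPLE_SPECS = {
    "n_sims": 20,                  # number of simulation repetitions
    "n": 200,                      # sample size used for fitting and calibration
    "p": 10,                       # number of regressors
    "X_dist": "mixture",
    "X_correlation": "auto",
    "eps_dist": "t",
    "error_type": "varying_third_moment_mu",
    "functional_form": "linear",
    "non_zero_beta_count": 5,
    "uniform_upper": 1,
    "standardized_X": "1",         # parsed above with bool(int(...))
    "pred_samples": 1000,          # held-out points used to evaluate the bands
    "train_size": 0.5,             # share of the split used for training
    "nodesize": 5,                 # forest hyperparameters passed to the fitters
    "n_estimators": 500,
}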
|
StarcoderdataPython
|
1867357
|
<reponame>sonata-nfv/son-monitor
## ALL RIGHTS RESERVED.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
## Neither the name of the SONATA-NFV, 5GTANGO [, ANY ADDITIONAL AFFILIATION]
## nor the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## This work has been performed in the framework of the SONATA project,
## funded by the European Commission under Grant number 671517 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the SONATA
## partner consortium (www.sonata-nfv.eu).
##
## This work has been performed in the framework of the 5GTANGO project,
## funded by the European Commission under Grant number 761493 through
## the Horizon 2020 and 5G-PPP programmes. The authors would like to
## acknowledge the contributions of their colleagues of the 5GTANGO
## partner consortium (www.5gtango.eu).
# encoding: utf-8
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from api.serializers import *
from api.prometheus import *
from rest_framework import generics
from rest_framework.reverse import reverse
from itertools import *
import json, socket, os, base64, time
from api.httpClient import Http
from django.db.models import Q
import datetime
import psutil, logging
from django.db import IntegrityError
from api.logger import TangoLogger
# Create your views here.
LOG = TangoLogger.getLogger(__name__, log_level=logging.INFO, log_json=True)
TangoLogger.getLogger("Monitoring_manager", logging.INFO, log_json=True)
LOG.setLevel(logging.INFO)
LOG.info('Monitoring Manager started')
@api_view(('GET',))
def api_root(request, format=None):
return Response({
'user': reverse('UserDetail', request=request, format=format),
'tests': reverse('TestList', request=request, format=format),
'test': reverse('TestDetail', request=request, format=format),
'users': reverse('UserList', request=request, format=format),
})
class SntPLCRuleconf(generics.CreateAPIView):
serializer_class = SntPLCRulesConfSerializer
def post(self, request, *args, **kwargs):
dt = request.data
if 'plc_cnt' in dt.keys():
policy_cnt = dt['plc_cnt']
if 'srvID' in self.kwargs:
srvid = self.kwargs['srvID']
elif 'sonata_service_id' in dt.keys():
srvid = dt['sonata_service_id']
else:
LOG.info("Undefined service_id")
return Response({'error': 'Undefined service_id'}, status=status.HTTP_400_BAD_REQUEST)
if 'vnfs' in dt.keys():
vnfs = dt['vnfs']
else:
LOG.info("Undefined VNFs")
return Response({'error': 'Undefined vnfs'}, status=status.HTTP_400_BAD_REQUEST)
# Check if service exists
srv = monitoring_services.objects.all().filter(sonata_srv_id=srvid)
if srv.count() == 0:
if srvid != 'generic':
LOG.info("Requested Service not found")
return Response({'error': 'Requested Service not found'}, status=status.HTTP_404_NOT_FOUND)
else:
srvid = 'alerts'
# Delete old rule from DB
rules_db = monitoring_rules.objects.all().filter(service__sonata_srv_id=srvid, consumer='PLC')
rules_db.delete()
# Create prometheus configuration file
rls = {}
rls['rules'] = []
srvrules = []
rules_status = 0
rls['service'] = 'plc-' + srvid
for vnf in dt['vnfs']:
rls['vnf'] = vnf['vnf_id']
if 'vdus' in vnf.keys():
for vdu in vnf['vdus']:
rls['vdu_id'] = vdu['vdu_id']
rules = vdu['rules']
rules_status += len(rules)
for r in rules:
nt = monitoring_notif_types.objects.all().filter(id=r['notification_type']['id'])
if nt.count() == 0:
LOG.info("Alert notification type does not supported. Action Aborted")
return Response({'error': 'Alert notification type is not supported. Action Aborted'},
status=status.HTTP_400_BAD_REQUEST)
else:
if srvid != "alerts":
rule = monitoring_rules(service=srv[0], summary=r['summary'], notification_type=nt[0],
name=r['name'], condition=r['condition'],
duration=r['duration'],
description=r['description'], consumer='PLC',
function=rls['vnf'], vdu=rls['vdu_id'])
rule.save()
rl = {}
rl['name'] = r['name']
rl['description'] = r['description']
rl['summary'] = r['summary']
rl['duration'] = r['duration']
rl['notification_type'] = r['notification_type']
rl['condition'] = r['condition']
rl['labels'] = ["serviceID=\"" + srvid + "\", functionID=\"" + rls['vnf'] + "\", tp=\"PLC\""]
rls['rules'].append(rl)
srvrules += rules
if len(srvrules) > 0:
cl = Http()
rsp = cl.POST('http://prometheus:9089/prometheus/rules', [], json.dumps(rls))
if rsp == 200:
LOG.info("PLC rules added")
return Response({'status': "success", "rules": rules_status})
else:
LOG.warning("PLC rule update failed")
return Response({'error': 'Rule update fail ' + str(rsp)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
LOG.warning('No PLC rules defined')
return Response({'error': 'No rules defined'})
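
# --- Hedged illustration, not part of the original file ---
# SntPLCRuleconf.post() above only reads the request body through the keys
# sketched here, so a PLC rule-configuration request presumably looks roughly
# like this. Every concrete value is a placeholder for documentation only.
EXAMPLE_PLC_RULES_REQUEST = {
    "plc_cnt": 1,                               # optional policy counter
    "sonata_service_id": "<service-uuid>",      # or passed via the srvID URL kwarg
    "vnfs": [
        {
            "vnf_id": "<vnf-uuid>",
            "vdus": [
                {
                    "vdu_id": "<vdu-uuid>",
                    "rules": [
                        {
                            "name": "cpu_high",
                            "summary": "CPU usage above threshold",
                            "description": "Fires when the VDU CPU load stays too high",
                            "condition": "vm_cpu_perc > 90",
                            "duration": "1m",
                            # must reference an existing monitoring_notif_types id
                            "notification_type": {"id": 1},
                        }
                    ],
                }
            ],
        }
    ],
}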
class SntPLCRulesDetail(generics.DestroyAPIView):
serializer_class = SntRulesSerializer
def delete(self, request, *args, **kwargs):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['sonata_srv_id']
fq = queryset.filter(service__sonata_srv_id=srvid, consumer='PLC')
if fq.count() > 0:
fq.delete()
cl = Http()
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str('plc-' + srvid), [])
LOG.info('PLC Rules deleted')
return Response({'status': "service's rules removed"}, status=status.HTTP_204_NO_CONTENT)
else:
LOG.info('PLC Rules not found')
return Response({'status': "rules not found"}, status=status.HTTP_404_NOT_FOUND)
class SntPLCRulesPerServiceList(generics.ListAPIView):
serializer_class = SntRulesPerSrvSerializer
def get_queryset(self):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['srv_id']
return queryset.filter(service__sonata_srv_id=srvid, consumer='PLC')
def delete(self, request, *args, **kwargs):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['srv_id']
fq = queryset.filter(service__sonata_srv_id=srvid, consumer='PLC')
if fq.count() > 0:
fq.delete()
cl = Http()
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str('plc-' + srvid), [])
LOG.info("PLC rules removed")
return Response({'status': "service's rules removed"}, status=status.HTTP_204_NO_CONTENT)
else:
LOG.info("PLC rules not found")
return Response({'status': "rules not found"}, status=status.HTTP_404_NOT_FOUND)
class SntPLCRulesList(generics.ListAPIView):
serializer_class = SntRulesSerializer
def get_queryset(self):
queryset = monitoring_rules.objects.all()
return queryset.filter(consumer='PLC')
###SLA Rules
class SntSLARuleconf(generics.CreateAPIView):
serializer_class = SntSLARulesConfSerializer
def post(self, request, *args, **kwargs):
dt = request.data
if 'srvID' in self.kwargs:
srvid = self.kwargs['srvID']
elif 'sonata_service_id' in dt:
srvid = dt['sonata_service_id']
else:
LOG.info('service_id is missing')
return Response({'error': 'sonata_service_id missing'}, status=status.HTTP_400_BAD_REQUEST)
if 'plc_cnt' in dt.keys():
policy_cnt = dt['plc_cnt']
if 'vnfs' in dt.keys():
vnfs = dt['vnfs']
else:
LOG.info('Undefined VNFs')
return Response({'error': 'Undefined vnfs'}, status=status.HTTP_400_BAD_REQUEST)
# Check if service exists
srv = monitoring_services.objects.all().filter(sonata_srv_id=srvid)
if srv.count() == 0:
if srvid != 'generic':
LOG.info('Requested Service not found')
return Response({'error': 'Requested Service not found'}, status=status.HTTP_404_NOT_FOUND)
else:
srvid = 'alerts'
# Delete old rule from DB
rules_db = monitoring_rules.objects.all().filter(service__sonata_srv_id=srvid, consumer='SLA')
rules_db.delete()
# Create prometheus configuration file
rls = {}
rls['rules'] = []
srvrules = []
rules_status = 0
rls['service'] = 'sla-' + srvid
for vnf in dt['vnfs']:
rls['vnf'] = vnf['vnf_id']
if 'vdus' in vnf.keys():
for vdu in vnf['vdus']:
rls['vdu_id'] = vdu['vdu_id']
rules = vdu['rules']
rules_status += len(rules)
for r in rules:
nt = monitoring_notif_types.objects.all().filter(id=r['notification_type']['id'])
if nt.count() == 0:
LOG.info('Alert notification type is not supported. Action Aborted')
return Response({'error': 'Alert notification type is not supported. Action Aborted'},
status=status.HTTP_400_BAD_REQUEST)
else:
if srvid != "alerts":
rule = monitoring_rules(service=srv[0], summary=r['summary'], notification_type=nt[0],
name=r['name'], condition=r['condition'],
duration=r['duration'],
description=r['description'], consumer='SLA',
function=rls['vnf'], vdu=rls['vdu_id'])
rule.save()
rl = {}
rl['name'] = r['name']
rl['description'] = r['description']
rl['summary'] = r['summary']
rl['duration'] = r['duration']
rl['notification_type'] = r['notification_type']
rl['condition'] = r['condition']
rl['labels'] = ["serviceID=\"" + srvid + "\", functionID=\"" + rls['vnf'] + "\", tp=\"SLA\""]
rls['rules'].append(rl)
srvrules += rules
if len(srvrules) > 0:
cl = Http()
rsp = cl.POST('http://prometheus:9089/prometheus/rules', [], json.dumps(rls))
if rsp == 200:
LOG.info("SLA rules added")
return Response({'status': "success", "rules": rules_status})
else:
LOG.info("SLA rules update failed")
return Response({'error': 'Rules update fail ' + str(rsp)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
LOG.info("No SLA rules defined")
return Response({'error': 'No rules defined'})
class SntSLARulesDetail(generics.DestroyAPIView):
serializer_class = SntRulesSerializer
def delete(self, request, *args, **kwargs):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['sonata_srv_id']
fq = queryset.filter(service__sonata_srv_id=srvid, consumer='SLA')
if fq.count() > 0:
fq.delete()
cl = Http()
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str('sla-' + srvid), [])
LOG.info("SLA rules removed")
return Response({'status': "service's rules removed"}, status=status.HTTP_204_NO_CONTENT)
else:
LOG.info("SLA rules not found")
return Response({'status': "rules not found"}, status=status.HTTP_404_NOT_FOUND)
class SntSLARulesPerServiceList(generics.ListAPIView):
serializer_class = SntRulesPerSrvSerializer
def get_queryset(self):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['srv_id']
return queryset.filter(service__sonata_srv_id=srvid, consumer='SLA')
def delete(self, request, *args, **kwargs):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['srv_id']
fq = queryset.filter(service__sonata_srv_id=srvid, consumer='SLA')
if fq.count() > 0:
fq.delete()
cl = Http()
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str('sla-' + srvid), [])
LOG.info("SLA rules removed")
return Response({'status': "service's rules removed"}, status=status.HTTP_204_NO_CONTENT)
else:
LOG.info("SLA rules not found")
return Response({'status': "rules not found"}, status=status.HTTP_404_NOT_FOUND)
class SntSLAAlertsList(generics.ListAPIView):
serializer_class = SntAlertsListSerializer
def get(self, request, *args, **kwargs):
cl = Http()
rsp = cl.GET('http://prometheus:9090/api/v1/series?match[]=ALERTS{tp="SLA",alertstate="firing"}', [])
if rsp['status'] == 'success':
r = {}
r['status'] = rsp['status']
r['alerts'] = rsp['data']
LOG.info(r)
return Response(r, status=status.HTTP_200_OK)
else:
LOG.info('SLA alerts retrieval failed')
return Response(rsp, status=status.HTTP_404_NOT_FOUND)
class SntPLCAlertsList(generics.ListAPIView):
serializer_class = SntAlertsListSerializer
def get(self, request, *args, **kwargs):
cl = Http()
rsp = cl.GET('http://prometheus:9090/api/v1/series?match[]=ALERTS{tp="PLC",alertstate="firing"}', [])
if rsp['status'] == 'success':
r = {}
r['status'] = rsp['status']
r['alerts'] = rsp['data']
LOG.info(r)
return Response(r, status=status.HTTP_200_OK)
else:
LOG.warning('PLC alerts retrieval failed')
return Response(rsp, status=status.HTTP_404_NOT_FOUND)
class SntSLARulesList(generics.ListAPIView):
serializer_class = SntRulesSerializer
def get_queryset(self):
queryset = monitoring_rules.objects.all()
return queryset.filter(consumer='SLA')
class SntSNMPEntCreate(generics.ListCreateAPIView):
queryset = monitoring_snmp_entities.objects.all()
serializer_class = SntSNMPEntFullSerializer
class SntSNMPEntList(generics.ListAPIView):
queryset = monitoring_snmp_entities.objects.all()
serializer_class = SntSNMPEntSerializer
class SntSNMPEntDetail(generics.DestroyAPIView):
# queryset = monitoring_snmp_entities.objects.all()
# serializer_class = SntSNMPEntSerializer
def delete(self, request, *args, **kwargs):
id = self.kwargs['pk']
queryset = monitoring_snmp_entities.objects.all().filter(id=id)
if queryset.count() > 0:
queryset.update(status='DELETED')
LOG.info('SNMP oids removed')
return Response(status=status.HTTP_204_NO_CONTENT)
else:
LOG.info('SNMP server not found')
return Response({'status': "SNMP server not found"}, status=status.HTTP_404_NOT_FOUND)
class SntSmtpCreate(generics.CreateAPIView):
# queryset = monitoring_smtp.objects.all()
serializer_class = SntSmtpSerializerCreate
def post(self, request, *args, **kwargs):
queryset = monitoring_smtp.objects.filter(component=request.data['component'])
if queryset.count() > 0:
queryset.update(smtp_server=request.data['smtp_server'], port=request.data['port'],
user_name=request.data['user_name'], password=request.data['password'],
sec_type=request.data['sec_type'])
LOG.info('SMTP settings updated')
return Response(monitoring_smtp.objects.values().filter(component=request.data['component']))
else:
smtp = monitoring_smtp(smtp_server=request.data['smtp_server'], port=request.data['port'],
user_name=request.data['user_name'], password=request.data['password'],
component=request.data['component'], sec_type=request.data['sec_type'])
smtp.save()
LOG.info('SMTP settings added')
return Response(monitoring_smtp.objects.values().filter(component=request.data['component']))
class SntSmtpList(generics.ListAPIView):
serializer_class = SntSmtpSerializerList
def get_queryset(self):
comp = self.kwargs['component']
queryset = monitoring_smtp.objects.filter(component=comp)
return queryset
class SntSmtpDetail(generics.DestroyAPIView):
queryset = monitoring_smtp.objects.all()
serializer_class = SntSmtpSerializerList
class SntCredList(generics.ListAPIView):
# serializer_class = SntSmtpSerializerList
serializer_class = SntSmtpSerializerCred
def get(self, request, *args, **kwargs):
smtp = monitoring_smtp.objects.filter(component=self.kwargs['component'])
if smtp.count() > 0:
dict = [obj.as_dict() for obj in smtp]
psw = (dict[0])['psw']
psw = base64.b64encode(psw.encode('utf-8')).decode('utf-8')
return Response({'status': 'key found', 'creds': psw}, status=status.HTTP_200_OK)
else:
return Response({'status': 'key not found'}, status=status.HTTP_200_OK)
def is_json(myjson):
try:
json_object = json.loads(myjson)
except ValueError as e:
return False
return True
def getPromIP(pop_id_):
arch = os.environ.get('MON_ARCH', 'CENTRALIZED')
pop_id = pop_id_
pop = monitoring_pops.objects.values('prom_url').filter(sonata_pop_id=pop_id)
if pop.count() == 0:
LOG.info('Undefined POP')
return {'status': 'failed', 'msg': 'Undefined POP', 'addr': None}
# return Response({'status':"Undefined POP"}, status=status.HTTP_404_NOT_FOUND)
elif pop.count() > 1:
LOG.info('Multiple POPs with same id')
return {'status': 'failed', 'msg': 'Many POPs with same id', 'addr': None}
# return Response({'status':"Many POPs with same id"}, status=status.HTTP_404_NOT_FOUND)
if arch != 'CENTRALIZED':
prom_url = monitoring_pops.objects.values('prom_url').filter(sonata_pop_id=pop_id)[0]['prom_url']
if prom_url == 'undefined':
LOG.info('Undefined Prometheus address')
return {'status': 'failed', 'msg': 'Undefined Prometheus address', 'addr': None}
# return Response({'status':"Undefined Prometheus address"}, status=status.HTTP_404_NOT_FOUND)
else:
prom_url = 'prometheus'
LOG.info('Prometheus address found')
return {'status': 'success', 'msg': 'Prometheus address found', 'addr': prom_url}
class SntPOPList(generics.ListCreateAPIView):
serializer_class = SntPOPSerializer
def get_queryset(self):
queryset = monitoring_pops.objects.all()
return queryset
def getCfgfile(self):
url = 'http://prometheus:9089/prometheus/configuration'
cl = Http()
rsp = cl.GET(url, [])
return rsp
def postCfgfile(self, confFile):
url = 'http://prometheus:9089/prometheus/configuration'
cl = Http()
rsp = cl.POST(url, [], json.dumps(confFile))
return rsp
def updatePromConf(self, pop):
arch = os.environ.get('MON_ARCH', 'CENTRALIZED')
if arch == 'CENTRALIZED':
return 200
updated = False
file = self.getCfgfile()
if 'scrape_configs' in file:
for obj in file['scrape_configs']:
if 'target_groups' in obj:
for trg in obj['target_groups']:
if 'labels' in trg:
if 'pop_id' in trg['labels']:
if trg['labels']['pop_id'] == pop['sonata_pop_id'] and trg['labels']['sp_id'] == pop[
'sonata_sp_id']:
trg['labels']['name'] = pop['name']
trg['targets'] = []
trg['targets'].append(pop['prom_url'])
updated = True
continue
if not updated:
newTrg = {}
newTrg['labels'] = {}
newTrg['labels']['pop_id'] = pop['sonata_pop_id']
newTrg['labels']['sp_id'] = pop['sonata_sp_id']
newTrg['labels']['name'] = pop['name']
newTrg['targets'] = []
newTrg['targets'].append(pop['prom_url'])
obj['target_groups'].append(newTrg)
else:
LOG.info('NOT FOUND scrape_configs')
return 'NOT FOUND scrape_configs'
if not is_json(json.dumps(file)):
LOG.info('Prometheus reconfiguration failed')
return Response({'status': "Prometheus reconfiguration failed"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
code = self.postCfgfile(file)
LOG.info('Prometheus reconfiguration succeeded')
return code
def post(self, request, *args, **kwargs):
pop_id = request.data['sonata_pop_id']
sp_id = request.data['sonata_sp_id']
name = 'undefined'
prom_url = 'undefined'
if 'name' in request.data:
name = request.data['name']
if 'prom_url' in request.data:
prom_url = request.data['prom_url']
sp = monitoring_service_platforms.objects.all().filter(sonata_sp_id=sp_id)
if sp.count() == 0:
sp = monitoring_service_platforms(sonata_sp_id=sp_id, name='undefined', manager_url='127.0.0.1')
sp.save()
pop = monitoring_pops.objects.all().filter(sonata_pop_id=pop_id, sonata_sp_id=sp_id)
if pop.count() == 1:
# pop = monitoring_pops(sonata_pop_id=pop_id,sonata_sp_id=sp_id, name=name,prom_url=prom_url)
code = self.updatePromConf(request.data)
if code == 200:
pop.update(name=name, prom_url=prom_url)
else:
LOG.info('Prometheus reconfiguration failed')
return Response({'status': "Prometheus reconfiguration failed"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
elif pop.count() > 1:
LOG.info('Multiple POPs with same id')
return Response({'status': "Many POPs with same id"}, status=status.HTTP_404_NOT_FOUND)
else:
code = self.updatePromConf(request.data)
if code == 200:
pop = monitoring_pops(sonata_pop_id=pop_id, sonata_sp_id=sp_id, name=name, prom_url=prom_url)
pop.save()
else:
LOG.info('Prometheus reconfiguration failed')
return Response({'status': "Prometheus reconfiguration failed"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
LOG.info('New POP added')
return Response(monitoring_pops.objects.values().filter(sonata_pop_id=pop_id, sonata_sp_id=sp_id))
class SntPOPperSPList(generics.ListAPIView):
# queryset = monitoring_functions.objects.all()
serializer_class = SntPOPSerializer
def get_queryset(self):
queryset = monitoring_pops.objects.all()
service_platform_id = self.kwargs['spID']
return queryset.filter(sonata_sp_id=service_platform_id)
class SntPOPDetail(generics.DestroyAPIView):
serializer_class = SntPOPSerializer
def getCfgfile(self):
url = 'http://prometheus:9089/prometheus/configuration'
cl = Http()
rsp = cl.GET(url, [])
return rsp
def postCfgfile(self, confFile):
url = 'http://prometheus:9089/prometheus/configuration'
cl = Http()
rsp = cl.POST(url, [], json.dumps(confFile))
return rsp
def updatePromConf(self, pop_id):
arch = os.environ.get('MON_ARCH', 'CENTRALIZED')
if arch == 'CENTRALIZED':
return 200
updated = False
file = self.getCfgfile()
if 'scrape_configs' in file:
for obj in file['scrape_configs']:
if 'target_groups' in obj:
for trg in obj['target_groups']:
if 'labels' in trg:
if 'pop_id' in trg['labels']:
if trg['labels']['pop_id'] == pop_id:
obj['target_groups'].remove(trg)
updated = True
else:
return 'NOT FOUND scrape_configs'
if not is_json(json.dumps(file)):
LOG.info('Prometheus reconfiguration failed')
return Response({'status': "Prometheus reconfiguration failed"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
code = self.postCfgfile(file)
LOG.info('Prometheus reconfiguration succeeded')
return code
def delete(self, request, *args, **kwargs):
# karpa start
self.lookup_field = 'sonata_pop_id'
pop_id = self.kwargs['pop_id']
queryset = monitoring_pops.objects.all()
queryset = queryset.filter(sonata_pop_id=pop_id)
if queryset.count() > 0:
code = self.updatePromConf(pop_id)
if code == 200:
queryset.delete()
LOG.info('POP removed')
return Response({'status': "POP removed"}, status=status.HTTP_204_NO_CONTENT)
else:
LOG.info('POP not found')
return Response({'status': "POP not found"}, status=status.HTTP_404_NOT_FOUND)
class SntSPList(generics.ListCreateAPIView):
queryset = monitoring_service_platforms.objects.all()
serializer_class = SntSPSerializer
class SntSPDetail(generics.DestroyAPIView):
queryset = monitoring_service_platforms.objects.all()
serializer_class = SntSPSerializer
class SntPromMetricPerPOPList(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
pop_id = self.kwargs['popID']
prom_url = getPromIP(pop_id)
if prom_url['status'] == 'failed':
LOG.info('Prometheus not found')
return Response({'status': prom_url['msg']}, status=status.HTTP_404_NOT_FOUND)
mt = ProData(prom_url['addr'], 9090)
data = mt.getMetrics()
response = {}
response['metrics'] = data['data']
return Response(response)
class SntPromMetricPerPOPDetail(generics.ListAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
metric_name = self.kwargs['metricName']
pop_id = self.kwargs['popID']
prom_url = getPromIP(pop_id)
if prom_url['status'] == 'failed':
LOG.info('Prometheus not found')
return Response({'status': prom_url['msg']}, status=status.HTTP_404_NOT_FOUND)
mt = ProData(prom_url['addr'], 9090)
data = mt.getMetricFullDetail(metric_name)
response = {}
response['metrics'] = data['data']
return Response(response)
class SntPromMetricPerPOPData(generics.CreateAPIView):
serializer_class = SntPromMetricSerializer
'''
{
"name": "up",
"start": "2016-02-28T20:10:30.786Z",
"end": "2016-03-03T20:11:00.781Z",
"step": "1h",
"labels": [{"labeltag":"instance", "labelid":"192.168.1.39:9090"},{"labeltag":"group", "labelid":"development"}]
}
'''
def post(self, request, *args, **kwargs):
pop_id = self.kwargs['popID']
prom_url = getPromIP(pop_id)
if prom_url['status'] == 'failed':
return Response({'status': prom_url['msg']}, status=status.HTTP_404_NOT_FOUND)
mt = ProData(prom_url['addr'], 9090)
data = mt.getTimeRangeData(request.data)
response = {}
try:
response['metrics'] = data['data']
except KeyError:
response = data
LOG.info('KeyError while parsing the Prometheus response')
return Response(response)
class SntPromSrvPerPOPConf(generics.ListAPIView):
# start from here
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
pop_id = self.kwargs['popID']
prom_url = getPromIP(pop_id)
if prom_url['status'] == 'failed':
LOG.info('Prometheus not found')
return Response({'status': prom_url['msg']}, status=status.HTTP_404_NOT_FOUND)
url = 'http://' + prom_url['addr'] + ':9089/prometheus/configuration'
cl = Http()
rsp = cl.GET(url, [])
return Response({'config': rsp}, status=status.HTTP_200_OK)
class SntUserList(generics.ListAPIView):
serializer_class = SntUserSerializer
def get_queryset(self):
queryset = monitoring_users.objects.all()
userid = self.kwargs['pk']
return queryset.filter(pk=userid)
class SntUserPerTypeList(generics.ListAPIView):
# queryset = monitoring_users.objects.all().filter(component=self.kwargs['pk'])
serializer_class = SntUserSerializer
def get_queryset(self):
queryset = monitoring_users.objects.all()
user_type = self.kwargs['type']
return queryset.filter(type=user_type)
class SntUsersList(generics.ListCreateAPIView):
queryset = monitoring_users.objects.all()
serializer_class = SntUserSerializer
class SntUsersDetail(generics.DestroyAPIView):
queryset = monitoring_users.objects.all()
serializer_class = SntUserSerializer
class SntServicesPerUserList(generics.ListAPIView):
# queryset = monitoring_services.objects.all().filter(self.kwargs['usrID'])
serializer_class = SntServicesFullSerializer
def get_queryset(self):
queryset = monitoring_services.objects.all()
userid = self.kwargs['usrID']
return queryset.filter(user__sonata_userid=userid)
class SntServiceList(generics.ListAPIView):
# queryset = monitoring_services.objects.all().filter(self.kwargs['usrID'])
serializer_class = SntServicesSerializer
def get_queryset(self):
queryset = monitoring_services.objects.all()
srvid = self.kwargs['srvID']
return queryset.filter(sonata_srv_id=srvid)
class SntServicesList(generics.ListCreateAPIView):
queryset = monitoring_services.objects.all()
serializer_class = SntServicesSerializer
class SntFunctionsPerServiceList(generics.ListAPIView):
# queryset = monitoring_functions.objects.all()
serializer_class = SntFunctionsFullSerializer
def get_queryset(self):
queryset = monitoring_functions.objects.all()
srvid = self.kwargs['srvID']
return queryset.filter(service__sonata_srv_id=srvid)
class SntServicesDetail(generics.DestroyAPIView):
serializer_class = SntServicesDelSerializer
def delete(self, request, *args, **kwargs):
self.lookup_field = 'srv_id'
queryset = monitoring_services.objects.all()
srvid = self.kwargs['srv_id']
queryset = queryset.filter(sonata_srv_id=srvid)
if queryset.count() > 0:
# DELETE also the SNMP entities (if any)
fcts = monitoring_functions.objects.all().filter(service__sonata_srv_id=srvid)
if fcts.count() > 0:
for f in fcts:
snmp_entities = monitoring_snmp_entities.objects.all().filter(
Q(entity_id=f.host_id) & Q(entity_type='vnf'))
if snmp_entities.count() > 0:
snmp_entities.update(status='DELETED')
queryset.delete()
cl = Http()
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str(srvid), [])
time.sleep(2)
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/sla-' + str(srvid), [])
time.sleep(2)
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/plc-' + str(srvid), [])
LOG.info('Network Service deleted')
return Response({'status': "service removed"}, status=status.HTTP_200_OK)
else:
LOG.info('Network Service not found')
return Response({'status': "service not found"}, status=status.HTTP_200_OK)
class SntFunctionsList(generics.ListAPIView):
queryset = monitoring_functions.objects.all()
serializer_class = SntFunctionsSerializer
class SntFunctionsDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = monitoring_functions.objects.all()
serializer_class = SntFunctionsSerializer
class SntNotifTypesList(generics.ListCreateAPIView):
queryset = monitoring_notif_types.objects.all()
serializer_class = SntNotifTypeSerializer
class SntNotifTypesDetail(generics.DestroyAPIView):
queryset = monitoring_notif_types.objects.all()
serializer_class = SntNotifTypeSerializer
class SntMetricsList(generics.ListAPIView):
queryset = monitoring_metrics.objects.all()
serializer_class = SntMetricsSerializer
class SntMetricsPerFunctionList(generics.ListAPIView):
# queryset = monitoring_metrics.objects.all()
serializer_class = SntMetricsFullSerializer
def get_queryset(self):
queryset = monitoring_metrics.objects.all()
functionid = self.kwargs['funcID']
result_list = list(chain(monitoring_services.objects.all(), monitoring_functions.objects.all(),
monitoring_metrics.objects.all()))
return queryset.filter(function__sonata_func_id=functionid)
class SntMetricsPerFunctionList1(generics.ListAPIView):
# queryset = monitoring_metrics.objects.all()
def list(self, request, *args, **kwargs):
functionid = kwargs['funcID']
queryset = monitoring_metrics.objects.all().filter(function_id=functionid)
dictionaries = [obj.as_dict() for obj in queryset]
response = {}
response['data_server_url'] = 'http://sp.int2.sonata-nfv.eu:9091'
response['metrics'] = dictionaries
return Response(response)
class SntNewServiceConf(generics.ListCreateAPIView):
def get_queryset(self):
self.serializer_class = SntServicesSerializer
queryset = monitoring_services.objects.all()
return queryset
def post(self, request, *args, **kwargs):
self.serializer_class = NewServiceSerializer
if not 'service' in request.data:
LOG.info('Received new Service notification: Undefined Service')
return Response({'error': 'Undefined Service'}, status=status.HTTP_400_BAD_REQUEST)
if not 'functions' in request.data:
LOG.info('Received new Service notification: Undefined Functions')
return Response({'error': 'Undefined Functions'}, status=status.HTTP_400_BAD_REQUEST)
if not 'rules' in request.data:
LOG.info('Received new Service notification: Undefined Rules')
return Response({'error': 'Undefined Rules'}, status=status.HTTP_400_BAD_REQUEST)
LOG.info('Received new Service notification: ' + json.dumps(request.data))
service = request.data['service']
functions = request.data['functions']
rules = request.data['rules']
functions_status = 0
metrics_status = 0
rules_status = 0
oids_status = 0
usr = None
if 'sonata_usr' in service:
customer = {}
customer['email'] = None
customer['phone'] = None
if 'email' in service['sonata_usr']:
customer['email'] = service['sonata_usr']['email']
if 'phone' in service['sonata_usr']:
customer['phone'] = service['sonata_usr']['phone']
u = monitoring_users.objects.all().filter(
Q(email=customer['email']) & Q(mobile=customer['phone']) & Q(type='cst'))
if len(u) == 0:
usr = monitoring_users(mobile=customer['phone'], email=customer['email'], type='cst')
usr.save()
else:
usr = u[0]
dev = None
if 'sonata_dev' in service:
developer = {}
developer['email'] = None
developer['phone'] = None
if 'email' in service['sonata_dev']:
developer['email'] = service['sonata_dev']['email']
if 'phone' in service['sonata_dev']:
developer['phone'] = service['sonata_dev']['phone']
u = monitoring_users.objects.all().filter(
Q(email=developer['email']) & Q(mobile=developer['phone']) & Q(type='dev'))
if len(u) == 0:
dev = monitoring_users(mobile=developer['phone'], email=developer['email'], type='dev')
dev.save()
else:
dev = u[0]
srv_pop_id = ''
srv_host_id = ''
if service['pop_id']:
srv_pop_id = service['pop_id']
pop = monitoring_pops.objects.all().filter(sonata_pop_id=srv_pop_id)
if pop.count() == 0:
pop = monitoring_pops(sonata_pop_id=srv_pop_id, sonata_sp_id="undefined", name="undefined",
prom_url="undefined", type="undefined")
pop.save()
if service['host_id']:
srv_host_id = service['host_id']
srv = monitoring_services.objects.all().filter(sonata_srv_id=service['sonata_srv_id'])
if srv.count() > 0:
old_vnf = monitoring_functions.objects.all().filter(service_id=srv.values('id'))
if old_vnf.count() > 0:
old_vnf.delete()
srv=srv[0]
else:
srv = monitoring_services(sonata_srv_id=service['sonata_srv_id'], name=service['name'],
description=service['description'], host_id=srv_host_id, pop_id=srv_pop_id)
srv.save()
if isinstance(usr, monitoring_users):
srv.user.add(usr)
if isinstance(dev, monitoring_users):
srv.user.add(dev)
srv.save()
oids_status = 0
metrics_status = 0
for f in functions:
fnc_pop_id = f['pop_id']
pop = monitoring_pops.objects.all().filter(sonata_pop_id=fnc_pop_id)
functions_status = len(functions)
pop_type = 'undefined'
sch_key = 'resource_id'
if 'host_id' in f:
vdu = f['host_id']
sch_key = 'resource_id'
pop_type = 'openstack'
if 'cnt_name' in f:
vdu = f['cnt_name'][0]
sch_key = 'container_name'
pop_type = 'k8s'
if pop.count() == 0:
pop = monitoring_pops(sonata_pop_id=fnc_pop_id, sonata_sp_id="undefined",
name="undefined",prom_url="undefined",type=pop_type)
pop.save()
func = monitoring_functions(service=srv, host_id=vdu, name=f['name'], host_type=sch_key,
sonata_func_id=f['sonata_func_id'], description=f['description'],
pop_id=f['pop_id'])
func.save()
for m in f['metrics']:
metrics_status += 1
metric = monitoring_metrics(function=func, name=m['name'], cmd=m['cmd'], threshold=m['threshold'],
interval=m['interval'], description=m['description'])
metric.save()
old_snmps = monitoring_snmp_entities.objects.all().filter(entity_id=vdu)
if old_snmps.count() > 0:
old_snmps.update(status='DELETED')
if 'snmp' in f:
if len(f['snmp']) > 0:
snmp = f['snmp']
if 'port' in snmp:
port = snmp['port']
else:
port = 161
ent = monitoring_snmp_entities(entity_id=vdu, version=snmp['version'],
auth_protocol=snmp['auth_protocol'],
security_level=snmp['security_level'],
ip=snmp['ip'], port=port, username=snmp['username'],
password='<PASSWORD>', interval=snmp['interval'],
entity_type='vnf')
ent.save()
for o in snmp['oids']:
oid = monitoring_snmp_oids(snmp_entity=ent, oid=o['oid'], metric_name=o['metric_name'],
metric_type=o['metric_type'], unit=o['unit'], mib_name=o['mib_name'])
oid.save()
oids_status += 1
rls = {}
rls['service'] = service['sonata_srv_id']
rls['vnf'] = "To be found..."
rls['rules'] = []
for r in rules:
nt = monitoring_notif_types.objects.all().filter(id=r['notification_type'])
if nt.count() == 0:
LOG.info('Alert notification type is not supported. Action Aborted')
srv.delete()
return Response({'error': 'Alert notification type is not supported. Action Aborted'},
status=status.HTTP_400_BAD_REQUEST)
else:
rules_status = len(rules)
rule = monitoring_rules(service=srv, summary=r['summary'], notification_type=nt[0], name=r['name'],
condition=r['condition'], duration=r['duration'], description=r['description'])
rule.save()
rl = {}
rl['name'] = r['name']
rl['description'] = r['description']
rl['summary'] = r['summary']
rl['duration'] = r['duration']
rl['notification_type'] = r['notification_type']
rl['condition'] = r['condition']
rl['labels'] = ["serviceID=\"" + rls['service'] + "\", tp=\"DEV\""]
rls['rules'].append(rl)
if len(rules) > 0:
cl = Http()
rsp = cl.POST('http://prometheus:9089/prometheus/rules', [], json.dumps(rls))
if rsp == 200:
LOG.info('New NS addition succeeded')
return Response(
{'status': "success", "vnfs": functions_status, "metrics": metrics_status, "rules": rules_status,
"snmp_oids": oids_status})
else:
srv.delete()
LOG.info('Service update failed')
return Response({'error': 'Service update failed ' + str(rsp)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
LOG.info('New NS addition succeeded')
return Response(
{'status': "success", "vnfs": functions_status, "metrics": metrics_status, "rules": rules_status,
"snmp_oids": oids_status})
def getVnfId(funct_, host_):
for fn in funct_:
if fn['host_id'] == host_:
return fn['sonata_func_id']
else:
return 'Undefined'
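# --- Illustrative payload (a sketch, not from the original source) ---
# A minimal example of the JSON body that SntNewServiceConf.post() above expects,
# inferred from the keys the handler reads. All identifiers and values below are
# hypothetical, the optional 'snmp' block is omitted, and the real schema may
# require additional fields.
EXAMPLE_NEW_SERVICE_REQUEST = {
    'service': {
        'sonata_srv_id': 'srv-0001',
        'name': 'demo-service',
        'description': 'example network service',
        'pop_id': 'pop-0001',
        'host_id': '',
        'sonata_usr': {'email': 'customer@example.com', 'phone': '000000000'},
        'sonata_dev': {'email': 'developer@example.com', 'phone': '111111111'},
    },
    'functions': [
        {
            'sonata_func_id': 'vnf-0001',
            'name': 'demo-vnf',
            'description': 'example VNF',
            'pop_id': 'pop-0001',
            'host_id': 'vdu-0001',          # or 'cnt_name': ['container-0'] on k8s
            'metrics': [
                {'name': 'cpu_util', 'cmd': '', 'threshold': 0,
                 'interval': 10, 'description': 'CPU utilisation'},
            ],
        },
    ],
    'rules': [
        {'name': 'high-cpu', 'summary': 'CPU too high',
         'condition': 'cpu_util > 90', 'duration': '1m',
         'notification_type': 1, 'description': 'example alert rule'},
    ],
}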
class SntMetricsDetail(generics.DestroyAPIView):
queryset = monitoring_metrics.objects.all()
serializer_class = SntMetricsSerializer
class SntRulesList(generics.ListAPIView):
serializer_class = SntRulesSerializer
def get_queryset(self):
queryset = monitoring_rules.objects.all()
return queryset.filter(consumer='DEV')
class SntRulesPerServiceList(generics.ListAPIView):
# queryset = monitoring_functions.objects.all()
serializer_class = SntRulesPerSrvSerializer
def get_queryset(self):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['srvID']
return queryset.filter(service__sonata_srv_id=srvid, consumer='DEV')
class SntRulesDetail(generics.DestroyAPIView):
# queryset = monitoring_rules.objects.all()
serializer_class = SntRulesSerializer
def delete(self, request, *args, **kwargs):
queryset = monitoring_rules.objects.all()
srvid = self.kwargs['sonata_srv_id']
fq = queryset.filter(service__sonata_srv_id=srvid)
if fq.count() > 0:
fq.delete()
cl = Http()
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str(srvid), [])
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str('plc-' + srvid), [])
rsp = cl.DELETE('http://prometheus:9089/prometheus/rules/' + str('sla-' + srvid), [])
LOG.info("Service's rules removed (inl. SLA, POLICY)")
return Response({'status': "Service's rules removed (inl. SLA, POLICY)"}, status=status.HTTP_204_NO_CONTENT)
else:
LOG.info("Service's rules not found")
return Response({'status': "rules not found"}, status=status.HTTP_404_NOT_FOUND)
class SntPromMetricList(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
mt = ProData('prometheus', 9090)
data = mt.getMetrics()
response = {}
if 'data' in data:
response['metrics'] = data['data']
else:
response = data
print (response)
return Response(response)
class SntPromNSMetricListVnf(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
time_window = request.GET.get('tm',None)
mt = ProData('prometheus', 9090)
queryset = monitoring_functions.objects.all()
srvid = self.kwargs['srv_id']
vnfs = queryset.filter(service__sonata_srv_id=srvid)
response = {}
if vnfs.count() == 0:
response['status'] = "Fail (VNF not found)"
return Response(response)
response['vnfs'] = []
response['status'] = 'Success'
for vnf in vnfs:
f = {}
f['vnf_id'] = vnf.sonata_func_id
f['vdus'] = []
vdu={}
vdu['vdu_id'] = vnf.host_id
data = mt.getMetricsResId(vnf.host_type,vnf.host_id,time_window)
if 'data' in data:
vdu['metrics'] = data['data']
else:
vdu['metrics'] = []
f['vdus'].append(vdu)
response['vnfs'].append(f)
return Response(response)
class SntPromMetricListVnf(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
time_window = request.GET.get('tm', None)
mt = ProData('prometheus', 9090)
vnfid = self.kwargs['vnf_id']
queryset = monitoring_functions.objects.all()
vnf = queryset.filter(sonata_func_id=vnfid)
response = {}
if vnf.count() == 0:
response['status'] = "Fail (VNF not found)"
return Response(response)
vdus = []
vdus.append(vnf[0].host_id)
response['status'] = 'Success'
response['vdus'] = []
for vdu in vdus:
dt = {}
dt['vdu_id'] = vdu
data = mt.getMetricsResId(vnf[0].host_type,vdu,time_window)
if 'data' in data:
dt['metrics'] = data['data']
else:
dt['metrics'] = []
response['vdus'].append(dt)
return Response(response)
class SntPromMetricListVnfVdu(generics.RetrieveAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
time_window = request.GET.get('tm', None)
mt = ProData('prometheus', 9090)
vnfid = self.kwargs['vnf_id']
vduid = self.kwargs['vdu_id']
queryset = monitoring_functions.objects.all()
vnf = queryset.filter(sonata_func_id=vnfid)
response = {}
if vnf.count() == 0:
response['status'] = "Fail (VNF: " + vnfid + " not found)"
return Response(response)
vdus = []
vdus.append(vnf[0].host_id)
if vduid not in vdus:
response['status'] = "Fail (VDU: " + vduid + " doesn't belong in VNF:" + vnfid + ")"
return Response(response)
response['status'] = 'Success'
response['vdus'] = []
for vdu in vdus:
dt = {}
dt['vdu_id'] = vdu
data = mt.getMetricsResId(vnf[0].host_type,vdu,time_window)
if 'data' in data:
dt['metrics'] = data['data']
else:
dt['metrics'] = []
response['vdus'].append(dt)
return Response(response)
class SntPromVnfMetricDetail(generics.ListAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
metric_name = self.kwargs['metricName']
vnfid = self.kwargs['vnf_id']
mt = ProData('prometheus', 9090)
queryset = monitoring_functions.objects.all()
vnf = queryset.filter(sonata_func_id=vnfid)
response = {}
if vnf.count() == 0:
response['status'] = "Fail (VNF: " + vnfid + " not found)"
return Response(response)
vdus = []
vdus.append(vnf[0].host_id)
response['status'] = 'Success'
response['vdus'] = []
for vdu in vdus:
dt = {}
dt['vdu_id'] = vdu
data = mt.getMetricDetail(vdu, metric_name)
if 'data' in data:
dt['metrics'] = data['data']['result']
else:
dt['metrics'] = []
response['vdus'].append(dt)
return Response(response)
class SntWSreq(generics.CreateAPIView):
serializer_class = SntWSreqSerializer
def post(self, request, *args, **kwargs):
filters = []
psw = socket.gethostbyname('pushgateway')
if 'filters' in request.data.keys():
filters = request.data['filters']
metric = request.data['metric']
url = "http://" + psw + ":8002/new/?metric=" + metric + "¶ms=" + json.dumps(filters).replace(" ", "")
cl = Http()
rsp = cl.GET(url, [])
response = {}
try:
if 'name_space' in rsp:
response['status'] = "SUCCESS"
response['metric'] = request.data['metric']
response['ws_url'] = "ws://" + psw + ":8002/ws/" + str(rsp['name_space'])
else:
response['status'] = "FAIL"
response['ws_url'] = None
except KeyError:
response = request.data
pass
return Response(response)
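# An illustrative request body (a sketch, not from the original source) for
# SntWSreq.post() above: 'metric' is required, 'filters' is optional and its
# exact shape is an assumption here.
EXAMPLE_WS_REQUEST = {
    'metric': 'cpu_util',
    'filters': [{'labeltag': 'resource_id', 'labelid': 'vdu-0001'}],
}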
class SntWSreqPerPOP(generics.CreateAPIView):
serializer_class = SntWSreqSerializer
def post(self, request, *args, **kwargs):
filters = []
if 'filters' in request.data.keys():
filters = request.data['filters']
metric = request.data['metric']
pop_id = self.kwargs['popID']
prom_url = getPromIP(pop_id)
if prom_url['status'] == 'failed':
return Response({'status': prom_url['msg']}, status=status.HTTP_404_NOT_FOUND)
ip = socket.gethostbyname(prom_url['addr'])
url = "http://" + ip + ":8002/new/?metric=" + metric + "¶ms=" + json.dumps(filters).replace(" ", "")
cl = Http()
rsp = cl.GET(url, [])
response = {}
try:
if 'name_space' in rsp:
response['status'] = "SUCCESS"
response['metric'] = request.data['metric']
response['ws_url'] = "ws://" + ip + ":8002/ws/" + str(rsp['name_space'])
else:
response['status'] = "FAIL"
response['ws_url'] = None
except KeyError:
response = request.data
pass
return Response(response)
class SntRuleconf(generics.CreateAPIView):
serializer_class = SntRulesConfSerializer
def post(self, request, *args, **kwargs):
srvid = self.kwargs['srvID']
if 'rules' in request.data.keys():
rules = request.data['rules']
else:
return Response({'error': 'Undefined rules'}, status=status.HTTP_400_BAD_REQUEST)
# Check if service exists
srv = monitoring_services.objects.all().filter(sonata_srv_id=srvid)
if srv.count() == 0:
if srvid != 'generic':
return Response({'error': 'Requested Service not found'}, status=status.HTTP_404_NOT_FOUND)
else:
srvid = 'alerts'
# Delete old rule from DB
rules_db = monitoring_rules.objects.all().filter(service__sonata_srv_id=srvid, consumer='DEV')
rules_db.delete()
# Create prometheus configuration file
rls = {}
rls['service'] = srvid
rls['vnf'] = "To be found..."
rls['rules'] = []
rules_status = len(rules)
for r in rules:
nt = monitoring_notif_types.objects.all().filter(id=r['notification_type'])
if nt.count() == 0:
return Response({'error': 'Alert notification type is not supported. Action Aborted'},
status=status.HTTP_400_BAD_REQUEST)
else:
if srvid != "alerts":
rule = monitoring_rules(service=srv[0], summary=r['summary'], notification_type=nt[0],
name=r['name'], condition=r['condition'], duration=r['duration'],
description=r['description'])
rule.save()
rl = {}
rl['name'] = r['name']
rl['description'] = r['description']
rl['summary'] = r['summary']
rl['duration'] = r['duration']
rl['notification_type'] = r['notification_type']
rl['condition'] = r['condition']
rl['labels'] = ["serviceID=\"" + rls['service'] + "\", tp=\"DEV\""]
rls['rules'].append(rl)
if len(rules) > 0:
cl = Http()
rsp = cl.POST('http://' + prometheus + ':9089/prometheus/rules', [], json.dumps(rls))
if rsp == 200:
LOG.info('Rules updated')
return Response({'status': "success", "rules": rules_status})
else:
LOG.info('Rules update failed')
return Response({'error': 'Rule update failed ' + str(rsp)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
LOG.info('No rules defined')
return Response({'error': 'No rules defined'})
class SntPromMetricData(generics.CreateAPIView):
serializer_class = SntPromMetricSerializer
'''
{
"name": "up",
"start": "2016-02-28T20:10:30.786Z",
"end": "2016-03-03T20:11:00.781Z",
"step": "1h",
"labels": [{"labeltag":"instance", "labelid":"192.168.1.39:9090"},{"labeltag":"group", "labelid":"development"}]
}
'''
def post(self, request, *args, **kwargs):
mt = ProData('prometheus', 9090)
data = mt.getTimeRangeData(request.data)
response = {}
try:
response['metrics'] = data['data']
except KeyError:
response = data
return Response(response)
class SntPromMetricDataPerVnf(generics.CreateAPIView):
serializer_class = SntPromMetricSerializer
'''
{
"name": "up",
"start": "2016-02-28T20:10:30.786Z",
"end": "2016-03-03T20:11:00.781Z",
"step": "1h",
"labels": [{"labeltag":"instance", "labelid":"192.168.1.39:9090"},{"labeltag":"group", "labelid":"development"}]
}
'''
def post(self, request, *args, **kwargs):
mt = ProData('prometheus', 9090)
vnfid = self.kwargs['vnf_id']
request.data["name"] = "cpu_util"
queryset = monitoring_functions.objects.all()
vnf = queryset.filter(sonata_func_id=vnfid)
request.data["labels"] = [{"labeltag": "resource_id", "labelid": vnf[0].host_id}]
data = mt.getTimeRangeDataVnf(request.data)
response = {}
try:
response['metrics'] = data['data']
except KeyError:
response = data
return Response(response)
class SntPromMetricDetail(generics.ListAPIView):
serializer_class = promMetricsListSerializer
def get(self, request, *args, **kwargs):
metric_name = self.kwargs['metricName']
mt = ProData('prometheus', 9090)
data = mt.getMetricFullDetail(metric_name)
response = {}
response['metrics'] = data['data']
#print (response)
return Response(response)
class SntPromSrvConf(generics.ListAPIView):
# start from here
def get(self, request, *args, **kwargs):
url = 'http://prometheus:9089/prometheus/configuration'
cl = Http()
rsp = cl.GET(url, [])
return Response({'config': rsp}, status=status.HTTP_200_OK)
class SntPromSrvTargets(generics.ListCreateAPIView):
serializer_class = SntPromTargetsSerialazer
url = 'http://prometheus:9089/prometheus/configuration'
def get(self, request, *args, **kwargs):
cl = Http()
rsp = cl.GET(self.url, [])
if 'scrape_configs' in rsp:
rsp = rsp['scrape_configs']
else:
rsp = []
return Response({'targets': rsp}, status=status.HTTP_200_OK)
def post(self, request, *args, **kwargs):
data = request.data
if not 'targets' in data:
return Response({"error":"data malformed"}, status=status.HTTP_400_BAD_REQUEST)
if type(data['targets']) is list:
for trg in data['targets']:
if not 'static_configs' in trg or not 'job_name' in trg:
LOG.info("Data malformed")
return Response({"error": "data malformed"}, status=status.HTTP_400_BAD_REQUEST)
if type(trg['static_configs']) is list:
for url in trg['static_configs']:
if not 'targets' in url:
LOG.info("Data malformed")
return Response({"error": "data malformed"}, status=status.HTTP_400_BAD_REQUEST)
else:
LOG.info("Data malformed")
return Response({"error": "data malformed"}, status=status.HTTP_400_BAD_REQUEST)
else:
LOG.info("Data malformed")
return Response({"error": "data malformed"}, status=status.HTTP_400_BAD_REQUEST)
# Get the current configuration from Prometheus
cl = Http()
rsp = cl.GET(self.url, [])
#Update configuration
if 'scrape_configs' in rsp:
rsp['scrape_configs'] = data['targets']
#Save the new configuration
rsp = cl.POST(url_=self.url, headers_=[], data_=json.dumps(rsp))
LOG.info("Prometheus targets updated")
return Response({"prometheus_conf":rsp}, status=status.HTTP_200_OK)
class SntActMRList(generics.ListAPIView):
serializer_class = SntActMonResSerializer
def get_queryset(self):
queryset = active_monitoring_res.objects.all()
service_id_ = self.kwargs['service_id']
return queryset.filter(service_id=service_id_)
def delete(self, request, *args, **kwargs):
self.lookup_field = 'service_id'
queryset = active_monitoring_res.objects.all()
srvid = self.kwargs['srv_id']
queryset = queryset.filter(service_id=srvid)
if queryset.count() > 0:
queryset.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
LOG.info("Monitoring Data not found")
return Response({'status': "Results not found"}, status=status.HTTP_404_NOT_FOUND)
class SntActMRDetail(generics.ListAPIView):
serializer_class = SntActMonResDetailSerializer
def get_queryset(self):
queryset = active_monitoring_res.objects.all()
service_id_ = self.kwargs['srv_id']
test_id_ = self.kwargs['test_id']
return queryset.filter(service_id=service_id_, test_id=test_id_)
class SntActMRPost(generics.CreateAPIView):
serializer_class = SntActMonResDetailSerializer
def post(self, request, *args, **kwargs):
queryset = active_monitoring_res.objects.all()
data_ = request.data
if 'service_id' in self.kwargs:
service_id_ = self.kwargs['service_id']
elif 'ServiceID' in data_:
service_id_ = data_['ServiceID']
else:
LOG.info('service_id tag is missing')
return Response({'error': 'service_id missing...'}, status=status.HTTP_400_BAD_REQUEST)
if 'test_id' in self.kwargs:
test_id_ = self.kwargs['test_id']
elif 'TestID' in data_:
test_id_ = data_['TestID']
else:
LOG.info('test_id tag is missing')
return Response({'error': 'test_id missing...'}, status=status.HTTP_400_BAD_REQUEST)
tmstp_ = data_['Timestamp']
cnfg_ = data_['TestConfig']
tm = datetime.datetime.utcfromtimestamp(float(tmstp_)).strftime('%Y-%m-%d %H:%M:%S')
try:
data = active_monitoring_res(test_id=test_id_, service_id=service_id_, timestamp=tm, config=cnfg_,
data=data_)
data.save()
except IntegrityError as e:
return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
LOG.info('Monitoring data-set added')
return Response({'service_id': service_id_, 'test_id': test_id_, 'timestamp': tm, 'configuration': cnfg_},
status=status.HTTP_200_OK)
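# An illustrative request body (a sketch, not from the original source) for
# SntActMRPost.post() above when the service and test IDs are not passed in the
# URL; the 'TestConfig' shape is an assumption.
EXAMPLE_ACTIVE_MONITORING_RESULT = {
    'ServiceID': 'srv-0001',
    'TestID': 'test-0001',
    'Timestamp': '1600000000.0',     # POSIX seconds; converted to UTC by the handler
    'TestConfig': {'probe': 'ping'},
}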
class Ping(generics.ListAPIView):
serializer_class = HealthSerializer
def get(self, request, *args, **kwargs):
p = psutil.Process(os.getpid())
uptime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(p.create_time())) + ' UTC'
LOG.info(json.dumps({'alive_since':uptime}))
return Response({'alive_since': uptime}, status=status.HTTP_200_OK)
|
StarcoderdataPython
|
4994792
|
input = """
1 2 2 1 3 4
1 3 2 1 2 4
1 4 0 0
1 5 2 1 6 7
1 6 2 1 5 7
1 7 0 0
1 8 2 1 9 10
1 9 2 1 8 10
1 10 0 0
1 11 2 1 12 13
1 12 2 1 11 13
1 13 0 0
1 14 2 1 15 16
1 15 2 1 14 16
1 16 0 0
1 17 2 1 18 19
1 18 2 1 17 19
1 19 0 0
1 20 2 1 21 22
1 21 2 1 20 22
1 22 0 0
1 23 2 1 24 25
1 24 2 1 23 25
1 25 0 0
1 26 1 0 2
1 27 1 0 17
1 28 2 0 27 20
1 29 2 0 26 5
1 26 2 0 29 5
1 30 2 0 29 14
1 31 2 0 28 23
1 32 2 0 30 8
1 1 1 1 30
1 1 1 1 31
1 1 2 0 31 32
1 33 2 1 26 5
1 33 2 1 32 8
0
27 j
33 p
12 n_d
20 g
26 i
2 a
8 c
32 o
11 d
3 n_a
18 n_f
24 n_h
31 n
30 m
9 n_c
5 b
29 l
17 f
23 h
28 k
15 n_e
14 e
21 n_g
6 n_b
0
B+
0
B-
1
0
1
"""
output = """
{b, n_c, e, f, g, h, i, j, k, l, m, n, a}
"""
|
StarcoderdataPython
|
11211195
|
<filename>many_classes/__init__.py<gh_stars>0
"""
This package contains an example on how to run a Tango Device Server with 2
or more Device Classes.
It includes the said 2 example Device Classes and a module to run them in one
Device Server.
"""
__author__ = "<NAME>"
__all__ = ["device_one", "device_two", "run_server"]
|
StarcoderdataPython
|
4993574
|
# -*- coding: utf-8 -*-
"""Top-level package for lidar."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.5.0'
from .filling import ExtractSinks
from .slicing import DelineateDepressions
from .filtering import MeanFilter, MedianFilter, GaussianFilter
from .gui import gui, GUI
# from .mounts import DelineateMounts
|
StarcoderdataPython
|
6442462
|
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, load_module, reload_module, get_stdout, check_source
from functools import reduce
import os
import textwrap
from random import choice, randint
exercise = 'src.factorials'
function = 'factorials'
def get_correct(test_case: int) -> dict:
k = lambda n: reduce(lambda a,b: a * b, range(1, n + 1))
return {i: k(i) for i in range (1, test_case + 1)}
def output(d: dict):
for key in sorted(d.keys()):
print(str(key) + ": " + str(d[key]))
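# A minimal sketch (not part of the test module) of the factorials(n) function that
# src.factorials is expected to provide: it returns {1: 1!, 2: 2!, ..., n: n!}.
# This illustrates the expected behaviour only; it is not the official model solution.
def _reference_factorials(n: int) -> dict:
    result = {}
    running = 1
    for i in range(1, n + 1):
        running *= i          # running product equals i!
        result[i] = running
    return result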
@points('5.factorials')
class FactorialsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', side_effect=[AssertionError("Asking input from the user was not expected")]):
cls.module = load_module(exercise, 'en')
def test_0_main_program_ok(self):
ok, line = check_source(self.module)
message = """The code for testing the functions should be placed inside
if __name__ == "__main__":
block. The following row should be moved:
"""
self.assertTrue(ok, message+line)
def test_1_function_exists(self):
try:
from src.factorials import factorials
except:
self.assertTrue(False, 'Your code should contain function named as factorials(n: int)')
try:
factorials = load(exercise, function, 'en')
factorials(1)
except:
self.assertTrue(False, 'Make sure, that function can be called as follows\nfactorials(1)')
def test_2_type_of_return_value(self):
factorials = load(exercise, function, 'en')
val = factorials(1)
self.assertTrue(type(val) == dict, f"Function {function} should return value which type is dict.")
def test_3_factorials(self):
test_cases = (1,2,4,3,5,6,8,10)
for test_case in test_cases:
with patch('builtins.input', side_effect=[AssertionError("Asking input from the user was not expected")]):
reload_module(self.module)
output_alussa = get_stdout()
factorials = load(exercise, function, 'en')
value = factorials(test_case)
correct = get_correct(test_case)
self.assertEqual(len(correct), len(value), f"The returned dictionary should contain {len(correct)} items, but it contains {len(value)} items: \n{value}\nwhen calling factorials({test_case})")
self.assertEqual(value, correct, f"The result \n{value}\ndoes not match with the model solution \n{correct}\nwhen calling factorials({test_case})")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1929622
|
<gh_stars>1000+
from typing import Any
from packaging import version
import optuna
from optuna._deprecated import deprecated
from optuna._imports import try_import
with try_import() as _imports:
import fastai
if version.parse(fastai.__version__) >= version.parse("2.0.0"):
raise ImportError(
f"You don't have fastai V1 installed! Fastai version: {fastai.__version__}"
)
from fastai.basic_train import Learner
from fastai.callbacks import TrackerCallback
if not _imports.is_successful():
TrackerCallback = object # NOQA
@deprecated("2.4.0", "4.0.0")
class FastAIV1PruningCallback(TrackerCallback):
"""FastAI callback to prune unpromising trials for fastai.
.. note::
This callback is for fastai<2.0.
See `the example <https://github.com/optuna/optuna-examples/blob/main/
fastai/fastaiv1_simple.py>`__
if you want to add a pruning callback which monitors validation loss of a ``Learner``.
Example:
Register a pruning callback to ``learn.fit`` and ``learn.fit_one_cycle``.
.. code::
learn.fit(n_epochs, callbacks=[FastAIPruningCallback(learn, trial, "valid_loss")])
learn.fit_one_cycle(
n_epochs,
cyc_len,
max_lr,
callbacks=[FastAIPruningCallback(learn, trial, "valid_loss")],
)
Args:
learn:
`fastai.basic_train.Learner <https://docs.fast.ai/basic_train.html#Learner>`_.
trial:
A :class:`~optuna.trial.Trial` corresponding to the current
evaluation of the objective function.
monitor:
An evaluation metric for pruning, e.g. ``valid_loss`` and ``Accuracy``.
Please refer to `fastai.callbacks.TrackerCallback reference
<https://fastai1.fast.ai/callbacks.tracker.html#TrackerCallback>`_ for further
details.
"""
def __init__(self, learn: "Learner", trial: optuna.trial.Trial, monitor: str) -> None:
super().__init__(learn, monitor)
_imports.check()
self._trial = trial
def on_epoch_end(self, epoch: int, **kwargs: Any) -> None:
value = self.get_monitor_value()
if value is None:
return
# This conversion is necessary to avoid problems reported in issues.
# - https://github.com/optuna/optuna/issue/642
# - https://github.com/optuna/optuna/issue/655.
self._trial.report(float(value), step=epoch)
if self._trial.should_prune():
message = "Trial was pruned at epoch {}.".format(epoch)
raise optuna.TrialPruned(message)
|
StarcoderdataPython
|
1643741
|
<gh_stars>0
from uuid import uuid4
def generateUUID():
return str(uuid4())
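# A tiny usage sketch (not part of the original snippet): each call yields a fresh
# random UUID4 rendered as a string.
if __name__ == "__main__":
    print(generateUUID())  # e.g. 'f47ac10b-58cc-4372-a567-0e02b2c3d479' (differs every call)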
|
StarcoderdataPython
|
8096511
|
<reponame>ethereum/asyncio-cancel-token
import asyncio
from typing import ( # noqa: F401
Any,
Awaitable,
List,
Sequence,
TypeVar,
cast,
)
from .exceptions import (
EventLoopMismatch,
OperationCancelled,
)
_R = TypeVar('_R')
class CancelToken:
def __init__(self, name: str, loop: asyncio.AbstractEventLoop = None) -> None:
self.name = name
self._chain: List['CancelToken'] = []
self._triggered = asyncio.Event(loop=loop)
self._loop = loop
@property
def loop(self) -> asyncio.AbstractEventLoop:
"""
Return the `loop` that this token is bound to.
"""
return self._loop
def chain(self, token: 'CancelToken') -> 'CancelToken':
"""
Return a new CancelToken chaining this and the given token.
The new CancelToken's triggered will return True if trigger() has been
called on either of the chained tokens, but calling trigger() on the new token
has no effect on either of the chained tokens.
"""
if self.loop != token._loop:
raise EventLoopMismatch("Chained CancelToken objects must be on the same event loop")
chain_name = ":".join([self.name, token.name])
chain = CancelToken(chain_name, loop=self.loop)
chain._chain.extend([self, token])
return chain
def trigger(self) -> None:
"""
Trigger this cancel token and any child tokens that have been chained with it.
"""
self._triggered.set()
@property
def triggered_token(self) -> 'CancelToken':
"""
Return the token which was triggered.
The returned token may be this token or one that it was chained with.
"""
if self._triggered.is_set():
return self
for token in self._chain:
if token.triggered:
# Use token.triggered_token here to make the lookup recursive as self._chain may
# contain other chains.
return token.triggered_token
return None
@property
def triggered(self) -> bool:
"""
Return `True` or `False` whether this token has been triggered.
"""
if self._triggered.is_set():
return True
return any(token.triggered for token in self._chain)
def raise_if_triggered(self) -> None:
"""
Raise `OperationCancelled` if this token has been triggered.
"""
if self.triggered:
raise OperationCancelled(
"Cancellation requested by {} token".format(self.triggered_token))
async def wait(self) -> None:
"""
Coroutine which returns when this token has been triggered
"""
if self.triggered_token is not None:
return
futures = [asyncio.ensure_future(self._triggered.wait(), loop=self.loop)]
for token in self._chain:
futures.append(asyncio.ensure_future(token.wait(), loop=self.loop))
try:
done, pending = await asyncio.wait(
futures,
return_when=asyncio.FIRST_COMPLETED,
loop=self.loop,
)
except asyncio.CancelledError as err:
# Since we use return_when=asyncio.FIRST_COMPLETED above, we can
# be sure none of our futures will be done here, so we don't need
# to check if any is done before cancelling.
for future in futures:
future.cancel()
for future in futures:
try:
await future
except asyncio.CancelledError:
pass
raise err
else:
for future in pending:
future.cancel()
for future in pending:
try:
await future
except asyncio.CancelledError:
pass
async def cancellable_wait(self, *awaitables: Awaitable[_R], timeout: float = None) -> _R:
"""
Wait for the first awaitable to complete, unless we timeout or the
token is triggered.
Returns the result of the first awaitable to complete.
Raises TimeoutError if we timeout or
`~cancel_token.exceptions.OperationCancelled` if the cancel token is
triggered.
All pending futures are cancelled before returning.
"""
futures = [asyncio.ensure_future(a, loop=self.loop) for a in awaitables + (self.wait(),)]
try:
done, pending = await asyncio.wait(
futures,
timeout=timeout,
return_when=asyncio.FIRST_COMPLETED,
loop=self.loop,
)
except asyncio.CancelledError as err:
# Since we use return_when=asyncio.FIRST_COMPLETED above, we can
# be sure none of our futures will be done here, so we don't need
# to check if any is done before cancelling.
for future in futures:
future.cancel()
for future in futures:
try:
await future
except asyncio.CancelledError:
pass
raise err
else:
for future in pending:
future.cancel()
for future in pending:
try:
await future
except asyncio.CancelledError:
pass
if not done:
raise asyncio.TimeoutError()
if self.triggered_token is not None:
# We've been asked to cancel so we don't care about our future, but we must
# consume its exception or else asyncio will emit warnings.
for task in done:
task.result()
raise OperationCancelled(
"Cancellation requested by {} token".format(self.triggered_token)
)
return done.pop().result()
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return '<CancelToken: {0}>'.format(self.name)
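# A brief usage sketch (not part of the original module): chain two tokens, trigger
# the parent, and observe cancellable_wait raising OperationCancelled. Run it with
# e.g. asyncio.get_event_loop().run_until_complete(_demo_cancel_token()).
async def _demo_cancel_token() -> None:
    loop = asyncio.get_event_loop()
    parent = CancelToken('parent', loop=loop)
    chained = parent.chain(CancelToken('worker', loop=loop))
    loop.call_later(0.01, parent.trigger)          # triggering the parent trips the chain
    try:
        await chained.cancellable_wait(asyncio.sleep(10), timeout=1.0)
    except OperationCancelled:
        print('cancelled by', chained.triggered_token)   # -> the parent token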
|
StarcoderdataPython
|
12805764
|
New Algorithms for Simulating Dynamical Friction
<NAME>, <NAME>, <NAME> — RadiaSoft, LLC
This notebook describes—and documents in code—algorithms for simulating
the dynamical friction experienced by ions in the presence of magnetized electrons.
The $\LaTeX$ preamble is here.
$$
%% math text
\newcommand{\hmhsp}{\mspace{1mu}}% math hair space
\newcommand{\mhsp}{\mspace{2mu}}% math hair space
\newcommand{\ud}{\mathop{}\!\mathrm{d}}% upright d for differential
\newcommand{\ui}{\mathrm{i}}% upright i for imaginary unit
\newcommand{\ue}{\mathrm{e}}% upright e for Euler number
%%
\newcommand{\Mion}{m_\text{ion}}
\newcommand{\Me}{m_\text{e}}
%%
\newcommand{\vQion}{\vec{q}_\text{ion}}
\newcommand{\vPion}{\vec{p}_\text{ion}}
\newcommand{\Qion}[1]{#1_\text{ion}}
\newcommand{\Pion}[1]{p_{\text{ion},\hmhsp#1}}
%%
\newcommand{\vQe}{\vec{q}_\text{e}}
\newcommand{\vPe}{\vec{p}_\text{e}}
\newcommand{\Qe}[1]{#1_\text{e}}
\newcommand{\Pe}[1]{p_{\text{e},\hmhsp#1}}
%%
\newcommand{\Map}[2][]{\mathcal{#2}^{#1}}
%%
\newcommand{\pgc}{p_\text{gc}}
\newcommand{\xgc}{x_\text{gc}}
\newcommand{\ygc}{y_\text{gc}}
$$
In [3]:
""" Python preamble """
%matplotlib inline
In [4]:
print mp
NameErrorTraceback (most recent call last)
<ipython-input-4-f80345107578> in <module>()
----> 1 print mp
NameError: name 'mp' is not defined
In [5]:
""" Python preamble (cont.) """
from __future__ import division
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
import matplotlib as mpl
from scipy.constants import pi
from scipy.constants import speed_of_light as clight
from scipy.constants import epsilon_0 as eps0
from scipy.constants import mu_0 as mu0
from scipy.constants import elementary_charge as qe
from scipy.constants import electron_mass as me
from scipy.constants import proton_mass as mp
from scipy.constants import Boltzmann as kB
fourPiEps0 = 4 * pi * eps0
invFourPiEps0 = 1 / fourPiEps0
""" reset some default options """
np.set_printoptions(linewidth=96)
""" indexing """
(Ix, Ipx, Iy, Ipy, Iz, Ipz) = range(6)
""" prefixes """
(femto, pico, nano, micro, milli, one, kilo, mega, giga, tera, peta) = \
10. ** np.asarray(range(-15, 15+1, 3))
We define the ion charge and mass here as global parameters.
We do the same for the magnetic field strength $B$ and the
thermal velocity $v_\text{th}$.
Then we compute various related derived quantities.
In [6]:
"""
angular frequency of Larmor rotations
NB: This is a *signed* quantity, which means
that for electrons, say, you must set Z = -1.
"""
def omega_Larmor(mass, B, Z = 1):
return Z * qe * B / mass
Z_ion = 1
M_ion = mp
B_mag = 1. # Tesla
e_temp = 300. # Kelvin
N_gyro = 100 # a somewhat arbitrary choice, range [100, 160]
""" derived quantities """
V_th = math.sqrt(2 * kB * e_temp / me)
rho_gc = me * V_th / (qe * B_mag)
Omega_e = omega_Larmor(me, B_mag, Z = -1)
T_e = (2 * pi) / abs(Omega_e)
T_intxn = N_gyro * T_e
print "V_th = ", V_th
print "rho_gc / µm = ", rho_gc / micro
print "Omega_e / s^(-1) = ", Omega_e
print "frequency / GHz = ", Omega_e / (2 * pi) / giga
print "T_e / ns = ", T_e / nano
print "T_intxn / ns = ", T_intxn / nano
V_th = 95361.4171888
rho_gc / µm = 0.542189740332
Omega_e / s^(-1) = -1.7588200236e+11
frequency / GHz = -27.9924900765
T_e / ns = 0.0357238672682
T_intxn / ns = 3.57238672682
Two-body Magnetized Collisions
The Hamiltonian for a two-body interaction between an ion and a magnetized electron is
$$
H(\vQion, \vPion, \vQe, \vPe) = H_0(\vPion, \Qe{y}, \vPe) + H_\text{C}(\vQion, \vQe)
$$
where
$$
\begin{align}
H_0(\vPion, \Qe{y}, \vPe) &= \frac{1}{2\Mion}\bigl(\Pion{x}^2 + \Pion{y}^2 + \Pion{z}^2\bigr) + \frac{1}{2\Me}\bigl((\Pe{x} + e B \Qe{y})^2 + \Pe{y}^2 + \Pe{z}^2\bigr),\\[1ex]
H_\text{C}(\vQion, \vQe) &= -\frac{Ze^2}{4\pi\varepsilon_0} \big/ {\sqrt{(\Qion{x}-\Qe{x})^2 + (\Qion{y}-\Qe{y})^2 + (\Qion{z}-\Qe{z})^2}},
\end{align}
$$
and $e$ denotes the elementary quantum of charge.
The simplest second-order scheme for integrating this system uses
a split-operator approach: We approximate the total map $\Map{M}$ for a
time step of size $h$ by the symmetric form $$ \vphantom{\Big]} \Map{M}(h) \approx \Map{M}_0(h/2) \Map{M}_C(h) \Map{M}_0(h/2) $$ where $\Map{M}_0$ and $\Map{M}_C$ are the exact maps for the Hamiltonians $H_0$
and $H_C$ respectively. The map $\Map{M}_0$ is a simple linear map. The map
$\Map{M}_C$ generates a nonlinear kick of both ion and electron momenta.
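As a schematic illustration (added here as a sketch, not one of the notebook's original cells), a single step of this splitting acts on the ion and electron phase-space arrays as
    z_i, z_e = MapZ_0(h/2, z_i, z_e)   # half step of the free / solenoidal motion
    z_i, z_e = MapZ_C(h, z_i, z_e)     # full step of the Coulomb kick
    z_i, z_e = MapZ_0(h/2, z_i, z_e)   # second half step
which is the pattern implemented below in apply_MapZ.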
Hamiltonians for Two-body Magnetized Collisions
In [5]:
"""
Hamiltonian for free ion and electron in a magnetic field, under
the assumption that the ion is unaffected by that magnetic field.
Arguments:
z_i (ndArray): 6 x N array of canonical coördinates
and conjugate momenta for the ions
z_e (ndArray): 6 x N array of canonical coördinates
and conjugate momenta for the electrons
In both of the above arrays, the six phase-space variables
are given in the order (x, px, y, py, z, pz)
Return:
the total 'free' energy of each ion-electron pair
"""
def H_twobody_0(z_i, z_e):
ham_i = ((z_i[Ipx,:] ** 2 + z_i[Ipy,:] ** 2 + z_i[Ipz,:] ** 2)
/ (2 * M_ion))
ham_e = ((z_e[Ipx,:] + (-qe) * B_mag * z_e[Iy,:]) ** 2
+ z_e[Ipy,:] ** 2 + z_e[Ipz,:] ** 2) / (2 * me)
return ham_i + ham_e
"""
Hamiltonian for the interaction of each ion-electron pair.
"""
def H_twobody_C(z_i, z_e):
g_ie = -(Z_ion * qe ** 2) / (4 * pi * eps0)
intxn = g_ie / np.sqrt(
+ (z_i[Ix,:] - z_e[Ix,:]) ** 2
+ (z_i[Iy,:] - z_e[Iy,:]) ** 2
+ (z_i[Iz,:] - z_e[Iz,:]) ** 2)
return intxn
"""
Total Hamiltonian for each ion-electron pair.
"""
def H_twobody(z_i, z_e):
ham_0 = H_twobody_0(z_i, z_e)
ham_C = H_twobody_C(z_i, z_e)
return ham_0 + ham_C
Maps for Two-body Magnetized Collisions
In [6]:
"""
define transfer maps for ions and electrons
There are three maps to define here: one each
for ions and electrons under H_0, and another
for the interaction under H_c
"""
""" matrix for a linear drift """
def MatD(mass, h):
Mdrift = np.identity(6)
for i in (Ix, Iy, Iz):
Mdrift[i, i + 1] = h / mass
return Mdrift
""" matrix for linear electron dynamics in a solenoidal field """
def MatK0_e(h):
mw = me * Omega_e
wh = Omega_e * h
cwh = math.cos(wh)
swh = math.sin(wh)
cwh1m = 2 * math.sin(wh / 2) ** 2 # 1 - cos(a) = 2 sin^2(a / 2)
MK0 = np.identity(6)
MK0[Iy, Iy ] = cwh
MK0[Ipy, Ipy] = cwh
MK0[Iy, Ipy] = swh / mw
MK0[Ipy, Iy ] = -mw * swh
MK0[Iz, Ipz] = h / me
MK0[Ix, Ipx] = swh / mw
MK0[Ix, Iy ] = swh
MK0[Ix, Ipy] = cwh1m / mw
MK0[Iy, Ipx] = -cwh1m / mw
MK0[Ipy, Ipx] = -swh
return MK0
"""
map phase-space coördinates forward in time by amount h
based on the Hamiltonian H_0, which describes the free
motion of ions and the motion of electrons in a solenoidal
magnetic field
"""
def MapZ_0(h, z_i, z_e):
mat = MatD(M_ion, h)
zf_i = mat.dot(z_i)
mat = MatK0_e(h)
zf_e = mat.dot(z_e)
return zf_i, zf_e
"""
map phase-space coördinates forward in time by amount h
based on the Hamiltonian H_C, which describes the collision
between a single ion-electron pair
"""
def MapZ_C(h, z_i, z_e):
g = h * Z_ion * qe ** 2 / (4 * pi * eps0)
dz = z_i - z_e
denom = (dz[Ix,:] ** 2 + dz[Iy,:] ** 2 + dz[Iz,:] ** 2) ** (3/2)
zf_i = z_i.copy()
zf_e = z_e.copy()
for ip in (Ipx, Ipy, Ipz):
zf_i[ip,:] = z_i[ip,:] - g * dz[ip - 1] / denom
zf_e[ip,:] = z_e[ip,:] + g * dz[ip - 1] / denom
return zf_i, zf_e
def apply_MapZ_0(h, n, z_i, z_e):
mat_i = MatD(M_ion, h)
mat_e = MatK0_e(h)
zf_i = [z_i]
zf_e = [z_e]
for i in range(n):
z_i = mat_i.dot(z_i)
z_e = mat_e.dot(z_e)
zf_i.append(z_i)
zf_e.append(z_e)
return np.asarray(zf_i), np.asarray(zf_e)
""" second-order split-operator integration for the total Hamiltonian """
def apply_MapZ(h, n, z_i, z_e):
hh = 0.5 * h
mat_i = MatD(M_ion, hh)
mat_e = MatK0_e(hh)
zf_i = [z_i]
zf_e = [z_e]
for i in range(n):
z_i = mat_i.dot(z_i)
z_e = mat_e.dot(z_e)
z_i, z_e = MapZ_C(h, z_i, z_e)
z_e = mat_e.dot(z_e)
z_i = mat_i.dot(z_i)
zf_i.append(z_i)
zf_e.append(z_e)
return np.asarray(zf_i), np.asarray(zf_e)
Guiding-center Coördinates and $\Theta$-J Coördinates
Transformations To and From Guiding-center Coördinates and $\Theta$-J Coördinates
We transform the electron's transverse phase-space coördinates
using the type-1 generating function $$ F_1(x,y;\, \phi,\ygc) = m\Omega\Bigl[\frac{1}{2}(y - \ygc)^2\cot\phi - y \ygc\Bigr]. $$ This yields the following transformation rules:
to guiding-center coördinates
$$
\begin{align}
m\Omega &= qB_0, \quad\text{(this is a signed quantity)}\\[1ex]
\phi &= \arctan\Bigl(\frac{p_x + e B y}{p_y}\Bigr),\\[1ex]
p_\phi &= \frac{1}{2m\Omega}\bigl[(p_x + m\Omega y)^2 + p_y^2\bigr],\\[1ex]
\ygc &= -\frac{p_x}{m\Omega},\\[1ex]
\pgc &= p_y + m\Omega x.
\end{align}
$$
from guiding-center coördinates
$$
\begin{align}
r_L &= \frac{1}{m\Omega}\sqrt{2m\Omega\,p_\phi}, \quad\text{(this is a signed quantity)}\\[1ex]
x &= \frac{\pgc}{m\Omega} - r_L\cos\phi,\\[1ex]
p_x &= -m\Omega\,\ygc,\\[1ex]
y &= \ygc + r_L\sin\phi,\\[1ex]
p_y &= m\Omega\,r_L\cos\phi.
\end{align}
$$
We also require the transformation to and from the coördinates $\Theta$-J:
$$
\begin{align}
\Theta &= \dotsb, \\
J &= p_\phi + \frac{Ze^2}{4\pi\varepsilon_0} \frac{r_L}{\Omega}
\frac{(\Qion{x}-\xgc)\cos\phi - (\Qion{y}-\ygc)\sin\phi}{\bigl[(\Qion{x}-\Qe{x})^2 + (\Qion{y}-\Qe{y})^2 + (\Qion{z}-\Qe{z})^2 + r_L^2\bigr]^{3/2}}.
\end{align}
$$
$$ \begin{align} \phi &= \dotsb, \\ p_\phi &= \dotsb. \end{align} $$
In [7]:
""" convert to guiding-center coordinates """
def toGuidingCenter(z_e):
mOmega = me * Omega_e
zgc = z_e.copy()
zgc[Ix,:] = np.arctan2(z_e[Ipx,:] + mOmega * z_e[Iy,:], z_e[Ipy,:])
zgc[Ipx,:] = (((z_e[Ipx,:] + mOmega * z_e[Iy,:]) ** 2
+ z_e[Ipy,:] ** 2) / (2 * mOmega))
zgc[Iy,:] = - z_e[Ipx,:] / mOmega
zgc[Ipy,:] = z_e[Ipy,:] + mOmega * z_e[Ix,:]
return zgc
""" convert from guiding-center coordinates """
def fromGuidingCenter(zgc):
mOmega = me * Omega_e
rhoL = np.sqrt(2 * mOmega * zgc[Ipx,:]) / mOmega
z_e = zgc.copy()
z_e[Ix,:] = zgc[Ipy,:] / mOmega - rhoL * np.cos(zgc[Ix,:])
z_e[Ipx,:] = - mOmega * zgc[Iy,:]
z_e[Iy,:] = zgc[Iy,:] + rhoL * np.sin(zgc[Ix,:])
z_e[Ipy,:] = mOmega * rhoL * np.cos(zgc[Ix,:])
return z_e
""" return J(z_gc) coordinates using the (probably correct) minus sign """
def actionJ(z_i, zgc):
g = Z_ion * qe ** 2 / (4 * pi * eps0)
mOmega = me * Omega_e
rhoL = np.sqrt(2 * zgc[Ipx,:] / mOmega)
num = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) * np.cos(zgc[Ix,:])
- (z_i[Iy,:] - zgc[Iy,:]) * np.sin(zgc[Ix,:]))
den = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2
+ (z_i[Iy,:] - zgc[Iy,:]) ** 2
+ (z_i[Iz,:] - zgc[Iz,:]) ** 2 + rhoL ** 2) ** (3/2)
return zgc[Ipx,:] + g * (rhoL / Omega_e) * num / den
In [8]:
""" return the Larmor radius """
def rLarmor(z_e):
mOmega = me * Omega_e
return np.sqrt((z_e[Ipx,:] + mOmega * z_e[Iy,:]) ** 2 + z_e[Ipy,:] ** 2) / mOmega
""" return the Larmor radius """
def rLarmor_gc(zgc):
mOmega = me * Omega_e
return np.sqrt(2 * mOmega * zgc[Ipx,:]) / mOmega
""" return the perturbation ratio (uses the minus sign) """
def pertubationRatio(z_i, zgc):
mOmega = me * Omega_e
rhoL = np.sqrt(2 * mOmega * zgc[Ipx,:]) / mOmega
num = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) * np.cos(zgc[Ix,:])
- (z_i[Iy,:] - zgc[Iy,:]) * np.sin(zgc[Ix,:]))
den = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2
+ (z_i[Iy,:] - zgc[Iy,:]) ** 2
+ (z_i[Iz,:] - zgc[Iz,:]) ** 2 + rhoL ** 2)
return 2 * rhoL * num / den
""" return the ratio (impact parameter) / (Larmor radius) """
def impactParamOverRL(z_i, zgc):
mOmega = me * Omega_e
rhoL = np.sqrt(2 * mOmega * zgc[Ipx,:]) / mOmega
b = np.sqrt((z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2
+ (z_i[Iy,:] - zgc[Iy,:]) ** 2)
return b / rhoL
Hamiltonians using Guiding-center Coördinates
In [9]:
"""
Hamiltonian for free ion and electron in a magnetic field, under
the assumption that the ion is unaffected by that magnetic field.
"""
def H_gc_0(z_i, zgc):
ham_i = ((z_i[Ipx,:] ** 2 + z_i[Ipy,:] ** 2 + z_i[Ipz,:] ** 2)
/ (2 * M_ion))
ham_e = Omega_e * actionJ(z_i, zgc) + zgc[Ipz,:] ** 2 / (2 * me)
return ham_i + ham_e
"""
Hamiltonian for the ion-electron interaction
in guiding-center (Θ,J) coördinates.
"""
def H_gc_C(z_i, zgc):
g_ie = (Z_ion * qe ** 2) / (4 * pi * eps0)
mOmega = me * Omega_e
intxn = -g_ie / np.sqrt(
+ (z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2
+ (z_i[Iy,:] - zgc[Iy,:]) ** 2
+ (z_i[Iz,:] - zgc[Iz,:]) ** 2
+ 2 * actionJ(z_i, zgc) / mOmega)
return intxn
"""
total Hamiltonian for the ion-electron system in GC coördinates
"""
def H_gc(z_i, zgc):
ham_0 = H_gc_0(z_i, zgc)
ham_C = H_gc_C(z_i, zgc)
return ham_0 + ham_C
"""
Hamiltonian for the ion-electron interaction
in guiding-center (Θ,J) coördinates.
"""
def H_gc_Cp(z_i, zgc):
g_ie = (Z_ion * qe ** 2) / (4 * pi * eps0)
mOmega = me * Omega_e
intxn = -g_ie / np.sqrt(
+ (z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2
+ (z_i[Iy,:] - zgc[Iy,:]) ** 2
+ (z_i[Iz,:] - zgc[Iz,:]) ** 2
+ 2 * actionJp(z_i, zgc) / mOmega)
return intxn
"""
total Hamiltonian for the ion-electron system in GC coördinates
"""
def H_gc_m(z_i, zgc):
ham_0 = H_gc_0(z_i, zgc)
ham_C = H_gc_Cm(z_i, zgc)
return ham_0 + ham_C
Maps using Guiding-center Coördinates
In [10]:
"""
define transfer maps for ions and electrons
There are three maps to define here: one each
for ions and electrons under H_0, and another
for the interaction under H_c
"""
"""
Map phase-space coördinates forward in time by amount h.
This map is based on the Hamiltonian H_gc_0, which describes
the free motion of ions and the averaged motion of electrons
in a solenoidal magnetic field.
NB: We do not update the \Theta coördinate, as it does not
contribute to the dynamics of any other variables.
"""
def MapZgc_0(h, z_i, zgc):
matD = MatD(M_ion, h)
zf_i = matD.dot(z_i)
zf_e = zgc.copy()
zf_e[Iz,:] += (h / me) * zgc[Ipz,:]
return zf_i, zf_e
"""
Map phase-space coördinates forward in time by amount h.
This map is based on the Hamiltonian H_gc_C, which describes
the collision between a single ion-electron pair in guiding-center
coördinates.
NB: We do not update the \Theta coördinate, as it does not
contribute to the dynamics of any other variables.
"""
def MapZgc_C(h, z_i, zgc):
g_ie = Z_ion * qe ** 2 / (4 * pi * eps0)
mOmega = me * Omega_e
dr3 = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) ** 2
+ (z_i[Iy,:] - zgc[Iy,:]) ** 2
+ (z_i[Iz,:] - zgc[Iz,:]) ** 2
+ (2 / mOmega) * actionJ(z_i, zgc)) ** (3/2)
Omega_gc = (g_ie / mOmega) / dr3
S = np.sin(Omega_gc * h)
C1 = 2 * np.sin(Omega_gc * (h / 2)) ** 2
zf_i = z_i.copy()
zf_e = zgc.copy()
Dxgc = ((z_i[Ix,:] - zgc[Ipy,:] / mOmega) * C1
+ (z_i[Iy,:] - zgc[Iy,:]) * S)
Dygc = ((z_i[Iy,:] - zgc[Iy,:]) * C1
- (z_i[Ix,:]- zgc[Ipy,:] / mOmega) * S)
Dpz = (Omega_gc * h) * mOmega * (z_i[Iz,:] - zgc[Iz,:])
zf_i[Ipx,:] += mOmega * Dygc
zf_i[Ipy,:] -= mOmega * Dxgc
zf_i[Ipz,:] -= Dpz
zf_e[Iy,:] += Dygc
zf_e[Ipy,:] += mOmega * Dxgc
zf_e[Ipz,:] += Dpz
return zf_i, zf_e
def apply_MapZgc_0(h, n, z_i, zgc):
mat_i = MatD(M_ion, h)
mat_e = np.identity(6)
mat_e[Iz, Ipz] = h / me
zf_i = [z_i]
zf_e = [zgc]
for i in range(n):
z_i = mat_i.dot(z_i)
zgc = mat_e.dot(zgc)
zf_i.append(z_i)
zf_e.append(zgc)
return np.asarray(zf_i), np.asarray(zf_e)
def apply_MapZgc(h, n, z_i, zgc):
hh = 0.5 * h
mat_i = MatD(M_ion, hh)
mat_e = np.identity(6)
mat_e[Iz, Ipz] = hh / me
zf_i = [z_i]
zf_e = [zgc]
for i in range(n):
z_i = mat_i.dot(z_i)
zgc = mat_e.dot(zgc)
z_i, zgc = MapZgc_C(h, z_i, zgc)
zgc = mat_e.dot(zgc)
z_i = mat_i.dot(z_i)
zf_i.append(z_i)
zf_e.append(zgc)
return np.asarray(zf_i), np.asarray(zf_e)
Maps using the Magnus Expansion
In [11]:
"""
compute $\Delta P_\text{ion}$ using the Magnus expansion
"""
def magnus_deltaPIon(h, z_i, zgc):
g_ie = (Z_ion * qe ** 2) / (4 * pi * eps0)
mOmega = me * Omega_e
xgc = zgc[Ipy,:] / mOmega
C1 = ((z_i[Ix,:] - xgc) ** 2
+ (z_i[Iy,:] - zgc[Iy,:]) ** 2
+ (z_i[Iz,:] - zgc[Iz,:]) ** 2
+ (2 / mOmega) * actionJ(z_i, zgc))
C2 = (2 * ((z_i[Ix,:] - xgc) * z_i[Ipx,:] / M_ion
+ (z_i[Iy,:] - zgc[Iy,:]) * z_i[Ipy,:] / M_ion
+ (z_i[Iz,:] - zgc[Iz,:]) * (z_i[Ipz,:] / M_ion - zgc[Ipz,:] / me)))
C3 = ((z_i[Ipx,:] / M_ion) ** 2
+ (z_i[Ipy,:] / M_ion) ** 2
+ (z_i[Ipz,:] / M_ion - zgc[Ipz,:] / me) ** 2)
B = np.sqrt(C1 + (C2 + C3 * h) * h)
Delta = 4 * C1 * C3 - C2 ** 2
D1 = (2 * C3 * h + C2) / B - C2 / np.sqrt(C1)
D2 = (C2 * h + 2 * C1) / B - 2 * np.sqrt(C1)
dPx = - ((2 * g_ie / Delta) *
((z_i[Ix,:] - xgc) * D1 - (z_i[Ipx,:] / M_ion) * D2))
dPy = - ((2 * g_ie / Delta) *
((z_i[Iy,:] - zgc[Iy,:]) * D1 - (z_i[Ipy,:] / M_ion) * D2))
dPz = - ((2 * g_ie / Delta) *
((z_i[Iz,:] - zgc[Iz,:]) * D1 - (z_i[Ipz,:] / M_ion - zgc[Ipz,:] / me) * D2))
return np.asarray(( dPx, dPy, dPz)).T
In [12]:
"""
For the Magnus computation to work well, the interaction must be perturbative.
This function return a value for the minimum impact parameter, above which the
interaction becomes perturbative.
"""
def minImpactParam(magB, EkOverEV, bOverRL):
numer = 2 * EkOverEV * (invFourPiEps0 * Z_ion * me)
denom = ((1 / bOverRL) ** 2 + (tan_alpha / (N_gyro * pi)) ** 2) * magB ** 2
return (numer / denom) ** (1/3)
Conversion from (Q,V) to (Q,P)
In [13]:
"""
define matrix that transforms ion coördinate-velocity data
to canonically conjugate phase-space variables
"""
QVtoZion = np.identity(6)
for i in (Ipx, Ipy, Ipz):
QVtoZion[i, i] = M_ion
"""
define matrix that transforms electron coördinate-velocity data
to canonically conjugate phase-space variables
NB: This depends on the local magnetic vector potential,
hence on the local magnetic field.
"""
QVtoZe = np.identity(6)
for i in (Ipx, Ipy, Ipz):
QVtoZe[i, i] = me
QVtoZe[Ipx, Iy] = (-qe) * (-B_mag)
Explore a range of values for the perturbation parameters
In [14]:
Z_ion = 1
M_ion = mp
e_temp = 300. # Kelvin
B_mag = 1. # Tesla
N_gyro = 100 # enforce adequate averaging
tan_alpha = 5.0 # and an adequate opening angle
""" derived quantities """
Omega_e = omega_Larmor(me, B_mag, Z = -1)
T_e = (2 * pi) / abs(Omega_e)
In [17]:
bOverLarmorR = 20.0 # 20 ==> max perturbation ratio of about 0.1
EkinOverVmax = 10.0 # 10 ==> eV_closest_approach / E_kinetic = 0.1
print minImpactParam(B_mag, EkinOverVmax, bOverLarmorR) / micro
3.90333173895
In [26]:
decades = 2
pts_per_decade = 3
logBs = np.linspace(0., 1. * float(decades), num = 1 + pts_per_decade * decades, endpoint = True)
bvals = np.power(10, logBs)
impactParameterB = micro * bvals
print "b / μm = ", impactParameterB / micro
print "b_min / μm =", minImpactParam(B_mag, EkinOverVmax, bOverLarmorR) / micro
b / μm = [ 1. 2.15443469 4.64158883 10. 21.5443469 46.41588834 100. ]
b_min / μm = 3.90333173895
In [27]:
""" (the following depend on the impact parameter) """
LarmorR = impactParameterB / bOverLarmorR # (this version is defined positive)
V_perp = - LarmorR * Omega_e
L_intxn = tan_alpha * impactParameterB #[-1] * np.ones_like(impactParameterB)
V_z = 2 * L_intxn / (N_gyro * T_e)
T_intxn = 2 * L_intxn / V_z
In [28]:
num_steps_per_gyro = 40
delta_Te = T_e / num_steps_per_gyro
print T_intxn / delta_Te
num_steps = int(np.floor(T_intxn[0] / delta_Te))
print "num_steps =", num_steps
[ 4000. 4000. 4000. 4000. 4000. 4000. 4000.]
num_steps = 4000
In [29]:
""" initial condition for the ion --- just one, for now """
QVion = np.array([
( 0.0 * rho_gc, 0.000 * V_th, 0.0 * rho_gc, 0.000 * V_th, 0.0, 0.000 * V_th)
]).transpose()
Zion = QVtoZion.dot(QVion)
""" initial conditions for the electrons """
QVelec = np.asarray([
np.zeros(impactParameterB.shape),
V_perp,
impactParameterB - LarmorR,
np.zeros(impactParameterB.shape),
- L_intxn,
L_intxn * abs(Omega_e) / (pi * N_gyro)
])
Zelec = QVtoZe.dot(QVelec)
num_elec0 = Zelec.shape[1]
num_ion0 = Zion.shape[1]
"""
=== create arrays of electrons and ions ===
Here we arrange them so that we can pair each ion with each
electron, and compute the \Delta{p} for each interaction.
"""
ZI_elec = np.hstack([Zelec for e in range(num_ion0)])
ZI_ion = Zion[:, np.arange(num_ion0 * num_elec0) // num_elec0]
num_elec = ZI_elec.shape[1]
num_ion = ZI_ion.shape[1]
In [30]:
ZF_i, ZF_e = apply_MapZ(delta_Te, num_steps, ZI_ion, ZI_elec)
In [31]:
navg = 1 # number of gyrotron oscillations over which to average; set to 1, 2, 4, 5, or 10
ZI_elec_gc = toGuidingCenter(ZI_elec)
ZFgc_i, ZFgc_e = apply_MapZgc(navg * 40*delta_Te, num_steps//40 // navg, ZI_ion, ZI_elec_gc)
In [32]:
deltaP_exp = np.array(ZF_i[-1, [Ipx, Ipy, Ipz], :] - ZF_i[0, [Ipx, Ipy, Ipz], :]).T
deltaP_avg = np.array(ZFgc_i[-1, [Ipx, Ipy, Ipz], :] - ZFgc_i[0, [Ipx, Ipy, Ipz], :]).T
deltaP_mgn = magnus_deltaPIon(T_intxn, ZI_ion, ZI_elec_gc)
print deltaP_exp
print
print deltaP_avg
print
print deltaP_mgn
[[ 1.08030578e-26 5.72808106e-26 1.45007842e-27]
[ 4.03458322e-28 1.66767979e-26 1.38486416e-27]
[ 2.76032939e-29 6.40599505e-27 9.05956974e-29]
[ 7.85620796e-31 1.58638251e-27 2.60010730e-30]
[ 1.75039675e-32 3.47567664e-28 5.80271684e-32]
[ 3.78408946e-34 7.50099341e-29 1.25468090e-33]
[ 8.15538018e-36 1.61631831e-29 2.70410697e-35]]
[[ 1.05836708e-26 5.67504416e-26 8.51768805e-28]
[ 4.00940540e-28 1.66262914e-26 1.38431451e-27]
[ 2.74641067e-29 6.39066436e-27 9.05285901e-29]
[ 7.81439841e-31 1.58238970e-27 2.59745819e-30]
[ 1.74099270e-32 3.46684860e-28 5.79654896e-32]
[ 3.76373889e-34 7.48192282e-29 1.25334141e-33]
[ 8.11151685e-36 1.61220857e-29 2.70121883e-35]]
[[ -0.00000000e+00 1.61222627e-25 -1.01894869e-40]
[ -0.00000000e+00 3.47345583e-26 -3.78365367e-41]
[ -0.00000000e+00 7.48333795e-27 -0.00000000e+00]
[ -0.00000000e+00 1.61223638e-27 -0.00000000e+00]
[ -0.00000000e+00 3.47345800e-28 -0.00000000e+00]
[ -0.00000000e+00 7.48333842e-29 -4.49591691e-44]
[ -0.00000000e+00 1.61223639e-29 -0.00000000e+00]]
In [33]:
dDeltaP_ax = deltaP_avg - deltaP_exp
dDeltaP_mx = deltaP_mgn - deltaP_exp
relErr_avg = (np.linalg.norm(dDeltaP_ax, axis = 1)
/ np.linalg.norm(deltaP_exp, axis = 1))
relErr_mgn = (np.linalg.norm(dDeltaP_mx, axis = 1)
/ np.linalg.norm(deltaP_exp, axis = 1))
eV_closest_approach = (invFourPiEps0 * Z_ion * qe ** 2 / impactParameterB)
E_kinetic_e = (me / 2) * (V_perp ** 2 + V_z ** 2)
eVcaOverEkin = eV_closest_approach / E_kinetic_e
llres = plt.figure()
plt.loglog(eVcaOverEkin, relErr_avg, 'bo')
plt.loglog(eVcaOverEkin, relErr_mgn, 'rs')
plt.annotate(s="Averging",xy=(1.e-1,1.e-3))
plt.annotate(s="Magnus",xy=(1.e-2,5.e-1))
plt.xlabel("$eV_{\mathrm{ca}} / E_{\mathrm{kin}}$")
plt.ylabel("relative error in $\Delta{P}_{\mathrm{ion}}$")
plt.show()
llres.savefig("/Users/dabell/Desktop/foo.pdf")
In [ ]:
mae_compare.savefig("/Users/dabell/RadiaSoft/MCool/MgnAvgExpCompare.pdf")
|
StarcoderdataPython
|
3556834
|
from .. import eval as ev
from .. import nodes as no
def test_eval():
q = no.Select(
[
no.AllSelectItem(),
],
[
no.Table(
no.QualifiedNameNode.of(['t0'])),
],
no.BinaryExpr(
no.QualifiedNameNode.of(['id']),
no.BinaryOp.EQ,
no.Integer(2)),
)
print(ev.StmtEvaluator().eval(q))
def test_rels():
q = no.Select(
[
no.AllSelectItem()
],
[
no.Join(
no.Table(
no.QualifiedNameNode.of(['t0'])),
no.JoinType.DEFAULT,
no.Table(
no.QualifiedNameNode.of(['t1'])),
)
]
)
print(ev.StmtEvaluator().eval(q))
|
StarcoderdataPython
|
1644207
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing available reivions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.run import global_methods
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.run import commands
from googlecloudsdk.command_lib.run import connection_context
from googlecloudsdk.command_lib.run import flags
from googlecloudsdk.command_lib.run import pretty_print
from googlecloudsdk.command_lib.run import serverless_operations
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
def _SucceededStatus(job):
return '{} / {}'.format(
job.get('status', {}).get('succeeded', 0),
job.get('spec', {}).get('completions', 0))
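# For illustration (not in the original source): a job dict such as
# {'status': {'succeeded': 2}, 'spec': {'completions': 5}} renders as '2 / 5',
# and missing keys fall back to 0, so an empty dict renders as '0 / 0'.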
def _ByStartAndCreationTime(job):
"""Sort key that sorts jobs by start time, newest and unstarted first.
All unstarted jobs will be first and sorted by their creation timestamp, all
started jobs will be second and sorted by their start time.
Args:
job: googlecloudsdk.api_lib.run.job.Job
Returns:
The lastTransitionTime of the Started condition or the creation timestamp of
the job if the job is unstarted.
"""
return (False if job.started_condition and
job.started_condition['status'] is not None else True,
job.started_condition['lastTransitionTime']
if job.started_condition else job.creation_timestamp)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class List(commands.List):
"""List jobs."""
detailed_help = {
'DESCRIPTION': """
{description}
""",
'EXAMPLES': """
To list all jobs in all regions:
$ {command}
""",
}
@classmethod
def CommonArgs(cls, parser):
parser.display_info.AddFormat(
'table('
'{ready_column},'
'name:label=JOB,'
'region:label=REGION,'
'status.active.yesno(no="0"):label=RUNNING,'
'succeeded_status():label=COMPLETE,'
'creation_timestamp.date("%Y-%m-%d %H:%M:%S %Z"):label=CREATED,'
'author:label="CREATED BY")'.format(
ready_column=pretty_print.READY_COLUMN))
parser.display_info.AddUriFunc(cls._GetResourceUri)
parser.display_info.AddTransforms({
'succeeded_status': _SucceededStatus,
})
@classmethod
def Args(cls, parser):
cls.CommonArgs(parser)
def _SortJobs(self, jobs):
return sorted(
commands.SortByName(jobs), key=_ByStartAndCreationTime, reverse=True)
def Run(self, args):
"""List available revisions."""
# Use the mixer for global request if there's no --region flag.
if not args.IsSpecified('region'):
client = global_methods.GetServerlessClientInstance(
api_version='v1alpha1')
self.SetPartialApiEndpoint(client.url)
# Don't consider region property here, we'll default to all regions
return self._SortJobs(global_methods.ListJobs(client))
conn_context = connection_context.GetConnectionContext(
args,
flags.Product.RUN,
self.ReleaseTrack(),
version_override='v1alpha1')
namespace_ref = resources.REGISTRY.Parse(
properties.VALUES.core.project.Get(required=True),
collection='run.namespaces',
api_version='v1alpha1')
with serverless_operations.Connect(conn_context) as client:
self.SetCompleteApiEndpoint(conn_context.endpoint)
return self._SortJobs(client.ListJobs(namespace_ref))
|
StarcoderdataPython
|
6439497
|
<filename>Dynamic Programming/27_correct_word.py
# We are given a function dict(word) that runs in O(1) time and returns whether a word is
# a correct word of the language. The input is a string with no spaces. Find an algorithm
# that decides whether spaces can be inserted into the input string so that every resulting
# word belongs to the given language.
# For example: "alamakotainiemapsa" can be split as "ala ma kota i nie ma psa".
# The algorithm should be fast, but above all it must be correct.
def dict(word):
words = ['ala', 'ma', 'kota', 'i', 'nie', 'ma', 'psa']
if word in words:
return True
return False
def correct_word(string):
DP = [False] * (len(string) + 1)
DP[0] = True
for i in range(len(string) + 1):
for j in range(i, -1, -1):
if DP[i]:
break
actual_word = string[j: i]
if dict(actual_word):
DP[i] = DP[j]
return DP[-1]
string = "alamakotainiemapsa"
print(correct_word(string))
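# An optional extension (a sketch, not part of the original exercise): the same DP,
# but with a back-pointer so the actual split can be reconstructed.
def correct_word_with_split(string):
    n = len(string)
    prev = [None] * (n + 1)   # prev[i] = start index of the last word ending at i
    ok = [False] * (n + 1)
    ok[0] = True
    for i in range(1, n + 1):
        for j in range(i):
            if ok[j] and dict(string[j:i]):
                ok[i] = True
                prev[i] = j
                break
    if not ok[n]:
        return None
    words = []
    i = n
    while i > 0:
        words.append(string[prev[i]:i])
        i = prev[i]
    return " ".join(reversed(words))

# e.g. correct_word_with_split("alamakotainiemapsa") -> "ala ma kota i nie ma psa"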
|
StarcoderdataPython
|
1723258
|
"""Functions to fit MRI SPGR signal to obtain T1.
Created 28 September 2020
@authors: <NAME>
@email: <EMAIL>
@institution: University of Edinburgh, UK
Functions:
fit_vfa_2_point: obtain T1 using analytical formula based on two images
fit_vfa_linear: obtain T1 using linear regression
fit_vfa_nonlinear: obtain T1 using non-linear least squares fit
fit_hifi: obtain T1 by fitting a combination of SPGR and IR-SPGR scans
spgr_signal: get SPGR signal
irspgr_signal: get IR-SPGR signal
"""
import numpy as np
from scipy.optimize import curve_fit, least_squares
from fitting import calculator
class vfa_2points(calculator):
def __init__(self, fa, tr):
self.fa = np.asarray(fa)
self.tr = tr
self.fa_rad = np.pi*self.fa/180
def proc(self, s, k_fa=1):
with np.errstate(divide='ignore', invalid='ignore'):
fa_true = k_fa * self.fa_rad
sr = s[0] / s[1]
t1 = self.tr / np.log(
(sr*np.sin(fa_true[1])*np.cos(fa_true[0]) -
np.sin(fa_true[0])*np.cos(fa_true[1])) /
(sr*np.sin(fa_true[1]) - np.sin(fa_true[0])))
s0 = s[0] * ((1-np.exp(-self.tr/t1)*np.cos(fa_true[0])) /
((1-np.exp(-self.tr/t1))*np.sin(fa_true[0])))
t1 = np.nan if ~np.isreal(t1) | (t1 <= 0) | np.isinf(t1) else t1
s0 = np.nan if (s0 <= 0) | np.isinf(s0) else s0
return {'s0': s0, 't1': t1}
class vfa_linear(calculator):
def __init__(self, fa, tr):
self.fa = np.asarray(fa)
self.tr = tr
self.fa_rad = np.pi*self.fa/180
def proc(self, s, k_fa=1):
fa_true = k_fa * self.fa_rad
y = s / np.sin(fa_true)
x = s / np.tan(fa_true)
A = np.stack([x, np.ones(x.shape)], axis=1)
slope, intercept = np.linalg.lstsq(A, y, rcond=None)[0]
is_valid = (intercept > 0) and (0. < slope < 1.)
t1, s0 = (-self.tr/np.log(slope),
intercept/(1-slope)) if is_valid else (np.nan, np.nan)
return {'s0': s0, 't1': t1}
class vfa_nonlinear(calculator):
def __init__(self, fa, tr):
self.fa = np.asarray(fa)
self.tr = tr
self.fa_rad = np.pi*self.fa/180
self.linear_fitter = vfa_linear(fa, tr)
def proc(self, s, k_fa=1):
# use linear fit to obtain initial guess
result_linear = self.linear_fitter.proc(s, k_fa=k_fa)
x_linear = np.array((result_linear['s0'], result_linear['t1']))
if (~np.isnan(x_linear[0]) & ~np.isnan(x_linear[1])):
x0 = x_linear
else:
x0 = np.array([s[0] / spgr_signal(1., 1., self.tr, k_fa*self.fa[0]), 1.])
result = least_squares(self.__residuals, x0, args=(s, k_fa), bounds=((1e-8,1e-8),(np.inf,np.inf)), method='trf',
x_scale=x0
)
if result.success is False:
raise ArithmeticError(f'Unable to fit VFA data'
f': {result.message}')
s0, t1 = result.x
return {'s0': s0, 't1': t1}
def __residuals(self, x, s, k_fa):
s0, t1 = x
s_est = spgr_signal(s0, t1, self.tr, k_fa*self.fa)
return s - s_est
class hifi(calculator):
def __init__(self, esp, ti, n, b, td, centre):
self.esp = esp
self.ti = ti
self.n = n
self.b = b
self.td = td
self.centre = centre
# get information about the scans
self.n_scans = len(esp)
self.is_ir = ~np.isnan(ti)
self.is_spgr = ~self.is_ir
self.idx_spgr = np.where(self.is_spgr)[0]
self.n_spgr = self.idx_spgr.size
self.get_linear_estimate = self.n_spgr > 1 and np.all(
np.isclose(esp[self.idx_spgr], esp[self.idx_spgr[0]]))
self.linear_fitter = vfa_linear( b[self.is_spgr], esp[self.idx_spgr[0]])
def proc(self, s, k_fa_fixed=None):
# First get a quick linear T1 estimate
if self.get_linear_estimate: # If >1 SPGR, use linear VFA fit
result_lin = self.linear_fitter.proc(s[self.is_spgr])
if ~np.isnan(result_lin['s0']) and ~np.isnan(result_lin['t1']):
s0_init, t1_init = result_lin['s0'], result_lin['t1']
else: # if result invalid, assume T1=1
t1_init = 1
s0_init = s[self.idx_spgr[0]] / spgr_signal(1, t1_init,
self.esp[self.idx_spgr[0]],
self.b[self.idx_spgr[0]])
elif self.n_spgr == 1: # If 1 SPGR, assume T1=1 and estimate s0 based on this scan
t1_init = 1
s0_init = s[self.idx_spgr[0]] / spgr_signal(1, t1_init,
self.esp[self.idx_spgr[0]],
self.b[self.idx_spgr[0]])
else: # If 0 SPGR, assume T1=1 and estimate s0 based on 1st scan
t1_init = 1
            # irspgr_signal assumes an ideal inversion pulse, so no inversion flip angle is passed
            s0_init = s[0] / irspgr_signal(1, t1_init, self.esp[0], self.ti[0], self.n[0], self.b[0],
                                           self.td[0], self.centre[0])
# Non-linear fit
if k_fa_fixed is None:
k_init = 1
bounds = ([0, 0, 0], [np.inf, np.inf, np.inf])
else:
k_init = k_fa_fixed
bounds = ([0, 0, 1], [np.inf, np.inf, 1])
x_0 = np.array([t1_init, s0_init, k_init])
result = least_squares(self.__residuals, x_0, args=(s,), bounds=bounds, method='trf',
x_scale=(t1_init, s0_init, k_init)
)
x_opt = result.x if result.success else (np.nan, np.nan, np.nan)
t1_opt, s0_opt, k_fa_opt = x_opt
s_opt = self.__signal(x_opt)
return {'t1': t1_opt, 's0': s0_opt, 'k_fa': k_fa_opt, 's_opt': s_opt}
def __residuals(self, x, s):
return s - self.__signal(x)
def __signal(self, x):
t1, s0, k_fa = x
s = np.zeros(self.n_scans)
s[self.is_ir] = irspgr_signal(s0, t1, self.esp[self.is_ir], self.ti[self.is_ir],
self.n[self.is_ir], k_fa*self.b[self.is_ir], self.td[self.is_ir],
self.centre[self.is_ir])
s[self.is_spgr] = spgr_signal(s0, t1, self.esp[self.is_spgr],
k_fa*self.b[self.is_spgr])
return s
def spgr_signal(s0, t1, tr, fa):
"""Return signal for SPGR sequence.
Parameters
----------
s0 : float
Equilibrium signal.
t1 : float
T1 value (s).
tr : float
TR value (s).
fa : float
Flip angle (deg).
Returns
-------
s : float
Steady-state SPGR signal.
"""
fa_rad = np.pi*fa/180
e = np.exp(-tr/t1)
s = s0 * (((1-e)*np.sin(fa_rad)) /
(1-e*np.cos(fa_rad)))
return s
def irspgr_signal(s0, t1, esp, ti, n, b, td=0, centre=0.5):
"""Return signal for IR-SPGR sequence.
Uses formula by Deichmann et al. (2000) to account for modified
apparent relaxation rate during the pulse train. Note inversion is assumed
to be ideal.
Parameters
----------
s0 : float
Equilibrium signal.
t1 : float
T1 value (s).
esp : float
Echo spacing (s). For SPGR, this is the TR.
ti : float
Inversion time (s). Note this is the actual time delay between the
inversion pulse and the start of the echo train. The effective TI
may be different, e.g for linear phase encoding of the echo train.
n : int
Number of excitation pulses per inversion pulse
b : float
Readout pulse flip angle (deg)
td : float
Delay between end of readout train and the next inversion (s).
centre : float
Time in readout train when centre of k-space is acquired,
expressed as a fraction of the readout duration. e.g. = 0 for
centric phase encoding, = 0.5 for linear phase encoding.
Returns
-------
s : float
Steady-state IR-SPGR signal.
"""
b_rad = np.pi*b/180
tau = esp * n
t1_star = (1/t1 - 1/esp*np.log(np.cos(b_rad)))**-1
m0_star = s0 * ((1-np.exp(-esp/t1)) / (1-np.exp(-esp/t1_star)))
r1 = -tau/t1_star
e1 = np.exp(r1)
e2 = np.exp(-td/t1)
e3 = np.exp(-ti/t1)
a1 = m0_star * (1-e1)
a2 = s0 * (1 - e2)
a3 = s0 * (1 - e3)
a = a3 - a2*e3 - a1*e2*e3
b = -e1*e2*e3
m1 = a/(1-b)
s = np.abs((
m0_star + (m1-m0_star)*np.exp(centre*r1))*np.sin(b_rad))
return s
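# --- Illustrative usage sketch (not part of the original module) ---
# A minimal round-trip check under assumed values: simulate two SPGR signals with
# spgr_signal and recover s0/T1 with the two-point fitter defined above.
if __name__ == '__main__':
    _tr = 0.005          # TR (s)
    _fa = [3.0, 15.0]    # flip angles (deg)
    _true_s0, _true_t1 = 100.0, 1.2
    _signals = np.array([spgr_signal(_true_s0, _true_t1, _tr, a) for a in _fa])
    print(vfa_2points(_fa, _tr).proc(_signals))  # expect approximately {'s0': 100.0, 't1': 1.2}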
|
StarcoderdataPython
|
9646089
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
from numericalunits import __version__
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
descrip = ("A package that lets you define quantities with units, which can "
"then be used in almost any numerical calculation in any "
"programming language. Checks that calculations pass dimensional "
"analysis, performs unit conversions, and defines physical "
"constants.")
setup(
name = "numericalunits",
version = __version__,
author = "<NAME>",
author_email = "<EMAIL>",
description = descrip,
license = "MIT",
keywords = "units, quantities, physical constants, dimensional analysis",
url = "http://pypi.python.org/pypi/numericalunits",
py_modules=['numericalunits'],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.4"]
)
|
StarcoderdataPython
|
301498
|
<filename>p1_basic/day32_35thread/day33/11_线程池.py
from concurrent.futures import ThreadPoolExecutor
import time
def task(a1, a2):
time.sleep(2)
print(a1, a2)
# Create a thread pool (at most 5 worker threads)
pool = ThreadPoolExecutor(5)
for i in range(40):
    # Request a thread from the pool and have it run the task function.
pool.submit(task, i, 8)
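# Illustrative addition (not part of the original snippet): submit() returns a
# concurrent.futures.Future; shutdown(wait=True) blocks until every queued task
# has finished and then releases the worker threads.
pool.shutdown(wait=True)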
|
StarcoderdataPython
|
3583026
|
<filename>testslider.py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, ax = plt.subplots()
plt.subplots_adjust(bottom=0.25)
fig.canvas.set_window_title('Reaktionsfortschritt')
t0 = 0
t = np.arange(0, t0, .5)
k0 = 0.17
a = np.exp(- k0 * t)
min_t = 5
l, = ax.plot(t, a, lw=3, color='crimson')
plt.axis([0, 20, 0, 1])
axrs = plt.axes([0.25, 0.1, 0.65, 0.03])
srs = Slider(axrs, 'Reaktionsschritte', 0, 20, valinit=min_t)
def update(x):
base_t = np.arange(0,20,0.5)
base_y = np.exp(-k0*base_t)
ax.plot(base_t,base_y,lw=0.1,alpha=0.1)
t0 = x
t = np.arange(min_t, t0, .5)
ax.lines.pop(0) # remove previous line plot
ax.plot(t, np.exp(- k0 * t), lw=3, color='crimson') # plot new one
fig.canvas.draw()
srs.on_changed(update)
plt.show()
|
StarcoderdataPython
|
3379421
|
import pytest
import torch
from espnet2.enh.loss.criterions.tf_domain import FrequencyDomainL1
from espnet2.enh.loss.wrappers.fixed_order import FixedOrderSolver
@pytest.mark.parametrize("num_spk", [1, 2, 3])
def test_FixedOrderSolver_forward(num_spk):
batch = 2
inf = [torch.rand(batch, 10, 100) for spk in range(num_spk)]
ref = [inf[num_spk - spk - 1] for spk in range(num_spk)] # reverse inf as ref
solver = FixedOrderSolver(FrequencyDomainL1())
loss, stats, others = solver(ref, inf)
|
StarcoderdataPython
|
6622156
|
<reponame>CurryEleison/workdocs-disaster-recovery
from argparse import ArgumentParser, ArgumentTypeError
from os.path import isdir
from pathlib import Path
import logging
from workdocs_dr.cli_arguments import clients_from_input, bucket_url_from_input, logging_setup, organization_id_from_input, wdfilter_from_input
from workdocs_dr.directory_restore import DirectoryRestoreRunner
rootlogger = logging.getLogger()
rootlogger.setLevel(logging.INFO)
def main():
parser = ArgumentParser()
parser.add_argument("--profile", help="AWS profile", default=None)
parser.add_argument("--region", help="AWS region", default=None)
parser.add_argument("--user-query", help="Query of user", default=None)
parser.add_argument("--folder", help="Folder(s) to restore", default=None)
parser.add_argument("--organization-id",
help="Workdocs organization id (directory id)", default=None)
parser.add_argument(
"--prefix", help="Prefix for bucket access", default=None)
parser.add_argument("--bucket-name", help="Name of bucket", default=None)
parser.add_argument("--path", type=dir_path, default=Path("."))
parser.add_argument(
"--bucket-role-arn",
help="ARN of role that puts/gets disaster recovery documents", default=None)
parser.add_argument("--verbose", help="Verbose output",
dest="verbose", action="store_true")
args = parser.parse_args()
clients = clients_from_input(profile_name=args.profile, region_name=args.region,
workdocs_role_arn=None, bucket_role_arn=args.bucket_role_arn)
bucket = bucket_url_from_input(args.bucket_name, args.prefix)
filter = wdfilter_from_input(args.user_query, args.folder)
organization_id = organization_id_from_input(args.organization_id)
# Restorer goes here
drr = DirectoryRestoreRunner(
clients,
organization_id,
bucket,
filter,
args.path
)
    logging_setup(rootlogger=rootlogger, verbose=args.verbose)
    drr.runall()
def dir_path(path):
if isdir(path):
return path
else:
raise ArgumentTypeError(f"readable_dir:{path} is not a valid path")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
185856
|
<filename>power_perceiver/pytorch_modules/satellite_processor.py
from dataclasses import dataclass
import einops
import torch
from torch import nn
from power_perceiver.consts import BatchKey
from power_perceiver.pytorch_modules.query_generator import reshape_time_as_batch
from power_perceiver.utils import assert_num_dims
# See https://discuss.pytorch.org/t/typeerror-unhashable-type-for-my-torch-nn-module/109424/6
# for why we set `eq=False`
@dataclass(eq=False)
class HRVSatelliteProcessor(nn.Module):
def __post_init__(self):
super().__init__()
def forward(self, x: dict[BatchKey, torch.Tensor]) -> torch.Tensor:
"""Returns a byte array ready for Perceiver.
Args:
x: A batch with at least these BatchKeys:
hrvsatellite_actual
hrvsatellite_predicted
hrvsatellite_time_utc_fourier
hrvsatellite_y_osgb_fourier
hrvsatellite_x_osgb_fourier
hrvsatellite_surface_height
hrvsatellite_solar_azimuth
hrvsatellite_solar_elevation
hrvsatellite_t0_idx
hrvsatellite: shape (batch_size, y, x) (timesteps have been folded into batch_size)
Returns:
            tensor of shape ((example * time), (y * x), feature).
"""
        # The strategy is to first get all the tensors into shape (example * time, y, x, features)
# and then, at the end of the function, flatten y and x, so each position is seen
# as a new element.
# Combine actual (history) and predicted satellite:
t0_idx = x[BatchKey.hrvsatellite_t0_idx]
hrvsatellite = torch.concat(
(
x[BatchKey.hrvsatellite_actual][:, : t0_idx + 1, 0],
x[BatchKey.hrvsatellite_predicted].detach(),
),
dim=1, # Concat on the time dimension.
)
n_timesteps = hrvsatellite.shape[1]
# Reshape so each timestep is seen as a separate example!
hrvsatellite = einops.rearrange(hrvsatellite, "example time ... -> (example time) ...")
timeless_x = reshape_time_as_batch(
x=x,
batch_keys=(
BatchKey.hrvsatellite_time_utc_fourier,
BatchKey.hrvsatellite_solar_azimuth,
BatchKey.hrvsatellite_solar_elevation,
),
)
# Patch the hrvsatellite
PATCH_SIZE = 4
hrvsatellite = einops.rearrange(
hrvsatellite,
"example (y y_patch) (x x_patch) -> example y x (y_patch x_patch)",
y_patch=PATCH_SIZE,
x_patch=PATCH_SIZE,
)
# Get position encodings:
y_fourier = x[BatchKey.hrvsatellite_y_osgb_fourier]
x_fourier = x[BatchKey.hrvsatellite_x_osgb_fourier]
# y_fourier and x_fourier are now of shape (example, y, x, n_fourier_features).
# Patch the position encodings
def _reduce(tensor):
return einops.reduce(
tensor,
"example (y y_patch) (x x_patch) ... -> example y x ...",
"mean",
y_patch=PATCH_SIZE,
x_patch=PATCH_SIZE,
)
y_fourier = _reduce(y_fourier)
x_fourier = _reduce(x_fourier)
time_fourier = timeless_x[BatchKey.hrvsatellite_time_utc_fourier]
# `time_fourier` is now shape: (example * time, n_features)
time_fourier = einops.repeat(
time_fourier,
"example features -> example y x features",
y=hrvsatellite.shape[1],
x=hrvsatellite.shape[2],
)
time_fourier_t0 = x[BatchKey.hrvsatellite_time_utc_fourier_t0]
time_fourier_t0 = einops.repeat(
time_fourier_t0,
"example features -> (example time) y x features",
time=n_timesteps,
y=hrvsatellite.shape[1],
x=hrvsatellite.shape[2],
)
surface_height = x[BatchKey.hrvsatellite_surface_height] # (example, y, x)
surface_height = _reduce(surface_height)
surface_height = surface_height.unsqueeze(-1) # (example, y, x, 1)
y_fourier = torch.repeat_interleave(y_fourier, repeats=n_timesteps, dim=0)
x_fourier = torch.repeat_interleave(x_fourier, repeats=n_timesteps, dim=0)
surface_height = torch.repeat_interleave(surface_height, repeats=n_timesteps, dim=0)
# Reshape solar features to shape: (example, y, x, 1):
def _repeat_solar_feature_over_x_and_y(feature: torch.Tensor) -> torch.Tensor:
# Select the last timestep:
assert_num_dims(feature, 1)
return einops.repeat(
feature,
"example -> example y x 1",
y=hrvsatellite.shape[1],
x=hrvsatellite.shape[2],
)
solar_azimuth = _repeat_solar_feature_over_x_and_y(
timeless_x[BatchKey.hrvsatellite_solar_azimuth]
)
solar_elevation = _repeat_solar_feature_over_x_and_y(
timeless_x[BatchKey.hrvsatellite_solar_elevation]
)
# Concatenate spatial features and solar features onto satellite imagery:
# The shape of each tensor, and the concatenated `byte_array`, should be:
# example * time, y, x, feature
byte_array = torch.concat(
(
time_fourier,
time_fourier_t0,
solar_azimuth,
solar_elevation,
y_fourier,
x_fourier,
surface_height,
hrvsatellite,
),
dim=3,
)
# Reshape so each location is seen as a separate element.
byte_array = einops.rearrange(
byte_array,
"example y x feature -> example (y x) feature",
)
return byte_array
# TODO: Test. At an absolute minimum, do something like this:
# HRVSatelliteProcessor()(batch).shape
|
StarcoderdataPython
|
376515
|
# Objective 1: Read table from multiple pdf files contained in a directory to a list
# Objective 2: clean the pdf text data and find unique words then save them to a dictionary
# To read tables contained within the pdf files, I'm using the tabula-py library
# To install tabula-py for Python 3 on Windows, ensure Java 8 or later is installed.
# Next, open a command-prompt window, browse to the python directory and run: pip3 install tabula-py
import tabula, os, re, string
from collections import Counter
# path to pdf files
filePath = "C:\\Users\\Ashoo\\Documents\\PythonPlayground\\text-analysis\\data\\pdf"
stripped = []  # initialize an empty list
for filename in os.listdir(filePath):
    # search for files ending with the .pdf extension and read them in memory
if filename.strip().endswith('.pdf'):
print(filename)
        # Note: python reads the pdf file in binary mode ('rb' for reading, 'wb' for writing)
with(open(os.path.join(filePath,filename),'rb')) as pdfFiles:
#df= tabula.read_pdf(f, stream=True)[0]
# read all pdf pages
df= tabula.read_pdf(pdfFiles, pages="all")
print(df)
# convert pdf table to csv format
tabula.convert_into(pdfFiles, "pdf_to_csv.csv", output_format="csv", stream=True)
pdfFiles.close()
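# --- Illustrative sketch for Objective 2 (not part of the original script) ---
# Assumes `df` still holds the list of DataFrames returned by tabula.read_pdf for the
# last processed PDF; cleans the cell text and counts unique words with Counter.
word_counts = Counter()
for table in df:
    for cell in table.astype(str).values.ravel():
        cleaned = cell.translate(str.maketrans('', '', string.punctuation)).lower()
        word_counts.update(re.findall(r"[a-z]+", cleaned))
unique_words = dict(word_counts)  # save the unique words (with counts) to a dictionary
print("Unique words found:", len(unique_words))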
|
StarcoderdataPython
|
1691187
|
<reponame>cloudtools/awacs<gh_stars>100-1000
#!/usr/bin/env python3
import asyncio
import importlib
import sys
import urllib.parse
from pathlib import Path
from typing import DefaultDict, Dict, Iterable, List, Set, Tuple
import aiofiles
import httpx
from bs4 import BeautifulSoup
BASE_URL = "https://docs.aws.amazon.com/IAM/latest/UserGuide/"
HEADER = """\
# Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
"""
CLASSES = """\
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
"""
CLASSES_S3 = """\
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
# account is empty for S3 buckets
if not resource.startswith(("accesspoint/", "job/", "storage-lens/")):
account = ""
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
"""
BASEDIR = "awacs"
IGNORED_SERVICE_ALIASES = {
"Amazon API Gateway Management V2": "apigateway",
"Amazon Kinesis Analytics V2": "kinesisanalytics",
"Amazon Lex V2": "lex",
"Amazon Pinpoint Email Service": "ses",
"Amazon Simple Email Service v2": "ses",
"AWS Cloud Control API": "cloudformation",
"AWS IoT Greengrass V2": "greengrass",
"AWS Marketplace Catalog": "aws-marketplace",
"AWS Marketplace Entitlement Service": "aws-marketplace",
"AWS Marketplace Image Building Service": "aws-marketplace",
"AWS Marketplace Metering Service": "aws-marketplace",
"AWS Marketplace Private Marketplace": "aws-marketplace",
"AWS Marketplace Procurement Systems Integration": "aws-marketplace",
"AWS Private Marketplace": "aws-marketplace",
"Elastic Load Balancing V2": "elasticloadbalancing",
}
RENAME_SERVICE = {
"lambda": "awslambda",
}
def rename_service(name):
return RENAME_SERVICE.get(name, name)
async def main() -> None:
services_with_actions: DefaultDict[str, Set[str]] = DefaultDict(set)
service_names: Dict[str, str] = {}
service_page_responses = await collect_service_info()
for link, r in service_page_responses:
service_name, service_prefix, actions = await extract_actions(
html=r.text, link=link
)
services_with_actions[service_prefix].update(actions)
if IGNORED_SERVICE_ALIASES.get(service_name) != service_prefix:
if (
service_prefix in service_names
and service_names[service_prefix] != service_name
):
raise ValueError(
"Found two different service names for service prefix"
f" {service_prefix!r}: {service_names[service_prefix]!r}"
f" and {service_name!r}."
)
service_names[service_prefix] = service_name
original_services_with_actions = await collect_existing_actions()
for service_prefix, actions in services_with_actions.items():
actions.update(original_services_with_actions.get(service_prefix) or set())
await asyncio.gather(
*(
write_service(service_prefix, service_names[service_prefix], actions)
for service_prefix, actions in services_with_actions.items()
)
)
async def collect_existing_actions() -> Dict[str, Set[str]]:
# pylint: disable=import-outside-toplevel
if "" in sys.path:
sys.path.remove("") # Import the installed awacs (that was processed by 2to3)
import awacs
from awacs.aws import Action as BaseAction
services_with_actions: DefaultDict[str, Set[str]] = DefaultDict(set)
for path in (path.stem for path in Path(awacs.__file__).parent.glob("*.py")):
if path.startswith("__"):
continue
module = importlib.import_module(f"awacs.{path}")
for action in vars(module).values():
if not isinstance(action, BaseAction):
continue
services_with_actions[action.prefix].add(action.action)
return dict(services_with_actions)
async def collect_service_info() -> Iterable[Tuple[str, httpx.Response]]:
max_connections = 2
async with httpx.AsyncClient(
http2=True,
limits=httpx.Limits(max_connections=max_connections),
timeout=10.0,
) as client:
r = await client.get(
urllib.parse.urljoin(
BASE_URL,
"reference_policies_actions-resources-contextkeys.html",
)
)
parsed_html = BeautifulSoup(r.text, features="lxml")
service_links: List[str] = []
for link in parsed_html.body.find_all("a"):
href = link.attrs["href"]
if href.startswith("./list_") and href.endswith(".html"):
service_links.append(href)
# This doesn't work at the moment,
# see https://github.com/encode/httpx/issues/1171
#
# return await asyncio.gather(
# *[
# client.get(urllib.parse.urljoin(BASE_URL, link))
# for link in service_links
# ]
# )
#
# workaround
service_page_responses = []
for start in range(0, len(service_links), max_connections):
service_page_responses += await asyncio.gather(
*[
client.get(urllib.parse.urljoin(BASE_URL, link))
for link in service_links[start : start + max_connections]
]
)
return zip(service_links, service_page_responses)
async def extract_actions(html: str, link: str) -> Tuple[str, str, Set[str]]:
parsed_html = BeautifulSoup(html, features="lxml")
service_prefixes = parsed_html.body.find_all(is_service_prefix)
if len(service_prefixes) < 1:
raise ValueError(f"Found no service prefix in {link!r}.")
if len(service_prefixes) > 1:
raise ValueError(f"Found more than one service prefix in {link!r}.")
service_prefix_tag = service_prefixes[0]
service_name = service_prefix_tag.previous.strip()[: -len(" (service prefix:")]
service_prefix = service_prefix_tag.text
actions = set()
for table in parsed_html.body.find_all("table"):
header = table.find("th")
if not header or header.text != "Actions":
continue
actions |= await _actions_from_table(table)
return service_name, service_prefix, actions
async def write_service(
service_prefix: str, service_name: str, actions: Set[str]
) -> None:
content: List[str] = []
content.append(HEADER)
content.append(f'service_name = "{service_name}"')
content.append(f'prefix = "{service_prefix}"')
content.append("")
content.append("")
content.append(CLASSES_S3 if service_prefix == "s3" else CLASSES)
content.append("")
for action in sorted(actions):
action = action.strip()
# Handle action such as "ReEncrypt*"
if action[-1] == "*":
action = action[:-1]
action_string = '{action} = Action("{action}")'
content.append(action_string.format(action=action))
if content[-1] != "":
# Add a final newline
content.append("")
awacs_service = rename_service(service_prefix.replace("-", "_"))
filename = "".join([BASEDIR, "/", awacs_service, ".py"])
async with aiofiles.open(filename, "w") as fp:
await fp.write("\n".join(content))
async def _actions_from_table(table) -> Set[str]:
actions = set()
skip_next_lines = 0
for table_row in table.find_all("tr"):
if skip_next_lines:
skip_next_lines -= 1
continue
table_cell = table_row.find("td")
if not table_cell:
continue
skip_next_lines = int(table_cell.attrs.get("rowspan") or 1) - 1
action: str = (table_cell.text.strip().split() or [""])[0]
if not action:
continue
actions.add(action)
return actions
def is_service_prefix(tag):
return (
tag
and tag.name == "code"
and tag.previous_element
and tag.previous_element.strip().endswith("(service prefix:")
)
if __name__ == "__main__":
asyncio.run(main())
print("And now run 'black awacs/'", flush=True)
|
StarcoderdataPython
|
3448649
|
<reponame>t18cs020/discordpy-startup
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Load the installed discord.py package
import discord
import random
import csv
import asyncio
import datetime
# Create the client object required for the connection
client = discord.Client()
# Load the Splatoon 2 weapons
with open('./Splatoon2weapons.csv',encoding="utf-8") as f:
reader = csv.reader(f)
weapons = []
for row in reader:
weapons += row
f.close()
# Load the name parts
with open('./parts.txt', 'r', encoding='UTF-8') as f:
parts = [s.strip() for s in f.readlines()]
f.close()
with open('./parts2.txt', 'r', encoding='UTF-8') as f:
parts2 = [s.strip() for s in f.readlines()]
f.close()
# Load the horse-name parts
with open('./uma1.txt', 'r', encoding='UTF-8') as f:
uma_parts = [s.strip() for s in f.readlines()]
f.close()
with open('./uma2.txt', 'r', encoding='UTF-8') as f:
uma_parts2 = [s.strip() for s in f.readlines()]
f.close()
voice = None
player = None
# For the /butahiko (ramen) command
ramen_size = ['ミニ','小','大']
buta = ['ラーメン','ぶた']
siru = ['','汁なし']
ninniku = ['','ニンニク少し','ニンニク','ニンニクマシマシ']
yasai= ['ヤサイ少なめ','','ヤサイマシ','ヤサイマシマシ']
seabura = ['','アブラマシ','アブラマシマシ']
karame = ['','カラメ','カラカラ']
# For the /apex command
with open('./apex_legends.txt', 'r', encoding='UTF-8') as f:
apexlegends = [s.strip() for s in f.readlines()]
f.close()
with open('./apex_weapons.txt', 'r', encoding='UTF-8') as f:
apexweapons = [s.strip() for s in f.readlines()]
f.close()
# Audio files
ikuokamoto = discord.FFmpegPCMAudio('./ikuokamoto.mp3')
ikisugiokamoto = discord.FFmpegPCMAudio('./ikisugiokamoto.mp3')
dokaben = discord.FFmpegPCMAudio('./dokaben.mp3')
watatai = discord.FFmpegPCMAudio('./watatai.mp3')
# Handler that runs on startup
@client.event
async def on_ready():
    # Print a login notice to the terminal once the bot is up
print('ログインしました')
# Handler that runs whenever a message is received
@client.event
async def on_message(message):
global voice, player
msg = message.content
    # Ignore messages sent by bots
if message.author.bot:
return
    # Reply with 'にゃーん' (meow) when someone posts '/neko'
if message.content == '/neko':
await message.channel.send('にゃーん')
if message.content == '/spla':
size = len(weapons)
i = random.randint(0,size-1)
await message.channel.send('おめぇのブキは、' + weapons[i] + 'だな!')
if message.content == '/apex':
deletes = []
for num in range(1,4):
i = random.randint(0,len(apexlegends)-1)
weapon1 = ""
weapon2 = ""
p2020 = random.random()
if p2020 < 0.5:
j = random.randint(0,len(apexweapons)-1)
weapon1 = apexweapons[j]
else:
weapon1 = "p2020"
p2020 = random.random()
if p2020 < 0.5:
k = random.randint(0,len(apexweapons)-1)
weapon2 = apexweapons[k]
else:
weapon2 = "p2020"
            # Remove the legend from the list (so it is not picked twice)
legend = apexlegends.pop(i)
await message.channel.send(str(num) + "人目は、" + legend + "で" + weapon1 + "と" + weapon2 + "だな!")
            # Keep a list of the removed legends
deletes.append(legend)
        # Put the removed legends back into the original list afterwards
apexlegends.extend(deletes)
if message.content == '/myname':
i = random.randint(0,len(parts))
j = random.randint(0,len(parts)-1)
k = random.randint(0,len(parts2)-1)
if i == len(parts):
await message.channel.send('おめぇの名前は、性器絶頂チンポギアだな!')
else:
await message.channel.send('おめぇの名前は、' + parts[i] + parts[j] + parts2[k] + 'だな!')
if message.content == '/uma':
i = random.randint(0,len(uma_parts))
j = random.randint(0,len(uma_parts2)-1)
if i == len(uma_parts):
name = '性器絶頂チンポギア'
await message.channel.send('おめぇの名前は、性器絶頂チンポギアだな!')
await message.author.edit(nick=name)
else:
await message.channel.send('おめぇの愛馬は、' + uma_parts[i] + uma_parts2[j] + 'だな!')
if message.content == '/changename':
i = random.randint(0,len(parts))
j = random.randint(0,len(parts)-1)
k = random.randint(0,len(parts2)-1)
name = ""
if i == len(parts):
name = '性器絶頂チンポギア'
else:
name = parts[i] + parts[j] + parts2[k]
await message.channel.send("おめぇの名前は、" + name + "だな!")
await message.author.edit(nick=name)
if message.content == '/showparts':
for i in range(len(parts)):
await message.channel.send(parts[i])
if message.content == 'イク岡本':
if message.author.voice is None:
await message.channel.send("あなたはボイスチャンネルに接続していません。")
return
        # Connect to the voice channel
vc = await message.author.voice.channel.connect()
        # Play the audio
vc.play(ikuokamoto)
        # Disconnect
await asyncio.sleep(2)
await message.guild.voice_client.disconnect()
if message.content == 'イキスギ岡本':
if message.author.voice is None:
await message.channel.send("あなたはボイスチャンネルに接続していません。")
return
        # Connect to the voice channel
vc = await message.author.voice.channel.connect()
        # Play the audio
vc.play(ikisugiokamoto)
        # Disconnect
await asyncio.sleep(2)
await message.guild.voice_client.disconnect()
if message.content == '/bakadon':
if message.author.voice is None:
await message.channel.send("あなたはボイスチャンネルに接続していません。")
return
        # Connect to the voice channel
vc = await message.author.voice.channel.connect()
        # Send the GIF
file = discord.File("./bakadon.gif", filename="bakadon.gif")
await message.channel.send(file=file)
await asyncio.sleep(1)
        # Play the audio
vc.play(dokaben)
        # Disconnect
await asyncio.sleep(6)
await message.guild.voice_client.disconnect()
if message.content == '/butahiko':
i = random.randint(0,len(ramen_size)-1)
j = random.randint(0,len(buta)-1)
k = random.randint(0,len(siru)-1)
l = random.randint(0,len(ninniku)-1)
m = random.randint(0,len(yasai)-1)
n = random.randint(0,len(seabura)-1)
o = random.randint(0,len(karame)-1)
await message.channel.send('おめぇのラーメンは、' + ramen_size[i] + buta[j] + siru[k] + ninniku[l] + yasai[m] + seabura[n] + karame[o] + 'だな!')
if message.content == 'ニンニク入れますか?' or message.content == 'ニンニク入れますか?':
l = random.randint(0,len(ninniku)-1)
m = random.randint(0,len(yasai)-1)
n = random.randint(0,len(seabura)-1)
o = random.randint(0,len(karame)-1)
await message.channel.send('俺のラーメンは、' + ninniku[l] + yasai[m] + seabura[n] + karame[o] + 'だな!')
if message.content == '/enter':
await message.author.voice.channel.connect()
if message.content == '/exit':
await message.guild.voice_client.disconnect()
# Notification handler for voice-channel joins/leaves
@client.event
async def on_voice_state_update(member, before, after):
if before.channel != after.channel:
vc = before.channel
vc.play(watatai)
# Start the bot and connect to the Discord server
client.run(token)
<PASSWORD>
|
StarcoderdataPython
|
3448043
|
import importlib.metadata
__version__ = importlib.metadata.version("cuda_checker")
|
StarcoderdataPython
|
226433
|
<filename>skm_tea/utils/env.py
import os
from iopath.common.file_io import PathManager, PathManagerFactory
from meddlr.utils.cluster import Cluster
from meddlr.utils.env import is_repro, supports_cupy # noqa: F401
def get_path_manager(key="skm_tea") -> PathManager:
return PathManagerFactory.get(key)
def cache_dir() -> str:
return get_path_manager().get_local_path(
os.path.join(Cluster.working_cluster().cache_dir, "skm-tea")
)
|
StarcoderdataPython
|
9701789
|
from biobb_common.tools import test_fixtures as fx
from biobb_io.api.memprotmd_sim_list import memprotmd_sim_list
class TestMemProtMDSimList():
def setUp(self):
fx.test_setup(self,'memprotmd_sim_list')
def tearDown(self):
fx.test_teardown(self)
pass
def test_memprotmd_sim_list(self):
memprotmd_sim_list(properties=self.properties, **self.paths)
assert fx.not_empty(self.paths['output_simulations'])
assert fx.equal(self.paths['output_simulations'], self.paths['reference_output_simulations'])
|
StarcoderdataPython
|
86924
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import mock
import unittest
from mock import patch
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from infra_libs import bqh
from infra_libs.test import testmessage_pb2
class TestBigQueryHelper(unittest.TestCase):
def setUp(self):
super(TestBigQueryHelper, self).setUp()
self.bq_client = mock.Mock()
self.dataset_id = 'test_dataset'
self.table_id = 'test_table'
self.table = self.bq_client.get_table(
self.bq_client.dataset(self.dataset_id).table(self.table_id))
self.mock_create_rows = self.bq_client.create_rows
self.mock_create_rows.return_value = None
def test_send_rows_tuple(self):
rows = [('a',), ('b',), ('c',)]
bqh.send_rows(self.bq_client, self.dataset_id, self.table_id, rows)
self.mock_create_rows.assert_any_call(self.table, rows)
def test_send_iterable_rows(self):
rows = [('a',), ('b',), ('c',)]
bqh.send_rows(self.bq_client, self.dataset_id, self.table_id, iter(rows))
self.mock_create_rows.assert_any_call(self.table, rows)
def test_batch_sizes(self):
rows = [('a',), ('b',), ('c',)]
bqh.send_rows(self.bq_client, self.dataset_id, self.table_id, rows, 0)
self.mock_create_rows.assert_any_call(self.table, rows)
bqh.send_rows(self.bq_client, self.dataset_id, self.table_id, rows, 1)
self.mock_create_rows.assert_any_call(self.table, [('a',)])
self.mock_create_rows.assert_any_call(self.table, [('b',)])
self.mock_create_rows.assert_any_call(self.table, [('c',)])
bqh.send_rows(self.bq_client, self.dataset_id, self.table_id, rows,
bqh._BATCH_LIMIT+1)
self.mock_create_rows.assert_any_call(self.table, rows)
def test_send_rows_unsupported_type(self):
with self.assertRaises(bqh.UnsupportedTypeError):
bqh.send_rows(self.bq_client, self.dataset_id, self.table_id, [{}])
def test_send_rows_message(self):
rows = [testmessage_pb2.TestMessage(str='a')]
bqh.send_rows(self.bq_client, self.dataset_id, self.table_id, rows)
expected_rows_arg = [{'num': 0, 'e': 'E0', 'str': u'a'}]
self.mock_create_rows.assert_any_call(self.table, expected_rows_arg)
def test_send_rows_with_errors(self):
rows = [('a',), ('b',), ('c',)]
self.mock_create_rows.return_value = [
{
'index': 0,
'errors': ['some err'],
},
]
with self.assertRaises(bqh.BigQueryInsertError):
bqh.send_rows(self.bq_client, self.dataset_id, self.table_id, rows)
def test_message_to_dict(self):
struct0 = struct_pb2.Struct()
struct0['a'] = 0
struct1 = struct_pb2.Struct()
struct1['a'] = 1
dt0 = datetime.datetime(2018, 2, 20)
dt1 = datetime.datetime(2018, 2, 21)
ts0 = timestamp_pb2.Timestamp()
ts0.FromDatetime(dt0)
ts1 = timestamp_pb2.Timestamp()
ts1.FromDatetime(dt1)
dur = duration_pb2.Duration()
dur.FromTimedelta(datetime.timedelta(seconds=2, microseconds=3))
mmap_hello=testmessage_pb2.NestedMessage(num=100)
mmap_world=testmessage_pb2.NestedMessage(str='yo.')
msg = testmessage_pb2.TestMessage(
str='a',
strs=['a', 'b'],
num=1,
nums=[0, 1, 2],
nested=testmessage_pb2.NestedMessage(num=1, str='a'),
nesteds=[
testmessage_pb2.NestedMessage(num=1, str='a'),
testmessage_pb2.NestedMessage(num=2, str='b'),
],
empty=empty_pb2.Empty(),
empties=[empty_pb2.Empty(), empty_pb2.Empty()],
e=testmessage_pb2.E1,
es=[testmessage_pb2.E0, testmessage_pb2.E2],
struct=struct0,
structs=[struct0, struct1],
timestamp=ts0,
timestamps=[ts0, ts1],
repeated_container=testmessage_pb2.RepeatedContainer(nums=[1, 2]),
duration=dur,
enum_map={
'hello': testmessage_pb2.E1,
'world': testmessage_pb2.E2,
},
scalar_map={'hello': 1, 'world': 2},
message_map={'hello': mmap_hello, 'world': mmap_world},
)
row = bqh.message_to_dict(msg)
expected = {
'str': u'a',
'strs': [u'a', u'b'],
'num': 1,
'nums': [0, 1, 2],
'nested': {
'num': 1,
'str': u'a'
},
'nesteds': [
{
'num': 1,
'str': u'a'
},
{
'num': 2,
'str': u'b'
},
],
# empty messages are omitted
'e': 'E1',
'es': ['E0', 'E2'],
# structs are compared separately.
'timestamp': dt0.isoformat(),
'timestamps': [dt0.isoformat(), dt1.isoformat()],
'repeated_container': {
'nums': [1, 2]
},
'duration': 2.000003,
'enum_map': [
{'key': u'hello', 'value': 'E1'},
{'key': u'world', 'value': 'E2'},
],
'scalar_map': [
{'key': u'hello', 'value': 1},
{'key': u'world', 'value': 2},
],
'message_map': [
{'key': u'hello', 'value': {'num': 100, 'str': u''}},
{'key': u'world', 'value': {'num': 0, 'str': u'yo.'}},
],
}
# compare structs as JSON values, not strings.
self.assertEqual(json.loads(row.pop('struct')), {'a': 0})
self.assertEqual(
[json.loads(s) for s in row.pop('structs')],
[{'a': 0}, {'a': 1}]
)
self.assertEqual(row, expected)
def test_message_to_dict_empty(self):
row = bqh.message_to_dict(testmessage_pb2.TestMessage())
expected = {'e': 'E0', 'str': u'', 'num': 0}
self.assertEqual(row, expected)
def test_message_to_dict_repeated_container_with_no_elems(self):
row = bqh.message_to_dict(testmessage_pb2.TestMessage(
repeated_container=testmessage_pb2.RepeatedContainer()))
self.assertNotIn('repeated_container', row)
def test_message_to_dict_invalid_enum(self):
with self.assertRaisesRegexp(
ValueError, '^Invalid value -1 for enum type bigquery.E$'):
bqh.message_to_dict(testmessage_pb2.TestMessage(e=-1))
def test_message_to_dict_omit_null(self):
with self.assertRaisesRegexp(
ValueError, '^Invalid value -1 for enum type bigquery.E$'):
bqh.message_to_dict(testmessage_pb2.TestMessage(e=-1))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
5076901
|
#
# Testcase for DataHub
#
import pytest
from macaca import DataHub
def test_datahub():
print(DataHub)
datahub = DataHub(hostname='127.0.0.1', port='9200')
datahub.switch_scene(hub='sample', pathname='test1', data={ 'currentScene': 'scene1' })
datahub.switch_all_scenes(hub='sample', data={ 'currentScene': 'default' })
|
StarcoderdataPython
|
5002622
|
from typing import Union
from uuid import UUID
from .base import BaseFunction
from ..request import (
Request,
SSEContextManager,
)
class BackgroundTask(BaseFunction):
"""
Provides server-sent events streaming functions.
"""
task_id: UUID
def __init__(self, task_id: Union[UUID, str]) -> None:
self.task_id = task_id if isinstance(task_id, UUID) else UUID(task_id)
# only supported in AsyncAPISession
def listen_events(self) -> SSEContextManager:
"""
Opens an event stream of the background task updates.
:returns: a context manager that returns an :class:`SSEResponse` object.
"""
params = {
'task_id': str(self.task_id),
}
request = Request(
'GET', '/events/background-task',
params=params,
)
return request.connect_events()
|
StarcoderdataPython
|
3515295
|
"""
Entropy calculations
"""
import math
import zlib
from collections import Counter
from pathlib import Path
from typing import Mapping
from .analyzer import IPathPropertyAnalysis
from . import register_analysis
class EntropyCalculator(IPathPropertyAnalysis):
@classmethod
def ticks(cls) -> Mapping[float, str]:
return {
0.0: 'Low',
0.1: 'Text',
0.8: 'Code',
0.95: 'Compressed',
1.0: 'Encrypted'
}
name = 'entropy'
@staticmethod
def entropy_estimate(data: bytes) -> float:
"""
Heuristic for estimating entropy based on zlib compression ratio
"""
data_len = len(data)
if data_len == 0:
return 0
if data_len < 0x100:
return len(set(data)) / data_len
return min(len(zlib.compress(data)) / data_len, 1.0)
@staticmethod
def entropy_math(data, unit='shannon') -> float:
"""
Actual mathematical calculation of entropy
"""
base = {
'shannon': 2.,
'natural': math.exp(1),
'hartley': 10.
}
if len(data) <= 1:
return 0
counts = Counter()
for d in data:
counts[d] += 1
ent = 0
probabilities = [float(c) / len(data) for c in counts.values()]
for p in probabilities:
if p > 0.:
ent -= p * math.log(p, base[unit])
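        # Dividing by 8 rescales the per-byte entropy (up to 8 bits with unit='shannon') to [0, 1].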
return ent / 8
def calculate_file(self, file_path: Path) -> float:
return self.entropy_estimate(file_path.read_bytes())
register_analysis(EntropyCalculator)
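# Illustrative sketch (not part of the original module): the two static methods can be
# compared directly on raw bytes from an interactive session, e.g.:
#   data = b'The quick brown fox jumps over the lazy dog. ' * 20
#   EntropyCalculator.entropy_estimate(data)  # zlib-based heuristic, in [0, 1]
#   EntropyCalculator.entropy_math(data)      # Shannon entropy of the bytes, scaled to [0, 1]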
|
StarcoderdataPython
|
8184466
|
import torch
a = torch.ones([10, 10])
b = torch.rand([10, 10])
print(a[:, None, :].size())
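# a[:, None, :] inserts a singleton dimension at position 1, so this prints torch.Size([10, 1, 10]).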
|
StarcoderdataPython
|
52843
|
from fabric.api import *
def live():
""" Set the target to production. """
env.hosts = ['kurosaki@ichigo']
env.remote_app_dir = '/home/kurosaki/htdocs/setr.co.uk/'
env.build_dir = '_site/'
def push():
"""
Pushes the code to nominated server.
restart included.
doesn't touch the db
"""
require('hosts', provided_by=[live, ])
put('%s*' % (env.build_dir,), env.remote_app_dir)
def start():
local('jekyll serve')
|
StarcoderdataPython
|
211538
|
# The ExchangeAgent expects a numeric agent id, printable name, agent type, timestamp to open and close trading,
# a list of equity symbols for which it should create order books, a frequency at which to archive snapshots
# of its order books, a pipeline delay (in ns) for order activity, the exchange computation delay (in ns),
# the levels of order stream history to maintain per symbol (maintains all orders that led to the last N trades),
# whether to log all order activity to the agent log, and a random state object (already seeded) to use
# for stochasticity.
from agent.FinancialAgent import FinancialAgent
from message.Message import Message
from util.OrderBook import OrderBook
from util.util import log_print
import datetime as dt
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
import jsons as js
import pandas as pd
pd.set_option('display.max_rows', 500)
from copy import deepcopy
class ExchangeAgent(FinancialAgent):
def __init__(self, id, name, type, mkt_open, mkt_close, symbols, book_freq='S', wide_book=False, pipeline_delay = 40000,
computation_delay = 1, stream_history = 0, log_orders = False, random_state = None):
super().__init__(id, name, type, random_state)
# Do not request repeated wakeup calls.
self.reschedule = False
# Store this exchange's open and close times.
self.mkt_open = mkt_open
self.mkt_close = mkt_close
# Right now, only the exchange agent has a parallel processing pipeline delay. This is an additional
# delay added only to order activity (placing orders, etc) and not simple inquiries (market operating
# hours, etc).
self.pipeline_delay = pipeline_delay
# Computation delay is applied on every wakeup call or message received.
self.computation_delay = computation_delay
# The exchange maintains an order stream of all orders leading to the last L trades
# to support certain agents from the auction literature (GD, HBL, etc).
self.stream_history = stream_history
# Log all order activity?
self.log_orders = log_orders
# Create an order book for each symbol.
self.order_books = {}
for symbol in symbols:
self.order_books[symbol] = OrderBook(self, symbol)
# At what frequency will we archive the order books for visualization and analysis?
self.book_freq = book_freq
# Store orderbook in wide format? ONLY WORKS with book_freq == 0
self.wide_book = wide_book
self.wide_book_warning()
# The subscription dict is a dictionary with the key = agent ID,
    # value = dict (key = symbol, value = list [levels (no of levels to receive updates for),
    # frequency (min number of ns between messages), last agent update timestamp]
    # e.g. {101 : {'AAPL' : [1, 10, pd.Timestamp('10:00:00')]}}
self.subscription_dict = {}
# The exchange agent overrides this to obtain a reference to an oracle.
# This is needed to establish a "last trade price" at open (i.e. an opening
# price) in case agents query last trade before any simulated trades are made.
# This can probably go away once we code the opening cross auction.
def kernelInitializing (self, kernel):
super().kernelInitializing(kernel)
self.oracle = self.kernel.oracle
# Obtain opening prices (in integer cents). These are not noisy right now.
for symbol in self.order_books:
try:
self.order_books[symbol].last_trade = self.oracle.getDailyOpenPrice(symbol, self.mkt_open)
log_print ("Opening price for {} is {}", symbol, self.order_books[symbol].last_trade)
except AttributeError as e:
log_print(str(e))
# The exchange agent overrides this to additionally log the full depth of its
# order books for the entire day.
def kernelTerminating (self):
super().kernelTerminating()
# If the oracle supports writing the fundamental value series for its
# symbols, write them to disk.
if hasattr(self.oracle, 'f_log'):
for symbol in self.oracle.f_log:
dfFund = pd.DataFrame(self.oracle.f_log[symbol])
if not dfFund.empty:
dfFund.set_index('FundamentalTime', inplace=True)
self.writeLog(dfFund, filename='fundamental_{}'.format(symbol))
log_print("Fundamental archival complete.")
if self.book_freq is None: return
else:
# Iterate over the order books controlled by this exchange.
for symbol in self.order_books:
start_time = dt.datetime.now()
self.logOrderBookSnapshots(symbol)
end_time = dt.datetime.now()
print("Time taken to log the order book: {}".format(end_time - start_time))
print("Order book archival complete.")
def receiveMessage(self, currentTime, msg):
super().receiveMessage(currentTime, msg)
# Unless the intent of an experiment is to examine computational issues within an Exchange,
# it will typically have either 1 ns delay (near instant but cannot process multiple orders
# in the same atomic time unit) or 0 ns delay (can process any number of orders, always in
# the atomic time unit in which they are received). This is separate from, and additional
# to, any parallel pipeline delay imposed for order book activity.
# Note that computation delay MUST be updated before any calls to sendMessage.
self.setComputationDelay(self.computation_delay)
# Is the exchange closed? (This block only affects post-close, not pre-open.)
if currentTime > self.mkt_close:
# Most messages after close will receive a 'MKT_CLOSED' message in response. A few things
# might still be processed, like requests for final trade prices or such.
if msg.body['msg'] in ['LIMIT_ORDER', 'CANCEL_ORDER', 'MODIFY_ORDER']:
log_print("{} received {}: {}", self.name, msg.body['msg'], msg.body['order'])
self.sendMessage(msg.body['sender'], Message({"msg": "MKT_CLOSED"}))
# Don't do any further processing on these messages!
return
elif 'QUERY' in msg.body['msg']:
# Specifically do allow querying after market close, so agents can get the
# final trade of the day as their "daily close" price for a symbol.
pass
else:
log_print("{} received {}, discarded: market is closed.", self.name, msg.body['msg'])
self.sendMessage(msg.body['sender'], Message({"msg": "MKT_CLOSED"}))
# Don't do any further processing on these messages!
return
# Log order messages only if that option is configured. Log all other messages.
if msg.body['msg'] in ['LIMIT_ORDER', 'CANCEL_ORDER']:
if self.log_orders: self.logEvent(msg.body['msg'], js.dump(msg.body['order'], strip_privates=True))
else:
self.logEvent(msg.body['msg'], msg.body['sender'])
# Handle the DATA SUBSCRIPTION request and cancellation messages from the agents.
if msg.body['msg'] in ["MARKET_DATA_SUBSCRIPTION_REQUEST", "MARKET_DATA_SUBSCRIPTION_CANCELLATION"]:
log_print("{} received {} request from agent {}", self.name, msg.body['msg'], msg.body['sender'])
self.updateSubscriptionDict(msg, currentTime)
# Handle all message types understood by this exchange.
if msg.body['msg'] == "WHEN_MKT_OPEN":
log_print("{} received WHEN_MKT_OPEN request from agent {}", self.name, msg.body['sender'])
# The exchange is permitted to respond to requests for simple immutable data (like "what are your
# hours?") instantly. This does NOT include anything that queries mutable data, like equity
# quotes or trades.
self.setComputationDelay(0)
self.sendMessage(msg.body['sender'], Message({"msg": "WHEN_MKT_OPEN", "data": self.mkt_open}))
elif msg.body['msg'] == "WHEN_MKT_CLOSE":
log_print("{} received WHEN_MKT_CLOSE request from agent {}", self.name, msg.body['sender'])
# The exchange is permitted to respond to requests for simple immutable data (like "what are your
# hours?") instantly. This does NOT include anything that queries mutable data, like equity
# quotes or trades.
self.setComputationDelay(0)
self.sendMessage(msg.body['sender'], Message({"msg": "WHEN_MKT_CLOSE", "data": self.mkt_close}))
elif msg.body['msg'] == "QUERY_LAST_TRADE":
symbol = msg.body['symbol']
if symbol not in self.order_books:
log_print("Last trade request discarded. Unknown symbol: {}", symbol)
else:
log_print("{} received QUERY_LAST_TRADE ({}) request from agent {}", self.name, symbol, msg.body['sender'])
# Return the single last executed trade price (currently not volume) for the requested symbol.
# This will return the average share price if multiple executions resulted from a single order.
self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_LAST_TRADE", "symbol": symbol,
"data": self.order_books[symbol].last_trade,
"mkt_closed": True if currentTime > self.mkt_close else False}))
elif msg.body['msg'] == "QUERY_SPREAD":
symbol = msg.body['symbol']
depth = msg.body['depth']
if symbol not in self.order_books:
log_print("Bid-ask spread request discarded. Unknown symbol: {}", symbol)
else:
log_print("{} received QUERY_SPREAD ({}:{}) request from agent {}", self.name, symbol, depth,
msg.body['sender'])
# Return the requested depth on both sides of the order book for the requested symbol.
# Returns price levels and aggregated volume at each level (not individual orders).
self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_SPREAD", "symbol": symbol, "depth": depth,
"bids": self.order_books[symbol].getInsideBids(depth),
"asks": self.order_books[symbol].getInsideAsks(depth),
"data": self.order_books[symbol].last_trade,
"mkt_closed": True if currentTime > self.mkt_close else False,
"book": ''}))
# It is possible to also send the pretty-printed order book to the agent for logging, but forcing pretty-printing
# of a large order book is very slow, so we should only do it with good reason. We don't currently
# have a configurable option for it.
# "book": self.order_books[symbol].prettyPrint(silent=True) }))
elif msg.body['msg'] == "QUERY_ORDER_STREAM":
symbol = msg.body['symbol']
length = msg.body['length']
if symbol not in self.order_books:
log_print("Order stream request discarded. Unknown symbol: {}", symbol)
else:
log_print("{} received QUERY_ORDER_STREAM ({}:{}) request from agent {}", self.name, symbol, length,
msg.body['sender'])
# We return indices [1:length] inclusive because the agent will want "orders leading up to the last
# L trades", and the items under index 0 are more recent than the last trade.
self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_ORDER_STREAM", "symbol": symbol, "length": length,
"mkt_closed": True if currentTime > self.mkt_close else False,
"orders": self.order_books[symbol].history[1:length + 1]
}))
elif msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME':
symbol = msg.body['symbol']
lookback_period = msg.body['lookback_period']
if symbol not in self.order_books:
log_print("Order stream request discarded. Unknown symbol: {}", symbol)
else:
log_print("{} received QUERY_TRANSACTED_VOLUME ({}:{}) request from agent {}", self.name, symbol, lookback_period,
msg.body['sender'])
self.sendMessage(msg.body['sender'], Message({"msg": "QUERY_TRANSACTED_VOLUME", "symbol": symbol,
"transacted_volume": self.order_books[symbol].get_transacted_volume(lookback_period),
"mkt_closed": True if currentTime > self.mkt_close else False
}))
elif msg.body['msg'] == "LIMIT_ORDER":
order = msg.body['order']
log_print("{} received LIMIT_ORDER: {}", self.name, order)
if order.symbol not in self.order_books:
log_print("Order discarded. Unknown symbol: {}", order.symbol)
else:
# Hand the order to the order book for processing.
self.order_books[order.symbol].handleLimitOrder(deepcopy(order))
self.publishOrderBookData()
elif msg.body['msg'] == "CANCEL_ORDER":
# Note: this is somewhat open to abuse, as in theory agents could cancel other agents' orders.
# An agent could also become confused if they receive a (partial) execution on an order they
# then successfully cancel, but receive the cancel confirmation first. Things to think about
# for later...
order = msg.body['order']
log_print("{} received CANCEL_ORDER: {}", self.name, order)
if order.symbol not in self.order_books:
log_print("Cancellation request discarded. Unknown symbol: {}", order.symbol)
else:
# Hand the order to the order book for processing.
self.order_books[order.symbol].cancelOrder(deepcopy(order))
self.publishOrderBookData()
elif msg.body['msg'] == 'MODIFY_ORDER':
# Replace an existing order with a modified order. There could be some timing issues
# here. What if an order is partially executed, but the submitting agent has not
      # yet received the notification, and submits a modification to the quantity of the
# (already partially executed) order? I guess it is okay if we just think of this
# as "delete and then add new" and make it the agent's problem if anything weird
# happens.
order = msg.body['order']
new_order = msg.body['new_order']
log_print("{} received MODIFY_ORDER: {}, new order: {}".format(self.name, order, new_order))
if order.symbol not in self.order_books:
log_print("Modification request discarded. Unknown symbol: {}".format(order.symbol))
else:
self.order_books[order.symbol].modifyOrder(deepcopy(order), deepcopy(new_order))
self.publishOrderBookData()
def updateSubscriptionDict(self, msg, currentTime):
# The subscription dict is a dictionary with the key = agent ID,
    # value = dict (key = symbol, value = list [levels (no of levels to receive updates for),
    # frequency (min number of ns between messages), last agent update timestamp]
    # e.g. {101 : {'AAPL' : [1, 10, pd.Timestamp('10:00:00')]}}
if msg.body['msg'] == "MARKET_DATA_SUBSCRIPTION_REQUEST":
agent_id, symbol, levels, freq = msg.body['sender'], msg.body['symbol'], msg.body['levels'], msg.body['freq']
self.subscription_dict[agent_id] = {symbol: [levels, freq, currentTime]}
elif msg.body['msg'] == "MARKET_DATA_SUBSCRIPTION_CANCELLATION":
agent_id, symbol = msg.body['sender'], msg.body['symbol']
del self.subscription_dict[agent_id][symbol]
def publishOrderBookData(self):
'''
The exchange agents sends an order book update to the agents using the subscription API if one of the following
conditions are met:
1) agent requests ALL order book updates (freq == 0)
2) order book update timestamp > last time agent was updated AND the orderbook update time stamp is greater than
the last agent update time stamp by a period more than that specified in the freq parameter.
'''
for agent_id, params in self.subscription_dict.items():
for symbol, values in params.items():
levels, freq, last_agent_update = values[0], values[1], values[2]
orderbook_last_update = self.order_books[symbol].last_update_ts
if (freq == 0) or \
((orderbook_last_update > last_agent_update) and ((orderbook_last_update - last_agent_update).delta >= freq)):
self.sendMessage(agent_id, Message({"msg": "MARKET_DATA",
"symbol": symbol,
"bids": self.order_books[symbol].getInsideBids(levels),
"asks": self.order_books[symbol].getInsideAsks(levels),
"last_transaction": self.order_books[symbol].last_trade}))
self.subscription_dict[agent_id][symbol][2] = orderbook_last_update
def logOrderBookSnapshots(self, symbol):
"""
Log full depth quotes (price, volume) from this order book at some pre-determined frequency. Here we are looking at
the actual log for this order book (i.e. are there snapshots to export, independent of the requested frequency).
"""
def get_quote_range_iterator(s):
""" Helper method for order book logging. Takes pandas Series and returns python range() from first to last
element.
"""
forbidden_values = [0, 19999900] # TODO: Put constant value in more sensible place!
quotes = sorted(s)
for val in forbidden_values:
try: quotes.remove(val)
except ValueError:
pass
return quotes
book = self.order_books[symbol]
if book.book_log:
print("Logging order book to file...")
dfLog = pd.DataFrame(book.book_log)
dfLog.set_index('QuoteTime', inplace=True)
dfLog = dfLog[~dfLog.index.duplicated(keep='last')]
dfLog.sort_index(inplace=True)
if str(self.book_freq).isdigit() and int(self.book_freq) == 0: # Save all possible information
# With all order snapshots saved DataFrame is very sparse
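        # Note: pd.SparseDataFrame was removed in pandas 1.0, so on newer pandas this branch
        # would need sparse dtypes instead (e.g. dfLog.astype(pd.SparseDtype("float"))).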
dfLog = pd.SparseDataFrame(dfLog)
# Get the full range of quotes at the finest possible resolution.
quotes = get_quote_range_iterator(dfLog.columns.unique())
# Restructure the log to have multi-level rows of all possible pairs of time and quote
# with volume as the only column.
if not self.wide_book:
filledIndex = pd.MultiIndex.from_product([dfLog.index, quotes], names=['time', 'quote'])
dfLog = dfLog.stack()
dfLog = dfLog.reindex(filledIndex)
filename = f'ORDERBOOK_{symbol}_FULL'
else: # Sample at frequency self.book_freq
# With multiple quotes in a nanosecond, use the last one, then resample to the requested freq.
dfLog = dfLog.resample(self.book_freq).ffill()
dfLog.sort_index(inplace=True)
# Create a fully populated index at the desired frequency from market open to close.
# Then project the logged data into this complete index.
time_idx = pd.date_range(self.mkt_open, self.mkt_close, freq=self.book_freq, closed='right')
dfLog = dfLog.reindex(time_idx, method='ffill')
dfLog.sort_index(inplace=True)
dfLog = dfLog.stack()
dfLog.sort_index(inplace=True)
# Get the full range of quotes at the finest possible resolution.
quotes = get_quote_range_iterator(dfLog.index.get_level_values(1).unique())
# Restructure the log to have multi-level rows of all possible pairs of time and quote
# with volume as the only column.
filledIndex = pd.MultiIndex.from_product([time_idx, quotes], names=['time', 'quote'])
dfLog = dfLog.reindex(filledIndex)
filename = f'ORDERBOOK_{symbol}_FREQ_{self.book_freq}'
# Final cleanup
if not self.wide_book:
dfLog.rename('Volume')
df = pd.SparseDataFrame(index=dfLog.index)
df['Volume'] = dfLog
else:
df = dfLog
df = df.reindex(sorted(df.columns), axis=1)
# Archive the order book snapshots directly to a file named with the symbol, rather than
# to the exchange agent log.
self.writeLog(df, filename=filename)
print("Order book logging complete!")
def sendMessage (self, recipientID, msg):
# The ExchangeAgent automatically applies appropriate parallel processing pipeline delay
# to those message types which require it.
# TODO: probably organize the order types into categories once there are more, so we can
# take action by category (e.g. ORDER-related messages) instead of enumerating all message
# types to be affected.
if msg.body['msg'] in ['ORDER_ACCEPTED', 'ORDER_CANCELLED', 'ORDER_EXECUTED']:
# Messages that require order book modification (not simple queries) incur the additional
# parallel processing delay as configured.
super().sendMessage(recipientID, msg, delay = self.pipeline_delay)
if self.log_orders: self.logEvent(msg.body['msg'], js.dump(msg.body['order'], strip_privates=True))
else:
# Other message types incur only the currently-configured computation delay for this agent.
super().sendMessage(recipientID, msg)
# Simple accessor methods for the market open and close times.
def getMarketOpen(self):
return self.__mkt_open
def getMarketClose(self):
return self.__mkt_close
def wide_book_warning(self):
""" Prints warning message about wide orderbook format usage. """
if self.wide_book and (self.book_freq != 0):
log_print(f"WARNING: (wide_book == True) and (book_freq != 0). Orderbook will be logged in column MultiIndex "
"format at frequency {self.book_freq}.")
|
StarcoderdataPython
|
5197128
|
<gh_stars>0
# Add the following code to your script to create a new Custom Vision service project.
# Insert your subscription keys in the appropriate definitions.
# Also, get your Endpoint URL from the Settings page of the Custom Vision website.
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry
ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com/"
# Replace with a valid key
training_key = "edb35a52aa04460d8edcffc78893ec9c3"
prediction_key = "<your prediction key>"
prediction_resource_id = "/subscriptions/b580d1b7-163e-4a91-bb43-1d260d3753a5/resourceGroups/mygroup/providers/Microsoft.CognitiveServices/accounts/Mol_Images"
publish_iteration_name = "model1"
trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)
# Create a new project
print ("Creating project...")
project = trainer.create_project("OPVDB_classification")
# Make two tags in the new project
mol_tag = trainer.create_tag(project.id, "mol")
curve_tag = trainer.create_tag(project.id, "curve")
# uvvis_tag = trainer.create_tag(project.id, "uvvis")
imaging_tag = trainer.create_tag(project.id, "imaging")
# To add the sample images to the project, insert the following code after the tag creation.
# This code uploads each image with its corresponding tag.
# You can upload up to 64 images in a single batch.
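# Hedged sketch (not from the original quickstart): if any tag ends up with more than 64 images,
# the batch limit above means the upload has to be chunked. One minimal way to do that:
def chunked(entries, size=64):
    """Yield successive batches of at most `size` entries."""
    for start in range(0, len(entries), size):
        yield entries[start:start + size]
# Usage would then look like:
#   for batch in chunked(image_list):
#       trainer.create_images_from_files(project.id, images=batch)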
base_image_url = "train_img/"
print("Adding images...")
image_list = []
for image_num in range(1, 12):
file_name = "mol/{}.jpg".format(image_num)
with open(base_image_url + "mol/" + file_name, "rb") as image_contents:
image_list.append(ImageFileCreateEntry(name=file_name, contents=image_contents.read(), tag_ids=[mol_tag.id]))
for image_num in range(1, 20):
file_name = "curve/{}.jpg".format(image_num)
with open(base_image_url + "curve/" + file_name, "rb") as image_contents:
image_list.append(ImageFileCreateEntry(name=file_name, contents=image_contents.read(), tag_ids=[curve_tag.id]))
for image_num in range(1, 15):
file_name = "imaging/{}.jpg".format(image_num)
with open(base_image_url + "imaging/" + file_name, "rb") as image_contents:
image_list.append(ImageFileCreateEntry(name=file_name, contents=image_contents.read(), tag_ids=[imaging_tag.id]))
upload_result = trainer.create_images_from_files(project.id, images=image_list)
if not upload_result.is_batch_successful:
print("Image batch upload failed.")
for image in upload_result.images:
print("Image status: ", image.status)
exit(-1)
# Train the classifier and publish
import time
print ("Training...")
iteration = trainer.train_project(project.id)
while (iteration.status != "Completed"):
iteration = trainer.get_iteration(project.id, iteration.id)
print ("Training status: " + iteration.status)
time.sleep(1)
# The iteration is now trained. Publish it to the project endpoint
trainer.publish_iteration(project.id, iteration.id, publish_iteration_name, prediction_resource_id)
print ("Done!")
# Get and use the published iteration on the prediction endpoint
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
# Now there is a trained endpoint that can be used to make a prediction
predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)
with open(base_image_url + "images/test_img/mol_1.png", "rb") as image_contents:
results = predictor.classify_image(
project.id, publish_iteration_name, image_contents.read())
# Display the results.
for prediction in results.predictions:
print("\t" + prediction.tag_name +
": {0:.2f}%".format(prediction.probability * 100))
|
StarcoderdataPython
|
6706097
|
'''
Applications of computer vision
'''
# pylint: disable=W0401
from .capture import *
from .ipcam import *
|
StarcoderdataPython
|
5097000
|
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C)
# 2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Request/response of PutObjectRetention and GetObjectRetention APIs."""
from __future__ import absolute_import
import datetime
from .commonconfig import COMPLIANCE, GOVERNANCE
from .time import from_iso8601utc, to_iso8601utc
from .xml import Element, SubElement, findtext
class Retention:
"""Retention configuration."""
def __init__(self, mode, retain_until_date):
if mode not in [GOVERNANCE, COMPLIANCE]:
raise ValueError(f"mode must be {GOVERNANCE} or {COMPLIANCE}")
if not isinstance(retain_until_date, datetime.datetime):
raise ValueError(
"retain until date must be datetime.datetime type",
)
self._mode = mode
self._retain_until_date = retain_until_date
@property
def mode(self):
"""Get mode."""
return self._mode
@property
def retain_until_date(self):
"""Get retain util date."""
return self._retain_until_date
@classmethod
def fromxml(cls, element):
"""Create new object with values from XML element."""
mode = findtext(element, "Mode", True)
retain_until_date = from_iso8601utc(
findtext(element, "RetainUntilDate", True),
)
return cls(mode, retain_until_date)
def toxml(self, element):
"""Convert to XML."""
element = Element("Retention")
SubElement(element, "Mode", self._mode)
SubElement(
element,
"RetainUntilDate",
to_iso8601utc(self._retain_until_date),
)
return element
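# Hedged usage sketch (illustrative only, not part of the MinIO module): building a Retention
# configuration and serialising it with the helpers above, assuming a 30-day GOVERNANCE hold.
def _example_retention_xml():
    """Return the XML element for a sample GOVERNANCE retention expiring in 30 days."""
    retain_until = datetime.datetime.utcnow() + datetime.timedelta(days=30)
    config = Retention(GOVERNANCE, retain_until)
    return config.toxml(None)  # toxml builds its own <Retention> element, so the argument is unused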
|
StarcoderdataPython
|
6455195
|
#!/usr/bin/env pytest
import logging
import os
from pathlib import Path
from pytest_httpserver import HTTPServer
from ornithology import (
config,
standup,
action,
JobStatus,
ClusterState,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Unset HTTP_PROXY for correct operation in Docker containers
lowered = dict()
for k in os.environ:
lowered[k.lower()] = k
os.environ.pop(lowered.get("http_proxy", "http_proxy"), None)
@action
def server():
with HTTPServer() as httpserver:
yield httpserver
@action(params={"404": 404, "500": 500})
def bad_url(server, request):
server.expect_request("/badurl").respond_with_data(status=request.param)
return "http://localhost:{}/badurl".format(server.port)
@action
def good_url(server):
server.expect_request("/goodurl").respond_with_data("Great success!")
return "http://localhost:{}/goodurl".format(server.port)
@action
def job_with_good_url(default_condor, good_url, test_dir, path_to_sleep):
job = default_condor.submit(
{
"executable": path_to_sleep,
"arguments": "1",
"log": (test_dir / "good_url.log").as_posix(),
"transfer_input_files": good_url,
"transfer_output_files": "goodurl",
"should_transfer_files": "YES",
}
)
assert job.wait(condition=ClusterState.all_terminal)
return job
@action
def job_with_bad_url(default_condor, bad_url, test_dir, path_to_sleep):
job = default_condor.submit(
{
"executable": path_to_sleep,
"arguments": "1",
"log": (test_dir / "bad_url.log").as_posix(),
"transfer_input_files": bad_url,
"should_transfer_files": "YES",
}
)
assert job.wait(condition=ClusterState.all_terminal)
return job
class TestCurlPlugin:
def test_job_with_good_url_succeeds(self, job_with_good_url):
assert job_with_good_url.state[0] == JobStatus.COMPLETED
def test_job_with_good_url_file_contents_are_correct(
self, job_with_good_url, test_dir
):
assert Path("goodurl").read_text() == "Great success!"
def test_job_with_bad_url_holds(self, job_with_bad_url):
assert job_with_bad_url.state[0] == JobStatus.HELD
|
StarcoderdataPython
|
1746644
|
<filename>training-data/src/classification_data_tools.py
from src.classification_print_tools import print_data_statistics
def limit_negative_samples(features, targets, negative_count):
limited_features = []
limited_targets = []
for i in range(0, len(targets)):
if targets[i] == 1 or negative_count > 0:
limited_features.append(features[i])
limited_targets.append(targets[i])
if targets[i] == 0:
negative_count -= 1
return limited_features, limited_targets
def split_data(features, targets, training_ratio, neg_limit=False, print_stats=True):
boundary_index = int(len(features) * training_ratio)
training_data = [
features[:boundary_index],
targets[:boundary_index]
]
if neg_limit != False:
training_data[0], training_data[1] = limit_negative_samples(training_data[0], training_data[1], neg_limit)
test_data = [
features[boundary_index:],
targets[boundary_index:]
]
if print_stats:
print_data_statistics(training_data, test_data)
return training_data, test_data
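# Hedged usage sketch (not part of the original module): splitting a tiny made-up dataset 80/20
# with the negative-sample limit disabled and statistics printing turned off.
if __name__ == "__main__":
    toy_features = [[0.1], [0.2], [0.3], [0.4], [0.5]]
    toy_targets = [0, 1, 0, 1, 0]
    train, test = split_data(toy_features, toy_targets, 0.8, neg_limit=False, print_stats=False)
    print(len(train[0]), len(test[0]))  # expected output: 4 1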
|
StarcoderdataPython
|
6441603
|
<gh_stars>0
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
sys.path.insert(0, os.getcwd())
from twisted.internet import reactor
from twisted.internet import defer
from txjsonrpc.netstring.jsonrpc import Proxy
def printValue(value):
print("Result: %s" % str(value))
def printError(error):
print('error', error)
def shutDown(data):
print("Shutting down reactor...")
reactor.stop()
proxy = Proxy('127.0.0.1', 7080)
dl = []
d = proxy.callRemote('system.listMethods')
d.addCallbacks(printValue, printError)
dl.append(d)
d = proxy.callRemote('echo', 'bite me')
d.addCallbacks(printValue, printError)
dl.append(d)
d = proxy.callRemote('testing.getList')
d.addCallbacks(printValue, printError)
dl.append(d)
d = proxy.callRemote('math.add', 3, 5)
d.addCallbacks(printValue, printError)
dl.append(d)
dl = defer.DeferredList(dl)
dl.addCallback(shutDown)
reactor.run()
|
StarcoderdataPython
|
11322354
|
<gh_stars>0
from __future__ import unicode_literals
import frappe
from frappe.core.doctype.user.user import create_contact
import re
def execute():
""" Create Contact for each User if not present """
frappe.reload_doc('integrations', 'doctype', 'google_contacts')
frappe.reload_doc('contacts', 'doctype', 'contact')
frappe.reload_doc('core', 'doctype', 'dynamic_link')
frappe.reload_doc('communication', 'doctype', 'call_log')
contact_meta = frappe.get_meta("Contact")
if contact_meta.has_field("phone_nos") and contact_meta.has_field("email_ids"):
frappe.reload_doc('contacts', 'doctype', 'contact_phone')
frappe.reload_doc('contacts', 'doctype', 'contact_email')
users = frappe.get_all('User', filters={"name": ('not in', 'Administrator, Guest')}, fields=["*"])
for user in users:
if user.first_name:
user.first_name = re.sub("[<>]+", '', frappe.safe_decode(user.first_name))
if user.last_name:
user.last_name = re.sub("[<>]+", '', frappe.safe_decode(user.last_name))
create_contact(user, ignore_links=True, ignore_mandatory=True)
|
StarcoderdataPython
|
176572
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name="Markdown-Video",
version="0.1",
url="http://github.com/Holzhaus/Python-Markdown-Video",
license="GPL",
author="<NAME>",
author_email="<EMAIL>",
description="Video Extension for Markdown",
classifiers=[
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Documentation",
"Topic :: Utilities",
],
platforms="any",
py_modules=["markdown_video"],
include_package_data=False,
install_requires=[
"Markdown",
],
)
|
StarcoderdataPython
|
1837439
|
<gh_stars>0
# Censys exceptions
class CensysTenableException(Exception):
"""Base Exception raised for errors in Censys Tenable integration."""
def __init__(self, message=None):
self.message = message or "Error: Censys ASM assets not exported into Tenable"
super().__init__(self.message)
class MissingCensysAPIKeyError(CensysTenableException):
"""Exception raised when the Censys API key is not configured."""
def __init__(
self, message="Censys API key is missing. Please set it in the env file."
):
super().__init__(message)
class InvalidLogLevelError(CensysTenableException):
"""Exception raised when the log level is not set to one of
[ CRITICAL, ERROR, WARNING, INFO, DEBUG ]."""
def __init__(
self,
message="Invalid log level. Please set it to one of "
"[ CRITICAL, ERROR, WARNING, INFO, DEBUG ] in the env file.",
):
super().__init__(message)
class RunIntervalTooSmallError(CensysTenableException):
"""Exception raised when the run interval is set to a value less than 120 and greater than -1."""
def __init__(
self,
message="Run interval must be set to a value greater than 120 or equal to -1.",
):
super().__init__(message)
class MissingRunIntervalError(CensysTenableException):
"""Exception raised when the run interval is not configured."""
def __init__(self, message="Invalid run interval. Please set to an integer"):
super().__init__(message)
# Tenable exceptions
class MissingTenableAccessKeyError(CensysTenableException):
"""Exception raised when no Tenable access key is configured."""
def __init__(
self,
message="Missing Tenable access key. Please set it in config.yml.",
):
super().__init__(message)
class MissingTenableSecretKeyError(CensysTenableException):
"""Exception raised when the Tenable secret key is not configured."""
def __init__(
self,
message="Missing Tenable secret key. Please set it in config.yml.",
):
super().__init__(message)
class MissingTenableHostSourceError(CensysTenableException):
"""Exception raised when the Tenable host source is not configured."""
def __init__(
self,
message="Missing Tenable host source. Please set it in config.yml.",
):
super().__init__(message)
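# Hedged sketch (not part of the original module): how a config loader might use the exceptions
# above. The accepted values assumed here follow the docstrings: -1 (disabled) or at least 120.
def _check_run_interval(run_interval):
    """Validate a run interval setting, raising the matching exception when it is invalid."""
    if run_interval is None:
        raise MissingRunIntervalError()
    if run_interval != -1 and run_interval < 120:
        raise RunIntervalTooSmallError()
    return run_interval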
|
StarcoderdataPython
|
6582346
|
"""The Alarmo Integration."""
import logging
import bcrypt
import base64
from homeassistant.core import (
callback,
)
from homeassistant.components.alarm_control_panel import DOMAIN as PLATFORM
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_CODE,
ATTR_NAME,
)
from homeassistant.core import HomeAssistant, asyncio
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.service import (
async_register_admin_service,
)
from . import const
from .store import async_get_registry
from .panel import (
async_register_panel,
async_unregister_panel,
)
from .card import async_register_card
from .websockets import async_register_websockets
from .sensors import (
SensorHandler,
ATTR_GROUP,
ATTR_ENTITIES
)
from .automations import AutomationHandler
from .mqtt import MqttHandler
from .event import EventHandler
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Track states and offer events for sensors."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Alarmo integration from a config entry."""
session = async_get_clientsession(hass)
store = await async_get_registry(hass)
coordinator = AlarmoCoordinator(hass, session, entry, store)
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(const.DOMAIN, coordinator.id)},
name=const.NAME,
model=const.NAME,
sw_version=const.VERSION,
manufacturer=const.MANUFACTURER,
)
hass.data.setdefault(const.DOMAIN, {})
hass.data[const.DOMAIN] = {
"coordinator": coordinator,
"areas": {},
"master": None
}
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=coordinator.id, data={})
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, PLATFORM)
)
# Register the panel (frontend)
await async_register_panel(hass)
await async_register_card(hass)
# Websocket support
await async_register_websockets(hass)
# Register custom services
register_services(hass)
return True
async def async_unload_entry(hass, entry):
"""Unload Alarmo config entry."""
unload_ok = all(
await asyncio.gather(
*[hass.config_entries.async_forward_entry_unload(entry, PLATFORM)]
)
)
if not unload_ok:
return False
async_unregister_panel(hass)
coordinator = hass.data[const.DOMAIN]["coordinator"]
await coordinator.async_unload()
return True
async def async_remove_entry(hass, entry):
"""Remove Alarmo config entry."""
async_unregister_panel(hass)
coordinator = hass.data[const.DOMAIN]["coordinator"]
await coordinator.async_delete_config()
del hass.data[const.DOMAIN]
class AlarmoCoordinator(DataUpdateCoordinator):
"""Define an object to hold Alarmo device."""
def __init__(self, hass, session, entry, store):
"""Initialize."""
self.id = entry.unique_id
self.hass = hass
self.entry = entry
self.store = store
self._subscriptions = []
self._subscriptions.append(
async_dispatcher_connect(
hass, "alarmo_platform_loaded", self.setup_alarm_entities
)
)
self.register_events()
super().__init__(hass, _LOGGER, name=const.DOMAIN)
@callback
def setup_alarm_entities(self):
self.hass.data[const.DOMAIN]["sensor_handler"] = SensorHandler(self.hass)
self.hass.data[const.DOMAIN]["automation_handler"] = AutomationHandler(self.hass)
self.hass.data[const.DOMAIN]["mqtt_handler"] = MqttHandler(self.hass)
self.hass.data[const.DOMAIN]["event_handler"] = EventHandler(self.hass)
areas = self.store.async_get_areas()
config = self.store.async_get_config()
for item in areas.values():
async_dispatcher_send(self.hass, "alarmo_register_entity", item)
if len(areas) > 1 and config["master"]["enabled"]:
async_dispatcher_send(self.hass, "alarmo_register_master", config["master"])
async def async_update_config(self, data):
if "master" in data:
old_config = self.store.async_get_config()
if old_config[const.ATTR_MASTER] != data["master"]:
if self.hass.data[const.DOMAIN]["master"]:
await self.async_remove_entity("master")
if data["master"]["enabled"]:
async_dispatcher_send(self.hass, "alarmo_register_master", data["master"])
else:
automations = self.hass.data[const.DOMAIN]["automation_handler"].get_automations_by_area(None)
if len(automations):
for el in automations:
self.store.async_delete_automation(el)
async_dispatcher_send(self.hass, "alarmo_automations_updated")
self.store.async_update_config(data)
async_dispatcher_send(self.hass, "alarmo_config_updated")
async def async_update_area_config(self, area_id: str = None, data: dict = {}):
if const.ATTR_REMOVE in data:
# delete an area
res = self.store.async_get_area(area_id)
if not res:
return
sensors = self.store.async_get_sensors()
sensors = dict(filter(lambda el: el[1]["area"] == area_id, sensors.items()))
if sensors:
for el in sensors.keys():
self.store.async_delete_sensor(el)
async_dispatcher_send(self.hass, "alarmo_sensors_updated")
automations = self.hass.data[const.DOMAIN]["automation_handler"].get_automations_by_area(area_id)
if len(automations):
for el in automations:
self.store.async_delete_automation(el)
async_dispatcher_send(self.hass, "alarmo_automations_updated")
self.store.async_delete_area(area_id)
await self.async_remove_entity(area_id)
if len(self.store.async_get_areas()) == 1 and self.hass.data[const.DOMAIN]["master"]:
await self.async_remove_entity("master")
elif self.store.async_get_area(area_id):
# modify an area
entry = self.store.async_update_area(area_id, data)
if "name" not in data:
async_dispatcher_send(self.hass, "alarmo_config_updated", area_id)
else:
await self.async_remove_entity(area_id)
async_dispatcher_send(self.hass, "alarmo_register_entity", entry)
else:
# create an area
entry = self.store.async_create_area(data)
async_dispatcher_send(self.hass, "alarmo_register_entity", entry)
config = self.store.async_get_config()
if len(self.store.async_get_areas()) == 2 and config["master"]["enabled"]:
async_dispatcher_send(self.hass, "alarmo_register_master", config["master"])
def async_update_sensor_config(self, entity_id: str, data: dict):
group = None
if ATTR_GROUP in data:
group = data[ATTR_GROUP]
del data[ATTR_GROUP]
if const.ATTR_REMOVE in data:
self.store.async_delete_sensor(entity_id)
self.assign_sensor_to_group(entity_id, None)
elif self.store.async_get_sensor(entity_id):
self.store.async_update_sensor(entity_id, data)
self.assign_sensor_to_group(entity_id, group)
else:
self.store.async_create_sensor(entity_id, data)
self.assign_sensor_to_group(entity_id, group)
async_dispatcher_send(self.hass, "alarmo_sensors_updated")
def async_update_user_config(self, user_id: str = None, data: dict = {}):
if const.ATTR_REMOVE in data:
self.store.async_delete_user(user_id)
return
if ATTR_CODE in data and data[ATTR_CODE]:
data[const.ATTR_CODE_FORMAT] = "number" if data[ATTR_CODE].isdigit() else "text"
data[const.ATTR_CODE_LENGTH] = len(data[ATTR_CODE])
hashed = bcrypt.hashpw(
data[ATTR_CODE].encode("utf-8"), bcrypt.gensalt(rounds=12)
)
hashed = base64.b64encode(hashed)
data[ATTR_CODE] = hashed.decode()
if not user_id:
self.store.async_create_user(data)
else:
if ATTR_CODE in data:
if const.ATTR_OLD_CODE not in data:
return False
elif not self.async_authenticate_user(data[const.ATTR_OLD_CODE], user_id):
return False
else:
del data[const.ATTR_OLD_CODE]
self.store.async_update_user(user_id, data)
else:
self.store.async_update_user(user_id, data)
def async_authenticate_user(self, code: str, user_id: str = None):
if not user_id:
users = self.store.async_get_users()
else:
users = {
user_id: self.store.async_get_user(user_id)
}
for (user_id, user) in users.items():
if not user[const.ATTR_ENABLED]:
continue
elif not user[ATTR_CODE] and not code:
return user
elif user[ATTR_CODE]:
hash = base64.b64decode(user[ATTR_CODE])
if bcrypt.checkpw(code.encode("utf-8"), hash):
return user
return
def async_update_automation_config(self, automation_id: str = None, data: dict = {}):
if const.ATTR_REMOVE in data:
self.store.async_delete_automation(automation_id)
elif not automation_id:
self.store.async_create_automation(data)
else:
self.store.async_update_automation(automation_id, data)
async_dispatcher_send(self.hass, "alarmo_automations_updated")
def register_events(self):
# handle push notifications with action buttons
@callback
async def async_handle_push_event(event):
if not event.data:
return
action = event.data.get("actionName") if "actionName" in event.data else event.data.get("action")
if action not in [
const.EVENT_ACTION_FORCE_ARM,
const.EVENT_ACTION_RETRY_ARM,
const.EVENT_ACTION_DISARM
]:
return
if self.hass.data[const.DOMAIN]["master"]:
alarm_entity = self.hass.data[const.DOMAIN]["master"]
elif len(self.hass.data[const.DOMAIN]["areas"]) == 1:
alarm_entity = list(self.hass.data[const.DOMAIN]["areas"].values())[0]
else:
_LOGGER.info("Cannot process the push action, since there are multiple areas.")
return
arm_mode = alarm_entity.arm_mode
if not arm_mode:
_LOGGER.info("Cannot process the push action, since the arm mode is not known.")
return
if action == const.EVENT_ACTION_FORCE_ARM:
_LOGGER.info("Received request for force arming")
await alarm_entity.async_handle_arm_request(arm_mode, skip_code=True, bypass_open_sensors=True)
elif action == const.EVENT_ACTION_RETRY_ARM:
_LOGGER.info("Received request for retry arming")
await alarm_entity.async_handle_arm_request(arm_mode, skip_code=True)
elif action == const.EVENT_ACTION_DISARM:
_LOGGER.info("Received request for disarming")
await alarm_entity.async_alarm_disarm(code=None, skip_code=True)
self._subscriptions.append(
self.hass.bus.async_listen(const.PUSH_EVENT, async_handle_push_event)
)
async def async_remove_entity(self, area_id: str):
entity_registry = self.hass.helpers.entity_registry.async_get(self.hass)
if area_id == "master":
entity = self.hass.data[const.DOMAIN]["master"]
entity_registry.async_remove(entity.entity_id)
self.hass.data[const.DOMAIN]["master"] = None
else:
entity = self.hass.data[const.DOMAIN]["areas"][area_id]
entity_registry.async_remove(entity.entity_id)
self.hass.data[const.DOMAIN]["areas"].pop(area_id, None)
def async_get_sensor_groups(self):
"""fetch a list of sensor groups (websocket API hook)"""
groups = self.store.async_get_sensor_groups()
return list(groups.values())
def async_get_group_for_sensor(self, entity_id: str):
groups = self.async_get_sensor_groups()
result = next((el for el in groups if entity_id in el[ATTR_ENTITIES]), None)
return result["group_id"] if result else None
def assign_sensor_to_group(self, entity_id: str, group_id: str):
old_group = self.async_get_group_for_sensor(entity_id)
if old_group and group_id != old_group:
# remove sensor from group
el = self.store.async_get_sensor_group(old_group)
if len(el[ATTR_ENTITIES]) > 2:
self.store.async_update_sensor_group(old_group, {
ATTR_ENTITIES: [x for x in el[ATTR_ENTITIES] if x != entity_id]
})
else:
self.store.async_delete_sensor_group(old_group)
if group_id:
# add sensor to group
el = self.store.async_get_sensor_group(group_id)
if not el:
_LOGGER.error("Failed to assign entity {} to group {}".format(entity_id, group_id))
return
self.store.async_update_sensor_group(group_id, {
ATTR_ENTITIES: el[ATTR_ENTITIES] + [entity_id]
})
async_dispatcher_send(self.hass, "alarmo_sensors_updated")
def async_update_sensor_group_config(self, group_id: str = None, data: dict = {}):
if const.ATTR_REMOVE in data:
self.store.async_delete_sensor_group(group_id)
elif not group_id:
self.store.async_create_sensor_group(data)
else:
self.store.async_update_sensor_group(group_id, data)
async_dispatcher_send(self.hass, "alarmo_sensors_updated")
async def async_unload(self):
"""remove all alarmo objects"""
# remove alarm_control_panel entities
areas = list(self.hass.data[const.DOMAIN]["areas"].keys())
for area in areas:
await self.async_remove_entity(area)
if self.hass.data[const.DOMAIN]["master"]:
await self.async_remove_entity("master")
del self.hass.data[const.DOMAIN]["sensor_handler"]
del self.hass.data[const.DOMAIN]["automation_handler"]
del self.hass.data[const.DOMAIN]["mqtt_handler"]
del self.hass.data[const.DOMAIN]["event_handler"]
# remove subscriptions for coordinator
while len(self._subscriptions):
self._subscriptions.pop()()
async def async_delete_config(self):
"""wipe alarmo storage"""
await self.store.async_delete()
@callback
def register_services(hass):
"""Register services used by alarmo component."""
coordinator = hass.data[const.DOMAIN]["coordinator"]
async def async_srv_toggle_user(call):
"""Enable a user by service call"""
name = call.data.get(ATTR_NAME)
enable = True if call.service == const.SERVICE_ENABLE_USER else False
users = coordinator.store.async_get_users()
user = next((item for item in list(users.values()) if item[ATTR_NAME] == name), None)
if user is None:
_LOGGER.warning("Failed to {} user, no match for name '{}'".format("enable" if enable else "disable", name))
return
coordinator.store.async_update_user(user[const.ATTR_USER_ID], {const.ATTR_ENABLED: enable})
_LOGGER.debug("User user '{}' was {}".format(name, "enabled" if enable else "disabled"))
async_register_admin_service(
hass, const.DOMAIN, const.SERVICE_ENABLE_USER, async_srv_toggle_user, schema=const.SERVICE_TOGGLE_USER_SCHEMA
)
async_register_admin_service(
hass, const.DOMAIN, const.SERVICE_DISABLE_USER, async_srv_toggle_user, schema=const.SERVICE_TOGGLE_USER_SCHEMA
)
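# Hedged illustration (not part of Alarmo itself): the coordinator above stores user codes as a
# base64-encoded bcrypt hash and verifies candidates with bcrypt.checkpw. A minimal round trip:
def _hash_code_sketch(code: str) -> str:
    """Hash an alarm code the same way async_update_user_config does."""
    hashed = bcrypt.hashpw(code.encode("utf-8"), bcrypt.gensalt(rounds=12))
    return base64.b64encode(hashed).decode()
def _verify_code_sketch(code: str, stored: str) -> bool:
    """Check a candidate code against a stored hash, mirroring async_authenticate_user."""
    return bcrypt.checkpw(code.encode("utf-8"), base64.b64decode(stored))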
|
StarcoderdataPython
|
138535
|
<filename>usuarios/views.py
from django.shortcuts import render
from django.shortcuts import redirect
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth.models import User
from products.models import Product
from cart.models import Cart
# Create your views here.
def home_view(request):
context = {}
context["products"] = Product.objects.all()
return render(request, 'home.html', context=context)
def logout_view(request):
if request.user.is_authenticated:
logout(request)
return redirect('home')
def login_view(request):
if(request.method == 'POST'):
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if (user is not None):
login(request, user)
return redirect('home')
return render(request, 'login.html', {'error': 'Incorrect credentials, please try again'})
return render(request, 'login.html')
def register_view(request):
if(request.method == 'POST'):
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
user = User.objects.create_user(username=username, email=email, password=password)
user.save()
user_cart = Cart.objects.create(user=user)
user_cart.save()
login(request, user)
return redirect('home')
return render(request, 'register.html')
|
StarcoderdataPython
|
1682952
|
<reponame>xuanqing94/NeuralSDE
import torch
import torch.nn as nn
from .diffusion_fn import MultiplicativeNoise, AdditiveNoise
from .integrated_flow import IntegratedFlow
from .flow_fn import RandFlowFn, RandFlowFn_v2
from .flow_net import MultiScaleFlow
from .layers.conv2d import RandConv2d
from .layers.linear import RandLinear
from .layers.groupnorm2d import RandGroupNorm
def rand_norm(dim, **rand_args):
return RandGroupNorm(min(32, dim), dim, **rand_args)
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
shape = torch.prod(torch.tensor(x.shape[1:])).item()
return x.view(-1, shape)
def heavy_downsampling(in_nc, nc_hidden, **rand_args):
downsampling_layers = [
RandConv2d(in_nc, nc_hidden, 3, 1, 1, **rand_args),
rand_norm(nc_hidden, **rand_args),
nn.ReLU(inplace=True),
RandConv2d(nc_hidden, nc_hidden * 2, 3, 1, 1, **rand_args),
rand_norm(nc_hidden * 2, **rand_args),
nn.ReLU(inplace=True),
RandConv2d(nc_hidden * 2, nc_hidden * 4, 4, 2, 1, **rand_args),
]
return downsampling_layers, nc_hidden * 4
def light_downsampling(in_nc, nc_hidden, **rand_args):
downsampling_layers = [RandConv2d(in_nc, nc_hidden, 3, 1, **rand_args)]
return downsampling_layers, nc_hidden
class BayesianClassifier(nn.Module):
def __init__(
self,
n_scale,
nclass,
nc,
nc_hidden,
grid_size,
T,
downsampling_type="heavy",
version="v1",
**rand_args,
):
super().__init__()
if downsampling_type == "light":
layers, nc_hidden = light_downsampling(nc, nc_hidden, **rand_args)
self.downsampling_layers = nn.Sequential(*layers)
elif downsampling_type == "heavy":
layers, nc_hidden = heavy_downsampling(nc, nc_hidden, **rand_args)
self.downsampling_layers = nn.Sequential(*layers)
else:
raise ValueError("Invalid value of downsampling_type")
flows = []
for _ in range(n_scale):
flow_fn = RandFlowFn(nc_hidden, **rand_args) if version == "v1" else RandFlowFn_v2(nc_hidden, **rand_args)
flows.append(IntegratedFlow(flow_fn, None, grid_size, T))
self.multiscale_flows = MultiScaleFlow(flows)
self.fc_layers = nn.Sequential(
rand_norm(nc_hidden, **rand_args),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d((1, 1)),
Flatten(),
RandLinear(nc_hidden, nclass, **rand_args),
)
def forward(self, x):
out = self.downsampling_layers(x)
out = self.multiscale_flows(out)
out = self.fc_layers(out)
return out
if __name__ == "__main__":
rand_args = {
'sigma_0': 1.0,
'N': 100,
'init_s': 1.0,
'alpha': 0.01,
}
classifier = BayesianClassifier(
n_scale=3, nclass=10, nc=3, nc_hidden=64, grid_size=0.1, T=1.0, **rand_args,
)
x = torch.randn(13, 3, 32, 32)
out = classifier(x)
print(out.size())
|
StarcoderdataPython
|
4804075
|
"""
Tests for fileio module
"""
# author: <NAME> (arm61)
import unittest
from datetime import datetime
import os.path
import pytest
import yaml
from orsopy.fileio.orso import Orso, OrsoDataset
from orsopy.fileio.data_source import (DataSource, Experiment, Sample,
Measurement, InstrumentSettings)
from orsopy.fileio.reduction import Reduction, Software
from orsopy.fileio.base import Person, ValueRange, Value, File, Column, Creator
from orsopy.fileio.base import _validate_header_data, _read_header_data
from orsopy import fileio as fileio
import numpy as np
class TestOrso(unittest.TestCase):
"""
Testing the Orso class.
"""
def test_creation(self):
"""
Creation of Orso object.
"""
c = Creator(
'A Person', 'Some Uni', datetime.now(), "",
contact="<EMAIL>"
)
e = Experiment(
'Experiment 1', 'ESTIA', datetime(2021, 7, 7, 16, 31, 10),
'neutrons'
)
s = Sample('The sample')
inst = InstrumentSettings(
Value(4.0, 'deg'), ValueRange(2., 12., 'angstrom')
)
df = [File('README.rst', None)]
m = Measurement(inst, df, scheme="angle-dispersive")
p = Person('A Person', 'Some Uni')
ds = DataSource(p, e, s, m)
soft = Software('orsopy', '0.0.1', 'macOS-10.15')
p2 = Person('<NAME>', 'European Spallation Source')
redn = Reduction(
soft, datetime(2021, 7, 14, 10, 10, 10),
p2, ['footprint', 'background']
)
cols = [Column("Qz", unit='1/angstrom'), Column("R")]
value = Orso(c, ds, redn, cols, 0)
assert value.creator.name == "<NAME>"
assert value.creator.contact == "<EMAIL>"
ds = value.data_source
dsm = ds.measurement
assert ds.owner.name == '<NAME>'
assert dsm.data_files[0].file == 'README.rst'
assert dsm.instrument_settings.incident_angle.magnitude == 4.0
assert dsm.instrument_settings.wavelength.min == 2.0
assert dsm.instrument_settings.wavelength.max == 12.0
assert value.reduction.software.name == 'orsopy'
assert value.reduction.software.version == "0.0.1"
assert value.reduction.timestamp == datetime(2021, 7, 14, 10, 10, 10)
assert value.columns[0].name == 'Qz'
assert value.columns[1].name == 'R'
assert value.data_set == 0
h = value.to_yaml()
h = "\n".join(
["# ORSO reflectivity data file | 0.1 standard | YAML encoding"
" | https://www.reflectometry.org/",
h]
)
g = yaml.safe_load_all(h)
_validate_header_data([next(g)])
def test_creation_data_set1(self):
"""
Creation of Orso object with a non-zero data_set.
"""
c = Creator(
'<NAME>', '<NAME>', datetime.now(), "",
contact="<EMAIL>"
)
e = Experiment(
'Experiment 1', 'ESTIA', datetime(2021, 7, 7, 16, 31, 10),
'neutrons'
)
s = Sample('The sample')
inst = InstrumentSettings(
Value(4.0, 'deg'), ValueRange(2., 12., 'angstrom')
)
df = [File('README.rst', None)]
m = Measurement(inst, df, scheme="angle-dispersive")
p = Person('A Person', 'Some Uni')
ds = DataSource(p, e, s, m)
soft = Software('orsopy', '0.0.1', 'macOS-10.15')
p2 = Person('<NAME>', 'European Spallation Source')
redn = Reduction(
soft, datetime(2021, 7, 14, 10, 10, 10), p2,
['footprint', 'background']
)
cols = [Column("Qz", unit='1/angstrom'), Column("R")]
value = Orso(c, ds, redn, cols, 1)
dsm = value.data_source.measurement
assert value.data_source.owner.name == 'A Person'
assert dsm.data_files[0].file == 'README.rst'
assert value.reduction.software.name == 'orsopy'
assert value.columns[0].name == 'Qz'
assert value.data_set == 1
# check that data_set can also be a string.
value = Orso(c, ds, redn, cols, 'fokdoks')
assert value.data_set == 'fokdoks'
# don't want class construction coercing a str to an int
value = Orso(c, ds, redn, cols, '1')
assert value.data_set == '1'
def test_write_read(self):
# test write and read of multiple datasets
info = fileio.Orso.empty()
info2 = fileio.Orso.empty()
data = np.zeros((100, 3))
data[:] = np.arange(100.0)[:, None]
info.columns = [
fileio.Column("Qz", "1/angstrom"),
fileio.Column("R"),
fileio.Column("sR"),
]
info2.columns = info.columns
info.data_source.measurement.instrument_settings.polarization = "p"
info2.data_source.measurement.instrument_settings.polarization = "m"
info.data_set = "up polarization"
info2.data_set = "down polarization"
info2.data_source.sample.comment = "this is a comment"
ds = fileio.OrsoDataset(info, data)
ds2 = fileio.OrsoDataset(info2, data)
info3 = fileio.Orso(
creator=fileio.Creator(
name="<NAME>",
affiliation="Paul Scherrer Institut",
timestamp=datetime.now(),
computer="localhost",
),
data_source=fileio.DataSource(
sample=fileio.Sample(
name="My Sample",
type="solid",
description="Something descriptive",
),
experiment=fileio.Experiment(
title="Main experiment",
instrument="Reflectometer",
date=datetime.now(),
probe="x-rays",
),
owner=fileio.Person("someone", "important"),
measurement=fileio.Measurement(
instrument_settings=fileio.InstrumentSettings(
incident_angle=fileio.Value(13.4, "deg"),
wavelength=fileio.Value(5.34, "A"),
),
data_files=["abc", "def", "ghi"],
references=["more", "files"],
scheme="angle-dispersive",
),
),
reduction=fileio.Reduction(software="awesome orso"),
data_set="Filled header",
columns=info.columns,
)
ds3 = fileio.OrsoDataset(info3, data)
fileio.save_orso([ds, ds2, ds3], "test.ort", comment='Interdiffusion')
ls1, ls2, ls3 = fileio.load_orso("test.ort")
assert ls1 == ds
assert ls2 == ds2
assert ls3 == ds3
_read_header_data("test.ort", validate=True)
def test_unique_dataset(self):
# checks that data_set is unique on saving of OrsoDatasets
info = Orso.empty()
info.data_set = 0
info.columns = [Column("stuff")] * 4
info2 = Orso.empty()
info2.data_set = 0
info2.columns = [Column("stuff")] * 4
ds = OrsoDataset(info, np.empty((2, 4)))
ds2 = OrsoDataset(info2, np.empty((2, 4)))
with pytest.raises(ValueError):
fileio.save_orso([ds, ds2], 'test_data_set.ort')
def test_extra_elements(self):
# if there are extra elements present in the ORT file they should still
# be loadable. They won't be there as dataclass fields, but they'll be
# visible as attributes.
datasets = fileio.load_orso(os.path.join("tests", "test_example.ort"))
info = datasets[0].info
assert hasattr(
info.data_source.measurement.instrument_settings.incident_angle,
'resolution'
)
class TestFunctions(unittest.TestCase):
"""
Tests for functionality in the Orso module.
"""
def test_make_empty(self):
"""
Creation of the empty Orso object.
"""
empty = Orso.empty()
assert issubclass(empty.__class__, Orso)
ds = empty.data_source
assert ds.owner.name is None
assert ds.experiment.title is None
assert ds.experiment.instrument is None
assert ds.experiment.date is None
assert ds.experiment.probe is None
assert ds.sample.name is None
assert ds.measurement.instrument_settings.incident_angle.magnitude is None
assert ds.measurement.instrument_settings.wavelength.magnitude is None
assert ds.measurement.data_files is None
assert empty.reduction.software.name is None
assert empty.reduction.software.version is None
assert empty.reduction.software.platform is None
assert empty.reduction.timestamp is None
assert empty.reduction.creator is None
assert ds.owner.affiliation is None
assert ds.sample.name is None
assert empty.reduction.corrections is None
assert empty.reduction.creator is None
assert empty.columns == [Column.empty()]
assert empty.data_set is None
dct = empty.to_dict()
_validate_header_data([dct])
def test_empty_to_yaml(self):
"""
Checking yaml string form empty Orso object.
TODO: Fix once correct format is known.
"""
empty = Orso.empty()
req = (
'creator:\n name: null\n affiliation: null\n timestamp: null\n'
' computer: null\ndata_source:\n owner:\n name: null\n'
' affiliation: null\n experiment:\n title: null\n'
' instrument: null\n date: null\n probe: null\n'
' sample:\n name: null\n measurement:\n'
' instrument_settings:\n incident_angle:\n magnitude: null\n'
' wavelength:\n magnitude: null\n polarization: unpolarized\n'
' data_files: null\nreduction:\n software:\n name: null\n'
'columns:\n- name: null\n'
)
assert empty.to_yaml() == req
|
StarcoderdataPython
|
1971753
|
<filename>python/sklearn/linear-regression/workload-analysis/bench-gpu/post-process/roofline/roofline.py
#!/usr/bin/env python3
from collections import OrderedDict
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns;
def extract_model_data(data_file_path, debug=True):
data_file_reader = open(data_file_path, 'r')
# key is net name, value is list of <batch size, gflops, intensity> tuples
gflops_intensity_dict = {}
try:
text_lines = data_file_reader.readlines()
# two lines, the first line is glops, the second is operational intensity
for i, line in enumerate(text_lines):
# extract the first line(GFLOPS) and then get the next line(Operational Intensity)
if i % 2 == 0:
# extract gflops
current_line = line.rstrip('\n')
gflops_list = current_line.split('\t')
# extract operational intensity
next_line = text_lines[i+1].rstrip('\n')
intensity_list = next_line.split('\t')
dict_values_list = []
for j, item in enumerate(gflops_list):
# the first item is net name
if j == 0:
continue
# batch size, gflops, op intensity
dict_values_list.append((2**(j-1), float(item), float(intensity_list[j])))
gflops_intensity_dict[gflops_list[0]] = dict_values_list
else:
continue
if debug:
print(gflops_intensity_dict)
finally:
data_file_reader.close()
return gflops_intensity_dict
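# Illustrative note (an assumption inferred from the parsing above, not taken from the repo): each
# network contributes two tab-separated lines, GFLOPS first and operational intensity second, e.g.
#   ResNet50<TAB>123.4<TAB>210.9<TAB>...   <- GFLOPS, one column per batch size
#   ResNet50<TAB>35.1<TAB>38.7<TAB>...     <- operational intensity for the same batch sizes
# so column j (1-based, after the name) corresponds to batch size 2**(j-1).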
def extract_op_data(data_file_path, debug=True):
data_file_reader = open(data_file_path, 'r')
# key is net name, value is list of <batch size, gflops, intensity> tuples
op_data_list = []
try:
text_lines = data_file_reader.readlines()
for line in text_lines:
line = line.rstrip('\n')
_, op_type, gflops, intensity = line.split('\t')
op_data_list.append((op_type, float(gflops), float(intensity)))
if debug:
print(op_data_list)
finally:
data_file_reader.close()
return op_data_list
# find out the max intensity and min gflops
def find_boundary_pairs(gflops_intensity_dict):
max_intensity = -1
min_flops = 1.79e+100
for k, v in gflops_intensity_dict.items():
for _, gflops, intensity in v:
max_intensity = max(max_intensity, intensity)
min_flops = min(min_flops, gflops)
return max_intensity, min_flops
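# Hedged illustration (not part of the original script): the roofline model visualised below caps
# attainable performance by either compute or memory bandwidth,
#   attainable GFLOPS = min(peak_flops, peak_membdw * operational_intensity),
# with the ridge point at intensity = peak_flops / peak_membdw. A minimal sketch:
def attainable_gflops(intensity, peak_flops, peak_membdw):
    """Roofline bound (GFLOPS) for a given operational intensity in FLOps/Byte."""
    return min(peak_flops, peak_membdw * intensity)
# e.g. for the Titan Xp numbers used in __main__: attainable_gflops(10, 12150, 547.7) ~= 5477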
def draw_model_roofline(gflops_intensity_dict, peak_flops, peak_membdw):
# set color palette for different dnns
net_type_set = {k for k in gflops_intensity_dict}
colors = sns.color_palette("hls", n_colors=len(net_type_set) + 2)
net_color_map = {val:i for i, val in enumerate(list(net_type_set))}
fig, ax = plt.subplots(figsize=(6, 6))
# 1. plot the <flops, intensity> pairs
for k, v in gflops_intensity_dict.items():
# k is net name
if k == 'MobileNetV1':
for batch_size, gflops, intensity in v:
ax.plot(intensity, gflops, 'x',
color=colors[net_color_map[k]], label=k, marker='x')
elif k == 'SqueezeNet':
for batch_size, gflops, intensity in v:
ax.plot(intensity, gflops, 'v',
color=colors[net_color_map[k]], label=k, marker='v')
elif k == 'DenseNet121':
for batch_size, gflops, intensity in v:
ax.plot(intensity, gflops, '*',
color=colors[net_color_map[k]], label=k, marker='*')
elif k == 'ResNet50':
for batch_size, gflops, intensity in v:
ax.plot(intensity, gflops, 's',
color=colors[net_color_map[k]], label=k, marker='s')
elif k == 'SSD_MobileNetV1':
for batch_size, gflops, intensity in v:
ax.plot(intensity, gflops, 'd',
color=colors[net_color_map[k]], label=k, marker='d')
elif k == 'SSD_VGG16':
for batch_size, gflops, intensity in v:
ax.plot(intensity, gflops, 'p',
color=colors[net_color_map[k]], label=k, marker='p')
# 2. plot the roof line
x1 = peak_flops / peak_membdw
y1 = peak_flops
max_op_intensity, min_flops = find_boundary_pairs(gflops_intensity_dict)
print('max intensity:', max_op_intensity, 'min flops:', min_flops)
if max_op_intensity < x1:
'''
for this case: -----
/
/*
/*
/* x
/ x
'''
ax.hlines(y=y1, xmin=x1,
xmax=x1+5, linewidth=1.5, color='red')
else:
ax.hlines(y=y1, xmin=x1,
xmax=max_op_intensity+10, linewidth=1.5, color='red')
x2 = min_flops/ peak_membdw
y2 = peak_membdw * x2
if x2 > x1:
'''
for this case: -------
\ * x
\ x *
\ * x
'''
x2 = peak_flops / peak_membdw - 0.1
y2 = (peak_flops / peak_membdw)*x2
print('x1:', x1, ' y1:', y1, ' x2:', x2, ' y2:', y2)
#ax.plot([x1, x2], [y1, y2], linewidth=1.5, color='red')
ax.plot([0.1, x1], [peak_membdw*0.1, y1], linewidth=1.5, color='red')
ax.set_yscale('log')
ax.set_xscale('log')
#plt.xscale('log', basex=2)
#plt.yscale('log', basey=2)
ax.set_ylabel('GFLOps/sec', fontsize=10)
ax.set_xlabel('Operational Intensity (FLOps/Byte)', fontsize=10)
handles, labels = ax.get_legend_handles_labels()
#print(labels)
labels_od = OrderedDict(zip(labels, handles))
ax.legend(labels_od.values(), labels_od.keys(), loc='upper left')
plt.show()
def draw_op_roofline(op_data_list, peak_flops, peak_membdw):
op_type_set = {record[0] for record in op_data_list}
colors = sns.color_palette("hls", n_colors=len(op_type_set) + 2)
layer_color_map = {val:i for i, val in enumerate(list(op_type_set))}
#print(layer_color_map)
fig, ax = plt.subplots(figsize=(6, 6))
# 1. plot the <flops, intensity> pairs
for i in op_data_list:
op_type, flops, intensity = str(i[0]), i[1], i[2]
if op_type == 'Convolution' or op_type == 'convolution':
ax.plot(intensity, flops, 'x',
color=colors[layer_color_map[op_type]], label=op_type, marker='x')
elif op_type == 'InnerProduct':
ax.plot(intensity, flops, 'v',
color=colors[layer_color_map[op_type]], label=op_type, marker='v')
elif op_type == 'Pooling' or op_type == 'pooling':
ax.plot(intensity, flops, '*',
color=colors[layer_color_map[op_type]], label=op_type, marker='*')
elif op_type == 'Scale' or op_type == 'scale':
ax.plot(intensity, flops, 's',
color=colors[layer_color_map[op_type]], label=op_type, marker='s')
elif op_type == 'Eltwise' or op_type == 'element-wise':
ax.plot(intensity, flops, 'd',
color=colors[layer_color_map[op_type]], label=op_type, marker='d')
elif op_type == 'ReLU' or op_type == 'relu':
ax.plot(intensity, flops, 'p',
color=colors[layer_color_map[op_type]], label=op_type, marker='p')
elif op_type == 'BatchNorm' or op_type == 'batchnorm':
ax.plot(intensity, flops, 'o',
color=colors[layer_color_map[op_type]], label=op_type, marker='o')
elif op_type == 'Softmax' or op_type == 'softmax':
ax.plot(intensity, flops, '+',
color=colors[layer_color_map[op_type]], label=op_type, marker='+')
elif op_type == 'LRN' or op_type == 'lrn':
ax.plot(intensity, flops, '^',
color=colors[layer_color_map[op_type]], label=op_type, marker='^')
elif op_type == 'GEMV' or op_type == 'gemv':
ax.plot(intensity, flops, '<',
color=colors[layer_color_map[op_type]], label=op_type, marker='<')
elif op_type == 'GEMM' or op_type == 'gemm':
ax.plot(intensity, flops, 'P',
color=colors[layer_color_map[op_type]], label=op_type, marker='P')
# 2. plot the roof line
x1 = peak_flops / peak_membdw
y1 = peak_flops
max_op_intensity = max([i[2] for i in op_data_list])
ax.hlines(y=y1, xmin=x1,
xmax=max_op_intensity+15, linewidth=1.5, color='red')
min_flops = min([i[1] for i in op_data_list])
x2 = min_flops / peak_membdw
y2 = peak_membdw * x2
ax.plot([x1, x2], [y1, y2], linewidth=1.5, color='red')
ax.set_yscale('log')
ax.set_xscale('log')
#plt.xscale('log', basex=2)
#plt.yscale('log', basey=2)
ax.set_ylabel('GFLOps/sec', fontsize=10)
ax.set_xlabel('Operational Intensity (FLOps/Byte)', fontsize=10)
handles, labels = ax.get_legend_handles_labels()
#print(labels)
labels_od = OrderedDict(zip(labels, handles))
ax.legend(labels_od.values(), labels_od.keys(), loc='upper left')
plt.show()
if __name__ == '__main__':
# Titan Xp 12.15 TFLOPS, 547.7 GB/s
# Tesla K40m 5.046 TFLOPS, 288.4 GB/s
titan_model_data = extract_model_data('titan_xp_model_throughput.txt')
titan_peak_flops = 12.15*1000
titan_peak_mem_bandwidth = 547.7
draw_model_roofline(titan_model_data, titan_peak_flops, titan_peak_mem_bandwidth)
k40m_model_data = extract_model_data('tesla_k40m_model_throughput.txt')
k40m_peak_flops = 5.046*1000
k40m_peak_mem_bandwidth = 288.4
#draw_model_roofline(k40m_model_data, titan_peak_flops, titan_peak_mem_bandwidth)
titan_op_data = extract_op_data('titan_xp_op_throughput.txt')
draw_op_roofline(titan_op_data, titan_peak_flops, titan_peak_mem_bandwidth)
k40m_op_data = extract_op_data('tesla_k40m_op_throughput.txt')
#draw_op_roofline(k40m_op_data, k40m_peak_flops, k40m_peak_mem_bandwidth)
tesla_k40m_mobilenet_op_data = extract_op_data('tesla_k40m_mobilenet_batch_op.txt')
#draw_op_roofline(tesla_k40m_mobilenet_op_data, k40m_peak_flops, k40m_peak_mem_bandwidth)
titan_xp_mobilenet_op_data = extract_op_data('titan_xp_mobilenet_batch_op.txt')
draw_op_roofline(titan_xp_mobilenet_op_data, titan_peak_flops, titan_peak_mem_bandwidth)
|
StarcoderdataPython
|
11263904
|
"""
intercepts.registration
~~~~~~~~~~~~~~~~~~~~~~~
This module implements the intercepts registration api.
"""
import atexit
import sys
import types
from functools import partial # , update_wrapper
from typing import Callable, Dict, List, Union
import intercepts.builtinhandler as builtinhandler
from .functypes import PyCFunctionObject
from .utils import (
addr,
create_code_like,
copy_builtin,
replace_builtin,
copy_function,
replace_function,
update_wrapper,
)
MethodOrFunction = Union[types.FunctionType, types.MethodType]
_HANDLERS = {} # type: Dict[int, List[Callable]]
def _intercept_handler(*args, **kwargs):
consts = sys._getframe(0).f_code.co_consts
func_id = consts[-1]
_func = _HANDLERS[func_id][0]
handler = _func
for _handler in _HANDLERS[func_id][2:]:
handler = update_wrapper(partial(_handler, handler), _func)
result = handler(*args, **kwargs)
return result
def register_builtin(func, handler):
func_addr = addr(func)
if func_addr not in _HANDLERS:
func_copy = PyCFunctionObject()
copy_builtin(addr(func_copy), func_addr)
_handler = builtinhandler.get_handler(func_addr)
_HANDLERS[func_addr] = [func_copy, _handler]
replace_builtin(func_addr, addr(_handler))
_HANDLERS[func_addr].append(handler)
return func
def register_function(
func: types.FunctionType, handler: types.FunctionType
) -> types.FunctionType:
r"""Registers an intercept handler for a function.
:param func: The function to intercept.
:param handler: A function to handle the intercept.
"""
func_addr = addr(func)
if func_addr not in _HANDLERS:
handler_code = create_code_like(
_intercept_handler.__code__,
consts=(_intercept_handler.__code__.co_consts + (func_addr,)),
name=func.__name__,
)
global_dict = _intercept_handler.__globals__
_handler = types.FunctionType(
handler_code,
global_dict,
func.__name__,
func.__defaults__,
func.__closure__,
)
_handler.__code__ = handler_code
handler_addr = addr(_handler)
def func_copy(*args, **kwargs):
pass
copy_function(addr(func_copy), func_addr)
_HANDLERS[func_addr] = [func_copy, _handler]
replace_function(func_addr, handler_addr)
_HANDLERS[func_addr].append(handler)
return func
def register_method(
method: types.MethodType, handler: types.FunctionType
) -> types.MethodType:
r"""Registers an intercept handler for a method.
:param method: The method to intercept.
:param handler: A function to handle the intercept.
"""
register_function(method.__func__, handler)
return method
def register(obj: MethodOrFunction, handler: types.FunctionType) -> MethodOrFunction:
r"""Registers an intercept handler.
:param obj: The callable to intercept.
:param handler: A function to handle the intercept.
Usage::
>>> import intercepts
>>> increment = lambda x: x + 1
>>> handler = lambda func, arg: arg - (func(arg) - arg)
>>> intercepts.register(increment, handler)
>>> increment(43)
42
"""
if not isinstance(handler, types.FunctionType):
raise ValueError("Argument `handler` must be a function.")
if not callable(obj):
raise TypeError("Cannot intercept non-callable objects")
if obj == handler:
raise ValueError("A function cannot handle itself")
if isinstance(obj, types.BuiltinFunctionType):
return register_builtin(obj, handler)
elif isinstance(obj, types.FunctionType):
return register_function(obj, handler)
elif isinstance(obj, types.MethodType):
return register_method(obj, handler)
else:
raise NotImplementedError("Unsupported type: {}".format(repr(type(obj))))
def unregister(obj: MethodOrFunction, depth: int = -1) -> MethodOrFunction:
r"""Unregisters the handlers for an object.
:param obj: The callable for which to unregister handlers.
:param depth: (optional) The maximum number of handlers to unregister. Defaults to all.
"""
# TODO : use an isinstance replacement
if isinstance(obj, (types.BuiltinFunctionType, types.FunctionType)):
func_addr = addr(obj)
else:
func_addr = addr(obj.__func__)
handlers = _HANDLERS[func_addr]
if depth < 0 or len(handlers) - depth <= 2:
orig_func = handlers[0]
if isinstance(orig_func, types.BuiltinFunctionType):
replace_builtin(func_addr, addr(orig_func))
elif isinstance(orig_func, types.FunctionType):
replace_function(func_addr, addr(orig_func))
else:
raise ValueError("Unknown type of handled function: %s" % type(orig_func))
del _HANDLERS[func_addr]
assert func_addr not in _HANDLERS
else:
_HANDLERS[func_addr] = handlers[:-depth]
return obj
@atexit.register
def unregister_all() -> None:
r"""Unregisters all handlers.
"""
global _HANDLERS
for func_addr, handlers in _HANDLERS.items():
orig_func = handlers[0]
if isinstance(orig_func, types.BuiltinFunctionType):
replace_builtin(func_addr, addr(orig_func))
elif isinstance(orig_func, types.FunctionType):
replace_function(func_addr, addr(orig_func))
else:
raise ValueError("Unknown type of handled function: %s" % type(orig_func))
_HANDLERS = {}
|
StarcoderdataPython
|
241042
|
import math
def function(x1):
while True:
try:
value = int(input(x1))
except :
print("HUEVON")
continue
else:
break
return float(value)
while True:
n2=function("Ceros: ")
a2=function("Primer valor A: ")
b2=function("Segundo valor B: ")
if a2<=b2 :
print("A no puede ser menor que B ")
continue
if a2<=0 or b2<=0 or n2<1:
print("Valores negativos no aceptados")
continue
else:
break
def trial(a,b,n):
jota= math.log(a/b,10)
ka = math.floor(jota)
if n-ka < 0 :
enemenoska = 0
else:
enemenoska = n-ka
alfa = math.floor(ka / jota)
if n/jota>=1:
beta=1
else:
beta=0
counta=0
countb=0
n=int(n)
for i in range (n+1):
val1=a*(10**i)
for j in range(n+1):
val2=b*(10**j)
if val1> val2:
counta = counta+1
elif val1 < val2:
countb = countb + 1
primero= (enemenoska*(enemenoska+1))/2
segundo1 = (n+1)**2
segundo2 = (enemenoska+1)*alfa*beta
segundo = segundo1 - segundo2
print("-----------------------")
print("j ->", jota)
print("k ->", ka)
print("n-k ->", enemenoska)
print("alfa ->", alfa)
print("---------------------")
print("countb", countb)
print("counta+b",counta+countb)
print("-------------")
print("primero",primero)
print("Segundo",segundo)
print("-------------------")
print("probability",primero,"/",segundo)
if primero == countb and segundo == (counta + countb):
print("WIII")
trial(a2,b2,n2)
|
StarcoderdataPython
|
5047584
|
from . import Population
from .genotype import Genome
import math
import logging
class Experiment(object):
"""Peforms experiment using NEAT.
Executes NEAT on a given set of data and a fitness method.
Fitness method must be a python method named evaluate wrapped in a
string. It will have one parameter that will be the neural network
being evaluated. The method will need to return a 2-tuple, containing
the fitness value and whether it is a solution, respectively.
e.g.
def evaluate(net):
# Data passed to run method
data = ((0.0, 0.0, 1.0),
(1.0, 0.0, 1.0),
(0.0, 1.0, 1.0),
(1.0, 1.0, 1.0))
res = []
winner = False
for d in data:
res.append(net.activate(d))
error = math.fabs(res[0]+(1-res[1])+(1-res[2])+res[3])
fitness = math.pow(4-error, 2)
if res[0] < 0.5 and res[1] >= 0.5 and res[2] >= 0.5 and res[3] < 0.5:
winner = True
return fitness, winner
Attributes:
name: Name of experiment.
log: Logger for experiment class.
"""
def __init__(self):
self.log = logging.getLogger('experiment')
def run(self, name, conf, observer=None):
"""Runs experiment.
Creates a population where each organism evaluates the given data.
The network of each organism is passed to the fitness method and the
returned fitness is assigned to that organism. At the end of each
generation the population's epoch occurs, creating the next generation.
Args:
name: Name of the experiment.
conf: Instance of Conf class, providing num_input, num_output, runs,
generations and the fitness_func source (see class description).
observer: Optional observer notified of experiment progress.
"""
genome = Genome.minimal_fully_connected(0,
(conf.num_input, conf.num_output))
ns = {'math': math}
exec conf.fitness_func in ns
if observer:
observer.notify_experiment(name, conf)
step, max_step = 1, conf.runs*conf.generations
for r in xrange(conf.runs):
if observer:
observer.notify_population(r+1)
pop = Population(conf)
pop.spawn(genome)
for g in xrange(conf.generations):
for o in pop.organisms:
winner = False
net = o.genome.genesis()
o.fitness, o.winner = ns['evaluate'](net)
if o.winner:
if observer:
observer.notify_generation(pop.generation, pop.species)
observer.notify_experiment_end()
self.log.info('Winner!!')
return
pop.epoch(observer)
progress = round(float(step)*100/float(max_step), 2)
if observer:
observer.notify_progress(progress=progress)
step += 1
if observer:
observer.notify_experiment_end()
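# Illustrative usage sketch (not part of the original module). It assumes a
# Conf object exposing the attributes read above (num_input, num_output,
# fitness_func as a string defining evaluate(net), runs, generations); the
# exact Conf constructor shown here is hypothetical.
#
#     conf = Conf(num_input=3, num_output=1, runs=1, generations=100,
#                 fitness_func=XOR_EVALUATE_SOURCE)  # XOR_EVALUATE_SOURCE: string defining evaluate(net)
#     Experiment().run('xor', conf)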
|
StarcoderdataPython
|
6569780
|
<filename>LSTM/ModelProcessing.py<gh_stars>0
def Run():
import os.path
import json
results={}
    # if the model doesn't exist, then train it
if not os.path.exists('./LSTM/model_info.txt'):
import numpy
from pandas import read_csv
import math
from keras.models import Sequential
from keras.layers import Dense, LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# convert an array of values into a dataset matrix
def create_dataset(dataset, nn_look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-nn_look_back-1):
a = dataset[i:(i+nn_look_back), 0]
dataX.append(a)
dataY.append(dataset[i + nn_look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
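        # Worked example (illustrative, not in the original file): for a dataset
        # [[1], [2], [3], [4], [5]] and nn_look_back=3 this returns
        # dataX == [[1, 2, 3]] and dataY == [4]; the trailing "-1" in the range
        # drops the final possible window.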
numpy.random.seed(7)
# load the dataset
dataframe = read_csv('./LSTM/airline.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# split into train and test sets
train_size = int(len(dataset) * 0.80)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size], dataset[train_size:len(dataset)]
# reshape into X=t and Y=t+1
nn_look_back = 20
trainX, trainY = create_dataset(train, nn_look_back)
testX, testY = create_dataset(test, nn_look_back)
# reshape input to be [samples, time steps, features]
#The LSTM network expects the input data (X) to be provided with a
#specific array structure in the form of: [samples, time steps, features].
trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(50, input_shape=(nn_look_back, 1)))
model.add(Dense(units=10))
model.add(Dense(units=20))
model.add(Dense(units=1))
nn_epochs=100
nn_batch_size=4
model.compile(loss='mean_squared_error', optimizer='Nadam')
model.fit(trainX, trainY, epochs=nn_epochs, batch_size=nn_batch_size, verbose=2)
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
results["train_score"]=trainScore
results["test_score"]=testScore
results["epochs"]=nn_epochs
results["look_back"]=nn_look_back
# save model info
json.dump(results, open("./LSTM/model_info.txt",'w'))
else: # load data
results=json.load(open("./LSTM/model_info.txt"))
return results
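# Illustrative usage (not in the original file): the first call trains the LSTM
# and caches its metrics in ./LSTM/model_info.txt; later calls simply reload them.
#
#     metrics = Run()
#     print(metrics["train_score"], metrics["test_score"])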
|
StarcoderdataPython
|
3255961
|
<reponame>coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros
from math import atan2, sin
class Stanley(object):
def __init__(self, max_angle, k):
self.max_angle = max_angle
self.k = k
self.int_val = self.last_error = 0.
def reset(self):
self.int_val = 0.0
def step(self, yaw, v, e):
val = yaw + atan2(self.k * e, v)
val = max(min(val, self.max_angle), -self.max_angle)
self.last_error = e
return val
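# Note (not in the original file): step() applies the Stanley-style steering law
# implemented above, steering = yaw + atan2(k * cross_track_error, speed),
# clipped to [-max_angle, max_angle]. Illustrative usage with made-up values:
#
#     controller = Stanley(max_angle=0.5, k=0.3)
#     steer = controller.step(yaw=0.05, v=10.0, e=0.2)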
|
StarcoderdataPython
|
297491
|
<reponame>Austinstevesk/leetcode-solutions
"""
Given an array of strings strs, group the anagrams together. You can return the answer in any order.
An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once.
Example 1:
Input: strs = ["eat","tea","tan","ate","nat","bat"]
Output: [["bat"],["nat","tan"],["ate","eat","tea"]]
Example 2:
Input: strs = [""]
Output: [[""]]
Example 3:
Input: strs = ["a"]
Output: [["a"]]
Constraints:
1 <= strs.length <= 10^4
0 <= strs[i].length <= 100
strs[i] consists of lowercase English letters.
"""
class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
result = {}
for i in strs:
x = "".join(sorted(i))
print(x)
if x in result:
result[x].append(i)
else:
result[x] = [i]
print(result)
return list(result.values())
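    # Illustrative call (matches Example 1 in the docstring above; any grouping
    # order is acceptable):
    #
    #     Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
    #     # -> [['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']]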
# another solution
from collections import defaultdict
def group_anagrams(strs):
res = defaultdict(list)
for s in strs:
count = [0] * 26 # from a....z
for c in s:
count[ord(c) - ord("a")] += 1
res[tuple(count)].append(s) # lists can never be keys so we convert this to tuple
return res.values()
print(group_anagrams(['ann', 'nan', 'jack', 'man', 'job', 'dull', 'ludl']))
|
StarcoderdataPython
|
123416
|
<reponame>PawanRamaMali/Family_Tree-Meet_The_Family<gh_stars>0
from src.family.processFileHandler import ProcessFileHandler
from src.family.clan import Clan
import pathlib
import os
import sys
def main():
clan = Clan()
file_name = 'input/initInput.txt'
dir = pathlib.Path().absolute()
fileProcessor = ProcessFileHandler()
fullPath = os.path.join(dir, file_name)
fileProcessor.processFile(clan, fullPath, True)
fullPath = os.path.join(dir, sys.argv[1])
fileProcessor.processFile(clan, fullPath, False)
if __name__ == '__main__':
main()
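# Usage note (not in the original file): besides input/initInput.txt, the script
# expects the path to a second input file as its first command-line argument
# (sys.argv[1]), which is processed after the initial input.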
|
StarcoderdataPython
|
8151711
|
<reponame>interhui/ovs-api
# coding=utf-8
import os
import logging
from subprocess import Popen, PIPE
enable_log_command = True
enable_log_result = False
enable_log_error = True
enable_raise = False
fmt = '%(asctime)s - %(name)s [%(process)d] : %(message)s'
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt))
logger = logging.getLogger('execute')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def exec_cmd(cmd):
if not cmd.strip():
raise ValueError('Command is Empty')
if cmd and isinstance(cmd, basestring):
cmd = cmd.strip()
if enable_log_command :
logger.debug(cmd)
result, error = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True).communicate()
if not error and enable_log_result:
logger.debug(result)
if error and enable_log_error:
logger.error(error)
if enable_raise:
raise IOError(error)
return result, error
raise IOError('Command is None or Command is not string')
def check_cmd(cmd):
cmd += ' >/dev/null 2>/dev/null'
return os.system(cmd) == 0
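# Illustrative usage (not in the original file; the command strings are only
# examples):
#
#     out, err = exec_cmd('ovs-vsctl show')
#     if check_cmd('ovs-vsctl --version'):
#         print('Open vSwitch CLI is available')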
|
StarcoderdataPython
|
12854036
|
<filename>autohandshake/src/Pages/LoginPage.py
from autohandshake.src.Pages.Page import Page
from autohandshake.src.HandshakeBrowser import HandshakeBrowser
from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, \
InvalidEmailError, InvalidPasswordError
import re
class LoginPage(Page):
"""
The old Handshake login page
"""
def __init__(self, url: str, browser: HandshakeBrowser):
"""
:param url: the url of the school's Handshake login page
:type url: str
:param browser: a HandshakeBrowser that has not logged in yet
:type browser: HandshakeBrowser
"""
super().__init__(url, browser)
self.validate_url_school()
def _wait_until_page_is_loaded(self):
"""Wait until the page has finished loading.
Return immediately since there are no complex load conditions
"""
return
def _validate_url(self, url):
"""
Ensure that the given URL is a valid login URL
:param url: the url to validate
:type url: str
"""
try:
re.match(r'^https://[a-zA-Z]+\.joinhandshake\.com(/login)?$', url) \
.group(0)
except AttributeError:
raise InvalidURLError()
def validate_url_school(self):
"""Ensure that the current URL leads to a valid school's login page"""
if self._browser.element_exists_by_xpath('//span[text()=\'Please '
'select your school to '
'sign in.\']'):
raise InvalidURLError("The school specified in the URL is not valid")
@Page.require_user_type(None) # requires the user to be logged out, i.e. no user type
def login(self, email, password):
"""
Log into Handshake using the given credentials
:param email: the username with which to log in
:type email: str
:param password: the password with which to log in
:type password: str
"""
self._enter_email_address(email)
self._enter_password(password)
self._browser.wait_until_element_exists_by_xpath('//div[@class="Select-placeholder"]')
def _enter_email_address(self, email):
"""Enter email address into input field"""
EMAIL_INPUT_XPATH = "//input[@name='identifier']"
try: # if you get the old login page
EMAIL_LINK_XPATH = "//div[@class='sign-with-email-address']//a"
self._browser.click_element_by_xpath(EMAIL_LINK_XPATH)
self._browser.send_text_to_element_by_xpath(EMAIL_INPUT_XPATH, email)
EMAIL_BTN_XPATH = "//div[@class='login-main__email-box']/button"
self._browser.click_element_by_xpath(EMAIL_BTN_XPATH)
if self._browser.element_exists_by_xpath("//div[text()='Please enter a valid email address']"):
raise InvalidEmailError(f"No account found for email {email}")
except NoSuchElementError: # if you get the new login page
EMAIL_LINK_XPATH = "//div[@class='sign-in-with-email-address']//a"
self._browser.click_element_by_xpath(EMAIL_LINK_XPATH)
self._browser.send_text_to_element_by_xpath(EMAIL_INPUT_XPATH, email)
EMAIL_BTN_XPATH = "//div[@class='actions']/button"
self._browser.click_element_by_xpath(EMAIL_BTN_XPATH)
if 'known_error_message_present=true' in self._browser.current_url:
raise InvalidEmailError(f"No account found for email {email}")
def _enter_password(self, password):
"""Enter password into input field after having successfully entered email"""
try: # if you get the old login page
self._browser.click_element_by_xpath("//a[@class='no-underline']")
self._browser.send_text_to_element_by_xpath("//input[@name='password']", password)
self._browser.click_element_by_xpath("//input[@name='commit']")
if self._browser.element_exists_by_xpath("//div[contains(text(), "
"'You entered an invalid password.')]"):
raise InvalidPasswordError("Invalid password")
except NoSuchElementError: # if you get the new login page
self._browser.click_element_by_xpath("//a[@class='alternate-login-link']")
self._browser.send_text_to_element_by_xpath("//input[@name='password']", password)
self._browser.click_element_by_xpath("//button")
if self._browser.element_exists_by_xpath("//div[contains(text(), "
"'You entered an invalid password.')]"):
raise InvalidPasswordError("Invalid password")
|
StarcoderdataPython
|
1820465
|
from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
""" Extension of User model """
user = models.OneToOneField(User,
on_delete=models.CASCADE,
primary_key=True)
# if vacation mode is set to true, task streaks will not be reset
vacation = models.BooleanField(default=False)
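# Illustrative access pattern (not in the original file): Django's default
# reverse accessor for a OneToOneField is the lowercased model name, so:
#
#     profile = some_user.userprofile
#     profile.vacation = True
#     profile.save()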
|
StarcoderdataPython
|
79849
|
<gh_stars>1-10
#!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, unicode_literals
#DISTRIBUTED STRATEGY IN KERAS
import tensorflow as tf
# This file creates the trained models for a given neural network configuration
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
from keras.models import model_from_json, load_model
import keras
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import pandas as pd
import numpy as np
import sys
import os
import os.path
import json
import optparse
import time
#import matplotlib
#matplotlib.use('agg')
#import matplotlib.pyplot as plt
#from matplotlib import style
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
#chrome tracing
from tensorflow.python.client import timeline
strategy = tf.distribute.experimental.CentralStorageStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
numberPunctos = 50
sensor_data = pd.DataFrame({
"GHI AP1": [17, 31],
"GHI AP4": [41, 22],
"GHI AP5": [25,12],
"GHI AP6": [41,0],
"GHI AP7": [49,45],
"GHI DH1": [0,49],
"GHI DH2": [9,43],
"GHI DH3": [13,29],
"GHI DH4": [11,33],
"GHI DH5": [15,37],
"GHI DH6": [2,25],
"GHI DH7": [1,41],
"GHI DH8": [2,15],
"GHI DH9": [1,31],
"GHI DH10": [8,25],
"GHI DH11": [10,15]
}, index = ["latitude", "longitude"])
latlon = sensor_data.values.transpose()
gridx = np.arange(0.0, numberPunctos, 1)
gridy = np.arange(0.0, numberPunctos, 1)
def addOptions(parser):
parser.add_option("--NNfile", default="",
help="Config json file for the data to pass to the model")
parser = optparse.OptionParser()
addOptions(parser)
(options, args) = parser.parse_args()
if not options.NNfile:
    print("No configuration file specified", file=sys.stderr)
    sys.exit(1)
# with open('config.json', 'r') as cfg_file:
with open(options.NNfile, 'r') as cfg_file:
cfg_data = json.load(cfg_file)
orig_folder = cfg_data['orig_folder']
dest_folder = cfg_data['dest_folder']
train_size = cfg_data['train_size'] # [1/7, 2/7, 3/7, 4/7, 5/7, 6/7, 7/7]
hor_pred = cfg_data['hor_pred'] # folder_names
days_info_file = cfg_data['days_info']
days_info = pd.read_csv(days_info_file)
day_length = days_info['length_day'][0]
days = days_info['number_train_days'][0]
tg = cfg_data['time_granularity']
seed = cfg_data['seed']
station = cfg_data['station']
batch_size = cfg_data['batch_size']
num_classes = cfg_data['num_classes']
epochs = cfg_data['epochs']
img_rows = cfg_data['img_rows']
img_cols = cfg_data['img_cols']
device = cfg_data['device']
device_name = cfg_data['device_name']
print('Loading dataframes...\n')
load_start = time.time()
x_original = np.load("../x_train.npy")
print(x_original.shape)
print(len(x_original))
y_original = pd.read_csv(orig_folder + '/Y_tr_val.csv')
load_end = time.time()
load_time = load_end - load_start
load_min = int(load_time / 60)
load_sec = load_time % 60
print('Dataframes loaded in {} minutes {} seconds! Splitting for train and validation...\n'.format(load_min, load_sec))
forecast_prediction = []
# Since we configured our matrices with an offset we have to adjust to "jump" to the sample we want to actually predict
for hp in hor_pred:
if hp.endswith("min"):
hor_pred_indices = int(int(hp.replace('min', '')) * 60 / tg)
if hp.endswith("s"):
hor_pred_indices = int(int(hp.replace('s', '')) / tg)
forecast_prediction.append(hp)
y_t = y_original # y_train y son iquals
y_t_index = y_t.index # devulve una array de index
y_t_index_valid = y_t_index[(y_t_index % day_length) < (day_length - hor_pred_indices)] # so we don't get values for the previous or next day
y_t_indices_lost = len(y_t_index) - len(y_t_index_valid)
print('Indices computed. {} indices lost \n.'.format(y_t_indices_lost))
print('Building randomized y matrix with valid indices...\n')
y_t = np.ravel(y_original.iloc[y_t_index_valid + hor_pred_indices])
print('Building y matrix removing invalid indices for persistence model...\n')
y_pred_persistence = np.ravel(y_original.iloc[y_t_index_valid]) # una row de dataFram combia por numpy array
print('Building X matrix...Same thing as before...\n')
x_t = x_original[y_t_index_valid] # like our randomization, just picking the same indices
x_t = x_t.reshape(x_t.shape[0], img_rows, img_cols, 1)
#x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
#SPLIT TRAIN AND TEST SETS
#Can split 3dimensional arrays ;) like this
cut = int(0.9*len(x_t))
x_train, x_test = x_t[:cut,:], x_t[cut:,:]
y_train, y_test = y_t[:cut], y_t[cut:]
input_shape = (img_rows, img_cols, 1)
#When training a model with multiple GPUs, you can use the extra computing power effectively by increasing the batch size.
#In general, use the largest batch size that fits the GPU memory, and tune the learning rate accordingly.
#BUFFER_SIZE = 10000
BATCH_SIZE_PER_REPLICA = cfg_data['batch_size']
bs = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
with strategy.scope():
nn_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=input_shape),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(num_classes, activation='softmax')
])
#COMPILE THE MODEL
nn_model.compile(loss='mean_squared_error', optimizer='Adam', metrics=['MAE', 'mse'])
output_text = 'metrics_' + 'CNN_' + str(device_name) + '_b_' + str(bs)
nn_model.summary()
#TENSORBOARD LOGS
log_dir = 'logs_' + str(device_name) + '_b_' + str(bs)
tb = tf.keras.callbacks.TensorBoard(log_dir = log_dir, batch_size=bs, profile_batch = bs)
print('Fitting...\n' + output_text + '\n')
fit_start = time.time()
history=nn_model.fit(x_test,y_test, batch_size=bs, epochs=epochs, verbose=1, callbacks = [tb] )
fit_end = time.time()
fit_time = fit_end - fit_start
fit_min = int(fit_time / 60)
fit_sec = fit_time % 60
print('Fitting completed in {} minutes {} seconds. Saving model to .h5 file \n'.format(fit_min, fit_sec))
#SAVE THE MODEL
model_filename = 'CNNmodel_' + device + '_' + device_name + '_b' + str(bs) + '.h5'
nn_model.save(model_filename)
print("Saved model to disk")
#INFERENCE
print('INFERENCE')
eval_start = time.time()
loss_inf = nn_model.evaluate(x_test, y_test)
eval_end = time.time()
eval_time = eval_end - eval_start
eval_min = int(eval_time / 60)
eval_sec = eval_time % 60
loss_df = pd.DataFrame(history.history['loss'])
fit_time_df = pd.DataFrame([fit_time])
eval_time_df = pd.DataFrame([eval_time])
loss_inf_df = pd.DataFrame([loss_inf[0]])
#mse_inf_df = pd.DataFrame([mse_inf])
batch_size_df = pd.DataFrame([BATCH_SIZE_PER_REPLICA])
epochs_df = pd.DataFrame([epochs])
num_classes_df = pd.DataFrame([num_classes])
img_rows_df = pd.DataFrame([img_rows])
img_cols_df = pd.DataFrame([img_cols])
device_df = pd.DataFrame([device])
device_name_df = pd.DataFrame([device_name])
epoch_time = fit_time/epochs
epoch_time_df = pd.DataFrame([epoch_time])
#GENERATE METRICS FILE
df_alphascores = pd.concat([loss_df, loss_inf_df, fit_time_df, epoch_time_df, eval_time_df, batch_size_df, epochs_df, num_classes_df, img_rows_df, img_cols_df, device_df, device_name_df], axis=1, ignore_index=True)
df_alphascores.columns = ['mse_train', 'mse_inf', 'total_time', 'epoch_time', 'inference_time', 'batch_size', 'epochs', 'num_classes', 'img_X', 'img_Y', 'device', 'device_name']
df_alphascores.to_csv(output_text + '.csv', header=True, index=False)
print('Model and metrics generated!\n')
|
StarcoderdataPython
|
9786425
|
<gh_stars>0
from __future__ import annotations
import copy
import inspect
import pathlib
import typing
from collections import Counter
from io import StringIO
from operator import eq
import matplotlib.pyplot as plt
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from chemicaldiagram.utils import color_scheme, all_elements
_edges_color_in_drawing = {
0: ("yellow", "unknown"),
1: ("black", "single"),
2: ("red", "double"),
3: ("blue", "triple"),
4: ("green", "quadruple"),
5: ("pink", "aromatic"),
7: ("brown", "delocalized"),
9: ("purple", "pi"),
}
class ChemicalDiagram:
def __init__(self, graph: nx.Graph, identifier: str = None):
self.graph = graph
self.identifier = identifier
self.symbols = nx.get_node_attributes(self.graph, "symbol")
@property
def local_graph(self):
# this is the graph where all nodes are "in the box"
return self.graph.subgraph(self.get_nodes_not_all_poly()).copy()
@property
def local_nodes(self):
return list(self.local_graph.nodes)
def __eq__(self, other):
"""edge properties are not considered in determining equality"""
return hash(self) == hash(other)
def __len__(self):
return len(self.local_graph)
@property
def graph_hash(self) -> str:
return nx.weisfeiler_lehman_graph_hash(self.graph, node_attr="symbol", iterations=5, digest_size=64)
def __hash__(self):
return hash(self.graph_hash)
def as_dict(self) -> dict:
graph = nx.node_link_data(self.graph)
d = {
"graph": graph,
"identifier": self.identifier,
}
return d
@classmethod
def from_dict(cls, d) -> ChemicalDiagram:
g = nx.node_link_graph(d["graph"])
i = d["identifier"]
return cls(g, i)
def get_nodes_not_all_poly(self) -> [int]:
"""nodes that connected to at least one non-polymeric bond"""
ns = []
for n in self.graph.nodes:
edge_poly = []
for edgedata in self.graph.edges(n, data=True):
n1, n2, eprop = edgedata
edge_poly.append(eprop["polymeric"])
if not all(edge_poly):
ns.append(n)
elif len(self.graph.edges(n)) == 0:
ns.append(n)
return ns
def get_element_list(self) -> [str]:
"""element list of the local graph"""
lst = []
for n in self.local_graph:
lst.append(self.symbols[n])
return sorted(lst)
def get_element_list_all(self) -> [str]:
lst = []
for n in self.graph:
lst.append(self.symbols[n])
return sorted(lst)
def get_formula(self):
local_symbols = []
for n in self.local_nodes:
local_symbols.append(self.symbols[n])
c = dict(Counter(local_symbols))
s = ""
for k in sorted(c.keys()):
s += "{}{}".format(k, c[k])
s += " "
return s
def __repr__(self):
return "{}: {}; charge: {}.".format(self.__class__.__name__, self.get_formula(), self.total_charge)
def get_components(self) -> [ChemicalDiagram]:
components = []
for c in nx.connected_components(self.graph):
subgraph = self.graph.subgraph(c).copy()
components.append(ChemicalDiagram(subgraph, identifier=self.identifier))
return components
@property
def total_charge(self) -> int:
c = 0
for n in self.local_graph.nodes(data=True):
c += n[1]["charge"]
return c
def get_env_dict(self, local_nodes_only=True) -> dict:
""" a lookup table for the elements of a node's neighbors, by default all keys are local nodes"""
env_dict = {}
if local_nodes_only:
nodes = self.local_graph.nodes
else:
nodes = self.graph.nodes
for n in nodes:
nb_nodes = []
nb_elements = []
for nb in self.graph.neighbors(n):
nb_nodes.append(nb)
nb_elements.append(self.symbols[nb])
env_dict[n] = {
"nb_nodes": sorted(nb_nodes),
"nb_elements": sorted(nb_elements),
}
return env_dict
def contains_subgraph(self, subgraph) -> bool:
matcher = iso.GraphMatcher(self.graph, subgraph, node_match=iso.generic_node_match('symbol', None, eq))
return matcher.subgraph_is_isomorphic()
def contains_diagram(self, other) -> bool:
return self.contains_subgraph(other.graph)
def check_symbols(self):
if set(self.symbols.keys()).issubset(set(all_elements)):
return True
return False
def draw_svgdata(self, title: str = "", urltxt: str = None, url: str = None):
f = plt.figure()
ax = plt.gca()
ax.set_title(title)
cdg = self.graph
posx = nx.get_node_attributes(cdg, 'x')
posy = nx.get_node_attributes(cdg, 'y')
cdg_labels = nx.get_node_attributes(cdg, 'show_label')
cdg_symbols = nx.get_node_attributes(cdg, 'symbol')
pltgraph = copy.deepcopy(self.graph)
coords = {}
subset_symbols = {}
show_lables = {}
missingxy = []
for k in pltgraph.nodes:
x = posx[k]
y = posy[k]
if x is None or y is None:
missingxy.append(k)
continue
coords[k] = (posx[k], posy[k])
show_lables[k] = cdg_labels[k]
subset_symbols[k] = cdg_symbols[k]
for k in missingxy:
pltgraph.remove_node(k)
jmolcolors = []
for n in pltgraph.nodes:
symb = subset_symbols[n]
if symb == "D":
symb = "H"
jmolcolors.append('#{:02x}{:02x}{:02x}'.format(*color_scheme['Jmol'][symb]))
nx.draw_networkx_labels(pltgraph, pos=coords, labels=show_lables, ax=ax)
nx.draw_networkx_nodes(pltgraph, pos=coords, node_color=jmolcolors, ax=ax)
edge_colors = []
edge_list = []
for edge in pltgraph.edges(data=True):
edge_list.append(edge[:2])
bt = edge[2]["bondtype"]
color = _edges_color_in_drawing[bt][0]
if edge[2]["polymeric"]:
color = "gray"
edge_colors.append(color)
nx.draw_networkx_edges(pltgraph, coords, edge_list, edge_color=edge_colors, ax=ax)
# nx.draw(pltgraph, with_labels=True, labels=show_lables, pos=coords, ax=ax, node_color=jmolcolors)
if urltxt and url:
xycoords_array = [np.array(list(xy)) for xy in coords.values()]
center = np.mean(xycoords_array, axis=0)
x, y = center
plt.text(x, y, urltxt, url=url, bbox=dict(alpha=0.4, url=url, facecolor="red"))
imgdata = StringIO()
f.savefig(imgdata, format='svg')
imgdata.seek(0) # rewind the data
svg_dta = imgdata.read() # this is svg data
plt.close(f)
return svg_dta
def draw_svg(self, title="", urltxt=None, url=None, fn: str = None):
if fn is None:
fn = "{}.svg".format(self.identifier)
data = self.draw_svgdata(title, urltxt, url)
with open(fn, "w") as f:
f.write(data)
class BuildingUnitDiagram(ChemicalDiagram):
_allowed_centers = (
"Si", "B", "C", "N", "P", "S", "Cl", "As", "Se", "Br", "I",
"Li", "Be", "Na", "Mg", "Al", "K", "Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn",
"Ga", "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Cs", "Ba",
"La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm", "Yb", "Lu", "Hf", "Ta",
"W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi", "Po", "Fr", "Ra", "Ac", "Th", "Pa", "U",
"Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es", "Fm", "Md", "No", "Lr", "Rf", "Db", "Sg", "Bh", "Hs", "Mt",
"Ds", "Rg", "Cn", "Nh", "Fl", "Mc", "Lv",
"Si", "Te", "Sb", "Ge"
)
def __init__(self, g: nx.Graph, identifier=None):
super().__init__(g, identifier=identifier)
@property
def local_graph(self): # no node is excluded for building units
return self.graph
@staticmethod
def get_pbus_from_diagram(diagram: ChemicalDiagram, allowed_centers=_allowed_centers):
"""
1. get a list of allowed center nodes
2. connected components now are chosen as BuildingUnitCores
3. extend each BuildingUnitCore to its first order neighbours (we can limit such neighbours to be oxygen or hydrogen)
Resulting subgraph is the BuildingUnit, one for each BuildingUnitCore.
This is suitable when PBUs share at least one node, such as oxides.
"""
graph = diagram.graph.copy()
reduced_graph = diagram.local_graph.copy()
toberemoved = []
symbol_dict = diagram.symbols
for node in reduced_graph.nodes:
if symbol_dict[node] not in allowed_centers:
toberemoved.append(node)
for node in toberemoved:
reduced_graph.remove_node(node)
building_units_cores = [reduced_graph.subgraph(c).copy() for c in nx.connected_components(reduced_graph)]
bus = []
for buc in building_units_cores:
building_unit = []
for buc_node in buc.nodes:
building_unit.append(buc_node)
building_unit = building_unit + list(graph.neighbors(buc_node))
building_unit = list(set(building_unit))
building_unit_graph = graph.subgraph(building_unit).copy()
this_bu = BuildingUnitDiagram(building_unit_graph, identifier=diagram.identifier)
bus.append(this_bu)
bus = sorted(bus, key=lambda x: len(x), reverse=True)
return bus
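    # Illustrative usage (not in the original file), assuming `diagram` is a
    # ChemicalDiagram instance:
    #
    #     pbus = BuildingUnitDiagram.get_pbus_from_diagram(diagram)
    #     largest = pbus[0]  # units are sorted by size, largest first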
class ExportedEntry:
def __init__(
self,
diagram: ChemicalDiagram,
cif_string: str,
formula: str,
has_disorder: bool,
chemical_name: str,
identifier: str,
publications: [dict],
smiles: str,
):
self.smiles = smiles
self.publications = publications
self.diagram = diagram
self.cif_string = cif_string
self.formula = formula
self.has_disorder = has_disorder
self.chemical_name = chemical_name
self.identifier = identifier
def __repr__(self):
return "Entry: {}\n\tformula: {}\n\tdiagram: {}".format(self.identifier, self.formula, self.diagram)
def __hash__(self):
return hash(self.identifier)
def __eq__(self, other):
return self.identifier == other.identifier
@classmethod
def from_exported_dict(cls, d):
diagram = ChemicalDiagram.from_dict(d["diagram"])
entry_data = d["entry_data"]
init_dict = {"diagram": diagram}
signature = inspect.signature(ExportedEntry)
for p in signature.parameters:
if p in init_dict:
continue
init_dict[p] = entry_data[p]
return cls(**init_dict)
def write_cif(self, path: typing.Union[str, pathlib.Path]):
with open(path, "w") as f:
f.write(self.cif_string)
def to_dict(self):
d = {"diagram": self.diagram.as_dict()}
signature = inspect.signature(ExportedEntry)
for p in signature.parameters:
if p in d:
continue
d[p] = getattr(self, p)
return d
@classmethod
def from_dict(cls, d):
try:
diagram = ChemicalDiagram.from_dict(d["diagram"])
except KeyError:
diagram = None
init_dict = {"diagram": diagram}
signature = inspect.signature(ExportedEntry)
for p in signature.parameters:
if p in init_dict:
continue
init_dict[p] = d[p]
return cls(**init_dict)
|
StarcoderdataPython
|
191822
|
import parl
from parl import layers
class Model(parl.Model):
def __init__(self, act_dim):
self.conv1 = layers.conv2d(num_filters=32, filter_size=3, stride=2, padding=1, act='relu')
self.conv2 = layers.conv2d(num_filters=32, filter_size=3, stride=2, padding=1, act='relu')
self.conv3 = layers.conv2d(num_filters=32, filter_size=3, stride=2, padding=1, act='relu')
self.conv4 = layers.conv2d(num_filters=32, filter_size=3, stride=2, padding=1, act='relu')
self.fc = layers.fc(size=512, act='relu')
self.policy_fc = layers.fc(size=act_dim)
self.value_fc = layers.fc(size=1)
def policy(self, obs):
"""
Args:
            obs: input image, with shape [N, C, H, W]
Returns:
policy_logits: N * ACTION_DIM
"""
conv1 = self.conv1(obs)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
flatten = layers.flatten(conv4, axis=1)
fc_output = self.fc(flatten)
policy_logits = self.policy_fc(fc_output)
return policy_logits
def value(self, obs):
"""
Args:
            obs: input image, with shape [N, C, H, W]
Returns:
values: N
"""
conv1 = self.conv1(obs)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
flatten = layers.flatten(conv4, axis=1)
fc_output = self.fc(flatten)
values = self.value_fc(fc_output)
values = layers.squeeze(values, axes=[1])
return values
def policy_and_value(self, obs):
"""
Args:
            obs: input image, with shape [N, C, H, W]
Returns:
policy_logits: N * ACTION_DIM
values: N
"""
conv1 = self.conv1(obs)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
flatten = layers.flatten(conv4, axis=1)
fc_output = self.fc(flatten)
policy_logits = self.policy_fc(fc_output)
values = self.value_fc(fc_output)
values = layers.squeeze(values, axes=[1])
return policy_logits, values
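# Illustrative usage (not in the original file; act_dim and the obs variable are
# placeholders):
#
#     model = Model(act_dim=4)
#     policy_logits, values = model.policy_and_value(obs)  # obs: [N, C, H, W]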
|
StarcoderdataPython
|
3526576
|
<gh_stars>0
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QHBoxLayout, QVBoxLayout, QMessageBox, QWidget, QGroupBox, QAction, QFileDialog, qApp
from PyQt5.QtWidgets import QLabel
from PyQt5.QtGui import QPixmap, QImage, QIcon
from PyQt5.QtCore import Qt, QFile
import numpy as np
import cv2
class App(QMainWindow):
def __init__(self):
super(App, self).__init__()
self.title = 'Filtering & Geometric Transformation'
self.left = 10
self.top = 10
self.width = 1000
self.height = 600
self.imageOpen = False
self.initUI()
def openImage(self):
# ******** place image into qlabel object *********************
        imagePath, _ = QFileDialog.getOpenFileName()
        if not imagePath:  # the user cancelled the file dialog
            return
        self.inputImg = cv2.imread(imagePath)
pixmap_label = self.qlabel1
height, width, channel = self.inputImg.shape
bytesPerLine = 3 * width
qImg = QImage(self.inputImg.data, width, height, bytesPerLine, QImage.Format_RGB888).rgbSwapped()
pixmap = QPixmap(qImg)
pixmap_label.setPixmap(pixmap)
self.imageOpen = True
# **************************************************************
def saveImage(self):
self.pixmap.save("modifiedImage.png");
def placeImage(self, height, width, image):
# place the image to the qlabel
pixmap_label = self.qlabel1
bytesPerLine = 3 * width
qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format_RGB888).rgbSwapped()
self.pixmap = QPixmap(qImg)
pixmap_label.setPixmap(self.pixmap)
def padding(self, padding_size, img): # fill the outer border pixels according to kernel size
height, width, channel = img.shape
new_img = np.zeros((height + padding_size*2, width + padding_size*2, 3), dtype=np.uint8)
new_img[padding_size:(height+padding_size), padding_size:(width+padding_size), :] = img
return new_img
def avg_filter(self, kernel_size): # create average kernel
kernel = (1 / (kernel_size*kernel_size))*np.ones((kernel_size, kernel_size), np.float32)
return kernel
def conv(self, kernel, img, height, width, padding):
height2, width2, channel = self.inputImg.shape
temp_image = np.zeros((height2, width2, 3), dtype=np.uint8)
for i in range(padding, height+padding):
for j in range(padding, width+padding):
roi = img[i - padding:i + padding + 1, j - padding:j + padding + 1, 0] # take the part of image for convolution operation
conv_value = (roi * kernel).sum()
temp_image[i - padding, j - padding, 0] = int(conv_value)
roi = img[i - padding:i + padding + 1, j - padding:j + padding + 1, 1] # take the part of image for convolution operation
conv_value = (roi * kernel).sum()
temp_image[i - padding, j - padding, 1] = int(conv_value)
roi = img[i - padding:i + padding + 1, j - padding:j + padding + 1, 2] # take the part of image for convolution operation
conv_value = (roi * kernel).sum()
temp_image[i - padding, j - padding, 2] = int(conv_value)
self.placeImage(height2, width2, temp_image)
def average_3(self): # average filter 3x3
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(1, self.inputImg) # add padding to the image
avg_kernel = self.avg_filter(3) # create average kernel
self.conv(avg_kernel, image, height, width, padding=1) # apply convolution
def average_5(self): # average filter 5x5
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(2, self.inputImg) # add padding to the image
avg_kernel = self.avg_filter(5) # create average kernel
self.conv(avg_kernel, image, height, width, padding=2) # apply convolution
def average_7(self): # average filter 7x7
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(3, self.inputImg) # add padding to the image
avg_kernel = self.avg_filter(7) # create average kernel
self.conv(avg_kernel, image, height, width, padding=3) # apply convolution
def average_9(self): # average filter 9x9
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(4, self.inputImg) # add padding to the image
avg_kernel = self.avg_filter(9) # create average kernel
self.conv(avg_kernel, image, height, width, padding=4) # apply convolution
def average_11(self): # average filter 11x11
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(5, self.inputImg) # add padding to the image
avg_kernel = self.avg_filter(11) # create average kernel
self.conv(avg_kernel, image, height, width, padding=5) # apply convolution
def average_13(self): # average filter 13x13
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(6, self.inputImg) # add padding to the image
avg_kernel = self.avg_filter(13) # create average kernel
self.conv(avg_kernel, image, height, width, padding=6) # apply convolution
def average_15(self): # average filter 15x15
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(7, self.inputImg) # add padding to the image
avg_kernel = self.avg_filter(15) # create average kernel
self.conv(avg_kernel, image, height, width, padding=7) # apply convolution
def median_operation(self, img, height, width, padding):
height2, width2, channel = self.inputImg.shape
temp_image = np.zeros((height2, width2, 3), dtype=np.uint8)
for i in range(padding, height+padding):
for j in range(padding, width+padding):
roi = img[i - padding:i + padding + 1, j - padding:j + padding + 1, 0] # take the part of image for median operation
median = np.median(roi)
temp_image[i - padding, j - padding, 0] = median
roi = img[i - padding:i + padding + 1, j - padding:j + padding + 1, 1] # take the part of image for median operation
median = np.median(roi)
temp_image[i - padding, j - padding, 1] = median
roi = img[i - padding:i + padding + 1, j - padding:j + padding + 1, 2] # take the part of image for median operation
median = np.median(roi)
temp_image[i - padding, j - padding, 2] = median
self.placeImage(height2, width2, temp_image)
def median_3(self): # median filter 3x3
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(1, self.inputImg) # add padding to the image
self.median_operation(image, height, width, padding=1)
def median_5(self): # median filter 5x5
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(2, self.inputImg) # add padding to the image
self.median_operation(image, height, width, padding=2)
def median_7(self): # median filter 7x7
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(3, self.inputImg) # add padding to the image
self.median_operation(image, height, width, padding=3)
def median_9(self): # median filter 9x9
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(4, self.inputImg) # add padding to the image
self.median_operation(image, height, width, padding=4)
def median_11(self): # median filter 11x11
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(5, self.inputImg) # add padding to the image
self.median_operation(image, height, width, padding=5)
def median_13(self): # median filter 13x13
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(6, self.inputImg) # add padding to the image
self.median_operation(image, height, width, padding=6)
def median_15(self): # median filter 15x15
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(7, self.inputImg) # add padding to the image
self.median_operation(image, height, width, padding=7)
def gaussian_filter(self, sigma, size):
x, y = np.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]
g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))
return g / g.sum()
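    # Note (not in the original file): gaussian_filter samples the 2-D Gaussian
    # G(x, y) = exp(-(x^2 + y^2) / (2 * sigma^2)) on a size x size grid and
    # normalizes it so the weights sum to 1, keeping overall brightness unchanged.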
def gaussian_3(self): # gaussian filter 3x3
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(1, self.inputImg) # add padding to the image
gaussian_kernel = self.gaussian_filter(sigma=5, size=3) # create average kernel
self.conv(gaussian_kernel, image, height, width, padding=1) # apply convolution
def gaussian_5(self): # gaussian filter 5x5
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(2, self.inputImg) # add padding to the image
gaussian_kernel = self.gaussian_filter(sigma=5, size=5) # create average kernel
self.conv(gaussian_kernel, image, height, width, padding=2) # apply convolution
def gaussian_7(self): # gaussian filter 7x7
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(3, self.inputImg) # add padding to the image
gaussian_kernel = self.gaussian_filter(sigma=5, size=7) # create average kernel
self.conv(gaussian_kernel, image, height, width, padding=3) # apply convolution
def gaussian_9(self): # gaussian filter 9x9
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(4, self.inputImg) # add padding to the image
gaussian_kernel = self.gaussian_filter(sigma=5, size=9) # create average kernel
self.conv(gaussian_kernel, image, height, width, padding=4) # apply convolution
def gaussian_11(self): # gaussian filter 11x11
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(5, self.inputImg) # add padding to the image
gaussian_kernel = self.gaussian_filter(sigma=5, size=11) # create average kernel
self.conv(gaussian_kernel, image, height, width, padding=5) # apply convolution
    def gaussian_13(self):  # gaussian filter 13x13
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(6, self.inputImg) # add padding to the image
gaussian_kernel = self.gaussian_filter(sigma=5, size=13) # create average kernel
self.conv(gaussian_kernel, image, height, width, padding=6) # apply convolution
def gaussian_15(self): # gaussian filter 15x15
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
image = self.padding(7, self.inputImg) # add padding to the image
gaussian_kernel = self.gaussian_filter(sigma=5, size=15) # create average kernel
self.conv(gaussian_kernel, image, height, width, padding=7) # apply convolution
def rotate_right(self):
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
center_y = height//2
center_x = width//2
result_image = np.zeros((height, width, 3), dtype=np.uint8)
angle = np.pi/18
for j in range(0, height):
for k in range(0, width):
try:
coord = [k - center_x, j - center_y, 1]
rot_mat = np.asarray([[np.cos(angle), -1*np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
inv_rot_mat = np.linalg.inv(rot_mat)
new_coord = np.matmul(inv_rot_mat, coord)
new_coord[1] += center_y
new_coord[0] += center_x
pixel_value = self.inputImg[int(new_coord[1]), int(new_coord[0]), :]
result_image[j, k, :] = pixel_value
except Exception:
pass
self.placeImage(height, width, result_image)
def rotate_left(self):
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
center_y = height//2
center_x = width//2
result_image = np.zeros((height, width, 3), dtype=np.uint8)
angle = np.pi/18
for j in range(0, height):
for k in range(0, width):
try:
coord = [k-center_x, j-center_y, 1]
rot_mat = np.asarray([[np.cos(angle), np.sin(angle), 0],
[-1*np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
inv_rot_mat = np.linalg.inv(rot_mat)
new_coord = np.matmul(inv_rot_mat, coord)
new_coord[1] += center_y
new_coord[0] += center_x
pixel_value = self.inputImg[int(new_coord[1]), int(new_coord[0]), :]
result_image[j, k, :] = pixel_value
except Exception:
pass
self.placeImage(height, width, result_image)
def scale_twox(self):
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
result_image = np.zeros((height*2, width*2, 3), dtype=np.uint8)
for j in range(0, height*2):
for k in range(0, width*2):
try:
coord = [j, k, 1]
rot_mat = np.asarray([[2, 0, 0],
[0, 2, 0],
[0, 0, 1]])
inv_rot_mat = np.linalg.inv(rot_mat)
new_coord = np.matmul(inv_rot_mat, coord)
pixel_value = self.inputImg[int(new_coord[0]), int(new_coord[1]), :]
result_image[j, k, :] = pixel_value
except Exception:
pass
self.placeImage(height*2, width*2, result_image)
def scale_oneovertwox(self):
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
center_y = height // 2
center_x = width // 2
result_image = np.zeros((height, width, 3), dtype=np.uint8)
for j in range(0, height):
for k in range(0, width):
coord = [j, k, 1]
new_coord = np.matmul(np.asarray([[1 / 2, 0, 0],
[0, 1 / 2, 0],
[0, 0, 1]]), coord)
pixel_value = self.inputImg[j, k, :]
result_image[int(new_coord[0] + (center_y / 2)), int(new_coord[1] + (center_x / 2)), :] = pixel_value
self.placeImage(height, width, result_image)
def trans_right(self):
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
result_image = np.zeros((height, width, 3), dtype=np.uint8)
for j in range(0, height):
for k in range(0, width):
try:
coord = [k, j, 1]
new_coord = np.matmul(np.asarray([[1, 0, 50],
[0, 1, 0],
[0, 0, 1]]), coord)
pixel_value = self.inputImg[j, k, :]
result_image[int(new_coord[1]), int(new_coord[0]), :] = pixel_value
except Exception:
pass
self.placeImage(height, width, result_image)
def trans_left(self):
if (self.imageOpen == False):
return QMessageBox.question(self, 'Error Message', "Please, load the image", QMessageBox.Ok, QMessageBox.Ok)
height, width, channel = self.inputImg.shape
result_image = np.zeros((height, width, 3), dtype=np.uint8)
for j in range(0, height):
for k in range(0, width):
try:
coord = [k, j, 1]
rot_mat = np.asarray([[1, 0, -50],
[0, 1, 0],
[0, 0, 1]])
inv_rot_mat = np.linalg.inv(rot_mat)
new_coord = np.matmul(inv_rot_mat, coord)
pixel_value = self.inputImg[int(new_coord[1]), int(new_coord[0]), :]
result_image[j, k, :] = pixel_value
except Exception:
pass
self.placeImage(height, width, result_image)
def initUI(self):
# Write GUI initialization code
self.setGeometry(self.left, self.top, self.width, self.height)
self.setWindowTitle(self.title)
# ****************add the label for image*********************
wid = QWidget(self)
self.setCentralWidget(wid)
self.groupBox = QGroupBox()
self.hBoxlayout = QHBoxLayout()
self.qlabel1 = QLabel('Image', self)
self.qlabel1.setStyleSheet("border: 1px inset grey; min-height: 200px; ")
self.qlabel1.setAlignment(Qt.AlignCenter)
self.hBoxlayout.addWidget(self.qlabel1)
self.groupBox.setLayout(self.hBoxlayout)
vBox = QVBoxLayout()
vBox.addWidget(self.groupBox)
wid.setLayout(vBox)
# ****************menu bar***********
menubar = self.menuBar()
fileMenu = menubar.addMenu('File')
filters = menubar.addMenu('Filters')
geometric_transform = menubar.addMenu('Geometric Transform')
openAction = QAction('Open Image', self)
openAction.triggered.connect(self.openImage)
fileMenu.addAction(openAction)
openAction2 = QAction('Save Image', self)
openAction2.triggered.connect(self.saveImage)
fileMenu.addAction(openAction2)
exitAct = QAction(QIcon('exit.png'), '&Exit', self)
exitAct.setShortcut('Ctrl+Q')
exitAct.setStatusTip('Exit application')
exitAct.triggered.connect(qApp.quit)
fileMenu.addAction(exitAct)
## ************ AVERAGE FILTERS ************ ##
average_filters = QMenu('Average Filters', self)
three_avg = QAction('3x3', self)
five_avg = QAction('5x5', self)
seven_avg = QAction('7x7', self)
nine_avg = QAction('9x9', self)
eleven_avg = QAction('11x11', self)
thirteen_avg = QAction('13x13', self)
fifteen_avg = QAction('15x15', self)
three_avg.triggered.connect(self.average_3)
five_avg.triggered.connect(self.average_5)
seven_avg.triggered.connect(self.average_7)
nine_avg.triggered.connect(self.average_9)
eleven_avg.triggered.connect(self.average_11)
thirteen_avg.triggered.connect(self.average_13)
fifteen_avg.triggered.connect(self.average_15)
average_filters.addAction(three_avg)
average_filters.addAction(five_avg)
average_filters.addAction(seven_avg)
average_filters.addAction(nine_avg)
average_filters.addAction(eleven_avg)
average_filters.addAction(thirteen_avg)
average_filters.addAction(fifteen_avg)
filters.addMenu(average_filters)
## **************************************** ##
## ************ GAUSSIAN FILTERS ************ ##
gaussian_filters = QMenu('Gaussian Filters', self)
three_gaus = QAction('3x3', self)
five_gaus = QAction('5x5', self)
seven_gaus = QAction('7x7', self)
nine_gaus = QAction('9x9', self)
eleven_gaus = QAction('11x11', self)
thirteen_gaus = QAction('13x13', self)
fifteen_gaus = QAction('15x15', self)
three_gaus.triggered.connect(self.gaussian_3)
five_gaus.triggered.connect(self.gaussian_5)
seven_gaus.triggered.connect(self.gaussian_7)
nine_gaus.triggered.connect(self.gaussian_9)
eleven_gaus.triggered.connect(self.gaussian_11)
thirteen_gaus.triggered.connect(self.gaussian_13)
fifteen_gaus.triggered.connect(self.gaussian_15)
gaussian_filters.addAction(three_gaus)
gaussian_filters.addAction(five_gaus)
gaussian_filters.addAction(seven_gaus)
gaussian_filters.addAction(nine_gaus)
gaussian_filters.addAction(eleven_gaus)
gaussian_filters.addAction(thirteen_gaus)
gaussian_filters.addAction(fifteen_gaus)
filters.addMenu(gaussian_filters)
## **************************************** ##
## ************ MEDIAN FILTERS ************ ##
median_filters = QMenu('Median Filters', self)
three_med = QAction('3x3', self)
five_med = QAction('5x5', self)
seven_med = QAction('7x7', self)
nine_med = QAction('9x9', self)
eleven_med = QAction('11x11', self)
thirteen_med = QAction('13x13', self)
fifteen_med = QAction('15x15', self)
three_med.triggered.connect(self.median_3)
five_med.triggered.connect(self.median_5)
seven_med.triggered.connect(self.median_7)
nine_med.triggered.connect(self.median_9)
eleven_med.triggered.connect(self.median_11)
thirteen_med.triggered.connect(self.median_13)
fifteen_med.triggered.connect(self.median_15)
median_filters.addAction(three_med)
median_filters.addAction(five_med)
median_filters.addAction(seven_med)
median_filters.addAction(nine_med)
median_filters.addAction(eleven_med)
median_filters.addAction(thirteen_med)
median_filters.addAction(fifteen_med)
filters.addMenu(median_filters)
## **************************************** ##
### 5. create rotate, scale and translate menu ###
rotate = QMenu('Rotate', self)
scale = QMenu('Scale', self)
translate = QMenu('Translate', self)
rotate_right = QAction('Rotate 10 Degree Right', self)
rotate_left = QAction('Rotate 10 Degree Left', self)
twox = QAction('2x', self)
oneovertwox = QAction('1/2x', self)
right = QAction('Right', self)
left = QAction('Left', self)
### add function when the action is triggered ###
rotate_right.triggered.connect(self.rotate_right)
rotate_left.triggered.connect(self.rotate_left)
twox.triggered.connect(self.scale_twox)
oneovertwox.triggered.connect(self.scale_oneovertwox)
right.triggered.connect(self.trans_right)
left.triggered.connect(self.trans_left)
### add action ###
rotate.addAction(rotate_right)
rotate.addAction(rotate_left)
scale.addAction(twox)
scale.addAction(oneovertwox)
translate.addAction(right)
translate.addAction(left)
geometric_transform.addMenu(rotate)
geometric_transform.addMenu(scale)
geometric_transform.addMenu(translate)
# ------------------------------------
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
|
StarcoderdataPython
|
4839784
|
#!/usr/bin/env python
import os
import glob as gl
import lxml.etree as etree
import argparse as ap
def Main():
path = ParseArguments().path
FormatXmlsInPath(path)
def ParseArguments():
parser = ap.ArgumentParser(description = 'Indents the xml in given path')
parser.add_argument('path', help = 'path to files to be processed')
return parser.parse_args()
def Indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for e in elem:
Indent(e, level+1)
if not e.tail or not e.tail.strip():
e.tail = i + " "
if not e.tail or not e.tail.strip():
e.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def FormatXmlsInPath(pathname):
for xml_fname in gl.glob(pathname + '*.xml'):
xml = etree.parse(xml_fname)
Indent(xml.getroot())
xml.write(xml_fname, encoding="utf-8", pretty_print=True, xml_declaration=True)
if __name__ == '__main__':
Main()
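# Usage note (not in the original file): FormatXmlsInPath globs pathname + '*.xml',
# so the path argument should end with a separator, e.g.:
#
#     FormatXmlsInPath('configs/')   # formats configs/a.xml, configs/b.xml, ...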
|
StarcoderdataPython
|
83776
|
<reponame>Dreem-Organization/bender-api
"""bender URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from bender_service.social_login import FacebookLogin, GoogleLogin
from django.views.generic import TemplateView
from django.views.generic import RedirectView
urlpatterns = [
url(r'^admin/', admin.site.urls),
# Rest auth (login signup etc.)
url(r'^', include('rest_auth.urls')),
# This is not meant to be used, it just allows allauth to send email of reset
url(r'^password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
TemplateView.as_view(),
name='password_reset_confirm'), # https://github.com/Tivix/django-rest-auth/issues/63
# Registration
url(r'^registration/', include('rest_auth.registration.urls')),
url(r'^facebook/', FacebookLogin.as_view(), name='facebook_login'),
url(r'^google/', GoogleLogin.as_view(), name='google_login'),
# Necessary imports (profile seems useless for now.)
url(r'^accounts/profile/$',
RedirectView.as_view(url='/', permanent=True),
name='profile-redirect'),
url(r'^account/', include('allauth.urls')),
# Bender import
url(r'^api/', include('bender.urls')),
]
|
StarcoderdataPython
|
11272133
|
# -*- coding: utf-8 -*-
u"""Run all experiments defined on a json file, storing results on database.
Usage:
run_experiments.py <configs.json> <dbname> [--dbserver=<dbserver>]
Options:
-h --help Show this screen.
--version Show Version.
--dbserver=<dbserver> URI of the mongodb server for storing results. Typically "ip:port" [default: localhost]
"""
from __future__ import division
from copy import copy
import json
import logging
import sys
from docopt import docopt
from progress.bar import Bar
from featureforge.experimentation.stats_manager import StatsManager
from featureforge.experimentation.utils import get_git_info
# Measured in seconds
BOOKING_DURATION = 10 * 60 # just a default
def main(single_runner,
conf_extender=None,
booking_duration=BOOKING_DURATION,
use_git_info_from_path=None,
version=u'Run experiments 0.1',
stop_on_first_error=False):
command_name = sys.argv[0]
custom__doc__ = __doc__.replace(u'run_experiments.py', command_name)
logging.basicConfig(level=logging.DEBUG,
format=u"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
opts = docopt(custom__doc__, version=version)
stats = StatsManager(db_name=opts[u"<dbname>"],
booking_duration=booking_duration,
db_uri=opts[u"--dbserver"])
experiment_configurations = json.load(open(opts[u"<configs.json>"]))
bar = Bar(u'Processing', max=len(experiment_configurations))
if use_git_info_from_path is not None:
GIT_INFO = get_git_info(use_git_info_from_path)
else:
GIT_INFO = None
for config in experiment_configurations:
# Extend individual experiment config with the dynamic extender, if any
config = copy(config)
if conf_extender is not None:
config = conf_extender(config)
# Adding GIT info to the config if computed and not present
if GIT_INFO is not None and u'git_info' not in config:
config[u'git_info'] = GIT_INFO
# Book experiment
ticket = stats.book_if_available(config)
if ticket is None:
bar.next()
continue
# Run experiment
try:
result = single_runner(config)
except KeyboardInterrupt:
logging.error(u"Interrupted by keyboard, terminating...")
break
except Exception as e:
bar.next()
logging.error(u"Experiment failed because of {} {}, "
u"skipping...".format(type(e).__name__, e))
if stop_on_first_error:
raise
continue
else:
# Store result
bar.next()
if not stats.store_results(ticket, result):
                logging.error(u"Experiment successful but result could not be stored! "
                              "Skipping... ")
bar.finish()
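# Illustrative usage sketch (an assumption, not part of the original module): a
# project would normally import `main` and hand it its own experiment runner.
# The runner below is a placeholder that just echoes the config back as the
# stored "result".
def _example_single_runner(config):
    # pretend to train/evaluate something and return a dict of results to store
    return {u"config_echo": config}
if __name__ == "__main__":
    main(_example_single_runner)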
|
StarcoderdataPython
|
6662549
|
<filename>metagraph/plugins/numpy/types.py
from typing import Set, Dict, Any
import numpy as np
from metagraph import dtypes, Wrapper, ConcreteType
from ..core.types import Vector, Matrix, NodeSet, NodeMap
from ..core.wrappers import NodeSetWrapper, NodeMapWrapper
class NumpyVectorType(ConcreteType, abstract=Vector):
@classmethod
def is_typeclass_of(cls, obj):
"""Is obj described by this type class?"""
return isinstance(obj, np.ndarray) and len(obj.shape) == 1
@classmethod
def _compute_abstract_properties(
cls, obj, props: Set[str], known_props: Dict[str, Any]
) -> Dict[str, Any]:
ret = known_props.copy()
# fast properties
for prop in {"dtype"} - ret.keys():
if prop == "dtype":
ret[prop] = dtypes.dtypes_simplified[obj.dtype]
return ret
@classmethod
def assert_equal(
cls,
obj1,
obj2,
aprops1,
aprops2,
cprops1,
cprops2,
*,
rel_tol=1e-9,
abs_tol=0.0,
):
assert obj1.shape == obj2.shape, f"shape mismatch {obj1.shape} != {obj2.shape}"
assert aprops1 == aprops2, f"property mismatch: {aprops1} != {aprops2}"
# Compare
if issubclass(obj1.dtype.type, np.floating):
assert np.isclose(obj1, obj2, rtol=rel_tol, atol=abs_tol).all()
else:
assert (obj1 == obj2).all()
class NumpyNodeSet(NodeSetWrapper, abstract=NodeSet):
def __init__(self, nodes, *, aprops=None):
super().__init__(aprops=aprops)
self._assert_instance(nodes, (np.ndarray, list, tuple, set))
if not isinstance(nodes, np.ndarray):
if isinstance(nodes, set):
nodes = tuple(nodes) # np.array doesn't accept sets
nodes = np.array(nodes)
if len(nodes.shape) != 1:
raise TypeError(f"Invalid number of dimensions: {len(nodes.shape)}")
if not issubclass(nodes.dtype.type, np.integer):
raise TypeError(f"Invalid dtype for NodeSet: {nodes.dtype}")
# Ensure sorted
nodes.sort()
# Ensure no duplicates
unique = np.diff(nodes) > 0
if not unique.all():
tmp = np.empty((unique.sum() + 1,), dtype=nodes.dtype)
tmp[0] = nodes[0]
tmp[1:] = nodes[1:][unique]
nodes = tmp
self.value = nodes
@classmethod
def from_mask(cls, mask, *, aprops=None):
"""
The mask must be a boolean numpy array.
NodeIds are based on position within the mask.
"""
cls._assert_instance(mask, np.ndarray)
cls._assert(mask.dtype == bool, "Only boolean masks are allowed")
node_ids = np.flatnonzero(mask)
return NumpyNodeSet(node_ids, aprops=aprops)
def __len__(self):
return len(self.value)
# def copy(self):
# aprops = NumpyNodeSet.Type.compute_abstract_properties(self, {})
# return NumpyNodeSet(self.value.copy(), aprops=aprops)
def __iter__(self):
return iter(self.value)
def __contains__(self, key):
index = np.searchsorted(self.value, key)
if hasattr(index, "__len__"):
return (self.value[index] == key).all()
else:
return self.value[index] == key
class TypeMixin:
@classmethod
def assert_equal(
cls,
obj1,
obj2,
aprops1,
aprops2,
cprops1,
cprops2,
*,
rel_tol=None,
abs_tol=None,
):
assert aprops1 == aprops2, f"property mismatch: {aprops1} != {aprops2}"
assert len(obj1) == len(obj2), f"size mismatch: {len(obj1)} != {len(obj2)}"
assert (obj1.value == obj2.value).all(), f"node sets do not match"
class NumpyNodeMap(NodeMapWrapper, abstract=NodeMap):
"""
NumpyNodeMap stores data using numpy arrays. A mask of present values or
a compact representation can be used.
"""
def __init__(self, data, nodes=None, *, aprops=None):
"""
data: values for each node
nodes: array of node_ids corresponding to elements in data
If there are no missing nodes, nodes are not required. It will be assumed that node ids
are sequential and the same size as `data`.
"""
super().__init__(aprops=aprops)
self._assert_instance(data, (np.ndarray, list, tuple))
if not isinstance(data, np.ndarray):
data = np.array(data)
if len(data.shape) != 1:
raise TypeError(f"Invalid number of dimensions: {len(data.shape)}")
if nodes is None:
nodes = np.arange(len(data))
else:
self._assert_instance(nodes, (np.ndarray, list, tuple))
if not isinstance(nodes, np.ndarray):
nodes = np.array(nodes)
if nodes.shape != data.shape:
raise TypeError(
f"Nodes must be same shape and size as data: {nodes.shape} != {data.shape}"
)
if not issubclass(nodes.dtype.type, np.integer):
raise TypeError(f"Invalid dtype for nodes: {nodes.dtype}")
# Ensure sorted
if not np.all(np.diff(nodes) > 0):
sorter = np.argsort(nodes)
nodes = nodes[sorter]
data = data[sorter]
# Ensure no duplicates
unique = np.diff(nodes) > 0
if not unique.all():
raise TypeError(f"Duplicate node ids found: {set(nodes[1:][~unique])}")
self.value = data
self.nodes = nodes
@classmethod
def from_mask(cls, data, mask, *, aprops=None):
"""
Values in data are kept where mask is True.
The mask must be a boolean numpy array.
NodeIds are based on position within the mask.
"""
cls._assert_instance(mask, np.ndarray)
cls._assert(mask.dtype == bool, "Only boolean masks are allowed")
data = data[mask]
nodes = np.flatnonzero(mask)
return NumpyNodeMap(data, nodes, aprops=aprops)
def __len__(self):
return len(self.value)
# def copy(self):
# aprops = NumpyNodeMap.Type.compute_abstract_properties(self, {})
# return NumpyNodeMap(self.value.copy(), nodes=self.nodes.copy(), aprops=aprops)
def __contains__(self, key):
index = np.searchsorted(self.nodes, key)
if hasattr(index, "__len__"):
return (self.nodes[index] == key).all()
else:
return self.nodes[index] == key
def __getitem__(self, key):
index = np.searchsorted(self.nodes, key)
if hasattr(index, "__len__"):
if not (self.nodes[index] == key).all():
raise KeyError(f"nodes {key} are not all in the NodeMap")
else:
if self.nodes[index] != key:
raise KeyError(f"node {key} is not in the NodeMap")
return self.value[index]
class TypeMixin:
@classmethod
def _compute_abstract_properties(
cls, obj, props: Set[str], known_props: Dict[str, Any]
) -> Dict[str, Any]:
ret = known_props.copy()
# fast properties
for prop in {"dtype"} - ret.keys():
if prop == "dtype":
ret[prop] = dtypes.dtypes_simplified[obj.value.dtype]
return ret
@classmethod
def assert_equal(
cls,
obj1,
obj2,
aprops1,
aprops2,
cprops1,
cprops2,
*,
rel_tol=1e-9,
abs_tol=0.0,
):
assert len(obj1) == len(
obj2
), f"length mismatch: {len(obj1)} != {len(obj2)}"
assert aprops1 == aprops2, f"property mismatch: {aprops1} != {aprops2}"
nodes1, vals1 = obj1.nodes, obj1.value
nodes2, vals2 = obj2.nodes, obj2.value
# Compare
assert (nodes1 == nodes2).all(), f"node id mismatch: {nodes1} != {nodes2}"
if issubclass(vals1.dtype.type, np.floating):
assert np.isclose(vals1, vals2, rtol=rel_tol, atol=abs_tol).all()
else:
assert (vals1 == vals2).all()
class NumpyMatrixType(ConcreteType, abstract=Matrix):
@classmethod
def is_typeclass_of(cls, obj):
"""Is obj described by this type class?"""
return isinstance(obj, np.ndarray) and len(obj.shape) == 2
@classmethod
def _compute_abstract_properties(
cls, obj, props: Set[str], known_props: Dict[str, Any]
) -> Dict[str, Any]:
ret = known_props.copy()
# fast properties
for prop in {"is_dense", "is_square", "dtype"} - ret.keys():
if prop == "dtype":
ret[prop] = dtypes.dtypes_simplified[obj.dtype]
return ret
@classmethod
def assert_equal(
cls,
obj1,
obj2,
aprops1,
aprops2,
cprops1,
cprops2,
*,
rel_tol=1e-9,
abs_tol=0.0,
):
assert obj1.shape == obj2.shape, f"shape mismatch: {obj1.shape} != {obj2.shape}"
assert aprops1 == aprops2, f"property mismatch: {aprops1} != {aprops2}"
assert obj1.shape == obj2.shape, f"{obj1.shape} != {obj2.shape}"
# Compare
if issubclass(obj1.dtype.type, np.floating):
assert np.isclose(obj1, obj2, rtol=rel_tol, atol=abs_tol).all().all()
else:
assert (obj1 == obj2).all().all()
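# Minimal usage sketch (assumes the surrounding metagraph package is importable;
# not part of the original module). It shows how the wrappers above normalise
# their inputs: NumpyNodeSet sorts and de-duplicates node ids, and NumpyNodeMap
# keeps values aligned with their (sorted) node ids.
if __name__ == "__main__":
    ns = NumpyNodeSet([3, 1, 2, 3])  # stored sorted and unique -> [1, 2, 3]
    print(len(ns), 2 in ns)
    nm = NumpyNodeMap([10.0, 20.0, 30.0], nodes=[5, 1, 3])  # re-ordered by node id
    print(nm[1], nm[3], nm[5])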
|
StarcoderdataPython
|
82102
|
import numpy as np
def B_to_b(B):
x_indices = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
y_indices = [0, 0, 3, 1, 3, 1, 3, 2, 3, 2, 3]
return np.array(B[x_indices, y_indices])
def b_to_B(b):
B = np.zeros((6, 4))
x_indices = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
y_indices = [0, 0, -1, 1, -1, 1, -1, 2, -1, 2, -1]
B[x_indices, y_indices] = b
return B
def Bchain_to_bchain(B_chain):
chain_length = B_chain.shape[0]
x_indices = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
y_indices = [0, 0, 3, 1, 3, 1, 3, 2, 3, 2, 3]
b_chain = np.zeros((chain_length, 11))
for sample in range(chain_length):
b_chain[sample] = B_chain[sample, x_indices, y_indices]
return b_chain
def Lambdachain_to_lambdachain(Lambda_chain):
chain_length = Lambda_chain.shape[0]
x_indices = [0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8]
y_indices = [0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5]
# lambda_chain = np.zeros((chain_length, 12))
# for sample in range(chain_length):
# lambda_chain[sample] = Lambda_chain[sample, x_indices, y_indices]
lambda_chain = Lambda_chain[:, x_indices, y_indices]
return lambda_chain
def Tauchain_to_tauchain(Tau_chain):
chain_length = Tau_chain.shape[0]
x_indices = [0, 0, 1, 1, 2, 2]
y_indices = [0, 1, 2, 3, 4, 5]
# tau_chain = np.zeros((chain_length, 3, 2))
# for sample in range(chain_length):
# tau_chain[sample] = Tau_chain[sample, x_indices, y_indices].reshape((3, 2))
tau_chain = Tau_chain[:, x_indices, y_indices].reshape(chain_length, 3, 2)
return tau_chain
def Tau_to_tau(Tau):
x_indices = [0, 0, 1, 1, 2, 2]
y_indices = [0, 1, 2, 3, 4, 5]
tau = Tau[x_indices, y_indices].reshape((3, 2))
return tau
def w_to_W(w):
row_indices = [0,1,1,2,2,3,3,4,4,5,5]
col_indices = [0,1,2,3,4,5,6,7,8,9,10]
N = w.shape[0]
W = np.zeros((N, 6, 11))
for n in range(N):
li = w[n, [0,0,3,1,3,1,3,2,3,2,3]]
W[n, row_indices, col_indices] = li
return W
def lambda_to_Lambda(lambda_):
Lambda = np.zeros((9, 6))
li = lambda_[[0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7]]
row_indices = [0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8]
col_indices = [0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5]
Lambda[row_indices, col_indices] = li
return Lambda
def Lambda_to_lambda(Lambda):
row_indices = [0,0,1,2,3,3,4,5,6,6,7,8]
col_indices = [0,1,0,1,2,3,2,3,4,5,4,5]
return Lambda[row_indices, col_indices]
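# Quick self-check sketch (not part of the original file): the index tables above
# are easiest to read as a round trip between the dense matrix layouts and the
# flattened parameter vectors. The values used here are arbitrary placeholders.
if __name__ == "__main__":
    b = np.arange(1.0, 12.0)           # 11 free parameters
    B = b_to_B(b)                      # scatter into the 6x4 matrix layout
    print(np.allclose(B_to_b(B), b))   # True: B_to_b inverts b_to_B
    W = w_to_W(np.ones((2, 4)))        # w rows use column indices 0..3
    print(W.shape)                     # (2, 6, 11)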
|
StarcoderdataPython
|
3385030
|
<reponame>philip-shen/note_python<gh_stars>0
from concurrent import futures
import sys
import tkinter
import tkinter.scrolledtext
import os, os.path
import datetime
import glob
from PIL import Image, ImageTk
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
import matplotlib.animation as animation
import numpy as np
import random
import importlib
from telloedu.status import *
from telloedu.streaming import *
from telloedu.tellostart import *
from telloedu.command import *
import ugoki
def _quit():
end()
#root.quit()
root.destroy()
def init_tof():
global x_tof, y_tof
x_tof = np.arange(-50, 0, 0.5)
y_tof = np.zeros(100)
line_tof.set_ydata(y_tof)
line_tof.set_xdata(x_tof)
return line_tof,
def animate_tof(n):
global t_tof,x_tof,y_tof
if get_drone_flg():
y_tof = np.append(y_tof, get_tof())
y_tof = np.delete(y_tof, 0)
t_tof = t_tof + 1
x_tof = np.append(x_tof, t_tof)
x_tof = np.delete(x_tof, 0)
plt_tof.set_ylim(0, 200)
plt_tof.set_xlim(min(x_tof), max(x_tof))
line_tof.set_ydata(y_tof)
line_tof.set_xdata(x_tof)
return line_tof,
def init_h():
global x_h, y_h
x_h = np.arange(-50, 0, 0.5)
y_h = np.zeros(100)
line_h.set_ydata(y_h)
line_h.set_xdata(x_h)
return line_h,
def animate_h(n):
global t_h,x_h,y_h
if get_drone_flg():
y_h = np.append(y_h, get_height())
y_h = np.delete(y_h, 0)
t_h = t_h + 1
x_h = np.append(x_h, t_h)
x_h = np.delete(x_h, 0)
plt_h.set_ylim(0, 200)
plt_h.set_xlim(min(x_h), max(x_h))
line_h.set_ydata(y_h)
line_h.set_xdata(x_h)
return line_h,
def init_temp():
global x_temp, y_temp
x_temp = np.arange(-50, 0, 0.5)
y_temp = np.zeros(100)
    line_temp.set_ydata(y_temp)
    line_temp.set_xdata(x_temp)
return line_temp,
def animate_temp(n):
global t_temp,x_temp,y_temp
if get_drone_flg():
y_temp = np.append(y_temp, get_temph())
y_temp = np.delete(y_temp, 0)
t_temp = t_temp + 1
x_temp = np.append(x_temp, t_temp)
x_temp = np.delete(x_temp, 0)
plt_temp.set_ylim(0, 100)
plt_temp.set_xlim(min(x_temp), max(x_temp))
line_temp.set_ydata(y_temp)
line_temp.set_xdata(x_temp)
return line_temp,
def init_bat():
global x_bat, y_bat
x_bat = np.arange(-50, 0, 0.5)
y_bat = np.zeros(100)
line_bat.set_ydata(y_bat)
line_bat.set_xdata(x_bat)
return line_bat,
def animate_bat(n):
global t_bat,x_bat,y_bat
if get_drone_flg():
y_bat = np.append(y_bat, get_bat())
y_bat = np.delete(y_bat, 0)
t_bat = t_bat + 1
x_bat = np.append(x_bat, t_bat)
x_bat = np.delete(x_bat, 0)
plt_bat.set_ylim(0, 100)
plt_bat.set_xlim(min(x_bat), max(x_bat))
line_bat.set_ydata(y_bat)
line_bat.set_xdata(x_bat)
return line_bat,
def _godrone():
global drone_flg
textbox1.delete('1.0','end')
importlib.reload(ugoki)
set_drone_flg(True)
def _droneEmergency():
emergency()
def _thumbnail():
qr_image_path = './img/qrcode-*.png'
qr_files = glob.glob(qr_image_path)
#print(qr_files)
qr_files.sort(key = os.path.getmtime, reverse = True)
#print(qr_files)
if qr_files:
qr_image = Image.open(qr_files[0])
qr_image = qr_image.resize((184, 144))
m1_image = ImageTk.PhotoImage(qr_image)
dt = datetime.datetime.fromtimestamp(os.stat(qr_files[0]).st_mtime)
'''
        time.strftime in Python does not support Chinese characters and raises UnicodeEncodeError: 'locale' codec can't encode character '\u5e74' in position 2: encoding error
https://www.cnblogs.com/feiquan/p/11217534.html
'''
#qr_image_label = tkinter.Label(canvas2, image = m1_image, text = 'QR Code 撮影日\n'+ dt.strftime(
# '%Y年%m月%d日 %H:%M:%S'), compound='top')
qr_image_label = tkinter.Label(canvas2, image = m1_image, text = 'QR Code 撮影日\n'+ dt.strftime(
'%YY%mM%dD %H:%M:%S'), compound='top')
qr_image_label.grid(column = 1, row = 2)
qr_image_label.image = m1_image
pic_image_path = './img/photo-*.png'
pic_files = glob.glob(pic_image_path)
#print(pic_files)
pic_files.sort(key = os.path.getmtime, reverse = True)
#print(pic_files)
if pic_files:
pic_image = Image.open(pic_files[0])
pic_image = pic_image.resize((184, 144))
m2_image = ImageTk.PhotoImage(pic_image)
dt = datetime.datetime.fromtimestamp(os.stat(pic_files[0]).st_mtime)
#pic_image_label = tkinter.Label(canvas2, image = m2_image, text = 'Photo 撮影日\n'+ dt.strftime(
# '%Y年%m月%d日 %H:%M:%S'), compound='top')
pic_image_label = tkinter.Label(canvas2, image = m2_image, text = 'Photo 撮影日\n'+ dt.strftime(
'%YY%mM%dD %H:%M:%S'), compound='top')
pic_image_label.grid(column = 2, row = 2)
pic_image_label.image = m2_image
def redirector(inputStr):
textbox1.insert(tkinter.INSERT, inputStr)
if __name__ == "__main__":
root = tkinter.Tk()
root.wm_title("ドローン・プログラミング支援システム")
frame1 = tkinter.Frame(root, width = 1200, height = 400, borderwidth = 4)
frame1.pack(padx = 5, pady = 5)
fig = Figure(figsize = (10, 7))
fig.suptitle('Drone flight status')
canvas = FigureCanvasTkAgg(fig, master = frame1) # A tk.DrawingArea.
# ToF
t_tof = 1
x_tof = np.arange(-50, 0, 0.5)
y_tof = np.zeros(100)
plt_tof = fig.add_subplot(221)
plt_tof.set_title('ToF')
plt_tof.set_xlabel("time[s]")
plt_tof.set_ylabel("Height[cm]")
line_tof, = plt_tof.plot(x_tof, y_tof)
ani_tof = animation.FuncAnimation(fig, animate_tof, init_func = init_tof, interval = 500, blit = False)
# Height
t_h = 1
x_h = np.arange(-50, 0, 0.5)
y_h = np.zeros(100)
plt_h = fig.add_subplot(222)
plt_h.set_title('Height')
plt_h.set_xlabel("time[s]")
plt_h.set_ylabel("Height[cm]")
line_h, = plt_h.plot(x_h, y_h)
ani_h = animation.FuncAnimation(fig, animate_h, init_func = init_h, interval = 500, blit = False)
# Temperature
t_temp = 1
x_temp = np.arange(-50, 0, 0.5)
y_temp = np.zeros(100)
plt_temp = fig.add_subplot(223)
plt_temp.set_title('Temperature')
plt_temp.set_xlabel("time[s]")
plt_temp.set_ylabel("Temperature[c]")
line_temp, = plt_temp.plot(x_temp, y_temp)
ani_temp = animation.FuncAnimation(fig, animate_temp, init_func = init_temp, interval = 500, blit = False)
# battery
t_bat = 1
x_bat = np.arange(-50, 0, 0.5)
y_bat = np.zeros(100)
plt_bat = fig.add_subplot(224)
plt_bat.set_title('Battery')
plt_bat.set_xlabel("time[s]")
plt_bat.set_ylabel("battery[%]")
line_bat, = plt_bat.plot(x_bat, y_bat)
ani_bat = animation.FuncAnimation(fig, animate_bat, init_func = init_bat, interval = 500, blit = False)
fig.subplots_adjust(wspace = 0.5, hspace = 0.5)
toolbar = NavigationToolbar2Tk(canvas, root)
canvas.get_tk_widget().pack(side = 'left')
frame2 = tkinter.Frame(frame1, borderwidth = 4)
frame2.pack(padx = 5, pady = 20)
m1_text_label = tkinter.Label(frame2, text = '送信コマンド・ログ',font = ("",18))
m1_text_label.pack(side = 'top', fill = 'both')
#textbox1 = tkinter.Text(frame2)
textbox1 = tkinter.scrolledtext.ScrolledText(frame2)
textbox1.configure(bd=1, highlightbackground='gray')
textbox1.pack(side = 'top', fill = 'both', padx=10)
sys.stdout.write = redirector
m2_text_label = tkinter.Label(frame2, text = '\n\n\n撮影した写真データ', font = ("",18))
m2_text_label.pack(side = 'top', fill = 'both', padx = 10)
canvas2 = tkinter.Canvas(frame2, width = 400)
canvas2.pack(side = 'top', fill = 'both')
_thumbnail()
frame = tkinter.Frame(root, width = 60, height = 40, borderwidth = 4, bg = 'gray')
frame.pack(padx = 5, pady = 5)
button1 = tkinter.Button(master = frame, text = "プログラム実行", command = _godrone, width = 20, fg = '#0000ff')
button1.pack(fill = 'x', padx = 30, side = 'left')
button2 = tkinter.Button(master = frame, text = "撮影した写真表示", command = _thumbnail, width = 20)
button2.pack(fill = 'x', padx = 30, side = 'left')
button3 = tkinter.Button(master = frame, text = "終了", command = _quit, width = 20)
button3.pack(fill = 'x', padx = 30, side = 'left')
button4 = tkinter.Button(master = frame, text = "ドローン緊急停止", command = _droneEmergency, width = 20, fg = '#ff0000')
button4.pack(fill = 'x', padx = 30, side = 'left')
with futures.ThreadPoolExecutor(max_workers = 10) as executor:
executor.submit(tello_status_thread)
executor.submit(video_recording_thread)
executor.submit(start_thread)
set_drone_flg(False)
tkinter.mainloop()
|
StarcoderdataPython
|
1630401
|
<gh_stars>1-10
from os import listdir, getcwd
from os.path import isfile, join
from math import sin, cos
from setting_utils import timeLimit, heightLimit, input_stream
files = [f for f in listdir(join(getcwd(), 'uploads')) if isfile(join(getcwd(), 'uploads', f))]
files = [f for f in files if f.endswith(".txt")]
rgbColor = ['255, 0, 0','51, 204, 51','0, 153, 255','255, 255, 0','204, 0, 153','51, 51, 0','255, 0, 102','200, 200, 200','0, 51, 102','255, 153, 255']
czml = '$(document).ready(function(){\n'
colorIndex = 0;
fileIndex = 0;
for file in files :
FILE_PATH = join(getcwd(), 'uploads', str(file))
data = []
with open(FILE_PATH, 'r') as input_stream :
lines = input_stream.readlines()
words = lines[4].split(' ')
words = [x for x in words if len(x) > 0]
lat = float(words[11])
lon = float(words[12])
        _height = 0
        data.append([lon, lat, _height])
for i in range( 4, len(lines)) : #avoid head text
words = lines[i].split(' ')
words = [x for x in words if len(x) > 0]
#---Setting---
minutes = float(words[0]) + float(words[1])/60
height = float(words[3])
if(minutes > timeLimit):
break
if(height > heightLimit):
break
#-------------
if (len(words)>15) : #avoid crash data
dir_degree = 3.1415926*(float(words[8])+180)/180
speed = float(words[9])
u = cos(dir_degree)*speed
v = sin(dir_degree)*speed
lat += u/110736
lon += v/102189
                data.append([lon, lat, _height])
input_stream.close()
czml += (
'var line_points_%d = [\n'
) %fileIndex
for j in range(0, len(data)) :
czml += ('[%f,%f]' %(float(data[j][1]), float(data[j][0]))) #lat lon
if(j!=len(data)-1):
czml +=(',\n')
else:
czml +=('\n')
czml += (
'];\n'
'var polyline_options_%d = {\n'
' color: "rgb(%s)"\n'
'};\n'
'var polyline = L.polyline(line_points_%d, polyline_options_%d).addTo(map);\n'
) % (fileIndex, rgbColor[colorIndex%10], fileIndex, fileIndex)
colorIndex += 1
fileIndex += 1
czml += (
'map.fitBounds(line_points_0);\n'
'})'
)
fout = open(join(getcwd(), 'balloon', 'data', '2dpath.js'), 'w')
fout.write(czml)
fout.close()
|
StarcoderdataPython
|
11398894
|
<filename>Algo-DS/10_Sorting/bubble.py<gh_stars>1-10
''' Bubble Sort '''
def bubble_sort(arr):
for n in range(len(arr)-1, 0, -1):
print('loops: ', n)
for k in range(n):
print('bubble: ', k)
if arr[k] < arr[k+1]:
arr[k], arr[k+1] = arr[k+1], arr[k]
print('arr: ', arr)
return arr
if __name__ == '__main__':
arr = [10, 9, 33, 0, -1, 5]
print(bubble_sort(arr))
|
StarcoderdataPython
|
1677375
|
import os
import stat
import shutil
import filecmp
from dvc.main import main
from dvc.command.repro import CmdRepro
from dvc.project import ReproductionError
from dvc.utils import file_md5
from tests.basic_env import TestDvc
class TestRepro(TestDvc):
def setUp(self):
super(TestRepro, self).setUp()
self.foo_stage = self.dvc.add(self.FOO)
self.file1 = 'file1'
self.file1_stage = self.file1 + '.dvc'
self.dvc.run(fname=self.file1_stage,
outs=[self.file1],
deps=[self.FOO, self.CODE],
cmd='python {} {} {}'.format(self.CODE, self.FOO, self.file1))
class TestReproChangedCode(TestRepro):
def test(self):
self.swap_code()
stages = self.dvc.reproduce(self.file1_stage)
self.assertTrue(filecmp.cmp(self.file1, self.BAR))
self.assertEqual(len(stages), 1)
def swap_code(self):
os.unlink(self.CODE)
new_contents = self.CODE_CONTENTS
new_contents += "\nshutil.copyfile('{}', sys.argv[2])\n".format(self.BAR)
self.create(self.CODE, new_contents)
class TestReproChangedData(TestRepro):
def test(self):
self.swap_foo_with_bar()
stages = self.dvc.reproduce(self.file1_stage)
self.assertTrue(filecmp.cmp(self.file1, self.BAR))
self.assertEqual(len(stages), 2)
def swap_foo_with_bar(self):
os.unlink(self.FOO)
shutil.copyfile(self.BAR, self.FOO)
class TestReproChangedDeepData(TestReproChangedData):
def test(self):
file2 = 'file2'
file2_stage = file2 + '.dvc'
self.dvc.run(fname=file2_stage,
outs=[file2],
deps=[self.file1, self.CODE],
cmd='python {} {} {}'.format(self.CODE, self.file1, file2))
self.swap_foo_with_bar()
stages = self.dvc.reproduce(file2_stage)
self.assertTrue(filecmp.cmp(self.file1, self.BAR))
self.assertTrue(filecmp.cmp(file2, self.BAR))
self.assertEqual(len(stages), 3)
class TestReproPhony(TestReproChangedData):
def test(self):
stage = self.dvc.run(deps=[self.file1])
self.swap_foo_with_bar()
self.dvc.reproduce(stage.path)
self.assertTrue(filecmp.cmp(self.file1, self.BAR))
class TestNonExistingOutput(TestRepro):
def test(self):
os.unlink(self.FOO)
with self.assertRaises(ReproductionError) as cx:
self.dvc.reproduce(self.file1_stage)
class TestReproDataSource(TestReproChangedData):
def test(self):
self.swap_foo_with_bar()
stages = self.dvc.reproduce(self.foo_stage.path)
self.assertTrue(filecmp.cmp(self.FOO, self.BAR))
self.assertEqual(stages[0].outs[0].md5, file_md5(self.BAR)[0])
class TestReproChangedDir(TestDvc):
def test(self):
file_name = 'file'
shutil.copyfile(self.FOO, file_name)
stage_name = 'dir.dvc'
dir_name = 'dir'
dir_code = 'dir.py'
with open(dir_code, 'w+') as fd:
fd.write("import os; import shutil; os.mkdir(\"{}\"); shutil.copyfile(\"{}\", os.path.join(\"{}\", \"{}\"))".format(dir_name, file_name, dir_name, file_name))
self.dvc.run(fname=stage_name,
outs=[dir_name],
deps=[file_name, dir_code],
cmd="python {}".format(dir_code))
stages = self.dvc.reproduce(stage_name)
self.assertEqual(len(stages), 0)
os.unlink(file_name)
shutil.copyfile(self.BAR, file_name)
stages = self.dvc.reproduce(stage_name)
self.assertEqual(len(stages), 1)
class TestCmdRepro(TestRepro):
def test(self):
ret = main(['repro',
self.file1_stage])
self.assertEqual(ret, 0)
ret = main(['repro',
'non-existing-file'])
self.assertNotEqual(ret, 0)
|
StarcoderdataPython
|
9700706
|
from PIL import Image
from pywebio.input import *
from pywebio.output import *
from pywebio.session import *
from pywebio import start_server
import io
from datetime import datetime
import time
now = datetime.now()
# loading spin
def loading():
with put_loading(shape='border', color='primary').style('width:4rem; height:4rem'):
time.sleep(2)
# validate form data
def check_form(data):  # input group validation: return (input name, error msg) when validation fails
    for k, v in data.items():
        if v is None or len(str(v)) == 0:
            return k, 'Required!'
def main():
try:
func = select('Which function you want?', ['Resize image', 'Transparency image'])
if func == 'Resize image':
resize()
else:
tspncy()
except Exception as e:
put_text(f'Error : {e}').style('color: red;')
def resize():
put_text('Home').style('color: blue;text-decoration: underline;').onclick(
lambda: run_js('window.location.reload()'))
data = input_group("Image info", [
input('width new image', name='w', type=NUMBER),
input('high new image', name='h', type=NUMBER)
], validate=check_form)
img = file_upload("Select a image:", accept="image/*")
if img is not None:
w = data['w']
h = data['h']
image = Image.open(io.BytesIO(img['content']))
new_image = image.resize((w, h))
dt_string = now.strftime("%d%m%Y%H%M%S")
new_name = f'{dt_string}_{w}_{h}.{image.format}'
loading()
out_image = new_image.convert('RGB')
put_image(out_image)
# File Output
img_byte_arr = io.BytesIO()
new_image.save(img_byte_arr, format=image.format)
put_file(new_name, img_byte_arr.getvalue())
put_table([
['format', image.format],
['mode', image.mode],
['orginal size', image.size],
['palette', image.palette],
['new size', new_image.size],
])
def tspncy():
put_text('Home').style('color: blue;text-decoration: underline;').onclick(
lambda: run_js('window.location.reload()'))
img = file_upload("Select a image:", accept="image/*")
if img is not None:
image = Image.open(io.BytesIO(img['content']))
image = image.convert("RGBA")
datas = image.getdata()
new_data = []
with put_loading(shape='border', color='warning').style('width:4rem; height:4rem'):
for item in datas:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
new_data.append((255, 255, 255, 0))
else:
if item[0] > 150:
new_data.append((255, 255, 255, 0))
else:
new_data.append(item)
image.putdata(new_data)
dt_string = now.strftime("%d%m%Y%H%M%S")
new_name = f'{dt_string}_transparency.png'
loading()
# # File Output
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format='PNG')
put_image(image.convert('RGB'))
put_file(new_name, img_byte_arr.getvalue())
if __name__ == '__main__':
start_server(main, debug=True, port=8080, cdn=False)
|
StarcoderdataPython
|
1663755
|
import cv2 as cv
img = cv.imread("Lenna.png")
cv.namedWindow("BRISK", cv.WINDOW_NORMAL)
def f(x):
return
# Initiate BRISK detector
cv.createTrackbar("Threshold", "BRISK", 30, 128, f)
cv.createTrackbar("Octaves", "BRISK", 3, 9, f)
cv.createTrackbar("Pattern Scale", "BRISK", 3, 9, f)
while True:
current_threshold = cv.getTrackbarPos("Threshold", "BRISK")
current_octaves = cv.getTrackbarPos("Octaves", "BRISK")
current_scale = (cv.getTrackbarPos("Pattern Scale", "BRISK") + 1) / 3
# Additional parameters for customization:
# radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for keypoint scale 1).
# numberList defines the number of sampling points on the sampling circle. Must be the same size as radiusList.
# dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint scale 1).
# dMin threshold for the long pairings used for orientation determination (in pixels for keypoint scale 1).
# indexChange index remapping of the bits.
brisk = cv.BRISK_create(
thresh=current_threshold, octaves=current_octaves, patternScale=current_scale
)
# find the keypoints with BRISK
kp = brisk.detect(img, None)
# draw only keypoints location,not size and orientation
img2 = cv.drawKeypoints(img, kp, None, color=(0, 255, 0))
cv.imshow("BRISK", img2)
if cv.waitKey(10) & 0xFF == 27:
break
cv.destroyAllWindows()
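# Hedged sketch of the custom-pattern BRISK_create overload described in the
# comments above (not part of the original script). The radius/number values are
# the commonly cited BRISK pattern defaults and are illustrative only; the lists
# are passed positionally in case keyword names differ between OpenCV builds.
custom_brisk = cv.BRISK_create(
    [0.0, 2.47, 4.17, 6.29, 9.18],  # radiusList: sampling-ring radii in pixels (keypoint scale 1)
    [1, 10, 14, 15, 20],            # numberList: sampling points per ring
)
custom_kp = custom_brisk.detect(img, None)
print("custom-pattern BRISK found", len(custom_kp), "keypoints")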
|
StarcoderdataPython
|
3525545
|
<reponame>wcsodw1/Computer-Vision-with-Artificial-intelligence
# python David_1_1_detect_face.py -i "../../../CV_PyImageSearch/Dataset/data/basketball.jpg"
# Summary :
# 1. Detect faces in an image
# 2. Save (imwrite) the bounding-box image to a file
# API : cv2.waitKey(0) keeps the image window open until it is closed manually
# 1. import the necessary packages
import cv2
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-i","--image", required = True, help = "The Path to image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
# Resize
image = cv2.resize(image, (500, 400), interpolation=cv2.INTER_CUBIC)
## visualize
cv2.imshow("Faces", image)
cv2.waitKey(0)
# 2. load our image and convert it to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
## visualize
cv2.imshow("Faces", gray)
cv2.waitKey(0)
# 3. Draw the Bondingbox
# 3.1 load the face detector
detector = cv2.CascadeClassifier("../../../detector/haarcascade.xml")
print(detector)
# 3.2 detect faces in the image
rects = detector.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 9,
minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
# loop over the faces and draw a rectangle surrounding each
for (x, y, w, h) in rects:
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 3)
# draw the bounding boxes on the grayscale copy as well
for (x, y, w, h) in rects:
cv2.rectangle(gray, (x, y), (x + w, y + h), (0, 255, 0), 3)
# visualize
cv2.imshow("Faces", image)
cv2.waitKey(0)
cv2.imshow("Faces", gray)
cv2.waitKey(0)  # keep the image visible until it is closed manually
# imwrite : Save the converted image
cv2.imwrite("../../data/imwrite/chp1_1/basketball_box.jpg", image)
cv2.imwrite("../../data/imwrite/chp1_1/basketball_gray_box.jpg", gray)
|
StarcoderdataPython
|
6486675
|
from sklearn import mixture
import sklearn.datasets
import matplotlib.pyplot as plt
import numpy as np
import generator as g
from sklearn import preprocessing
from itertools import groupby
def em(input_array, no_of_clusters):
    model = mixture.GaussianMixture(n_components=no_of_clusters, covariance_type='diag')
    a = model.fit(input_array)
    print(a.means_)
    print(a.weights_)
    labels = model.predict(input_array)
    # print the sorted cluster labels and their counts for each of the three known wine classes
    for start, end in [(0, 59), (59, 130), (130, 178)]:
        l1 = np.sort(labels[start:end])
        print(l1)
        print([len(list(group)) for key, group in groupby(l1)])
try:
    input_data = np.genfromtxt("C:\\Users\\<NAME>\\Desktop\\Deep Learning\\Datasets\\wine_sort.csv", delimiter=',')
except:
    print("Could not open file")
input_data = input_data[:, :13]
print(input_data.shape)
em(input_data, 3)
print("Completed")
|
StarcoderdataPython
|
1612110
|
from django.shortcuts import redirect
from django.views.generic.base import TemplateView
from django.http import Http404, HttpResponse
from django.urls import reverse
from scorecard.profiles import get_profile
from scorecard.models import Geography, LocationNotFound
from infrastructure.models import Project
from household.models import HouseholdServiceTotal, HouseholdBillTotal
from household.chart import stack_chart, chart_data, percent_increase, yearly_percent
import json
from . import models
import municipal_finance
from . import serializers
from rest_framework import viewsets
import subprocess
from django.conf import settings
class GeographyViewSet(viewsets.ReadOnlyModelViewSet):
queryset = models.Geography.objects.all()
serializer_class = serializers.GeographySerializer
class MunicipalityProfileViewSet(viewsets.ReadOnlyModelViewSet):
queryset = municipal_finance.models.MunicipalityProfile.objects.all()
serializer_class = serializers.MunicipalityProfileSerializer
def infra_dict(project):
return {
"description": project.project_description,
"expenditure_amount": project.expenditure.first().amount,
"url": reverse('project-detail-view', args=[project.id]),
}
class LocateView(TemplateView):
template_name = "webflow/locate.html"
def get(self, request, *args, **kwargs):
self.lat = self.request.GET.get("lat", None)
self.lon = self.request.GET.get("lon", None)
self.nope = False
if self.lat and self.lon:
place = None
places = Geography.get_locations_from_coords(
latitude=self.lat, longitude=self.lon
)
if places:
place = places[0]
# if multiple, prefer the metro/local municipality if available
if len(places) > 1:
places = [p for p in places if p.geo_level == "municipality"]
if places:
place = places[0]
return redirect(
reverse("geography_detail", kwargs={"geography_id": place.geoid})
)
self.nope = True
return super(LocateView, self).get(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
return {
"page_data_json": json.dumps(
{"nope": self.nope},
cls=serializers.JSONEncoder,
sort_keys=True,
indent=4 if settings.DEBUG else None
),
}
class GeographyDetailView(TemplateView):
template_name = "webflow/muni-profile.html"
def dispatch(self, *args, **kwargs):
self.geo_id = self.kwargs.get("geography_id", None)
try:
self.geo_level, self.geo_code = self.geo_id.split("-", 1)
self.geo = Geography.find(self.geo_code, self.geo_level)
except (ValueError, LocationNotFound):
raise Http404
# check slug
if kwargs.get("slug") or self.geo.slug:
if kwargs["slug"] != self.geo.slug:
kwargs["slug"] = self.geo.slug
url = "/profiles/%s-%s-%s/" % (
self.geo_level,
self.geo_code,
self.geo.slug,
)
return redirect(url, permanent=True)
return super(GeographyDetailView, self).dispatch(*args, **kwargs)
def pdf_url(self):
return "/profiles/%s-%s-%s.pdf" % (
self.geo_level,
self.geo_code,
self.geo.slug,
)
def get_context_data(self, *args, **kwargs):
page_json = {}
profile = get_profile(self.geo)
page_json.update(profile)
profile["geography"] = self.geo.as_dict()
page_json["geography"] = self.geo
page_json["pdf_url"] = self.pdf_url()
profile["demarcation"]["disestablished_to_geos"] = [
Geography.objects.filter(geo_code=code).first().as_dict()
for code in profile["demarcation"].get("disestablished_to", [])
]
profile["demarcation"]["established_from_geos"] = [
Geography.objects.filter(geo_code=code).first().as_dict()
for code in profile["demarcation"].get("established_from", [])
]
for date in profile["demarcation"]["land_gained"]:
for change in date["changes"]:
change["geo"] = (
Geography.objects.filter(geo_code=change["demarcation_code"])
.first()
.as_dict()
)
for date in profile["demarcation"]["land_lost"]:
for change in date["changes"]:
change["geo"] = (
Geography.objects.filter(geo_code=change["demarcation_code"])
.first()
.as_dict()
)
infrastructure_financial_year = "2019/2020"
infrastructure = (
Project.objects.prefetch_related(
"geography",
"expenditure__budget_phase",
"expenditure__financial_year",
"expenditure",
)
.filter(
geography__geo_code=self.geo_code,
expenditure__budget_phase__name="Budget year",
expenditure__financial_year__budget_year=infrastructure_financial_year,
)
.order_by("-expenditure__amount")
)
page_json["infrastructure_summary"] = {
"projects": [infra_dict(p) for p in infrastructure[:5]],
"project_count": infrastructure.count(),
"financial_year": infrastructure_financial_year[5:9]
}
households = HouseholdBillTotal.summary.bill_totals(self.geo_code)
page_json["household_percent"] = percent_increase(households)
page_json["yearly_percent"] = yearly_percent(households)
chart = chart_data(households)
page_json["household_chart_overall"] = chart
service_middle = (
HouseholdServiceTotal.summary.active(self.geo_code)
.middle()
.order_by("financial_year__budget_year")
)
service_affordable = (
HouseholdServiceTotal.summary.active(self.geo_code)
.affordable()
.order_by("financial_year__budget_year")
)
service_indigent = (
HouseholdServiceTotal.summary.active(self.geo_code)
.indigent()
.order_by("financial_year__budget_year")
)
chart_middle = stack_chart(service_middle, households)
chart_affordable = stack_chart(service_affordable, households)
chart_indigent = stack_chart(service_indigent, households)
page_json["household_chart_middle"] = chart_middle
page_json["household_chart_affordable"] = chart_affordable
page_json["household_chart_indigent"] = chart_indigent
page_context = {
"page_data_json": json.dumps(
page_json,
cls=serializers.JSONEncoder,
sort_keys=True,
indent=4 if settings.DEBUG else None
),
"page_title": f"{ self.geo.name} - Municipal Money",
"page_description": f"Financial Performance for { self.geo.name }, and other information.",
}
return page_context
class GeographyPDFView(GeographyDetailView):
def get(self, request, *args, **kwargs):
# render as pdf
path = "/profiles/%s-%s-%s?print=1" % (
self.geo_level,
self.geo_code,
self.geo.slug,
)
url = request.build_absolute_uri(path)
# !!! This relies on GeographyDetailView validating the user-provided
        # input to the path to avoid arbitrary command execution
command = ["node", "makepdf.js", url]
try:
completed_process = subprocess.run(
command,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
print(e.output)
raise e
filename = "%s-%s-%s.pdf" % (self.geo_level, self.geo_code, self.geo.slug)
response = HttpResponse(completed_process.stdout, content_type='application/pdf')
response['Content-Disposition'] = f'attachment; filename="{ filename }"'
return response
class SitemapView(TemplateView):
template_name = "sitemap.txt"
content_type = "text/plain"
def get_context_data(self):
return {"geos": Geography.objects.all()}
|
StarcoderdataPython
|
4918812
|
<filename>ecommercejockey/main/serializers.py
from rest_framework.serializers import Serializer
class ProductOrderCreateSerializer(Serializer):
def update(self, instance, validated_data):
print(validated_data)
x = {
'id': 820982911946154508,
'email': '<EMAIL>',
'closed_at': None,
'created_at': '2019-11-07T13:19:14-07:00',
'updated_at': '2019-11-07T13:19:14-07:00',
'number': 234,
'note': None,
'token': '<KEY>',
'gateway': None,
'test': True,
'total_price': '493.18',
'subtotal_price': '483.18',
'total_weight': 0,
'total_tax': '0.00',
'taxes_included': False,
'currency': 'CAD',
'financial_status': 'voided',
'confirmed': False,
'total_discounts': '5.00',
'total_line_items_price': '488.18',
'cart_token': None,
'buyer_accepts_marketing': True,
'name': '#9999',
'referring_site': None,
'landing_site': None,
'cancelled_at': '2019-11-07T13:19:14-07:00',
'cancel_reason': 'customer',
'total_price_usd': None,
'checkout_token': None,
'reference': None,
'user_id': None,
'location_id': None,
'source_identifier': None,
'source_url': None,
'processed_at': None,
'device_id': None,
'phone': None,
'customer_locale': 'en',
'app_id': None,
'browser_ip': None,
'landing_site_ref': None,
'order_number': 1234,
'discount_applications': [
{
'type': 'manual',
'value': '5.0',
'value_type': 'fixed_amount',
'allocation_method': 'one',
'target_selection': 'explicit',
'target_type': 'line_item',
'description': 'Discount',
'title': 'Discount'
}
],
'discount_codes': [],
'note_attributes': [],
'payment_gateway_names': [
'visa',
'bogus'
],
'processing_method': '',
'checkout_id': None,
'source_name': 'web',
'fulfillment_status': 'pending',
'tax_lines': [],
'tags': '',
'contact_email': '<EMAIL>',
'order_status_url': 'https://diesler-corp.myshopify.com/8018133082/orders/123456abcd/authenticate?key=abcdefg',
'presentment_currency': 'CAD',
'total_line_items_price_set': {
'shop_money': {
'amount': '488.18',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '488.18',
'currency_code': 'CAD'
}
},
'total_discounts_set': {
'shop_money': {
'amount': '5.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '5.00',
'currency_code': 'CAD'
}
},
'total_shipping_price_set': {
'shop_money': {
'amount': '10.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '10.00',
'currency_code': 'CAD'
}
},
'subtotal_price_set': {
'shop_money': {
'amount': '483.18',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '483.18',
'currency_code': 'CAD'
}
},
'total_price_set': {
'shop_money': {
'amount': '493.18',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '493.18',
'currency_code': 'CAD'
}
},
'total_tax_set': {
'shop_money': {
'amount': '0.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '0.00',
'currency_code': 'CAD'
}
},
'total_tip_received': '0.0',
'admin_graphql_api_id': 'gid://shopify/Order/820982911946154508',
'line_items': [
{
'id': 866550311766439020,
'variant_id': 18052577198170,
'title': 'Short sleeve t-shirt',
'quantity': 1,
'sku': 'aefasfasfsdd-1',
'variant_title': None,
'vendor': None,
'fulfillment_service': 'manual',
'product_id': 1995182735450,
'requires_shipping': True,
'taxable': True,
'gift_card': False,
'name': 'Short sleeve t-shirt',
'variant_inventory_management': 'shopify',
'properties': [],
'product_exists': True,
'fulfillable_quantity': 1,
'grams': 1000,
'price': '20.00',
'total_discount': '0.00',
'fulfillment_status': None,
'price_set': {
'shop_money': {
'amount': '20.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '20.00',
'currency_code': 'CAD'
}
},
'total_discount_set': {
'shop_money': {
'amount': '0.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '0.00',
'currency_code': 'CAD'
}
},
'discount_allocations': [],
'admin_graphql_api_id': 'gid://shopify/LineItem/866550311766439020',
'tax_lines': []
},
{
'id': 141249953214522974,
'variant_id': 31154156109914,
'title': 'External Oil FIlter',
'quantity': 1,
'sku': 'SINSDEOF5906',
'variant_title': None,
'vendor': None,
'fulfillment_service': 'manual',
'product_id': 4353136066650,
'requires_shipping': True,
'taxable': True,
'gift_card': False,
'name': 'External Oil FIlter',
'variant_inventory_management': None,
'properties': [],
'product_exists': True,
'fulfillable_quantity': 1,
'grams': 3629,
'price': '468.18',
'total_discount': '5.00',
'fulfillment_status': None,
'price_set': {
'shop_money': {
'amount': '468.18',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '468.18',
'currency_code': 'CAD'
}
},
'total_discount_set': {
'shop_money': {
'amount': '5.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '5.00',
'currency_code': 'CAD'
}
},
'discount_allocations': [
{
'amount': '5.00',
'discount_application_index': 0,
'amount_set': {
'shop_money': {
'amount': '5.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '5.00',
'currency_code': 'CAD'
}
}
}
],
'admin_graphql_api_id': 'gid://shopify/LineItem/141249953214522974',
'tax_lines': []
}
],
'shipping_lines': [
{
'id': 271878346596884015,
'title': 'Generic Shipping',
'price': '10.00',
'code': None,
'source': 'shopify',
'phone': None,
'requested_fulfillment_service_id': None,
'delivery_category': None,
'carrier_identifier': None,
'discounted_price': '10.00',
'price_set': {
'shop_money': {
'amount': '10.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '10.00',
'currency_code': 'CAD'
}
},
'discounted_price_set': {
'shop_money': {
'amount': '10.00',
'currency_code': 'CAD'
},
'presentment_money': {
'amount': '10.00',
'currency_code': 'CAD'
}
},
'discount_allocations': [],
'tax_lines': []}],
'billing_address': {
'first_name': 'Bob',
'address1': '123 Billing Street',
'phone': '555-555-BILL',
'city': 'Billtown',
'zip': 'K2P0B0',
'province': 'Kentucky',
'country': 'United States',
'last_name': 'Biller',
'address2': None,
'company': 'My Company',
'latitude': None,
'longitude': None,
'name': '<NAME>',
'country_code': 'US',
'province_code': 'KY'
},
'shipping_address': {
'first_name': 'Steve',
'address1': '123 Shipping Street',
'phone': '555-555-SHIP',
'city': 'Shippington',
'zip': '40003',
'province': 'Kentucky',
'country': 'United States',
'last_name': 'Shipper',
'address2': None,
'company': 'Shipping Company',
'latitude': None,
'longitude': None,
'name': '<NAME>',
'country_code': 'US',
'province_code': 'KY'
},
'fulfillments': [],
'refunds': [],
'customer': {
'id': 115310627314723954,
'email': '<EMAIL>',
'accepts_marketing': False,
'created_at': None,
'updated_at': None,
'first_name': 'John',
'last_name': 'Smith',
'orders_count': 0,
'state': 'disabled',
'total_spent': '0.00',
'last_order_id': None,
'note': None,
'verified_email': True,
'multipass_identifier': None,
'tax_exempt': False,
'phone': None,
'tags': '',
'last_order_name': None,
'currency': 'CAD',
'accepts_marketing_updated_at': None,
'marketing_opt_in_level': None,
'admin_graphql_api_id': 'gid://shopify/Customer/115310627314723954',
'default_address': {
'id': 715243470612851245,
'customer_id': 115310627314723954,
'first_name': None,
'last_name': None,
'company': None,
'address1': '123 Elm St.',
'address2': None,
'city': 'Ottawa',
'province': 'Ontario',
'country': 'Canada',
'zip': 'K2H7A8',
'phone': '123-123-1234',
'name': '',
'province_code': 'ON',
'country_code': 'CA',
'country_name': 'Canada',
'default': True
}
}
}
def create(self, validated_data):
print('hello')
return {
'hello': 'hello'
}
def save(self, **kwargs):
print('hi')
|
StarcoderdataPython
|
1959816
|
from rpi_inky_layout import Layout, Rotation
from PIL import Image, ImageDraw
# Uncomment if you want to test on your Pi/Inky combo.
# from inky.auto import auto
topLayout = Layout((400, 100), packingMode='h', border=(1, 2))
# Uncomment if you want to test on your Pi/Inky combo.
# board = auto()
# topLayout = Layout(board.resolution, 'h', (0, 0))
sublayout1 = topLayout.addLayer()
sublayout2 = topLayout.addLayer()
sublayout3 = topLayout.addLayer()
sublayout31 = sublayout3.addLayer()
sublayout32 = sublayout3.addLayer()
mode = "P"
bgColour = 0
image31 = Image.new(mode, sublayout31.size, bgColour)
draw = ImageDraw.Draw(image31)
draw.text(tuple(s/2 for s in sublayout31.size), "Hello", 1)
sublayout31.setImage(image31)
image32 = Image.new(mode, sublayout32.size, bgColour)
draw = ImageDraw.Draw(image32)
draw.text(tuple(s/2 for s in sublayout32.size), "World!", 1)
sublayout32.setImage(image32)
topLayout.draw()
topLayout.write("hello-world.png")
# Uncomment if you want to test on your Pi/Inky combo.
# inky_image = Image.open("hello-world.png")
# board.set_image(inky_image)
|
StarcoderdataPython
|
1944359
|
import mock
import numpy as np
from emukit.core import ContinuousParameter, ParameterSpace
from emukit.core.acquisition import Acquisition
from emukit.core.interfaces import IModel
from emukit.core.loop import (FixedIntervalUpdater, FixedIterationsStoppingCondition, LoopState, SequentialPointCalculator,
UserFunctionWrapper, RandomSampling)
from emukit.core.optimization import GradientAcquisitionOptimizer
def test_fixed_iteration_stopping_condition():
stopping_condition = FixedIterationsStoppingCondition(5)
loop_state_mock = mock.create_autospec(LoopState)
loop_state_mock.iteration = 0
assert(stopping_condition.should_stop(loop_state_mock) is False)
loop_state_mock = mock.create_autospec(LoopState)
loop_state_mock.iteration = 5
assert(stopping_condition.should_stop(loop_state_mock) is True)
def test_every_iteration_model_updater():
mock_model = mock.create_autospec(IModel)
mock_model.optimize.return_value(None)
updater = FixedIntervalUpdater(mock_model, 1)
loop_state_mock = mock.create_autospec(LoopState)
loop_state_mock.iteration = 1
loop_state_mock.X.return_value(np.random.rand(5, 1))
loop_state_mock.Y.return_value(np.random.rand(5, 1))
updater.update(loop_state_mock)
mock_model.optimize.assert_called_once()
def test_every_iteration_model_updater_with_cost():
"""
Tests that the model updater can use a different attribute from loop_state as the training targets
"""
class MockModel(IModel):
def optimize(self):
pass
def set_data(self, X: np.ndarray, Y: np.ndarray):
self._X = X
self._Y = Y
@property
def X(self):
return self._X
@property
def Y(self):
return self._Y
mock_model = MockModel()
updater = FixedIntervalUpdater(mock_model, 1, lambda loop_state: loop_state.cost)
loop_state_mock = mock.create_autospec(LoopState)
loop_state_mock.iteration = 1
loop_state_mock.X.return_value(np.random.rand(5, 1))
loop_state_mock.cost = np.random.rand(5, 1)
cost = np.random.rand(5, 1)
loop_state_mock.cost = cost
updater.update(loop_state_mock)
assert np.array_equiv(mock_model.X, cost)
def test_sequential_evaluator():
# SequentialPointCalculator should just return result of the acquisition optimizer
mock_acquisition = mock.create_autospec(Acquisition)
mock_acquisition_optimizer = mock.create_autospec(GradientAcquisitionOptimizer)
mock_acquisition_optimizer.optimize.return_value = (np.array([[0.]]), None)
loop_state_mock = mock.create_autospec(LoopState)
seq = SequentialPointCalculator(mock_acquisition, mock_acquisition_optimizer)
next_points = seq.compute_next_points(loop_state_mock)
# "SequentialPointCalculator" should only ever return 1 value
assert(len(next_points) == 1)
# Value should be result of acquisition optimization
assert(np.equal(np.array([[0.]]), next_points[0]))
def test_sequential_with_context():
mock_acquisition = mock.create_autospec(Acquisition)
mock_acquisition.has_gradients = False
mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
acquisition_optimizer = GradientAcquisitionOptimizer(space)
loop_state_mock = mock.create_autospec(LoopState)
seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25})
# "SequentialPointCalculator" should only ever return 1 value
assert(len(next_points) == 1)
# Context value should be what we set
assert np.isclose(next_points[0, 0], 0.25)
def test_sequential_with_all_parameters_fixed():
mock_acquisition = mock.create_autospec(Acquisition)
mock_acquisition.has_gradients = False
mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
acquisition_optimizer = GradientAcquisitionOptimizer(space)
loop_state_mock = mock.create_autospec(LoopState)
seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25, 'y': 0.25})
assert np.array_equiv(next_points, np.array([0.25, 0.25]))
def test_random_sampling_without_context():
space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
rs = RandomSampling(space)
loop_state_mock = mock.create_autospec(LoopState)
next_points = rs.compute_next_points(loop_state_mock)
assert(len(next_points) == 1)
def test_random_sampling_with_context():
space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
rs = RandomSampling(space)
loop_state_mock = mock.create_autospec(LoopState)
next_points = rs.compute_next_points(loop_state_mock, context={'x': 0.25})
assert(len(next_points) == 1)
# Context value should be what we set
assert np.isclose(next_points[0, 0], 0.25)
def test_user_function_wrapper():
def function_test(x):
return x[:, 0:1]**2 + x[:, 1:2]**2
user_function = UserFunctionWrapper(function_test)
results = user_function.evaluate(np.random.rand(10, 2))
assert len(results) == 10, "A different number of results were expected"
for res in results:
assert res.X.ndim == 1, "X results are expected to be 1 dimensional"
assert res.Y.ndim == 1, "Y results are expected to be 1 dimensional"
|
StarcoderdataPython
|
6473629
|
# coding:utf-8
# @Time : 2019/5/15
# @Author : xuyouze
# @File Name : __init__.py
import importlib
from config.base_config import BaseConfig
from models.base_model import BaseModel
from models.build import build_model
__all__ = ["create_model"]
#
# def find_model_using_name(model_name: str):
# model_filename = "models." + model_name + "_model"
# modellib = importlib.import_module(model_filename)
#
# model = None
# target_model_name = model_name.replace("_", "") + "model"
# target_model_name = target_model_name.replace("-", "")
# for name, cls in modellib.__dict__.items():
# if name.lower() == target_model_name.lower() and issubclass(cls, BaseModel):
# model = cls
# break
#
# if not model:
# raise NotImplementedError(
# "In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (
# model_filename, target_model_name))
# return model
def create_model(config: BaseConfig):
# model = find_model_using_name(config.model_name)
model = build_model(config.model_name)
instance = model(config)
config.logger.info("{0} model has been created".format(config.model_name))
return instance
|
StarcoderdataPython
|
6489267
|
<filename>sender.py
import smtplib
from config import SMTP_USER, SMTP_PWD
def notify(recipients, subject, body):
# build smtp message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s""" % \
(SMTP_USER, ", ".join(recipients), subject, body)
# send email
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(SMTP_USER, SMTP_PWD)
server.sendmail(SMTP_USER, recipients, message)
server.close()
        print('Email sent')
    except:
        print("Failed to send mail")
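# Illustrative usage sketch (the recipient below is a placeholder, not a real address):
if __name__ == "__main__":
    notify(["<EMAIL>"], "Test notification", "This is only an example body.")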
|
StarcoderdataPython
|
5053733
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 08:59:32 2018
@author: ymamo
"""
import NetAgent as N
import ResourceScape as R
def form_connection(model):
for agent in model.ml.agents_by_type[N.NetAgent].values():
meta = []
meta.append(agent)
meta.append(model.ml.agents_by_type[R.resource][agent.pos])
yield meta
def reassess(meta):
resource = list(meta.subs_by_type[R.resource].values())
agent = list(meta.subs_by_type[N.NetAgent].values())
if resource[0].pos != agent[0].pos:
return [resource[0], agent[0]]
|
StarcoderdataPython
|
3385412
|
import setuptools
def long_description():
with open('README.md', 'r') as file:
return file.read()
setuptools.setup(
name='stream-unzip',
version='0.0.69',
author='Department for International Trade',
author_email='<EMAIL>',
description='Python function to stream unzip all the files in a ZIP archive, without loading the entire ZIP file into memory or any of its uncompressed files',
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://github.com/uktrade/stream-unzip',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Topic :: System :: Archiving :: Compression',
],
python_requires='>=3.5.0',
install_requires=[
'pycryptodome>=3.10.1',
'stream-inflate>=0.0.12',
],
py_modules=[
'stream_unzip',
],
)
|
StarcoderdataPython
|
8032148
|
#-*- coding:utf-8 -*-
from core_backend import context
from core_backend import conf
from core_backend.rpc.amqp import AMQPRpc
from functools import wraps
from contextlib import contextmanager
from core_backend.libs.exception import Error
import sys
import traceback
import logging
import plugin
import settings
import pprint
import tempfile
logger = logging.getLogger(__name__)
@contextmanager
def service_handler(instance):
    """Standard service invocation."""
if not isinstance(instance, handler):
raise Exception("instance is not a service handler")
logger.debug("begin to dispatch service: %s", instance._service)
service_prepare(instance)
instance._state = 'PREPARE'
try:
yield instance
instance._state = 'SUCCESS'
        logger.debug("service instance %s has been dispatched",
instance._service)
instance.response(0, u"处理成功")
except Error, e:
logger.error('error to dispatch service %s, %s', e.code, e.msg)
instance.response(e.code, e.msg)
except:
logger.error('error to dispatch service %s', instance._service)
instance._state = 'FAIL'
exc_type, exc_value, exc_traceback = sys.exc_info()
err_stack = traceback.format_exception(
exc_type, exc_value, exc_traceback)
for line in err_stack:
logger.error(line.strip())
instance.response(-1, u"调用服务[%s]失败:%s" %
(instance._service, ','.join(exc_value)))
finally:
service_post(instance)
instance._state = 'TERMINATE'
def service_decorator(callback):
    """Decorator applied to each service step."""
@wraps(callback)
def wrapper(*args, **kwargs):
instance = args[0]
result = callback(*args, **kwargs)
        logger.debug("service %s:%s has been dispatched",
instance._service, callback.func_name)
return result
return wrapper
@service_decorator
def service_prepare(instance):
    # new style of configuration
if hasattr(settings, 'DB_URL'):
context.connect(settings.DB_URL)
else:
context.connect()
with context.session_scope() as session:
return instance.prepare_request(session)
@service_decorator
def service_post(instance):
with context.session_scope() as session:
return instance.post_request(session)
@service_decorator
def service_dispatch(instance):
with context.session_scope(instance=instance) as session:
logger.debug(
u"**************** SERVICE 【%s】 START ******************" % (instance._service))
logger.debug(u"请求报文: %s", instance.context.request)
instance.context.session = session
        # the plugins share the same session as dispatch
plg_handler = plugin.PluginHandler(instance, session)
plg_handler.load_plugins(settings.PLUGINS)
plg_handler.run_plugins()
result = instance.dispatch(session)
plg_handler.run_post_plugins()
logger.debug(
u"++++++++++++++++ SERVICE 【%s】 END ++++++++++++++++++" % (instance._service))
return result
class handler(object):
xa = False
_record_jrnl = True
"""
    service_code  service name
    channel       RabbitMQ channel, used to dispatch the corresponding messages yourself
    deliver       message delivery attributes
    properties    message properties
    body          message body / JSON packet
"""
    def __init__(self, service, request):
        """ @param _service service code or name
            @param _request RabbitMQ information
            @param body request body packet
            _respond whether the current service has already responded
            _responable whether the service needs to send a response
"""
self._service = service
self._request = request
self.body = request.body
self.context = context.Context(request.body, _request=self._request)
self._respond = False
self._responable = True if self._get_reply_queue() is not None else False
self._state = 'INIT'
        # whether to write a journal record
        """
        call the user's init
"""
self.init()
def init(self):
pass
def post(self, session):
raise Error(-1, 'method POST undefined.')
def get(self, session):
raise Error(-1, 'method GET undefined.')
def delete(self, session):
raise Error(-1, 'method DELETE undefined.')
def put(self, session):
raise Error(-1, 'method PUT undefined.')
def _get_reply_queue(self):
""" 根据rabbitmq信息获取响应队列"""
properties = self._request.properties
if properties.reply_to is not None:
res = properties.reply_to
logger.debug("response queue is :%s", res)
return res
else:
return None
def _get_dlq(self):
""" 取死信队列"""
properties = self._request.properties
basic_deliver = self._request.basic_deliver
        if 'dlq' in properties.headers:
dlq = properties.headers.get('dlq')
logger.error("Reply queue not defined, using dlq:%s", dlq)
return dlq
else:
logger.debug('MQ properties:%s' % (properties))
dlq = basic_deliver.routing_key + ".dlq"
logger.error("Reply queue and DLQ not defined, using dlq:%s", dlq)
return dlq
def allow_anonymous(self):
'''
        Anonymous access is not allowed by default.
        Override this method if the service needs to support anonymous access.
'''
return False
def dlq_declare(self, frame):
logger.debug('DLQ Queue [%s] Declared.' % (self._dlq))
def response(self, code, msg):
""" 用于返回失败或错误信息 """
if self._responable is False and code is 0:
""" FIXME responable 需要使用其它参数定义,而非reply_to?"""
return
if self._respond is True:
logger.warning(u"当前服务[%s]已回复", self._service)
return
reply_queue = self._get_reply_queue()
if reply_queue is None:
            # FIXME: messages pushed to the DLQ still need to be consumed; this block needs refactoring.
            # At minimum the DLQ message should carry: 1) the original request payload,
            # 2) the reason for the failure, and 3) the originating service.
            # Could a single shared DLQ be used instead of one DLQ per service, so that
            # only one dedicated consumer is needed?
            DLQ = self._get_dlq()
            logger.error(
                "service [%s] error:[%s,%s], put message to DLQ [%s]", self._service, code, msg, DLQ)
self._dlq = DLQ
self._request.channel.queue_declare(
queue=DLQ, durable=True, callback=self.dlq_declare)
self._request.channel.basic_publish(
exchange='', routing_key=DLQ, properties=self._request.properties, mandatory=1, body=self.body)
else:
            if code != 0:
logger.error("%s,%s", code, msg)
else:
logger.info("service %s dispatched ok:%s,%s",
self._service, code, msg)
self.context.error(code, msg)
logger.debug(u"响应报文: %s", self.context.response)
payload = self.context.jsonify()
        # log only a prefix so that a large body does not slow the response down
logger.debug("service response:%s", payload[:2048])
self._request.channel.basic_publish(
exchange='', routing_key=reply_queue, properties=self._request.properties, mandatory=1, body=payload)
self._respond = True
def dispatch(self, session):
'''
        If the service does not care about the request method, simply override this
        method; otherwise implement the corresponding method handler (post/get/delete/put).
'''
callback_func = {
"POST": self.post,
"GET": self.get,
"DELETE": self.delete,
"PUT": self.put
}
req_method = self.context.request.header.method
if req_method not in callback_func.keys():
raise Error(-1, "method: %s not supported." % (req_method,))
return callback_func[req_method](session)
def new_file(self, filename):
"""
        Create a temporary file whose contents will be sent back to the client.
"""
resp_header = self.context.response.header
resp_header.send_file_name = filename
resp_header.tmp_file_name = tempfile.mktemp()
return open(resp_header.tmp_file_name, 'w')
def prepare_request(self, session):
""" 服务前准备"""
logger.debug("default prepare for service...")
def post_request(self, session):
""" 服务后处理"""
logger.debug("default post for service...")
|
StarcoderdataPython
|
54750
|
from CreatureRogue.data_layer.species import Species
class Encounter:
def __init__(self, species: Species, min_level: int, max_level: int, rarity):
self.species = species
self.min_level = min_level
self.max_level = max_level
self.rarity = rarity
def __str__(self):
return "Encounter: {0} ({1},{2})".format(self.species, self.min_level, self.max_level)
|
StarcoderdataPython
|
11225023
|
<gh_stars>0
from google.appengine.api import urlfetch
from django.shortcuts import render
from mock_data import EGFR_GBM_LGG as FAKE_PLOT_DATA
from maf_api_mock_data import EGFR_BLCA_BRCA as FAKE_MAF_DATA
from django.conf import settings
#############################################
# This file is an abstraction used by all visualizations to access and gather data.
#############################################
# static endpoints
MAF_ENDPOINT_URI_TEMPLATE = settings.BASE_API_URL + '/_ah/api/maf_api/v1/maf_search?gene={gene}&{tumor_parameters}'
BQ_ENDPOINT_URL = settings.BASE_API_URL + '/_ah/api/bq_api/v1'
INTERPRO_BQ_ENDPOINT_URI_TEMPLATE = settings.BASE_API_URL + '/_ah/api/bq_api/v1/bq_interpro?uniprot_id={uniprot_id}'
# Static definitions
SEQPEEK_VIEW_DEBUG_MODE = False
SAMPLE_ID_FIELD_NAME = 'tumor_sample_barcode'
TUMOR_TYPE_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'amino_acid_position'
PROTEIN_DOMAIN_DB = 'PFAM'
# Static definitions
friendly_name_map = {
'disease_code':'Disease Code',
'gender':'Gender',
'mirnPlatform':'microRNA expression platform',
'gexpPlatform':'gene (mRNA) expression platform',
'methPlatform':'DNA methylation platform',
'rppaPlatform':'protein quantification platform',
'cnvrPlatform':'copy-number platform',
'age_at_initial_pathologic_diagnosis':'age at diagnosis',
'hsa_miR_146a_5p':'hsa-miR-146a-5p expression (log2[normalized_counts+1])',
'hsa_miR_7_7p':'hsa-miR-7-5p expression (log2[normalized_counts+1])',
'CNVR_EGFR':'EGFR copy-number (log2[CN/2])',
'EGFR_chr7_55086714_55324313':'EGFR expression (log2[normalized_counts+1])',
'EGFR_chr7_55086714_55324313_EGFR':'EGFR protein quantification',
'EGFR_chr7_55086288_cg03860890_TSS1500_Island':'EGFR methylation (TSS1500, CpG island)',
'EGFR_chr7_55086890_cg14094960_5pUTR_Island':"EGFR methylation (5' UTR, CpG island)",
'EGFR_chr7_55089770_cg10002850_Body_SShore':'EGFR methylation (first intron, cg10002850)',
'EGFR_chr7_55177623_cg18809076_Body':'EGFR methylation (first intron, cg18809076)'
}
numerical_attributes = [
'age_at_initial_pathologic_diagnosis',
'hsa_miR_146a_5p',
'hsa_miR_7_7p',
'CNVR_EGFR',
'EGFR_chr7_55086714_55324313',
'EGFR_chr7_55086714_55324313_EGFR',
'EGFR_chr7_55086288_cg03860890_TSS1500_Island',
'EGFR_chr7_55086890_cg14094960_5pUTR_Island',
'EGFR_chr7_55089770_cg10002850_Body_SShore',
'EGFR_chr7_55177623_cg18809076_Body'
]
categorical_attributes = [
'disease_code',
'gender',
'mirnPlatform',
'gexpPlatform',
'methPlatform',
'rppaPlatform',
'cnvrPlatform'
]
fm_friendly_name_map = {
'percent_lymphocyte_infiltration':'Percent Lymphocyte Infiltration',
'percent_monocyte_infiltration':'Percent Monocyte Infiltration',
'percent_necrosis':'Percent Necrosis',
'percent_neutrophil_infiltration':'Percent Neutrophil Infiltration',
'percent_normal_cells':'Percent Normal Cells',
'percent_stromal_cells':'Percent Stromal Cells',
'percent_tumor_cells':'Percent Tumor Cells',
'percent_tumor_nuclei':'Percent Tumor Nuclei',
'age_at_initial_pathologic_diagnosis':'Age at Diagnosis',
'days_to_birth':'Days to Birth',
'days_to_initial_pathologic_diagnosis':'Days to Diagnosis',
'year_of_initial_pathologic_diagnosis':'Year of Diagnosis',
'days_to_last_known_alive':'Days to Last Known Alive',
'tumor_necrosis_percent':'Tumor Necrosis Percent',
'tumor_nuclei_percent':'Tumor Nuclei Percent',
'tumor_weight':'Tumor Weight',
'days_to_last_followup':'Days to Last Followup',
'gender':'Gender',
'history_of_neoadjuvant_treatment':'History of Neoadjuvant Treatment',
'icd_o_3_histology':'ICD-O-3 Code',
'prior_dx':'Prior Diagnosis',
'vital_status':'Vital Status',
'country':'Country',
'disease_code':'Disease Code',
'histological_type':'Histological Type',
'icd_10':'ICD-10 Category',
'icd_o_3_site':'ICD-O-3 Site',
'tumor_tissue_site':'Tumor Tissue Site',
'tumor_type':'Tumor Type',
'person_neoplasm_cancer_status':'Neoplasm Cancer Status',
'pathologic_N':'Pathologic N Stage',
'radiation_therapy':'Radiation Therapy',
'pathologic_T':'Pathologic T Stage',
'race':'Race',
'ethnicity':'Ethnicity',
'sampleType':'Sample Type',
'DNAseq_data':'DNA Sequencing Data',
'mirnPlatform':'microRNA expression platform',
'gexpPlatform':'gene (mRNA) expression platform',
'methPlatform':'DNA methylation platform',
'rppaPlatform':'protein quantification platform',
'cnvrPlatform':'copy-number platform',
}
fm_numerical_attributes = [
'percent_lymphocyte_infiltration',
'percent_monocyte_infiltration',
'percent_necrosis',
'percent_neutrophil_infiltration',
'percent_normal_cells',
'percent_stromal_cells',
'percent_tumor_cells',
'percent_tumor_nuclei',
'age_at_initial_pathologic_diagnosis',
'days_to_birth',
'days_to_initial_pathologic_diagnosis',
'year_of_initial_pathologic_diagnosis',
'days_to_last_known_alive',
'tumor_necrosis_percent',
'tumor_nuclei_percent',
'tumor_weight',
'days_to_last_followup'
]
fm_categorical_attributes = [
'gender',
'history_of_neoadjuvant_treatment',
'icd_o_3_histology',
'prior_dx',
'vital_status',
'country',
'disease_code',
'histological_type',
'icd_10',
'icd_o_3_site',
'tumor_tissue_site',
'tumor_type',
'person_neoplasm_cancer_status',
'pathologic_N',
'radiation_therapy',
'pathologic_T',
'race',
'ethnicity',
'sampleType',
'DNAseq_data',
'mirnPlatform',
'cnvrPlatform',
'methPlatform',
'gexpPlatform',
'rppaPlatform'
]
|
StarcoderdataPython
|
5194356
|
import sys
puctuation_removal = [".", ",", "'", "!", "%", "$", "@", "#", "^", "&", "*", "(", ")", "-", "_", "+", "=", "{", "}", "[", "]", "|", ";", ":", "<", ">", "?"]
def remove_punction(text_list):
"""
    This function removes punctuation from the provided text.
    Any character listed in puctuation_removal will be removed.
"""
for i in range(len(text_list)):
for punc in puctuation_removal:
text_list[i] = text_list[i].replace(punc, "")
print(text_list)
return text_list
def split_space_text(text_list):
"""
    This function splits the text into words.
"""
temp_text_list = []
for i in range(len(text_list)):
if " " in text_list[i]:
temp_text_list.extend(text_list[i].split())
text_list[i] = ""
print(temp_text_list)
print(text_list)
text_list.extend(temp_text_list)
return(text_list)
def find_word_length_frequency(text_list):
"""
    This function calculates the frequency of each word length.
"""
text_list = split_space_text(text_list)
text_length_list = [len(txt) for txt in remove_punction(text_list) if len(txt) > 0]
freq_dict = {}
unique_length_set = set(text_length_list)
for i in unique_length_set:
freq_dict[i] = text_length_list.count(i)
return(freq_dict)
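# Quick illustration (hypothetical input, traced from the functions above):
#   find_word_length_frequency(["hello world!"]) -> {5: 2}
# "hello world!" is split into ["hello", "world!"], punctuation is stripped,
# and both remaining words have length 5.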
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Please specify a text")
else:
freq_dict = find_word_length_frequency(sys.argv[1:])
print(freq_dict)
|
StarcoderdataPython
|
5091495
|
<filename>tensorflow_datasets/core/registered.py
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access registered datasets."""
import abc
import collections
import contextlib
import inspect
from typing import ClassVar, Iterator
from tensorflow_datasets.core import naming
from tensorflow_datasets.core.utils import py_utils
# Internal registry containing <str registered_name, DatasetBuilder subclass>
_DATASET_REGISTRY = {}
# Internal registry containing:
# <str snake_cased_name, abstract DatasetBuilder subclass>
_ABSTRACT_DATASET_REGISTRY = {}
# Datasets that are under active development and which we can't therefore load.
# <str snake_cased_name, in development DatasetBuilder subclass>
_IN_DEVELOPMENT_REGISTRY = {}
# Keep track of Dict[str (module name), List[DatasetBuilder]]
# This is directly accessed by `tfds.community.builder_cls_from_module` when
# importing community packages.
_MODULE_TO_DATASETS = collections.defaultdict(list)
_skip_registration = False
@contextlib.contextmanager
def skip_registration() -> Iterator[None]:
"""Context manager within which dataset builders are not registered."""
global _skip_registration
try:
_skip_registration = True
yield
finally:
_skip_registration = False
class RegisteredDataset(abc.ABC):
"""Subclasses will be registered and given a `name` property."""
# Name of the dataset, automatically filled.
name: ClassVar[str]
# Set to True for datasets that are under active development and should not
# be available through tfds.{load, builder} or documented in overview.md.
IN_DEVELOPMENT: ClassVar[bool] = False
def __init_subclass__(cls, skip_registration=False, **kwargs): # pylint: disable=redefined-outer-name
super().__init_subclass__(**kwargs)
# Set the name if the dataset does not define it.
# Use __dict__ rather than getattr so subclasses are not affected.
if not cls.__dict__.get('name'):
cls.name = naming.camelcase_to_snakecase(cls.__name__)
is_abstract = inspect.isabstract(cls)
# Capture all concrete datasets, including when skip registration is True.
    # This ensures that `builder_cls_from_module` can load the datasets
# even when the module has been imported inside a `skip_registration`
# context.
if not is_abstract:
_MODULE_TO_DATASETS[cls.__module__].append(cls)
# Skip dataset registration within contextmanager, or if skip_registration
# is passed as meta argument.
if skip_registration or _skip_registration:
return
# Check for name collisions
if py_utils.is_notebook(): # On Colab/Jupyter, we allow overwriting
pass
elif cls.name in _DATASET_REGISTRY:
raise ValueError(f'Dataset with name {cls.name} already registered.')
elif cls.name in _IN_DEVELOPMENT_REGISTRY:
raise ValueError(
f'Dataset with name {cls.name} already registered as in development.'
)
elif cls.name in _ABSTRACT_DATASET_REGISTRY:
raise ValueError(
f'Dataset with name {cls.name} already registered as abstract.'
)
# Add the dataset to the registers
if is_abstract:
_ABSTRACT_DATASET_REGISTRY[cls.name] = cls
elif cls.IN_DEVELOPMENT:
_IN_DEVELOPMENT_REGISTRY[cls.name] = cls
else:
_DATASET_REGISTRY[cls.name] = cls
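# --- Illustrative usage sketch (not part of the original module; the class name is hypothetical) ---
# Simply defining a concrete subclass registers it; the snake_cased class name becomes the key:
#
#   class MyDataset(RegisteredDataset):
#     ...
#
#   assert MyDataset.name == "my_dataset"
#   assert _DATASET_REGISTRY["my_dataset"] is MyDataset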
|
StarcoderdataPython
|
5085983
|
# pyOCD debugger
# Copyright (c) 2006-2021 Arm Limited
# Copyright (c) 2020 <NAME>
# Copyright (c) 2021 mentha
# Copyright (c) <NAME>
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import six
from time import sleep
import platform
import errno
from .interface import Interface
from .common import (
USB_CLASS_HID,
filter_device_by_class,
is_known_cmsis_dap_vid_pid,
check_ep,
generate_device_unique_id,
)
from ..dap_access_api import DAPAccessIntf
LOG = logging.getLogger(__name__)
try:
import usb.core
import usb.util
except:
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
class PyUSB(Interface):
"""! @brief CMSIS-DAP USB interface class using pyusb for the backend.
"""
isAvailable = IS_AVAILABLE
did_show_no_libusb_warning = False
def __init__(self):
super(PyUSB, self).__init__()
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.serial_number = None
self.kernel_driver_was_attached = False
self.closed = True
self.thread = None
self.rcv_data = []
self.read_sem = threading.Semaphore(0)
self.packet_size = 64
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=FindDap(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" % self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get count of HID interfaces and create the matcher object
hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID)))
matcher = MatchCmsisDapv1Interface(hid_interface_count)
# Get CMSIS-DAPv1 interface
interface = usb.util.find_descriptor(config, custom_match=matcher)
if interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no CMSIS-DAPv1 interface" %
self.serial_number)
interface_number = interface.bInterfaceNumber
# Find endpoints
ep_in, ep_out = None, None
for endpoint in interface:
if endpoint.bEndpointAddress & usb.util.ENDPOINT_IN:
ep_in = endpoint
else:
ep_out = endpoint
# Detach kernel driver
self.kernel_driver_was_attached = False
try:
if dev.is_kernel_driver_active(interface_number):
LOG.debug("Detaching Kernel Driver of Interface %d from USB device (VID=%04x PID=%04x).", interface_number, dev.idVendor, dev.idProduct)
dev.detach_kernel_driver(interface_number)
self.kernel_driver_was_attached = True
except (NotImplementedError, usb.core.USBError) as e:
            # Some implementations don't have kernel attach/detach
LOG.warning("USB Kernel Driver Detach Failed ([%s] %s). Attached driver may interfere with pyOCD operations.", e.errno, e.strerror)
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError as exc:
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.dev = dev
self.intf_number = interface_number
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.thread = threading.Thread(target=self.rx_task)
self.thread.daemon = True
self.thread.start()
def rx_task(self):
try:
while not self.closed:
self.read_sem.acquire()
if not self.closed:
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""! @brief Returns all the connected CMSIS-DAP devices.
returns an array of PyUSB (Interface) objects
"""
# find all cmsis-dap devices
try:
all_devices = usb.core.find(find_all=True, custom_match=FindDap())
except usb.core.NoBackendError:
if not PyUSB.did_show_no_libusb_warning:
LOG.warning("CMSIS-DAPv1 probes may not be detected because no libusb library was found.")
PyUSB.did_show_no_libusb_warning = True
return []
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSB()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product or f"{board.idProduct:#06x}"
new_board.vendor_name = board.manufacturer or f"{board.idVendor:#06x}"
new_board.serial_number = board.serial_number \
or generate_device_unique_id(board.idProduct, board.idVendor, board.bus, board.address)
boards.append(new_board)
return boards
def write(self, data):
"""! @brief Write data on the OUT endpoint associated to the HID interface
"""
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
if not self.ep_out:
bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface
bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0)
wValue = 0x200 #Issuing an OUT report
wIndex = self.intf_number #mBed Board interface number for HID
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
self.ep_out.write(data)
def read(self):
"""! @brief Read data on the IN endpoint associated to the HID interface
"""
while len(self.rcv_data) == 0:
sleep(0)
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited" %
self.serial_number)
return self.rcv_data.pop(0)
def close(self):
"""! @brief Close the interface
"""
assert self.closed is False
LOG.debug("closing interface")
self.closed = True
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
usb.util.release_interface(self.dev, self.intf_number)
if self.kernel_driver_was_attached:
try:
self.dev.attach_kernel_driver(self.intf_number)
except Exception as exception:
LOG.warning('Exception attaching kernel driver: %s',
str(exception))
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.kernel_driver_was_attached = False
self.thread = None
class MatchCmsisDapv1Interface(object):
"""! @brief Match class for finding CMSIS-DAPv1 interface.
This match class performs several tests on the provided USB interface descriptor, to
determine whether it is a CMSIS-DAPv1 interface. These requirements must be met by the
interface:
1. If there is more than one HID interface on the device, the interface must have an interface
name string containing "CMSIS-DAP".
2. bInterfaceClass must be 0x03 (HID).
3. bInterfaceSubClass must be 0.
4. Must have interrupt in endpoint, with an optional interrupt out endpoint, in that order.
"""
def __init__(self, hid_interface_count):
"""! @brief Constructor."""
self._hid_count = hid_interface_count
def __call__(self, interface):
"""! @brief Return True if this is a CMSIS-DAPv1 interface."""
try:
if self._hid_count > 1:
interface_name = usb.util.get_string(interface.device, interface.iInterface)
# This tells us whether the interface is CMSIS-DAP, but not whether it's v1 or v2.
if (interface_name is None) or ("CMSIS-DAP" not in interface_name):
return False
# Now check the interface class to distinguish v1 from v2.
if (interface.bInterfaceClass != USB_CLASS_HID) \
or (interface.bInterfaceSubClass != 0):
return False
# Must have either 1 or 2 endpoints.
if interface.bNumEndpoints not in (1, 2):
return False
endpoint_attrs = [
(usb.util.endpoint_direction(ep.bEndpointAddress),
usb.util.endpoint_type(ep.bmAttributes))
for ep in interface
]
# Possible combinations of endpoints
ENDPOINT_ATTRS_ALLOWED = [
# One interrupt endpoint IN
[(usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR)],
# Two interrupt endpoints, first one IN, second one OUT
[(usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR),
(usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_INTR)],
# Two interrupt endpoints, first one OUT, second one IN
[(usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_INTR),
(usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR)],
]
if endpoint_attrs not in ENDPOINT_ATTRS_ALLOWED:
return False
            # All checks passed, this is a CMSIS-DAPv1 interface!
return True
except (UnicodeDecodeError, IndexError):
# UnicodeDecodeError exception can be raised if the device has a corrupted interface name.
# Certain versions of STLinkV2 are known to have this problem. If we can't read the
            # interface name, there's no way to tell if it's a CMSIS-DAPv1 interface.
#
# IndexError can be raised if an endpoint is missing.
return False
class FindDap(object):
"""! @brief CMSIS-DAP match class to be used with usb.core.find"""
def __init__(self, serial=None):
"""! @brief Create a new FindDap object with an optional serial number"""
self._serial = serial
def __call__(self, dev):
"""! @brief Return True if this is a DAP device, False otherwise"""
# Check if the device class is a valid one for CMSIS-DAP.
if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
return False
try:
# First attempt to get the active config. This produces a more direct error
# when you don't have device permissions on Linux
config = dev.get_active_configuration()
# Now read the product name string.
device_string = dev.product
if (device_string is None) or ("CMSIS-DAP" not in device_string):
return False
# Get count of HID interfaces.
hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID)))
# Find the CMSIS-DAPv1 interface.
matcher = MatchCmsisDapv1Interface(hid_interface_count)
cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=matcher)
except usb.core.USBError as error:
if error.errno == errno.EACCES and platform.system() == "Linux":
msg = ("%s while trying to interrogate a USB device "
"(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
"See <https://github.com/pyocd/pyOCD/tree/master/udev> for help." %
(error, dev.idVendor, dev.idProduct))
# If we recognize this device as one that should be CMSIS-DAP, we can raise
# the level of the log message since it's almost certainly a permissions issue.
if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
LOG.warning(msg)
else:
LOG.debug(msg)
else:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s",
dev.idVendor, dev.idProduct, error)
return False
except (IndexError, NotImplementedError, ValueError, UnicodeDecodeError) as error:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s", dev.idVendor, dev.idProduct, error)
return False
if cmsis_dap_interface is None:
return False
if self._serial is not None:
if self._serial == "" and dev.serial_number is None:
return True
if self._serial != dev.serial_number:
return False
return True
|
StarcoderdataPython
|
6518741
|
#!/usr/bin/python
#coding=utf-8
def plot():
import numpy as np
import matplotlib.pyplot as plt
    plt.figure(1)  # create figure 1
    plt.figure(2)  # create figure 2
    ax1 = plt.subplot(211)  # create subplot 1 in figure 2
    ax2 = plt.subplot(212)  # create subplot 2 in figure 2
x = np.linspace(0, 3, 100)
for i in xrange(5):
        plt.figure(1)  # ① select figure 1
        plt.plot(x, np.exp(i * x / 3))
        plt.sca(ax1)  # ② select subplot 1 of figure 2
        plt.plot(x, np.sin(i * x))
        plt.sca(ax2)  # select subplot 2 of figure 2
plt.plot(x, np.cos(i * x))
plt.show()
class StackIntern:
def __init__(self):
self.items = []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def clear(self):
del self.items[:]
def size(self):
return len(self.items)
def isEmpty(self):
return self.size() == 0
def top(self):
return self.items[self.size()-1]
def dec2bin(dec_num):
    # convert decimal to binary using repeated "divide by 2, take the remainder"
rem_stack = StackIntern()
while dec_num > 0:
rem = dec_num % 2
rem_stack.push(rem)
dec_num = dec_num //2
bin_string = ""
i = 0
while not rem_stack.isEmpty():
bin_string = bin_string + str(rem_stack.pop())
return bin_string
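# Quick illustration of dec2bin (hypothetical input, traced by hand):
#   dec2bin(233) -> "11101001"
# The remainders are pushed LSB-first (1,0,0,1,0,1,1,1) and popped back out in
# reverse order, which is exactly what assembles the binary string MSB-first.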
class Stack():
def __init__(self,top=None):
self.ll = LinkedList(top)
def push(self, new_element):
"Push (add) a new element onto the top of the stack"
self.ll.insert_first(new_element)
def pop(self):
"Pop (remove) the first element off the top of the stack and return it"
return self.ll.delete_first()
def testStack():
# Test cases
# Set up some Elements
e1 = Element(1)
e2 = Element(2)
e3 = Element(3)
e4 = Element(4)
# Start setting up a Stack
stack = Stack(e1)
# Test stack functionality
stack.push(e2)
stack.push(e3)
print stack.pop().value
print stack.pop().value
print stack.pop().value
print stack.pop()
stack.push(e4)
print stack.pop().value
class Queue:
def __init__(self):
self.items = []
def enqueue(self, item):
self.items.append(item)
def dequeue(self):
return self.items.pop(0)
def clear(self):
del self.items[:]
def size(self):
return len(self.items)
def isEmpty(self):
return self.size() == 0
def __str__(self):
str_item ="/"
num = 0
for item in self.items:
print("item" + str(num) +" "+ item)
str_item = str_item +item +"/"
num = num +1
return str_item
def josephus(namelist, num):
    # The famous Josephus problem is a classic application of a queue (more precisely, a circular queue).
    # Participants stand in a circle; counting starts from one person (the head of the queue), and the person
    # who counts off n+1 leaves the circle. Counting then restarts from the person after the one who left,
    # and this repeats until only one person remains.
sim_queue = Queue()
for name in namelist:
sim_queue.enqueue(name)
while sim_queue.size() > 1:
for i in xrange(num):
print("num-->" + str(i))
sim_queue.enqueue(sim_queue.dequeue())
print(sim_queue)
print(sim_queue.dequeue())
return sim_queue.dequeue()
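# Quick illustration (hypothetical call, mirroring the commented-out example in __main__):
#   josephus(["A", "B", "C", "D", "E", "F"], 3)
# Each round skips three names to the back of the queue and removes the fourth,
# printing the queue state as it goes, and finally returns the last name left.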
class Node:
def __init__(self, initdata):
self.__data = initdata
self.__next = None
def getData(self):
return self.__data
def getNext(self):
return self.__next
def setData(self, newdata):
self.__data = newdata
def setNext(self, newnext):
self.__next = newnext
class SinCycLinkedlist:
def __init__(self):
self.head = Node(None)
self.head.setNext(self.head)
def add(self, item):
temp = Node(item)
temp.setNext(self.head.getNext())
self.head.setNext(temp)
def remove(self, item):
prev = self.head
while prev.getNext() != self.head:
cur = prev.getNext()
if cur.getData() == item:
prev.setNext(cur.getNext())
prev = prev.getNext()
def search(self, item):
cur = self.head.getNext()
while cur != self.head:
if cur.getData() == item:
return True
cur = cur.getNext()
return False
def empty(self):
return self.head.getNext() == self.head
def size(self):
count = 0
cur = self.head.getNext()
while cur != self.head:
count += 1
cur = cur.getNext()
return count
def SinCycLinkedlistTest():
s = SinCycLinkedlist()
print('s.empty() == %s, s.size() == %s' % (s.empty(), s.size()))
s.add(19)
s.add(86)
s.remove(19)
print('s.empty() == %s, s.size() == %s' % (s.empty(), s.size()))
print('86 is%s in s' % ('' if s.search(86) else ' not',))
print('4 is%s in s' % ('' if s.search(4) else ' not',))
print('s.empty() == %s, s.size() == %s' % (s.empty(), s.size()))
s.remove(19)
print('s.empty() == %s, s.size() == %s' % (s.empty(), s.size()))
class vportIdList:
def __init__(self, vport_index):
self.header = Node(None)
self.index = vport_index
def add(self, data):
temp = Node(data)
temp.setNext(self.header.getNext())
self.header.setNext(temp)
def clear(self):
prev = self.header.getNext()
while prev is not None:
print(str(prev) + str(prev.getData()))
prev = prev.getNext()
self.header.setNext(prev)
def __str__(self):
cur = self.header.getNext()
str_list = ""
while cur is not None:
str_list = str_list + " / " + str(cur.getData())
cur = cur.getNext()
return str_list
def vportIdListTest():
vlist = vportIdList(2001)
vlist.add(1)
vlist.add(2)
vlist.add(3)
vlist.add(4)
vlist.add(5)
vlist.add(6)
vlist.add(7)
vlist.add(8)
vlist.add(9)
vlist.add(10)
print(vlist)
vlist.clear()
print(vlist)
"""The LinkedList code from before is provided below.
Add three functions to the LinkedList.
"get_position" returns the element at a certain position.
The "insert" function will add an element to a particular
spot in the list.
"delete" will delete the first element with that
particular value.
Then, use "Test Run" and "Submit" to run the test cases
at the bottom."""
class Element(object):
def __init__(self, value):
self.value = value
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def append(self, new_element):
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element
def get_position(self, position):
"""Get an element from a particular position.
Assume the first position is "1".
Return "None" if position is not in the list."""
counter = 1
current = self.head
if position < 1:
return None
while current and counter <= position:
if counter == position:
return current
current = current.next
counter += 1
return None
def insert(self, new_element, position):
"""Insert a new node at the given position.
Assume the first position is "1".
Inserting at position 3 means between
the 2nd and 3rd elements."""
counter = 1
current = self.head
if position > 1:
while current and counter < position:
if counter == position - 1:
new_element.next = current.next
current.next = new_element
current = current.next
counter += 1
elif position == 1:
new_element.next = self.head
self.head = new_element
def delete(self, value):
"""Delete the first node with a given value."""
current = self.head
previous = None
while current.value != value and current.next:
previous = current
current = current.next
if current.value == value:
if previous:
previous.next = current.next
else:
self.head = current.next
def insert_first(self, new_element):
new_element.next = self.head
self.head = new_element
def delete_first(self):
if self.head:
deleted_element = self.head
temp = deleted_element.next
self.head = temp
return deleted_element
else:
return None
def testLinkedList():
# Test cases
# Set up some Elements
e1 = Element(1)
e2 = Element(2)
e3 = Element(3)
e4 = Element(4)
# Start setting up a LinkedList
ll = LinkedList(e1)
ll.append(e2)
ll.append(e3)
# Test get_position
# Should print 3
print ll.head.next.next.value
# Should also print 3
print ll.get_position(3).value
# Test insert
ll.insert(e4,3)
# Should print 4 now
print ll.get_position(3).value
# Test delete
ll.delete(1)
# Should print 2 now
print ll.get_position(1).value
# Should print 4 now
print ll.get_position(2).value
# Should print 3 now
print ll.get_position(3).value
def binarySearch(listData, value):
low = 0
high = len(listData)-1
while low <= high:
mid = (low + high)/2
if listData[mid] == value:
return mid
elif listData[mid] < value:
low = mid + 1
else:
high = mid - 1
return -1
def testBinarySearch():
list_data = [1,2,3,4,5,6,7,8,9]
index = binarySearch(list_data,9)
print("ist_data[%s] = %s"%(str(index),str(9)))
if __name__ == '__main__':
# print("main")
# print(josephus(["A", "B", "C", "D", "E", "F"], 3))
# SinCycLinkedlistTest()
# vportIdListTest()
# testLinkedList()
# testStack()
testBinarySearch()
|
StarcoderdataPython
|
4978487
|
from . import schema, verify, apply
import os, platform
import pytest
import jsonschema
root = {
'username':'root',
'uid': 0,
'gid': 0,
}
non_existant_user = {
'username':'nonexistantuser333'
}
def test_schema():
jsonschema.validate(root, schema())
jsonschema.validate(non_existant_user, schema())
def test_verify():
result = verify([root, non_existant_user])
assert len(result) == 1
assert result[0] == non_existant_user
def integration_test_apply():
result = verify([root, non_existant_user])
apply_result = apply(result, "")
assert len(apply_result) == 0
|
StarcoderdataPython
|
3389359
|
<filename>controller/controller.py<gh_stars>10-100
#!/usr/bin/env python2
import json
import sys
import threading
import time
from app.ipsec import IPsecS2SApplication
from cli import start_cli
import switch_controller
# Load configuration
with open('config/topology.json', 'r') as f:
config = json.load(f)
# Setup base controller and switch connections
base_controller = switch_controller.BaseController(config['controller_config']['p4info'],
config['controller_config']['bmv2_json'])
for switch in config["switches"]:
base_controller.add_switch_connection(switch['name'], address=switch['address'], device_id=switch['device_id'])
base_controller.startup()
# Insert table entries for forwarding
base_controller.install_table_entries_from_json("config/forwarding.json")
ipsec_app = IPsecS2SApplication(base_controller)
ipsec_app.setup_tunnel("config/tunnel_s1_s2_null.json")
# Setup CLI
cli_t = threading.Thread(target=start_cli, args=(base_controller,))
cli_t.daemon = True
cli_t.start()
# Exit CLI when CTRL-C is pressed or when the CLI is stopped by entering 'exit'
try:
while cli_t.is_alive():
time.sleep(1)
except KeyboardInterrupt:
print('shutting down')
sys.exit(0)
|
StarcoderdataPython
|
264303
|
from datetime import datetime
from fms_core.template_importer.row_handlers._generic import GenericRowHandler
from fms_core.services.container import get_container, move_container
class ContainerRowHandler(GenericRowHandler):
def __init__(self):
super().__init__()
def process_row_inner(self, container, destination_container):
# Container related section
container_obj, self.errors['container'], self.warnings['container'] = get_container(barcode=container['barcode'])
if not self.errors['container']:
#Update
_, self.errors['container_move'], self.warnings['container_move'] = move_container(
container_to_move=container_obj,
destination_barcode=destination_container['destination_barcode'],
destination_coordinates=destination_container['destination_coordinates'],
update_comment=destination_container['comment'],
)
|
StarcoderdataPython
|
11388305
|
import json
import re
import glob
from pathlib import Path
from typing import List, Optional
from rate import ClipRater
from scripts.helper import render
DEVICE = "auto"
INIT_SCRIPT = """
epochs: 20
optimizer: rmsprob
learnrate: 2
init:
mean: 0.33
std: 0.03
resolution: 10
targets:
- name: full scale
batch_size: 10
features:
- text: {{text}}
transforms:
- pad:
size: 32
mode: edge
- random_rotate:
degree: -30 30
center: 0 1
- mul: 1/2
- mean: 0.5
- noise: 1/5
constraints:
- blur:
kernel_size: 11
weight: .4
"""
DETAIL_SCRIPT = """
epochs: 300
optimizer: rmsprob
init:
image: {{image}}
mean: 0
std: 1
targets:
- name: full scale
batch_size: 10
features:
- text: {{text}}
transforms:
- pad:
size: 32
mode: edge
- random_rotate:
degree: -20 20
center: 0 1
- random_crop: 224
- mul: 1/3
- mean: 0.5
- bwnoise: 1/5*ti
constraints:
- blur:
kernel_size: 11
start: 70%
- saturation:
below: 0.005
"""
RESO_ONE_PASS_SCRIPT = """
epochs: 300
resolution:
- '{0: 8, 1: 8, 2: 16, 3: 32, 4: 64, 5: 128}.get(int(t*16),
224)'
optimizer: rmsprob
init:
mean: 0.3
std: .01
targets:
- name: full scale
batch_size: 10
features:
- text: {{text}}
transforms:
- resize: 224
- pad:
size: 32
mode: edge
- random_rotate:
degree: -20 20
center: 0 1
- random_crop: 224
- mul: 1/3
- mean: 0.5
- bwnoise: 1/5*ti
constraints:
- blur:
kernel_size: 11
start: 50%
- saturation:
below: 0.005
"""
RESO_INIT_SCRIPT = """
epochs: 20
resolution:
- 8 if t < .4 else 16
optimizer: rmsprob
learnrate: .8
init:
mean: 0.3
std: .01
targets:
- name: full scale
batch_size: 10
features:
- text: {{text}}
transforms:
- resize: 224
- pad:
size: 32
mode: edge
- random_rotate:
degree: -20 20
center: 0 1
- random_crop: 224
- mul: 1/3
- mean: 0.5
- bwnoise: 1/5
constraints:
- saturation:
below: 0.005
"""
RESO_DETAIL_SCRIPT = """
epochs: 300
resolution:
- max(8,min(224, int(t*448/8)*8 ))
optimizer: rmsprob
init:
image: {{image}}
mean: 0
std: 1
targets:
- name: full scale
batch_size: 10
features:
- text: {{text}}
transforms:
- resize: 224
- pad:
size: 32
mode: edge
- random_rotate:
degree: -20 20
center: 0 1
- random_crop: 224
- mul: 1/3
- mean: 0.5
- bwnoise: 1/5*ti
constraints:
- blur:
kernel_size: 11
start: 45%
- saturation:
below: 0.005
"""
def get_prompts() -> List[dict]:
with open(Path(__file__).resolve().parent / "dall-e-samples.json") as fp:
data = json.load(fp)
def _repl(m):
return m.groups()[0]
prompts = []
for info in data["completion_info"]:
text = info["text"]
text = re.sub(r"{\d:([^}]+)}", _repl, text)
keys = info["variable_assignments"]
ascii_keys = [
"".join(c if c.isalpha() else "-" for c in key)
for key in keys
]
prompts.append({
"text": text,
"keys": ascii_keys,
"orig_keys": keys,
"filename": f"{ascii_keys[0]}-of-{ascii_keys[1]}",
})
prompts.sort(key=lambda p: p["keys"])
return prompts
def get_own_prompts() -> List[dict]:
animals = [
"macgyver",
"h.p. lovecraft",
"cthulhu",
"bob dobbs",
"<NAME>",
"a skull",
"an evil teddy bear",
"a friendly teddy bear",
]
things = [
"bits and bytes",
"a bloody mess",
"led zeppelin",
"microbes",
"furry dwarfs",
"voxel graphics",
"flowers in harmony",
"philosophic contemplation",
"fractal",
"spiderweb",
]
prompts = []
for animal in sorted(animals):
for thing in sorted(things):
keys = [animal, thing]
ascii_keys = [
"".join(c if c.isalpha() else "-" for c in key)
for key in keys
]
prompts.append({
"text": f"{animal} made of {thing}. {animal} with the texture of {thing}",
"keys": ascii_keys,
"orig_keys": keys,
"filename": f"{ascii_keys[0]}-of-{ascii_keys[1]}",
})
return prompts
class Renderer:
def __init__(
self,
out_dir: str,
init_out_dir: Optional[str] = None,
one_pass_script: Optional[str] = None,
init_script: Optional[str] = None,
detail_script: Optional[str] = None,
prompts: Optional[List[dict]] = None
):
self.prompts = prompts or get_prompts()
self.output_path_base = (
Path(__file__).resolve().parent.parent
/ "images" / "dalle"
)
self.output_path = self.output_path_base / out_dir
self.init_output_path = None if init_out_dir is None else self.output_path_base / init_out_dir
self.one_pass_script = one_pass_script
self.init_script = init_script
self.detail_script = detail_script
def prompt(self, *keys: str) -> Optional[dict]:
keys = list(keys)
for p in self.prompts:
if p["keys"] == keys:
return p
def render_one_pass(self, prompt: dict):
output_name = self.output_path / f"{prompt['filename']}.png"
if not output_name.exists():
render(
device=DEVICE,
source=self.one_pass_script,
output_name=output_name,
template_context={"text": prompt["text"].split(".")[0] + "."}
)
def render_init(self, prompt: dict, count: int = 20):
for i in range(count):
output_name = (
self.init_output_path / prompt['filename']
/ f"{prompt['filename']}-{i}.png"
)
if not output_name.exists():
break
count = count - 1
if not output_name.exists():
render(
device=DEVICE,
source=self.init_script,
output_name=output_name,
template_context={"text": prompt["text"]},
extra_args=["--repeat", str(count)]
)
def render_detail(self, prompt: dict, init_filename: str, suffix: str = ""):
output_name = (
self.output_path / prompt['filename'] / f"{prompt['filename']}{suffix}.png"
)
if not output_name.exists():
render(
device=DEVICE,
source=self.detail_script,
output_name=output_name,
template_context={
"text": prompt["text"],
"image": init_filename,
},
snapshot_interval=60.,
)
def render_all_one_pass(self):
for p in self.prompts:
self.render_one_pass(p)
def render_all_init(self, count: int = 20):
for p in self.prompts:
self.render_init(p, count=count)
def render_all_detail(self, num_best: int = 1, num_worst: int = 0, keys: Optional[List[str]] = None):
for prompt in self.prompts:
if keys and prompt["keys"] != keys:
continue
input_path = self.init_output_path / prompt['filename']
filenames = sorted(glob.glob(str(input_path / "*.png")))
if not filenames:
continue
rater = ClipRater(
filenames=filenames,
texts=[prompt["text"]],
device="cpu",
caching=True,
)
similarities = rater.rate()
similarities = similarities.sort_values(by=similarities.columns[0], ascending=False)
input_names = list(similarities.index[:num_best])
if num_worst:
input_names += list(similarities.index[-num_worst:])
for i, input_name in enumerate(input_names):
number = input_name[:-4].split("-")[-1]
self.render_detail(
prompt,
init_filename=input_name,
suffix=f"-from-{number}",
)
def dump_prompts_json(self):
things = dict()
for p in self.prompts:
animal, thing = p["orig_keys"]
if thing not in things:
things[thing] = dict()
things[thing][animal] = p["filename"]
print(json.dumps(things, indent=4))
def dump_dalle_image_urls():
with open(Path(__file__).resolve().parent / "dall-e-samples.json") as fp:
data = json.load(fp)
urls = []
for i in data["completion_info"]:
if i["variable_assignments"] in (
["snail", "harp"],
["penguin", "piano"],
):
url = i["completions"][0]["image_url_prefix"]
for n in range(30):
urls.append(f"{data['base_image_url']}/{url}_{n}.png")
print("\n".join(urls))
if __name__ == "__main__":
# dump_dalle_image_urls(); exit()
if 0:
renderer = Renderer(
out_dir="own-onepass",
one_pass_script=RESO_ONE_PASS_SCRIPT,
prompts=get_own_prompts(),
)
renderer.render_all_one_pass()
if 1:
renderer = Renderer(
out_dir="detail",
init_out_dir="init",
init_script=INIT_SCRIPT,
detail_script=DETAIL_SCRIPT,
)
#renderer.render_init(renderer.prompt("penguin", "piano"), 512)
#renderer.render_all_init(20)
#renderer.render_all_detail(num_best=6, num_worst=2, keys=["penguin", "piano"])
renderer.render_all_detail(num_best=6, num_worst=2, keys=["snail", "harp"])
if 0:
renderer = Renderer(
out_dir="reso-detail",
init_out_dir="reso-init",
# init_script=INIT_SCRIPT,
#init_script=RESO_INIT_SCRIPT,
detail_script=RESO_DETAIL_SCRIPT,
)
#renderer.render_init(renderer.prompt("penguin", "piano"), 512)
#renderer.render_all_init(20)
renderer.render_all_detail(num_best=6, num_worst=2, keys=["penguin", "piano"])
|
StarcoderdataPython
|
248439
|
<gh_stars>0
# (c) 2021 <NAME> <<EMAIL>>
import os,sys
import numpy as np
import imageio
from ccvtools import rawio
import random
import string
import mutagen
import json
from tqdm import tqdm
def fill(full_times_file,ccv_file,ccv_times_file):
ccv_out_file = ccv_file[0:-4]+'_filled'+ccv_file[-4:]
ccv_times = np.loadtxt(ccv_times_file)
full_times = np.loadtxt(full_times_file)
assert ccv_times[0] > full_times[0] - 0.001
    doesnotexist = np.zeros(full_times.size, dtype=bool)
ccv_times_idx = 0
for i,t in enumerate(full_times):
if len(ccv_times) <= ccv_times_idx:
doesnotexist[i:] = True
break
if not np.abs(ccv_times[ccv_times_idx]-t) < 0.001:
doesnotexist[i] = True
else:
ccv_times_idx = ccv_times_idx+1
reader = imageio.get_reader(ccv_file)
camera_type_len = reader.header.camera_type.length+5
image_type_len = reader.header.image_type.length+5
frcount_offset = int((8*32 + 64 +8)/8 + camera_type_len + image_type_len)
blackframe = np.zeros(reader.header.frame_bytes_on_disk,dtype=np.uint8)
with open(ccv_file,'rb') as sourcefile:
with open(ccv_out_file,'wb') as targetfile:
targetfile.write(sourcefile.read(frcount_offset))
targetfile.write(np.uint32(len(full_times)))
sourcefile.seek(frcount_offset+4,os.SEEK_SET)
targetfile.write(sourcefile.read(reader.header.header_size - 4 - frcount_offset))
for writeblack in tqdm(doesnotexist):
if writeblack:
targetfile.write(blackframe)
else:
targetfile.write(sourcefile.read(reader.header.frame_bytes_on_disk))
def truncate(ccv_file,idx_range,ccv_out_file=None):
reader = imageio.get_reader(ccv_file)
camera_type_len = reader.header.camera_type.length+5
image_type_len = reader.header.image_type.length+5
frcount_offset = int((8*32 + 64 +8)/8 + camera_type_len + image_type_len)
if ccv_out_file is None:
ccv_tmp_file = f'.{ccv_file[0:-4]}_{"".join(random.choices(string.ascii_uppercase + string.digits, k=5))}_truncate.tmp'
else:
ccv_tmp_file = f'.{ccv_out_file[0:-4]}_{"".join(random.choices(string.ascii_uppercase + string.digits, k=5))}_truncate.tmp'
with open(ccv_file,'rb') as sourcefile:
with open(ccv_tmp_file,'wb') as targetfile:
# Copy header
targetfile.write(sourcefile.read(reader.header.header_size))
# Write frame number to header
targetfile.seek(frcount_offset,os.SEEK_SET)
targetfile.write(np.uint32(len(idx_range)))
targetfile.seek(reader.header.header_size,os.SEEK_SET)
# Copy desired frames
prev_fr_idx = None
for (i,fr_idx) in enumerate(idx_range):
# Save first frame from iterator
if i==0:
start_frame = fr_idx
# Set point in case iterator is not consecutive
if prev_fr_idx is None or not fr_idx-prev_fr_idx==1:
sourcefile.seek(reader.header.header_size+reader.header.frame_bytes_on_disk*fr_idx,os.SEEK_SET)
targetfile.write(sourcefile.read(reader.header.frame_bytes_on_disk))
prev_fr_idx = fr_idx
# Save last frame from iterator
end_frame = fr_idx
# Rename tmp file to final file name
if ccv_out_file is None:
ccv_out_file = "{ccv_file[0:-4]}_truncated_{start_frame}-{end_frame}.ccv".format(ccv_file=ccv_file,start_frame=start_frame,end_frame=end_frame)
os.rename(ccv_tmp_file,ccv_out_file)
def convert(ccv_file,video_file,idx_range,fps=25,codec="libx264",quality=10,min_contrast=0,max_contrast=None,out_type=np.uint8):
reader = imageio.get_reader(ccv_file)
writer = imageio.get_writer(video_file, fps=fps, codec=codec, quality=quality)
prev_fr_idx=None
indices = np.zeros(shape=len(idx_range),dtype=int)
    timestamps = np.zeros(shape=len(idx_range), dtype=float)
for (i,fr_idx) in enumerate(idx_range):
# Set point in case iterator is not consecutive
if prev_fr_idx is None or not fr_idx-prev_fr_idx==1:
im = reader.get_data(fr_idx)
else:
im = reader.get_next_data()
meta = reader.get_meta_data(fr_idx);
indices[i] = meta["index"];
timestamps[i] = meta["timestamp"];
# Get max value of movie if not specified
if i==0 and max_contrast is None:
max_contrast = np.iinfo(np.asarray(im).dtype).max
# Adjust contrast / Reduce to 8 bit
im = np.uint64(im)
im = (im-min_contrast)*np.iinfo(out_type).max/(max_contrast-min_contrast)
im[im<0] = 0
im[im>max_contrast] = np.iinfo(out_type).max
writer.append_data(out_type(im))
prev_fr_idx = fr_idx
writer.close()
headerdict = dict(reader.header)
del headerdict['_io']
headerdict['camera_type'] = headerdict['camera_type'].data.decode("ascii")
headerdict['image_type'] = headerdict['image_type'].data.decode("ascii")
headerdict['sensor'] = {'offset': list(headerdict['sensor'].offset),
'size': list(headerdict['sensor'].size),
'clock': headerdict['sensor'].clock,
'exposure': headerdict['sensor'].exposure,
'gain': headerdict['sensor'].gain
}
headerdict['indices'] = [int(val) for val in indices]
print(timestamps)
headerdict['timestamps'] = [float(val) for val in timestamps]
print(headerdict['timestamps'])
if len(headerdict['sensor']['offset'])==1: headerdict['sensor']['offset'].append(headerdict['sensor']['offset'][0])
if len(headerdict['sensor']['size'])==1: headerdict['sensor']['size'].append(headerdict['sensor']['size'][0])
with open(video_file, 'r+b') as file:
media_file = mutagen.File(file, easy=True)
media_file['description'] = json.dumps(headerdict)
media_file.save(file)
|
StarcoderdataPython
|
1758521
|
import typing as T
class BaseView:
def __init__(self, screen: T.Any) -> None:
self.screen = screen
def start(self) -> None:
pass
def stop(self) -> None:
pass
def idle(self) -> None:
pass
def keypress(self, key: int) -> None:
pass
|
StarcoderdataPython
|
9688774
|
<reponame>corinneherzog/tea-lang
import pandas as pd
import tea
import os
from tea.logging import TeaLoggerConfiguration, TeaLogger
import logging
configuration = TeaLoggerConfiguration()
configuration.logging_level = logging.DEBUG
TeaLogger.initialize_logger(configuration)
# This example is adapted from http://www.real-statistics.com/non-parametric-tests/wilcoxon-rank-sum-test/wilcoxon-rank-sum-exact-test/
def test_mann_whitney_0():
tea.data('./tests/data/real_stats_3.csv')
variables = [
{
'name': 'Treatment',
'data type': 'nominal',
'categories': ['Control', 'Drug']
},
{
'name': 'Score',
'data type': 'ratio'
}
]
experimental_design = {
'study type': 'experiment',
'independent variables': 'Treatment',
'dependent variables': 'Score'
}
assumptions = {
'Type I (False Positive) Error Rate': 0.05
}
tea.define_variables(variables)
    # Allows for using multiple study designs for the same dataset (could lead to fishing/p-hacking, but is also practical for saving analyses and reusing as many parts of analyses as possible)
tea.define_study_design(experimental_design)
tea.assume(assumptions)
tea.hypothesize(['Treatment', 'Score'], ['Treatment:Control != Drug'])
|
StarcoderdataPython
|
2279
|
<gh_stars>100-1000
import numpy as np
import unittest
from pydlm.modeler.trends import trend
from pydlm.modeler.seasonality import seasonality
from pydlm.modeler.builder import builder
from pydlm.base.kalmanFilter import kalmanFilter
class testKalmanFilter(unittest.TestCase):
def setUp(self):
self.kf1 = kalmanFilter(discount=[1])
self.kf0 = kalmanFilter(discount=[1e-10])
self.kf11 = kalmanFilter(discount=[1, 1])
self.trend0 = trend(degree=0, discount=1, w=1.0)
self.trend0_90 = trend(degree=0, discount=0.9, w=1.0)
self.trend0_98 = trend(degree=0, discount=0.98, w=1.0, name='a')
self.trend1 = trend(degree=1, discount=1, w=1.0)
def testForwardFilter(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
self.kf1.predict(dlm.model)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
        # the prior on the mean is zero, but observe 1, with
        # discount = 1, one should expect the filtered mean to be 0.5
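        # (Illustrative intuition, not taken from the pydlm docs: with a unit-variance
        #  prior centred at 0 and a unit-variance observation of 1, the Kalman gain is
        #  about 1 / (1 + 1) = 0.5, so the filtered mean lands halfway, at 0.5.)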
self.kf1.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs, 0.5)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
self.assertAlmostEqual(dlm.model.sysVar, 0.375)
self.kf1.predict(dlm.model)
self.assertAlmostEqual(dlm.model.obs, 0.5)
self.assertAlmostEqual(dlm.model.prediction.obs, 0.5)
dlm.initialize()
self.kf0.predict(dlm.model)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
# the prior on the mean is zero, but observe 1, with discount = 0
# one should expect the filtered mean close to 1
self.kf0.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1)
self.assertAlmostEqual(dlm.model.prediction.obs[0, 0], 0)
self.assertAlmostEqual(dlm.model.sysVar[0, 0], 0.5)
self.kf0.predict(dlm.model)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1)
self.assertAlmostEqual(dlm.model.prediction.obs[0, 0], 1)
def testForwardFilterMultiDim(self):
dlm = builder()
dlm.add(seasonality(period=2, discount=1, w=1.0))
dlm.initialize()
self.kf11.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.state[0][0, 0], 0.33333333333)
self.assertAlmostEqual(dlm.model.state[1][0, 0], -0.33333333333)
self.kf11.forwardFilter(dlm.model, -1)
self.assertAlmostEqual(dlm.model.state[0][0, 0], -0.5)
self.assertAlmostEqual(dlm.model.state[1][0, 0], 0.5)
def testBackwardSmoother(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
        # with mean being 0 and observing 1 and 0 consecutively, one shall
        # expect the smoothed mean at time 1 to be 1/3, for discount = 1
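        # (Illustrative intuition: with discount = 1 the level is static, so the smoothed
        #  estimate effectively pools the prior mean 0 with the two observations 1 and 0,
        #  i.e. (0 + 1 + 0) / 3 = 1/3.)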
self.kf1.forwardFilter(dlm.model, 1)
self.kf1.forwardFilter(dlm.model, 0)
self.kf1.backwardSmoother(dlm.model, \
np.matrix([[0.5]]), \
np.matrix([[0.375]]))
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0/3)
self.assertAlmostEqual(dlm.model.sysVar[0, 0], 0.18518519)
# second order trend with discount = 1. The smoothed result should be
# equal to a direct fit on the three data points, 0, 1, -1. Thus, the
# smoothed observation should be 0.0
def testBackwardSmootherMultiDim(self):
dlm = builder()
dlm.add(self.trend1)
dlm.initialize()
self.kf11.forwardFilter(dlm.model, 1)
state1 = dlm.model.state
cov1 = dlm.model.sysVar
self.kf11.forwardFilter(dlm.model, -1)
self.kf11.backwardSmoother(dlm.model, \
rawState = state1, \
rawSysVar = cov1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 0.0)
def testMissingData(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
self.kf0.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0], 1.0)
self.kf0.forwardFilter(dlm.model, None)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0]/1e10, 0.5)
self.kf0.forwardFilter(dlm.model, None)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0]/1e10, 0.5)
self.kf0.forwardFilter(dlm.model, 0)
self.assertAlmostEqual(dlm.model.obs[0, 0], 0.0)
def testMissingEvaluation(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
dlm.model.evaluation = np.matrix([[None]])
self.kf1.forwardFilter(dlm.model, 1.0, dealWithMissingEvaluation = True)
self.assertAlmostEqual(dlm.model.obs, 0.0)
self.assertAlmostEqual(dlm.model.transition, 1.0)
def testEvolveMode(self):
dlm = builder()
dlm.add(self.trend0_90)
dlm.add(self.trend0_98)
dlm.initialize()
kf2 = kalmanFilter(discount=[0.9, 0.98],
updateInnovation='component',
index=dlm.componentIndex)
kf2.forwardFilter(dlm.model, 1.0)
self.assertAlmostEqual(dlm.model.innovation[0, 1], 0.0)
self.assertAlmostEqual(dlm.model.innovation[1, 0], 0.0)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|