id | text | dataset_id
---|---|---|
4924330
|
<gh_stars>10-100
from .subscriptions import Subscriptions
from .settings import Settings
|
StarcoderdataPython
|
5177374
|
<filename>limix_legacy/test/lmm_lasso/test_lmmlasso.py
"""Variance Decomposition testing code"""
import unittest
import scipy as SP
import numpy as np
import scipy.stats
import pdb
import os
import sys
import limix_legacy.deprecated as dlimix_legacy
import limix_legacy.deprecated.modules.lmmlasso as lmmlasso
from limix_legacy.test import data
class Lmmlasso_test(unittest.TestCase):
"""test class for lmm-lasso
"""
def genGeno(self):
X = (SP.rand(self.N,self.S)<0.2)*1.
X -= X.mean(0)
X /= X.std(0)
self.D['X'] = X
def genKernel(self):
X = (SP.rand(self.N,10)<0.2)*1.
K = SP.dot(X,X.T)
K /= SP.diag(K).mean()
K += 1e-3*SP.eye(self.N)
self.D['K'] = K
def genPheno(self):
idx_causal = SP.random.randint(0,self.S,10)
sigma_g = 0.25
sigma_e = 0.25
sigma_f = 0.50
u = SP.random.multivariate_normal(SP.zeros(self.N),self.D['K'])
u*= SP.sqrt(sigma_g)/u.std()
e = SP.random.randn(self.N)
e*= SP.sqrt(sigma_e)/e.std()
f = SP.sum(self.D['X'][:,idx_causal],axis=1)
f*= SP.sqrt(sigma_f)/f.std()
y = u + e + f
self.D['y']= y
self.D['causal_idx'] = idx_causal
def setUp(self):
#check: do we have a csv File?
self.dir_name = os.path.dirname(os.path.realpath(__file__))
self.dataset = os.path.join(self.dir_name,'lmmlasso')
if (not os.path.exists(self.dataset)) or 'recalc' in sys.argv:
if not os.path.exists(self.dataset):
os.makedirs(self.dataset)
SP.random.seed(1)
self.N = 500
self.S = 100
self.D = {}
self.genGeno()
self.genKernel()
self.genPheno()
self.generate = True
else:
self.generate=False
self.D = data.load(self.dataset)
self.N = self.D['X'].shape[0]
self.S = self.D['X'].shape[1]
self.lmmlasso = lmmlasso.LmmLasso()
def test_fit(self):
""" test fitting """
self.lmmlasso.set_params(alpha=1e-1)
self.lmmlasso.fit(self.D['X'],self.D['y'],self.D['K'])
params = self.lmmlasso.coef_
yhat = self.lmmlasso.predict(self.D['X'],self.D['K'])
if self.generate:
self.D['params_true'] = params
self.D['yhat'] = yhat
data.dump(self.D,self.dataset)
self.generate=False
params_true = self.D['params_true']
yhat_true = self.D['yhat']
RV = ((SP.absolute(params)-SP.absolute(params_true))**2).max()
np.testing.assert_almost_equal(RV, 0., decimal=4)
RV = ((SP.absolute(yhat)-SP.absolute(yhat_true))**2).max()
np.testing.assert_almost_equal(RV, 0., decimal=2)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
8027978
|
<filename>cf/267c.py
#!/usr/bin/env python
from sys import stdin
def arg(n, cap):
d = [n]*n
d[0] = 0
p = [i for i in range(n)]
q = [0]
while len(q) > 0:
i = q[0]
del q[0]
for j in range(n):
if cap[i][j] > 0 and d[i] + 1 < d[j]:
d[j] = d[i] + 1
p[j] = i
q.append(j)
if d[n-1] >= n: return 0
i = n-1
lim = cap[p[i]][i]
while i != 0:
lim = min(lim, cap[p[i]][i])
i = p[i]
i = n-1
while i != 0:
cap[p[i]][i] -= lim
i = p[i]
return lim
def traffic(n, cap):
total = 0
while True:
tmp = arg(n, cap)
if tmp < 1: break
total += tmp
return total
def main():
n = int(stdin.readline().strip())
m = int(stdin.readline().strip())
cap = [[0]*n for _ in range(n)]  # one independent row per node (avoids the [[0]*n]*n row-aliasing bug)
road = []
for i in range(m):
x, y, c = stdin.readline().strip().split()
x, y, c = int(x)-1, int(y)-1, int(c)
road.append((x, y, c))
cap[x][y] = c
cap[y][x] = c
print '%.5f' % traffic(n, cap)
for x, y, c in road:
print '%.5f' % (c - cap[x][y])
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
8100405
|
<gh_stars>10-100
__all__ = ["dataset", "utils"]
|
StarcoderdataPython
|
3425963
|
<filename>RL/src/bundle_entropy.py
import numpy as np
def logistic(x):
return 1. / (1. + np.exp(-x))
def logexp1p(x):
""" Numerically stable log(1+exp(x))"""
y = np.zeros_like(x)
I = x>1
y[I] = np.log1p(np.exp(-x[I]))+x[I]
y[~I] = np.log1p(np.exp(x[~I]))
return y
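# Added note (not part of the original module): logexp1p avoids the overflow of the
# naive formula for large inputs, e.g. (illustrative values):
#   logexp1p(np.array([1000.0]))   # -> array([1000.]), finite
#   np.log(1. + np.exp(1000.0))    # -> inf, with an overflow RuntimeWarning
# For x > 1 the identity log(1+exp(x)) = x + log(1+exp(-x)) is used, which stays finite.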
def proj_newton_logistic(A,b,lam0=None, line_search=True):
""" minimize_{lam>=0, sum(lam)=1} -(A*1 + b)^T*lam + sum(log(1+exp(A^T*lam)))"""
n = A.shape[0]
c = np.sum(A,axis=1) + b
e = np.ones(n)
eps = 1e-12
ALPHA = 1e-5
BETA = 0.5
if lam0 is None:
lam = np.ones(n)/n
else:
lam = lam0.copy()
for i in range(20):
# compute gradient and Hessian of objective
ATlam = A.T.dot(lam)
z = 1/(1+np.exp(-ATlam))
f = -c.dot(lam) + np.sum(logexp1p(ATlam))
g = -c + A.dot(z)
H = (A*(z*(1-z))).dot(A.T)
# change of variables
i = np.argmax(lam)
y = lam.copy()
y[i] = 1
e[i] = 0
g0 = g - e*g[i]
H0 = H - np.outer(e,H[:,i]) - np.outer(H[:,i],e) + H[i,i]*np.outer(e,e)
# compute bound set and Hessian of free set
I = (y <= eps) & (g0 > 0)
I[i] = True
if np.linalg.norm(g0[~I]) < 1e-10:
return lam
d = np.zeros(n)
H0_ = H0[~I,:][:,~I]
try:
d[~I] = np.linalg.solve(H0_, -g0[~I])
except:
# print('\n=== A\n\n', A)
# print('\n=== H\n\n', H)
# print('\n=== H0\n\n', H0)
# print('\n=== H0_\n\n', H0_)
# print('\n=== z\n\n', z)
# print('\n=== iter: {}\n\n'.format(i))
break
# line search
t = min(1. / np.max(abs(d)), 1.)
for _ in range(10):
y_n = np.maximum(y + t*d,0)
y_n[i] = 1
lam_n = y_n.copy()
lam_n[i] = 1.-e.dot(y_n)
if lam_n[i] >= 0:
if line_search:
fn = -c.dot(lam_n) + np.sum(logexp1p(A.T.dot(lam_n)))
if fn < f + t*ALPHA*d.dot(g0):
break
else:
break
if max(t * abs(d)) < 1e-10:
return lam_n
t *= BETA
e[i] = 1.
lam = lam_n.copy()
return lam
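# Usage sketch (an illustrative addition; the shapes below are assumptions, not taken
# from any caller): each row of A is one subgradient and b holds the matching offsets.
#   A = np.random.randn(3, 5)
#   b = np.random.randn(3)
#   lam = proj_newton_logistic(A, b)
# The returned lam is (approximately) a point on the probability simplex: entries are
# clipped to be non-negative and the change of variables keeps their sum equal to 1.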
def solveBatch(fg, initXs, nIter=5, callback=None):
bsize = initXs.shape[0]
A = [[] for i in range(bsize)]
b = [[] for i in range(bsize)]
xs = [[] for i in range(bsize)]
lam = [None]*bsize
x = initXs
nIters = [nIter]*bsize
finished = set()
for t in range(nIter):
fi, gi = fg(x)
Ai = gi
bi = fi - np.sum(gi * x, axis=1)
if callback is not None:
callback(t, fi)
for u in range(bsize):
if u in finished:
continue
A[u].append(Ai[u])
b[u].append(bi[u])
xs[u].append(np.copy(x[u]))
prev_x = x[u].copy()
if len(A[u]) > 1:
lam[u] = proj_newton_logistic(np.array(A[u]), np.array(b[u]), None)
x[u] = 1/(1+np.exp(np.array(A[u]).T.dot(lam[u])))
x[u] = np.clip(x[u], 0.03, 0.97)
else:
lam[u] = np.array([1])
x[u] = 1/(1+np.exp(A[u][0]))
x[u] = np.clip(x[u], 0.03, 0.97)
if max(abs((prev_x - x[u]))) < 1e-6:
finished.add(u)
A[u] = [y for i,y in enumerate(A[u]) if lam[u][i] > 0]
b[u] = [y for i,y in enumerate(b[u]) if lam[u][i] > 0]
xs[u] = [y for i,y in enumerate(xs[u]) if lam[u][i] > 0]
lam[u] = lam[u][lam[u] > 0]
if len(finished) == bsize:
return x, A, b, lam, xs, nIters
return x, A, b, lam, xs, nIters
|
StarcoderdataPython
|
5111524
|
from extruder_turtle import ExtruderTurtle
import math
## Parameters for the spiral
N = 40 ## Number of subdivisions of one round-trip
radius = 20 ## Outer radius of the spiral
dtheta = 2*math.pi/N ## Change in heading after each step
dx = radius * dtheta ## Forward movement during each step
dr = -0.5/N ## Change in radius with each step
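## Added note (derived from the parameters above): each step turns the heading by
## dtheta = 2*pi/N and advances dx = radius*dtheta, so N steps trace one full loop;
## over that loop the radius shrinks by N*dr = 0.5 (in the same units as radius),
## and the while loop below spirals inward until the radius reaches zero.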
t = ExtruderTurtle()
t.name("palm-tree.gcode")
t.setup(x=100, y=100)
t.set_density(0.06) # 0.05
t.rate(500)
while radius>0:
t.forward(dx)
t.right(dtheta)
radius += dr
dx = radius * dtheta
for l in range(50):
t.extrude(0.1)
t.dwell(100)
t.lift(0.1) # 0.1, 0.05
for x in range(60):
prog = x/60
frond_length = prog**4 * 20
for l in range(10):
t.extrude(0.1)
t.dwell(100)
t.lift(0.1)
for n in range(5):
t.forward(frond_length)
t.left(math.pi/6)
t.forward(frond_length)
t.left(5*math.pi/6)
t.forward(frond_length)
t.left(math.pi/6)
t.forward(frond_length)
t.left(math.pi/6)
t.left(math.pi/5)
t.left(math.pi/7)
## Save to a GCODE file
t.finish()
|
StarcoderdataPython
|
1686985
|
<reponame>hugorodgerbrown/django-visitor
from __future__ import annotations
import functools
import logging
from typing import Any, Callable
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.http.response import HttpResponseRedirect
from django.urls import reverse
from django.utils.translation import gettext as _
from .exceptions import VisitorAccessDenied
from .models import Visitor, VisitorLog
logger = logging.getLogger(__name__)
# universal scope - essentially unscoped access
SCOPE_ANY = "*"
# for typing
BypassFunc = Callable[[HttpRequest], bool]
def is_visitor(user: settings.AUTH_USER_MODEL) -> bool:
"""Shortcut function for use with user_passes_test decorator."""
return user.is_visitor
def is_staff(user: settings.AUTH_USER_MODEL) -> bool:
"""Shortcut function for use with user_passes_test decorator."""
return user.is_staff
def is_superuser(user: settings.AUTH_USER_MODEL) -> bool:
"""Shortcut function for use with user_passes_test decorator."""
return user.is_superuser
def is_authenticated(user: settings.AUTH_USER_MODEL) -> bool:
"""Shortcut function for use with user_passes_test decorator."""
return user.is_authenticated
def _get_request_arg(*args: Any) -> HttpRequest | None:
"""Extract the arg that is an HttpRequest object."""
for arg in args:
if isinstance(arg, HttpRequest):
return arg
return None
def user_is_visitor( # noqa: C901
view_func: Callable | None = None,
scope: str = "",
bypass_func: BypassFunc | None = None,
log_visit: bool = True,
self_service: bool = False,
) -> Callable:
"""
Decorate view functions that supports Visitor access.
The 'scope' param is mapped to the request.visitor.scope attribute - if
the scope is SCOPE_ANY then this is ignored.
The 'bypass_func' is a callable that can be used to provide exceptions
to the scope - e.g. allowing authenticate users, or staff, to bypass the
visitor restriction. Defaults to None (only visitors with appropriate
scope allowed).
The 'log_visit' arg can be used to override the default logging - if this
is too noisy, for instance.
If 'self_service' is True, then instead of a straight PermissionDenied error
we raise VisitorAccessDenied, passing along the scope. This is then picked
up in the middleware, and the user redirected to a page where they can
enter their details and effectively invite themselves. Caveat emptor.
"""
if not scope:
raise ValueError("Decorator scope cannot be empty.")
if view_func is None:
return functools.partial(
user_is_visitor,
scope=scope,
bypass_func=bypass_func,
log_visit=log_visit,
self_service=self_service,
)
@functools.wraps(view_func)
def inner(*args: Any, **kwargs: Any) -> HttpResponse:
# should never happen, but keeps mypy happy as it _could_
if not view_func:
raise ValueError("Callable (view_func) missing.")
# HACK: if this is decorating a method, then the first arg will be
# the object (self), and not the request. In order to make this work
# with functions and methods we need to determine where the request
# arg is.
request = _get_request_arg(*args)
if not request:
raise ValueError("Request argument missing.")
# Allow custom rules to bypass the visitor checks
if bypass_func and bypass_func(request):
return view_func(*args, **kwargs)
if not is_valid_request(request, scope):
if self_service:
return redirect_to_self_service(request, scope)
raise VisitorAccessDenied(_("Visitor access denied"), scope)
response = view_func(*args, **kwargs)
if log_visit:
VisitorLog.objects.create_log(request, response.status_code)
return response
return inner
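# Usage sketch (added for illustration; the "downloads" scope and the view below are
# hypothetical, not part of this module):
#
#   @user_is_visitor(scope="downloads", bypass_func=lambda r: r.user.is_staff)
#   def download_report(request: HttpRequest) -> HttpResponse:
#       ...
#
# Visitors whose token scope is "downloads" (or staff users, via bypass_func) reach the
# view; other requests raise VisitorAccessDenied, or are redirected if self_service=True.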
def is_valid_request(request: HttpRequest, scope: str) -> bool:
"""Return True if the request matches the scope."""
if not request.user.is_visitor:
return False
if scope == SCOPE_ANY:
return True
return request.visitor.scope == scope
def redirect_to_self_service(request: HttpRequest, scope: str) -> HttpResponseRedirect:
"""Create inactive Visitor token and redirect to enable self-service."""
# create an inactive token for the time being. This will be used by
# the auto-enroll view. The user fills in their name and email, which
# overwrites the blank values here, and sets the token to be active.
visitor = Visitor.objects.create_temp_visitor(
scope=scope, redirect_to=request.get_full_path()
)
return HttpResponseRedirect(
reverse(
"visitors:self-service",
kwargs={"visitor_uuid": visitor.uuid},
)
)
|
StarcoderdataPython
|
4926174
|
"""
Save and plot the final MGRIT approximation of the solution
Note: This example assumes a sequential run of the simulation.
"""
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from pymgrit.dahlquist.dahlquist import Dahlquist
from pymgrit.core.simple_setup_problem import simple_setup_problem
from pymgrit.core.mgrit import Mgrit
def main():
# Define output function that writes the solution to a file
def output_fcn(self):
# Set path to solution
path = 'results/' + 'dahlquist'
# Create path if not existing
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
# Save solution to file; here, we just have a single solution value at each time point.
# Useful member variables of MGRIT solver:
# - self.t[0] : local fine-grid (level 0) time interval
# - self.index_local[0] : indices of local fine-grid (level 0) time interval
# - self.u[0] : fine-grid (level 0) solution values
np.save(path + '/dahlquist',
[self.u[0][i].get_values() for i in self.index_local[0]]) # Solution values at local time points
# Create Dahlquist's test problem with 101 time steps in the interval [0, 5]
dahlquist = Dahlquist(t_start=0, t_stop=5, nt=101)
# Construct a two-level multigrid hierarchy for the test problem using a coarsening factor of 2
dahlquist_multilevel_structure = simple_setup_problem(problem=dahlquist, level=2, coarsening=2)
# Set up the MGRIT solver for the test problem and set the output function
mgrit = Mgrit(problem=dahlquist_multilevel_structure, output_fcn=output_fcn)
# Solve the test problem
info = mgrit.solve()
# Plot the solution (Note: modifications necessary if more than one process is used for the simulation!)
t = np.linspace(dahlquist.t_start, dahlquist.t_end, dahlquist.nt)
sol = np.load('results/dahlquist/dahlquist.npy')
plt.plot(t, sol)
plt.xlabel('t')
plt.ylabel('u(t)')
plt.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
235109
|
<reponame>vladshablinsky/algo<gh_stars>1-10
import random
import string
N = 8
ALPH = 'abc'
ans = ''
print(N)
for i in range(N):
ans += random.choice(ALPH)
print(ans)
|
StarcoderdataPython
|
12812014
|
from flask import Blueprint
bp = Blueprint("download", __name__)
from app.download import routes
|
StarcoderdataPython
|
11256313
|
import argparse
import re
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from amep import const
from amep.commands.make_dataset.util import cleaner, make_count_vectorizer_based_vocab
from amep.common.util import filter_by_length
def _cleaner_for_newsgroups(text):
text = cleaner(text)
text = re.sub(r"(\W)+", r" \1 ", text)
text = re.sub(r"\s+", " ", text)
return text.strip()
def make_newsgroups_dataset(args: argparse.Namespace):
data_20 = fetch_20newsgroups(
subset="all", shuffle=True, remove=("headers", "footers", "quotes")
)
baseball = np.where(data_20.target == 9)[0]
hockey = np.where(data_20.target == 10)[0]
all_sentences = list(baseball) + list(hockey)
sentences = [data_20.data[i] for i in all_sentences]
label = [0 if data_20.target[i] == 9 else 1 for i in all_sentences]
sentences = [_cleaner_for_newsgroups(s) for s in sentences]
sentences, label = zip(*[(s, t) for s, t in zip(sentences, label) if len(s) != 0])
train_idx, test_idx = train_test_split(
range(len(sentences)), stratify=label, test_size=0.2, random_state=13478
)
train_idx, dev_idx = train_test_split(
train_idx,
stratify=[label[i] for i in train_idx],
test_size=0.2,
random_state=13478,
)
X_train = [sentences[i] for i in train_idx]
X_dev = [sentences[i] for i in dev_idx]
X_test = [sentences[i] for i in test_idx]
y_train = [label[i] for i in train_idx]
y_dev = [label[i] for i in dev_idx]
y_test = [label[i] for i in test_idx]
texts = {"train": X_train, "test": X_test, "dev": X_dev}
label = {"train": y_train, "test": y_test, "dev": y_dev}
df_texts = []
df_label = []
df_exp_splits = []
for key in ["train", "test", "dev"]:
df_texts += texts[key]
df_label += label[key]
df_exp_splits += [key] * len(texts[key])
df = pd.DataFrame({"text": df_texts, "label": df_label, "exp_split": df_exp_splits})
make_count_vectorizer_based_vocab(
df[df["exp_split"] == "train"].text,
save_fpath=const.DATASET_FPATHS["newsgroups"].parent / "vocab.txt",
min_df=2,
)
df = filter_by_length(df, min_length=6, max_length=500)
print(df.exp_split.value_counts())
df.to_json(const.DATASET_FPATHS["newsgroups"], orient="records", lines=True)
df[df["exp_split"] == "test"].to_json(
const.DATASET_FPATHS["newsgroups"].parent / "test_dataset.jsonl",
orient="records",
lines=True,
)
|
StarcoderdataPython
|
4882553
|
<reponame>TaprisSugarbell/Bo.py-Test
from modles.gen import *
from modles.pytb import *
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler,ConversationHandler, MessageHandler, Filters
def start(update, context):
update.message.reply_text(
text='Hola bienvenido a Bopy\n/gen - Genera tarjeta\n/pytb - Descargar Video',
reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton(text='Generador de tarjeta', callback_data='gen')],
[InlineKeyboardButton(text='Descargar Video', callback_data='pytb')],
[InlineKeyboardButton(
text='Repositorio', url='https://github.com/TaprisSugarbell/Bo.py-Test/tree/main')],
])
)
if __name__ == '__main__':
updater = Updater(token=Token, use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler('start', start))
# Pytube
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler('pytb', pytbcommand),
CallbackQueryHandler(pattern='pytb', callback=pytb_callback_handler)],
states={INPUTpy: [MessageHandler(Filters.text, input_pytb)]},
fallbacks=[]))
# Random Gen
dp.add_handler(ConversationHandler(
entry_points=[CommandHandler('gen', input_gen),
CallbackQueryHandler(pattern='gen', callback=gen_callback_handler)],
states={INPUTNUM: [MessageHandler(Filters.text, input_gen)]},
fallbacks=[]))
# add handler
updater.start_polling()
updater.idle()
|
StarcoderdataPython
|
1707283
|
<reponame>DimaDK02/tripman
from django.db import models
class Client(models.Model):
name = models.CharField(max_length=50, unique=True, verbose_name='Имя')
discount = models.IntegerField(default=0, verbose_name='Скидка (%)')
total_saved = models.IntegerField(default=0,
verbose_name='Всего сэкономлено (руб)')
class Meta:
verbose_name = 'клиент'
verbose_name_plural = 'клиенты'
def __str__(self):
return self.name
|
StarcoderdataPython
|
1645247
|
<gh_stars>1-10
import os
from ament_index_python.packages import get_package_share_directory
from ament_index_python.packages import get_package_prefix
import launch
import launch_ros.actions
def generate_launch_description():
if not "tesseract_collision" in os.environ["AMENT_PREFIX_PATH"]:
head, tail = os.path.split(get_package_prefix('tesseract_monitoring'))
path = os.path.join(head, 'tesseract_collision')
os.environ["AMENT_PREFIX_PATH"] += os.pathsep + path
print(os.environ["AMENT_PREFIX_PATH"])
urdf = os.path.join(get_package_share_directory('ur_description'), 'urdf', 'ur10_robot.urdf')
srdf = os.path.join(get_package_share_directory('ur_description'), 'urdf', 'ur10_robot.srdf')
return launch.LaunchDescription([
launch_ros.actions.Node(
node_name='environment_monitor',
package='tesseract_monitoring',
node_executable='tesseract_monitoring_environment_node',
output='screen',
arguments=[],
parameters=[{'desc_param': 'robot_description',
'robot_description': urdf,
'robot_description_semantic': srdf}]),
launch_ros.actions.Node(
node_name='joint_state_pub',
package='joint_state_publisher',
node_executable='joint_state_publisher',
output='screen',
arguments=[urdf],
parameters=[{'use_gui': 'false'}]),
])
|
StarcoderdataPython
|
3515128
|
# -*- coding: utf-8 -*-
import sys
import os
import torch
import numpy as np
import random
import csv
import os
import random
from random import shuffle
from os import listdir
from os.path import join
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms,utils
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
logging.getLogger().setLevel(logging.INFO)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class basic_blocks(nn.Module):
def __init__(self,input_channel,mid_channel,output_channel):
super(basic_blocks,self).__init__()
self.conv_block1 = nn.Sequential(nn.Conv2d(input_channel,mid_channel,kernel_size = 3,padding = 1,padding_mode = 'replicate'),nn.BatchNorm2d(mid_channel),nn.ReLU())
self.conv_block2 = nn.Sequential(nn.Conv2d(mid_channel, output_channel, kernel_size = 3,padding = 1,padding_mode = 'replicate'),nn.BatchNorm2d(output_channel),nn.ReLU())
def forward(self,x):
x = self.conv_block1(x)
x = self.conv_block2(x)
return x
class encoder(nn.Module):
def __init__(self,input_channel,output_channel):
super(encoder,self).__init__()
self.encode = nn.Sequential(nn.MaxPool2d(2),basic_blocks(input_channel,output_channel,output_channel))
def forward(self,x):
return self.encode(x)
class decoder(nn.Module):
def __init__(self,input_channel,output_channel,bilinear = True):
super(decoder,self).__init__()
if bilinear:
self.up = nn.Upsample(scale_factor = 2, mode = 'bilinear', align_corners = True)
self.conv_block = basic_blocks(input_channel,input_channel//2,output_channel)
def forward(self,x1,x2):
x1 = self.up(x1)
x = torch.cat([x2,x1],dim=1)
return self.conv_block(x)
class Unet_defocus(nn.Module):
def __init__(self,input_channel = 2,output_channel = 13,bilinear = True):
super(Unet_defocus,self).__init__()
factor = 2 if bilinear else 1
self.en0 = basic_blocks(input_channel,64,64)
self.en1 = encoder(64,128)
self.en2 = encoder(128,256)
self.en3 = encoder(256,512)
self.en4 = encoder(512,1024 // factor)
self.de1 = decoder(1024,512//factor)
self.de2 = decoder(512,256//factor)
self.de3 = decoder(256,128//factor)
self.de4 = decoder(128,64)
self.output = nn.Conv2d(64,output_channel,kernel_size=1)
def forward(self,x):
x0 = self.en0(x)
x1 = self.en1(x0)
x2 = self.en2(x1)
x3 = self.en3(x2)
x4 = self.en4(x3)
x = self.de1(x4,x3)
x = self.de2(x,x2)
x = self.de3(x,x1)
x = self.de4(x,x0)
x = self.output(x)
return x
class paper1_net(nn.Module):
def __init__(self):
super(paper1_net,self).__init__()
self.conv1 = nn.Conv2d(2, 32, 3, padding = 1,padding_mode = 'reflect')
self.bn1 = nn.BatchNorm2d(32)
self.maxpool = nn.MaxPool2d(2,stride=2)
self.conv2 = nn.Conv2d(32, 64, 3, padding = 1,padding_mode = 'reflect')
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 128, 3, padding = 1,padding_mode = 'reflect')
self.bn3 = nn.BatchNorm2d(128)
self.conv4 = nn.Conv2d(128, 256, 3, padding = 1,padding_mode = 'reflect')
self.bn4 = nn.BatchNorm2d(256)
self.fc1 = nn.Linear(256*8*8,1024)
self.dropout = nn.Dropout()
self.fc2 = nn.Linear(1024,13)
def forward(self,x):
x = F.relu(self.conv1(x))
x = self.maxpool(x)
x = F.relu(self.conv2(x))
x = self.maxpool(x)
x = F.relu(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = F.relu(self.bn4(self.conv4(x)))
x = self.maxpool(x)
x = x.view(-1,256*8*8)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return x
if __name__ == '__main__':
x = torch.randn(1,2,512,512)
model = Unet_defocus()
out = model(x)
print(out.shape)
|
StarcoderdataPython
|
5160851
|
<filename>src/KAOSBank_simple_triggers.py
# Banking (2.0) by Lazeras
# Released 1 December 2011
from header_common import *
from header_operations import *
from header_parties import *
from header_items import *
from header_skills import *
from header_triggers import *
from header_troops import *
from header_music import *
from module_constants import *
# Manually merge all lines under `simple_triggers` below into the bottom of the simple_triggers list in module_simple_triggers.py
simple_triggers=[
########################################################################################################################
# KAOS BANKING KIT START #
########################################################################################################################
########################################################################################################################
# Triggers the Banking report with apply changes active once a week.
########################################################################################################################
(24 * 7,
[
(assign, "$g_apply_Kaoses_bank_report_to_gold", 1),
(try_begin),
(eq, "$bank_availability", 0),
(start_presentation, "prsnt_Lazeras_bank_prsnt"),
(else_try),
(eq, "$bank_availability", 1),
(start_presentation, "prsnt_Lazeras_bank_faction_prsnt"),
(try_end),
]
),
########################################################################################################################
# Once a day check if debt payment is overdue and if so apply penalties
########################################################################################################################
(24,
[
(try_begin),
(eq, "$bank_availability", 0),
(try_for_range, ":center_no", towns_begin, towns_end),
(party_get_slot, ":has_bank", ":center_no", slot_town_has_bank),
(party_get_slot,":player_debt",":center_no",slot_town_bank_debt),
(party_get_slot,":debt_effect",":center_no",slot_town_bank_debt_repayment_effect),
(eq, ":has_bank", 1),
(gt, ":player_debt", 0),
(str_store_party_name,s9, ":center_no"),
(party_get_slot,":due_date",":center_no",slot_town_bank_debt_repayment_date),
(store_current_day, ":cur_day"),
(store_sub, ":week_to_go", ":due_date", 7),
(party_get_slot, ":centre_owner", ":center_no", slot_town_lord),
(str_store_troop_name,s8,":centre_owner"),
#########################################################################
# Checking for a week to go on a debt payment and notifying player
#########################################################################
(try_begin),
(eq,":week_to_go",":cur_day"),
(gt, ":player_debt", 0),
(eq, ":debt_effect", 3),
(display_log_message, "@A messenger informs you that you have a week to make a repayment of your debt. You have already missed the deadline multiple times"),
(dialog_box, "@A messenger informs you that you have a week to make a repayment of your debt and that you have already missed several payments "),
#(display_log_message, "@ activated first try week to go!", 0xFF0000),
(else_try),
(eq,":week_to_go",":cur_day"),
(gt, ":player_debt", 0),
(ge, ":debt_effect", 2),
(display_log_message, "@A messenger informs you that you have a week to make a repayment of your debt. You have already missed the deadline more than once"),
(dialog_box, "@A messenger informs you that you have a week to make a repayment of your debt and that you have already missed more than one payment "),
#(display_log_message, "@ activated second try week to go!", 0xFF0000),
(else_try),
(eq,":week_to_go",":cur_day"),
(gt, ":player_debt", 0),
(eq, ":debt_effect", 1),
(display_log_message, "@A messenger informs you that you have a week to make a repayment of your debt. You have already missed the deadline once"),
(dialog_box, "@A messenger informs you that you have a week to make a repayment of your debt and that you have already missed a payment"),
#(display_log_message, "@ activated third try week to go!", 0xFF0000),
(else_try),
(eq,":week_to_go",":cur_day"),
(gt, ":player_debt", 0),
(display_log_message, "@A messenger informs you that you have a week to make a repayment of your debt"),
(dialog_box, "@A messenger informs you that you have a week to make a repayment of your debt "),
#(display_log_message, "@ activated fourth try week to go!", 0xFF0000),
(try_end),
#########################################################################
# Checking if a debt payment is over due and if so apply penalties and inform player
#########################################################################
(try_begin),
(eq,":cur_day",":due_date"),
(gt,":player_debt", 0),
(try_begin),
(eq, ":debt_effect", 0),
(display_log_message, "@A messenger informs you that you have failed to make the required debt repayment and have lost 1 reputation with {s9} You have 20 days to make a payment "),
(call_script, "script_change_player_relation_with_center", ":center_no", -1),
(val_add, ":due_date", 20),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_effect, 2),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_date,":due_date"),
#(display_log_message, "@ activated first try after due date!", 0xFF0000),
(else_try),
(eq, ":debt_effect", 1),
(call_script, "script_change_player_relation_with_center", ":center_no", -1),
(display_log_message, "@A messenger informs you that you have failed to make the required debt repayment and have lost 1 reputation with {s9} You have 20 days to make a payment "),
(val_add, ":due_date", 20),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_effect, 2),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_date,":due_date"),
#(display_log_message, "@ activated second try after due date!", 0xFF0000),
(else_try),
(eq, ":debt_effect", 2),
(call_script, "script_change_player_relation_with_troop", ":centre_owner", -1),
(call_script, "script_change_player_relation_with_center", ":center_no", -2),
(display_log_message, "@A messenger informs you that you have failed to make the required debt repayment again and have lost 2 reputation with {s9} this has also caused you to lose 1 reputation with {s8} You have 15 days to make a payment "),
(val_add, ":due_date", 15),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_effect, 3),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_date,":due_date"),
#(display_log_message, "@ activated third try after due date!", 0xFF0000),
(else_try),
(ge, ":debt_effect", 3),
(call_script, "script_change_player_relation_with_troop", ":centre_owner", -2),
(call_script, "script_change_player_relation_with_center", ":center_no", -3),
(display_log_message, "@A messenger informs you that you have failed to make another required debt repayment again and have lost 3 reputation with {s9} this has also caused you to lose 1 reputation with {s8}. You have 15 days to make a payment "),
(val_add, ":due_date", 15),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_effect, 3),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_date,":due_date"),
#(display_log_message, "@ activated forth try after due date!", 0xFF0000),
(try_end),
(try_end),
(try_end),
(else_try),
(eq, "$bank_availability", 1),
(try_for_range, ":faction", "fac_kingdom_1", kingdoms_end),
(party_get_slot,":player_debt",":faction",slot_town_bank_debt),
(party_get_slot,":debt_effect",":faction",slot_town_bank_debt_repayment_effect),
(faction_get_slot, ":king", ":faction", slot_faction_leader),
(gt, ":player_debt", 0),
(str_store_faction_name,s9, ":faction"),
(party_get_slot,":due_date",":faction",slot_town_bank_debt_repayment_date),
(store_current_day, ":cur_day"),
(store_sub, ":week_to_go", ":due_date", 7),
(str_store_troop_name,s8,":king"),
#########################################################################
# Checking for a week to go on a debt payment and notifying player
#########################################################################
(try_begin),
(eq,":week_to_go",":cur_day"),
(gt, ":player_debt", 0),
(eq, ":debt_effect", 3),
(display_log_message, "@A messenger informs you that you have a week to make a repayment of your debt. You have already missed the deadline multiple times"),
(dialog_box, "@A messenger informs you that you have a week to make a repayment of your debt and that you have already missed several payments "),
#(display_log_message, "@ activated first try week to go!", 0xFF0000),
(else_try),
(eq,":week_to_go",":cur_day"),
(gt, ":player_debt", 0),
(ge, ":debt_effect", 2),
(display_log_message, "@A messenger informs you that you have a week to make a repayment of your debt. You have already missed the deadline more than once"),
(dialog_box, "@A messenger informs you that you have a week to make a repayment of your debt and that you have already missed more than one payment "),
#(display_log_message, "@ activated second try week to go!", 0xFF0000),
(else_try),
(eq,":week_to_go",":cur_day"),
(gt, ":player_debt", 0),
(eq, ":debt_effect", 1),
(display_log_message, "@A messenger informs you that you have a week to make a repayment of your debt. You have already missed the deadline once"),
(dialog_box, "@A messenger informs you that you have a week to make a repayment of your debt and that you have already missed a payment"),
#(display_log_message, "@ activated third try week to go!", 0xFF0000),
(else_try),
(eq,":week_to_go",":cur_day"),
(gt, ":player_debt", 0),
(display_log_message, "@A messenger informs you that you have a week to make a repayment of your debt"),
(dialog_box, "@A messenger informs you that you have a week to make a repayment of your debt "),
#(display_log_message, "@ activated fourth try week to go!", 0xFF0000),
(try_end),
#########################################################################
# Checking if a debt payment is over due and if so apply penalties and inform player
#########################################################################
(try_begin),
(eq,":cur_day",":due_date"),
(gt,":player_debt", 0),
(try_begin),
(eq, ":debt_effect", 0),
(display_log_message, "@A messenger informs you that you have failed to make the required debt repayment and have lost 1 reputation with {s9} You have 20 days to make a payment "),
(call_script, "script_change_player_relation_with_center", ":center_no", -1),
(val_add, ":due_date", 20),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_effect, 2),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_date,":due_date"),
#(display_log_message, "@ activated first try after due date!", 0xFF0000),
(else_try),
(eq, ":debt_effect", 1),
(call_script, "script_change_player_relation_with_center", ":center_no", -1),
(display_log_message, "@A messenger informs you that you have failed to make the required debt repayment and have lost 1 reputation with {s9} You have 20 days to make a payment "),
(val_add, ":due_date", 20),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_effect, 2),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_date,":due_date"),
#(display_log_message, "@ activated second try after due date!", 0xFF0000),
(else_try),
(eq, ":debt_effect", 2),
(call_script, "script_change_player_relation_with_troop", ":centre_owner", -1),
(call_script, "script_change_player_relation_with_center", ":center_no", -2),
(display_log_message, "@A messenger informs you that you have failed to make the required debt repayment again and have lost 2 reputation with {s9} this has also caused you to lose 1 reputation with {s8} You have 15 days to make a payment "),
(val_add, ":due_date", 15),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_effect, 3),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_date,":due_date"),
#(display_log_message, "@ activated third try after due date!", 0xFF0000),
(else_try),
(ge, ":debt_effect", 3),
(call_script, "script_change_player_relation_with_troop", ":centre_owner", -2),
(call_script, "script_change_player_relation_with_center", ":center_no", -3),
(display_log_message, "@A messenger informs you that you have failed to make another required debt repayment again and have lost 3 reputation with {s9} this has also caused you to lose 1 reputation with {s8}. You have 15 days to make a payment "),
(val_add, ":due_date", 15),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_effect, 3),
(party_set_slot,":center_no",slot_town_bank_debt_repayment_date,":due_date"),
#(display_log_message, "@ activated forth try after due date!", 0xFF0000),
(try_end),
(try_end),
(try_end),
]),
########################################################################################################################
# KAOS BANKING KIT END #
########################################################################################################################
]
from util_common import *
def modmerge(var_set):
try:
from modmerger_options import module_sys_info
version = module_sys_info["version"]
except:
version = 1127 # version not specified. assume latest warband at this time
try:
var_name_1 = "simple_triggers"
orig_simple_triggers = var_set[var_name_1]
add_objects(orig_simple_triggers, simple_triggers, False)
except KeyError:
errstring = "Variable set does not contain expected variable: \"%s\"." % var_name_1
raise ValueError(errstring)
|
StarcoderdataPython
|
6566216
|
import base64
import getpass
import re
import socket
import sys
import quopri
from conf import *
import logging
import email
CRLF = '\r\n'
TERMINATOR = CRLF + '.' + CRLF
# Searches for a particular Tag in raw message
def decode(input_str):
result = ''
search_result = re.search('=\?([^\?]*)\?([^\?]*)\?([^\?]*)\?=', input_str)
while search_result is not None:
charset, tp, text = search_result.groups()
s = search_result.start(0)
e = search_result.end(0)
text = text.encode('cp866', 'ignore').decode('cp866', 'ignore')
result += input_str[:s]
input_str = input_str[e:].lstrip()
if tp.lower() != 'q':
result += base64.b64decode(text.encode('cp866')).decode(charset, 'ignore')
else:
result += quopri.decodestring(text).decode(charset, 'ignore')
search_result = re.search('=\?([^\?]*)\?([^\?]*)\?([^\?]*)\?=', input_str)
else:
result += input_str
return result
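# Added examples (illustrative): a single RFC 2047 encoded-word is unwrapped, e.g.
#   decode('=?utf-8?B?aGVsbG8=?=')   # -> u'hello'  (base64 branch)
#   decode('=?utf-8?Q?hello?=')      # -> u'hello'  (quoted-printable branch)
# Text without any '=?charset?encoding?text?=' pattern is returned unchanged.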
class pop3lib:
message_list = []
# sends a message m
def send_message(self,m):
logging.debug("\nC: "+m)
self.sock.send((m + '\r\n').encode('utf-8'))
# sends a message m without logging
def send_password(self,m):
self.sock.send((m + '\r\n').encode('utf-8'))
# sends a message mes and receives a line from socket
def send_and_receiveline(self,mes):
logging.debug("\nC: "+mes)
self.sock.send((mes + '\r\n').encode('utf-8'))
line = self.sock.recv(2048)
logging.debug("\nS: " + line)
return line
# Receives a line from socket
def receiveline(self):
line = self.sock.recv(2048)
logging.debug("\nS: " + line)
return line
# Receives until terminated
def receive_till_term(self, terminator):
response = self.sock.recv(2048)
while not response.endswith(TERMINATOR) and not response.endswith('.\r\n') and not response.endswith('.\r\n\r\n'):
new_response = self.sock.recv(2048)
response += new_response
print new_response
return response
# returns the total number of messages
def get_message_count(self):
self.send_message("list")
mes = self.receive_till_term(TERMINATOR)
cnt = len(mes.split('\r\n')) - 3
return cnt
# Parses the response to get Subject, Sender address and Date from raw message
def get_result(self,data):
addr = 'No \'From: ...\''
subj = 'No \'Subject: ...\''
data = data.decode('cp866', 'ignore')
from_data = re.search('^From:.*(.*\r?\n\s.*)*$', data, re.M | re.I)
if from_data is not None:
addr = decode(from_data.group(0))
subj_data = re.search('^Subject:.*(.*\r?\n\s.*)*', data, re.M | re.I)
if subj_data is not None:
subj = decode(subj_data.group(0))
date_data = re.search('^Date:.*(.*\r?\n\s.*)*', data, re.M | re.I)
if date_data is not None:
date = decode(date_data.group(0))
return [subj, addr, date]
def __init__(self, host_name, host_port, user_id, passw, log_level=logging.DEBUG):
logging.basicConfig(format='%(levelname)s:%(message)s', level=log_level)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock = sock
sock.connect((host_name, host_port))
self.connection = True
except Exception as e:
print('can\'t connect to {0} on {1} port \r\n{2}'.format(host_name, host_port, e.__repr__()))
self.connection = False
return
self.server_info = self.receiveline()
auto = [
'user {0}'.format(user_id),
'pass {0}'.format(passw),
]
response = []
for m in auto:
self.send_password(m)
response.append(self.receiveline())
print response
if response[0].find("+OK Name is a valid mailbox") != -1:
self.username_valid = True
else:
self.username_valid = False
if response[1].find("+OK Maildrop ready") != -1:
self.password_valid = True
else:
self.password_valid = False
# returns back list of senders, subjects and date within the index LOWER_INDEX and UPPER_INDEX
def get_message_list(self, LOWER_INDEX, UPPER_INDEX):
addr_list = []
subj_list = []
date_list = []
self.message_count = self.get_message_count()
if(self.message_count - UPPER_INDEX <= 0):
UPPER_INDEX = self.message_count - 1
if(self.message_count - LOWER_INDEX <= 0):
return addr_list, subj_list, date_list
for message_number in range(self.message_count - LOWER_INDEX,self.message_count - UPPER_INDEX - 1,-1):
self.send_message('top {0} 0'.format(message_number))
# response = self.receive_till_term(TERMINATOR)
response = self.sock.recv(2048)
while not response.endswith(b'\r\n.\r\n'):
response += self.sock.recv(2048)
subj, addr, date = self.get_result(response)
subj_list.append(subj)
addr_list.append(addr)
date_list.append(date)
logging.info(('\nS: {0}\n{1}\n{2}\n'.format(addr, subj, date)))
return addr_list, subj_list, date_list
# returns the message body from message at position index
def get_email_body(self, index):
index = self.message_count - index
self.send_message("RETR "+str(index))
response = self.receive_till_term(TERMINATOR)
response = '\n'.join(response.split('\n')[1:])
b = email.message_from_string(response)
body = ""
if b.is_multipart():
for payload in b.get_payload():
body = body + payload.get_payload()
else:
body = b.get_payload()
body = body.replace("\r","")
body = body[:-2]
f = open("message_retrieved.html","w")
f.write(body)
f.close()
return body
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf8')
pop_obj = pop3lib(HOST_ADDR,POP3_PORT,USERNAME,PASSWORD)
|
StarcoderdataPython
|
8098609
|
import sys
from spider.exam_lib.offcn_exam_lib import OffcnExamLib
from spider.exam_lib.huatu_exam_lib import HuatuExamLib
import os
def parse_arg(arg, kv):
args = arg.split('=')
if len(args) != 2:
print(arg+'参数错误')
kv[args[0]] = args[1]
def main():
argv = sys.argv
lib = argv[1]
if lib == 'help':
print('使用方法:python examspider doc=[doc_name.docx] [options]-------\n\
type=option option可取值[行测,申论,面试]\n\
from=option 为爬取开始页面\n\
to=option 为爬取结束页面')
else:
file_path = './exam_doc'
if not os.path.exists(file_path):
os.mkdir(file_path)
kv = {}
for arg in argv[2:]:
parse_arg(arg, kv)
doc_name = kv.get('doc')
if doc_name == None:
print('没有输入生成的word文件名称,请输入要保存到的文件名称')
return
exam_type = kv.get('type')
if exam_type == None:
exam_type = '行测'
from_page = kv.get('from')
if from_page == None:
from_page = '1'
to_page = kv.get('to')
if to_page == None:
to_page = '5'
if lib == 'offcn':
# Offcn (中公)
OffcnExamLib(file_path).request(doc_name, exam_type, int(from_page), int(to_page))
elif lib == 'huatu':
# Huatu (华图)
HuatuExamLib(file_path).request(doc_name, exam_type, int(from_page), int(to_page))
else:
print('参数不正确,使用help查看使用方法')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1780765
|
def test_run_shell_command(get_v):
res = get_v.run_shell_command('uname -a')
assert res.returncode == 0
def test_get_version_list(get_v):
v_list = get_v.get_versions_list()
assert isinstance(v_list, list)
def test_get_last_version(get_v):
version_list = ['0.0.1', '0.0.2', '0.0.3']
last_v = get_v.get_last_version(version_list)
assert last_v == version_list[-1]
def test_write_or_create_version_file(get_v, tmpdir):
path_to_file = tmpdir.mkdir("sub").join("version.txt")
get_v.write_or_create_version_file(path_to_file)
assert path_to_file.read() == "No versions yet!"
|
StarcoderdataPython
|
5141697
|
"""
QuPathBinMaskImporter class
Import binary masks form QuPath - via export script
!highly experimental - pre-alpha state
@author: <NAME> / MR Cancer / MH / ISB / NTNU Trondheim Norway
<EMAIL>
"""
from typing import List
import cv2
import numpy as np
import os
from dataclasses import dataclass
# local imports
from .base_importer import QuPathBaseImporter
from .coregistration import CoRegistrationData
from st_toolbox import BinaryMask
class MaskNameSplitterInterface:
"""
Interface for splitting the input / path strings into meaningful mask class names.
Implement the get_mask_name function and pass the class in as the mask_name_splitter
parameter of QuPathBinMaskImporter.
See the DefaultMaskNameSplitter class for an example.
@staticmethod
def get_mask_name(input_str: str) -> str:
pass
class DefaultMaskNameSplitter(MaskNameSplitterInterface):
@staticmethod
def get_mask_name(input_str: str) -> str:
if input_str.split('_')[-3] == "Annotation":
return input_str.split('_')[-2]
else:
return '_'.join([input_str.split('_')[-2], input_str.split('_')[-3]])
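# Illustrative alternative (an addition, not part of the package): any class implementing
# MaskNameSplitterInterface.get_mask_name can be passed as mask_name_splitter. The file
# naming scheme assumed below (".../sample01_Tumor_mask.png" -> "Tumor") is hypothetical.
#
# class SuffixMaskNameSplitter(MaskNameSplitterInterface):
#     @staticmethod
#     def get_mask_name(input_str: str) -> str:
#         return os.path.basename(input_str).split('_')[-2]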
class QuPathBinMaskImporter(QuPathBaseImporter):
_mask_type: str
img: np.ndarray
output_file_path: str
def __init__(self,
qp_export_file_path: str,
output_folder: str,
co_registration_data: CoRegistrationData = None,
mask_name_splitter: MaskNameSplitterInterface = None,
name: str = None):
super().__init__(qp_export_file_path=qp_export_file_path,
output_folder=output_folder,
co_registration_data=co_registration_data,
name=name)
if mask_name_splitter is None:
self.mask_name_splitter = DefaultMaskNameSplitter
elif issubclass(mask_name_splitter, MaskNameSplitterInterface):
self.mask_name_splitter = mask_name_splitter
else:
raise ValueError("mask_name_splitter has to be a subclass of MaskNameSplitterInterface")
self._mask_type = None
self.img = None
self.output_file_path = None
@property
def mask(self) -> BinaryMask:
if self.mask_type is not None:
return BinaryMask(
name=self.mask_type,
path=self.output_file_path,
img=self.img
)
else:
return BinaryMask()
def _child_check(self) -> bool:
return True
def _child_run(self) -> bool:
moving_img = cv2.imread(self.qp_export_file_path, cv2.IMREAD_GRAYSCALE)
if self.co_registration_data is not None:
transformed_img = cv2.warpPerspective(moving_img, self.co_registration_data.transform_matrix, (self.co_registration_data.target_w, self.co_registration_data.target_h))
else:
transformed_img = moving_img
out_file_name = os.path.join(self.output_folder, 'process_out_{}_mask_{}.png'.format(self.name, self.mask_type))
cv2.imwrite(out_file_name, transformed_img)
self.img = transformed_img
self.output_file_path = out_file_name
return True
@property
def mask_type(self) -> str:
if self._mask_type is None and self.qp_export_file_path is not None:
self._mask_type = self.mask_name_splitter.get_mask_name(self.qp_export_file_path)
return self._mask_type
@staticmethod
def batch_import(qp_export_path_list: List[str],
output_folder: str, mask_name_splitter: MaskNameSplitterInterface = None,
co_registration_data_list: List[CoRegistrationData] = None,
names: List[str] = None) -> List['QuPathBinMaskImporter']:
qp_mask_imps = []
if co_registration_data_list is None:
co_registration_data_list = [None for i in range(0, len(qp_export_path_list))]
if names is None:
names = [None for i in range(0, len(qp_export_path_list))]
for qp, co_reg_data, name in zip(qp_export_path_list, co_registration_data_list, names):
qp_mask_imps.append(QuPathBinMaskImporter(qp_export_file_path=qp,
output_folder=output_folder,
co_registration_data=co_reg_data,
mask_name_splitter=mask_name_splitter,
name=name))
qp_mask_imps[-1].run()
return qp_mask_imps
|
StarcoderdataPython
|
8095693
|
import os
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
# Code so automated tests will run properly
# Check for environment variable, if that fails, use getpass().
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
my_device = {
"device_type": "arista_eos",
"host": "arista1.lasthop.io",
"username": "pyclass",
"password": password,
}
net_connect = ConnectHandler(**my_device)
output = net_connect.send_command(
"show vlan", use_ttp=True, ttp_template="show_vlan.ttp"
)
net_connect.disconnect()
print()
print("VLAN Table:")
print("-" * 18)
pprint(output)
print()
# Strip outer lists
data = output[0][0]
for vlan_dict in data:
if vlan_dict["vlan_id"] == "7":
print()
print(f"VLAN ID: {vlan_dict['vlan_id']}")
print(f"VLAN name: {vlan_dict['vlan_name']}")
print()
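# Added note: the show_vlan.ttp template itself is not shown here. Judging from the keys
# accessed above, a minimal TTP template would need to capture at least vlan_id and
# vlan_name from each row of Arista's "show vlan" output, e.g. something like:
#   {{ vlan_id }}  {{ vlan_name }}  {{ status }}  {{ interfaces }}
# (illustrative only -- the real template file may differ.)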
|
StarcoderdataPython
|
9761751
|
import click
import cowait.cli
@click.command(help='create a new context')
@click.argument('name', type=str, required=False)
@click.option('--image', type=str, required=False, help='image name')
@click.option('--base', type=str, required=False, help='base image name')
@click.pass_context
def new(ctx, name: str, image: str, base: str):
cowait.cli.new_context(
ctx.obj,
name=name,
image=image,
base=base,
)
|
StarcoderdataPython
|
11236323
|
from anduin import Data
r = Data.find('user',[('id','=',1)])
# r = Data.find('user',[('ip','=',1)])
|
StarcoderdataPython
|
1934826
|
<reponame>arccode/factory
#!/usr/bin/env python3
#
# Copyright 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for e5071c_scpi module.
Currently, the tests cover only some of the more complicated logic in the
auxiliary functions; in the future, we might want to use the e5071c_mock to
extend the test coverage as well.
"""
import unittest
from cros.factory.test.rf.e5071c_scpi import CheckTraceValid
from cros.factory.test.rf.e5071c_scpi import Interpolate
X_VALUES = [10, 10, 15, 18, 20, 20, 30, 30, 40]
Y_VALUES = [0.5, 0.7, 0.9, 1.2, 0.6, 0.7, 0.1, 1.1, 9.1]
class TestInterpolation(unittest.TestCase):
def testInterpolateNormal(self):
"""Tests whether the Interpolate function works for query in range."""
# Test cases for non-ambiguous situation.
self.assertAlmostEqual(0.90, Interpolate(X_VALUES, Y_VALUES, 15))
self.assertAlmostEqual(1.10, Interpolate(X_VALUES, Y_VALUES, 17))
self.assertAlmostEqual(9.10, Interpolate(X_VALUES, Y_VALUES, 40))
# Test cases for duplicated values presented in X_VALUES.
self.assertAlmostEqual(0.50, Interpolate(X_VALUES, Y_VALUES, 10))
self.assertAlmostEqual(0.78, Interpolate(X_VALUES, Y_VALUES, 12))
self.assertAlmostEqual(0.90, Interpolate(X_VALUES, Y_VALUES, 19))
self.assertAlmostEqual(0.40, Interpolate(X_VALUES, Y_VALUES, 25))
self.assertAlmostEqual(6.70, Interpolate(X_VALUES, Y_VALUES, 37))
def testInterpolateException(self):
"""Tests whether the Interpolate function raises exception as expected."""
# Should fail in TraceValid function.
self.assertRaises(ValueError, Interpolate, [10, 50], [0.1], 44)
# Out of range exceptions.
self.assertRaises(ValueError, Interpolate, X_VALUES, Y_VALUES, 5)
self.assertRaises(ValueError, Interpolate, X_VALUES, Y_VALUES, 45)
class TestTraceValid(unittest.TestCase):
def testCheckTraceValid(self):
# Check whether x_values is empty.
self.assertRaises(ValueError, CheckTraceValid, [], [])
# Check whether x_values and values are not equal in length.
self.assertRaises(ValueError, CheckTraceValid, [10, 20], [0.5])
# Check whether x_values is not an increasing sequence.
self.assertRaises(ValueError, CheckTraceValid, [10, 20, 19], [0, 0, 0])
# Check for valid case
CheckTraceValid([10, 50], [0, 1])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4860437
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.education.doctype.student.test_student import create_student
from erpnext.education.doctype.student.test_student import get_student
from erpnext.education.doctype.program.test_program import setup_program
from erpnext.education.doctype.course_activity.test_course_activity import make_course_activity
class TestCourseEnrollment(unittest.TestCase):
def setUp(self):
setup_program()
student = create_student({"first_name": "_Test First", "last_name": "<NAME>", "email": "<EMAIL>"})
program_enrollment = student.enroll_in_program("_Test Program")
course_enrollment = frappe.db.get_value("Course Enrollment",
{"course": "_Test Course 1", "student": student.name, "program_enrollment": program_enrollment.name}, 'name')
make_course_activity(course_enrollment, "Article", "_Test Article 1-1")
def test_get_progress(self):
student = get_student("_<EMAIL>")
program_enrollment_name = frappe.get_list("Program Enrollment", filters={"student": student.name, "Program": "_Test Program"})[0].name
course_enrollment_name = frappe.get_list("Course Enrollment", filters={"student": student.name, "course": "_Test Course 1", "program_enrollment": program_enrollment_name})[0].name
course_enrollment = frappe.get_doc("Course Enrollment", course_enrollment_name)
progress = course_enrollment.get_progress(student)
finished = {'content': '_Test Article 1-1', 'content_type': 'Article', 'is_complete': True}
self.assertTrue(finished in progress)
frappe.db.rollback()
def tearDown(self):
for entry in frappe.db.get_all("Course Enrollment"):
frappe.delete_doc("Course Enrollment", entry.name)
for entry in frappe.db.get_all("Program Enrollment"):
doc = frappe.get_doc("Program Enrollment", entry.name)
doc.cancel()
doc.delete()
|
StarcoderdataPython
|
9649631
|
<filename>venv/lib/python3.6/site-packages/examples/sqla/app2.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import flask_admin as admin
from flask_admin.contrib import sqla
# Create application
app = Flask(__name__)
# Create dummy secret key so we can use sessions
app.config["SECRET_KEY"] = "123456790"
# Create in-memory database
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///sample_db_2.sqlite"
app.config["SQLALCHEMY_ECHO"] = True
db = SQLAlchemy(app)
# Flask views
@app.route("/")
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
class Car(db.Model):
__tablename__ = "cars"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
desc = db.Column(db.String(50))
def __str__(self):
return self.desc
class Tyre(db.Model):
__tablename__ = "tyres"
car_id = db.Column(db.Integer, db.ForeignKey("cars.id"), primary_key=True)
tyre_id = db.Column(db.Integer, primary_key=True)
car = db.relationship("Car", backref="tyres")
desc = db.Column(db.String(50))
class CarAdmin(sqla.ModelView):
column_display_pk = True
form_columns = ["id", "desc"]
class TyreAdmin(sqla.ModelView):
column_display_pk = True
form_columns = ["car", "tyre_id", "desc"]
# Create admin
admin = admin.Admin(app, name="Example: SQLAlchemy2", template_mode="bootstrap3")
admin.add_view(CarAdmin(Car, db.session))
admin.add_view(TyreAdmin(Tyre, db.session))
if __name__ == "__main__":
# Create DB
db.create_all()
# Start app
app.run(debug=True)
|
StarcoderdataPython
|
9740256
|
from app import app
from flask import render_template,request,jsonify
from flask_pymongo import PyMongo
from bson import ObjectId
mongo = PyMongo(app)
@app.route('/')
def index_page():
output = []
result = mongo.db.train_sample.aggregate([{"$sample":{"size":10 }}])
for item in result:
tweet = {
"raw_tweet": item["tweet"],
"sentiment" : item["sentiment"]["label"],
"raw_token" : item["stop_words_token"],
"token" : item["token"]
}
output.append(tweet)
title = "Bitcoin Sentiment Analysis with VADER & Fuzzy String Matching using Twitter Data "
date = ["2017-12-26","2017-12-27","2017-12-28","2017-12-29","2017-12-30","2017-12-31","2018-01-01"]
return render_template("sentiment.html", title=title , tweets=output , date=date)
@app.route('/api_get_sentiment_analysis_daily_url',methods=['GET','POST'])
def sentiment_daily_result():
data = request.form.to_dict()
sample_date = data["sample_date"]
result = mongo.db.sentiment_analysis.find_one({
"sample_date" : sample_date
})
result["_id"] = ""
return jsonify({ "result" : result })
@app.route('/api_get_sentiment_analysis_summary_url',methods=['GET'])
def sentiment_summary_result():
result = mongo.db.sentiment_analysis.find_one({
"_id" : ObjectId("5a4991f3a081af5a3f529b33")
})
result["_id"] = ""
return jsonify({ "result" : result })
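# Added usage note (illustrative; host/port depend on how the Flask app is run):
#   curl -X POST -d "sample_date=2017-12-26" http://localhost:5000/api_get_sentiment_analysis_daily_url
#   curl http://localhost:5000/api_get_sentiment_analysis_summary_url
# The daily endpoint reads the form field "sample_date"; the summary endpoint takes no input.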
|
StarcoderdataPython
|
1737928
|
<gh_stars>0
#
# coding=utf-8
import os
import setuptools
#
# get the long description from the README file
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name='cmd2-ext-test',
version='0.2.0',
# TODO: Figure out why this doesn't work on CI Server
# use_scm_version={
# 'root': '../..',
# 'relative_to': __file__,
# 'git_describe_command': 'git describe --dirty --tags --long --match plugin-ext-test*'
# },
description='External test plugin for cmd2. Allows for external invocation of commands as if from a cmd2 pyscript',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='cmd2 test plugin',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/python-cmd2/cmd2-ext-test',
license='MIT',
packages=['cmd2_ext_test'],
python_requires='>=3.4',
install_requires=['cmd2 >= 0.9.4, <=2'],
setup_requires=['setuptools_scm >= 3.0'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# dependencies for development and testing
# $ pip install -e .[dev]
extras_require={
'test': [
'codecov',
'coverage',
'pytest',
'pytest-cov',
],
'dev': ['setuptools_scm', 'pytest', 'codecov', 'pytest-cov',
'pylint', 'invoke', 'wheel', 'twine']
},
)
|
StarcoderdataPython
|
3278814
|
from aws_cdk import (
aws_ec2,
core
)
class Vpc(core.Construct):
def __init__(self, scope: core.Construct, id: str, **props):
super().__init__(scope, id)
#
# PARAMETERS
#
self.id_param = core.CfnParameter(
self,
"Id",
default="",
description="Optional: Specify the VPC ID. If not specified, a VPC will be created."
)
self.id_param.override_logical_id(f"{id}Id")
self.cidr_param = core.CfnParameter(
self,
"Cidr",
allowed_pattern="^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$",
default="10.0.0.0/16",
description="Optional: VPC IPv4 CIDR block if no VPC provided."
)
self.cidr_param.override_logical_id(f"{id}Cidr")
self.nat_gateway_per_subnet_param = core.CfnParameter(
self,
"NatGatewayPerSubnet",
allowed_values=[ "true", "false" ],
default="false",
description="Optional: Set to 'true' to provision a NAT Gateway in each public subnet for AZ HA."
)
self.nat_gateway_per_subnet_param.override_logical_id(f"{id}NatGatewayPerSubnet")
self.private_subnet1_id_param = core.CfnParameter(
self,
"PrivateSubnet1Id",
default="",
description="Optional: Specify Subnet ID for private subnet 1."
)
self.private_subnet1_id_param.override_logical_id(f"{id}PrivateSubnet1Id")
self.private_subnet1_cidr_param = core.CfnParameter(
self,
"PrivateSubnet1Cidr",
allowed_pattern="^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$",
default="10.0.128.0/18",
description="Optional: VPC IPv4 CIDR block of private subnet 1 if no VPC provided."
)
self.private_subnet1_cidr_param.override_logical_id(f"{id}PrivateSubnet1Cidr")
self.private_subnet2_id_param = core.CfnParameter(
self,
"PrivateSubnet2Id",
default="",
description="Optional: Specify Subnet ID for private subnet 2."
)
self.private_subnet2_id_param.override_logical_id(f"{id}PrivateSubnet2Id")
self.private_subnet2_cidr_param = core.CfnParameter(
self,
"PrivateSubnet2Cidr",
allowed_pattern="^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$",
default="10.0.192.0/18",
description="Optional: VPC IPv4 CIDR block of private subnet 2 if no VPC provided."
)
self.private_subnet2_cidr_param.override_logical_id(f"{id}PrivateSubnet2Cidr")
self.public_subnet1_id_param = core.CfnParameter(
            self,
            "PublicSubnet1Id",
default="",
description="Optional: Specify Subnet ID for public subnet 1."
)
self.public_subnet1_id_param.override_logical_id(f"{id}PublicSubnet1Id")
self.public_subnet1_cidr_param = core.CfnParameter(
self,
"PublicSubnet1Cidr",
allowed_pattern="^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$",
default="10.0.0.0/18",
description="Optional: VPC IPv4 CIDR block of public subnet 1 if no VPC provided."
)
self.public_subnet1_cidr_param.override_logical_id(f"{id}PublicSubnet1Cidr")
self.public_subnet2_id_param = core.CfnParameter(
self,
"PublicSubnet2Id",
default="",
description="Optional: Specify Subnet ID for public subnet 2."
)
self.public_subnet2_id_param.override_logical_id(f"{id}PublicSubnet2Id")
self.public_subnet2_cidr_param = core.CfnParameter(
self,
"PublicSubnet2Cidr",
default="10.0.64.0/18",
description="Optional: VPC IPv4 CIDR block of public subnet 2 if no VPC provided."
)
self.public_subnet2_cidr_param.override_logical_id(f"{id}PublicSubnet2Cidr")
#
# CONDITIONS
#
self.not_given_condition = core.CfnCondition(
self,
"NotGiven",
expression=core.Fn.condition_equals(self.id_param.value, "")
)
self.not_given_condition.override_logical_id(f"{id}NotGiven")
self.not_given_and_nat_gateway_per_subnet_condition = core.CfnCondition(
self,
"NotGivenAndNatGatewayPerSubnetCondition",
expression=core.Fn.condition_and(
core.Fn.condition_equals(self.id_param.value, ""),
core.Fn.condition_equals(self.nat_gateway_per_subnet_param.value, "true")
)
)
self.not_given_and_nat_gateway_per_subnet_condition.override_logical_id(f"{id}NotGivenAndNatGatewayPerSubnet")
#
# RESOURCES
#
self.vpc = aws_ec2.CfnVPC(
self,
f"{id}",
cidr_block=self.cidr_param.value_as_string,
enable_dns_hostnames=True,
enable_dns_support=True,
instance_tenancy="default",
tags=[core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}")]
)
self.vpc.cfn_options.condition=self.not_given_condition
self.vpc.override_logical_id(f"{id}")
self.igw = aws_ec2.CfnInternetGateway(
self,
"InternetGateway",
tags=[core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}")]
)
self.igw.cfn_options.condition=self.not_given_condition
self.igw.override_logical_id(f"{id}InternetGateway")
self.igw_attachment = aws_ec2.CfnVPCGatewayAttachment(
self,
"IGWAttachment",
vpc_id=self.vpc.ref,
internet_gateway_id=self.igw.ref
)
self.igw_attachment.cfn_options.condition=self.not_given_condition
self.igw_attachment.override_logical_id(f"{id}IGWAttachment")
self.public_route_table = aws_ec2.CfnRouteTable(
self,
"PublicRouteTable",
vpc_id=self.vpc.ref,
tags=[core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PublicRouteTable")]
)
self.public_route_table.cfn_options.condition=self.not_given_condition
self.public_route_table.override_logical_id(f"{id}PublicRouteTable")
self.public_default_route = aws_ec2.CfnRoute(
self,
"PublicDefaultRoute",
route_table_id=self.public_route_table.ref,
destination_cidr_block="0.0.0.0/0",
gateway_id=self.igw.ref
)
self.public_default_route.cfn_options.condition=self.not_given_condition
self.public_default_route.override_logical_id(f"{id}PublicDefaultRoute")
self.public_subnet1 = aws_ec2.CfnSubnet(
self,
"PublicSubnet1",
cidr_block=self.public_subnet1_cidr_param.value_as_string,
vpc_id=self.vpc.ref,
assign_ipv6_address_on_creation=None,
availability_zone=core.Fn.select(0, core.Fn.get_azs()),
map_public_ip_on_launch=True,
tags=[
core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PublicSubnet1")
]
)
self.public_subnet1.cfn_options.condition=self.not_given_condition
self.public_subnet1.override_logical_id(f"{id}PublicSubnet1")
self.public_subnet1_route_table_association = aws_ec2.CfnSubnetRouteTableAssociation(
self,
"PublicSubnet1RouteTableAssociation",
route_table_id=self.public_route_table.ref,
subnet_id=self.public_subnet1.ref
)
self.public_subnet1_route_table_association.cfn_options.condition=self.not_given_condition
self.public_subnet1_route_table_association.override_logical_id(f"{id}PublicSubnet1RouteTableAssociation")
self.public_subnet1_eip = aws_ec2.CfnEIP(
self,
"PublicSubnet1EIP",
domain="vpc"
)
self.public_subnet1_eip.cfn_options.condition=self.not_given_condition
self.public_subnet1_eip.override_logical_id(f"{id}PublicSubnet1EIP")
self.public_subnet1_nat_gateway = aws_ec2.CfnNatGateway(
self,
"PublicSubnet1NATGateway",
allocation_id=self.public_subnet1_eip.attr_allocation_id,
subnet_id=self.public_subnet1.ref,
tags=[core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PublicSubnet1")]
)
self.public_subnet1_nat_gateway.cfn_options.condition=self.not_given_condition
self.public_subnet1_nat_gateway.override_logical_id(f"{id}PublicSubnet1NATGateway")
self.public_subnet2 = aws_ec2.CfnSubnet(
self,
"PublicSubnet2",
cidr_block=self.public_subnet2_cidr_param.value_as_string,
vpc_id=self.vpc.ref,
assign_ipv6_address_on_creation=None,
availability_zone=core.Fn.select(1, core.Fn.get_azs()),
map_public_ip_on_launch=True,
tags=[
core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PublicSubnet2")
]
)
self.public_subnet2.cfn_options.condition=self.not_given_condition
self.public_subnet2.override_logical_id(f"{id}PublicSubnet2")
self.public_subnet2_route_table_association = aws_ec2.CfnSubnetRouteTableAssociation(
self,
"PublicSubnet2RouteTableAssociation",
route_table_id=self.public_route_table.ref,
subnet_id=self.public_subnet2.ref
)
self.public_subnet2_route_table_association.cfn_options.condition=self.not_given_condition
self.public_subnet2_route_table_association.override_logical_id(f"{id}PublicSubnet2RouteTableAssociation")
self.public_subnet2_eip = aws_ec2.CfnEIP(
self,
"PublicSubnet2EIP",
domain="vpc"
)
self.public_subnet2_eip.cfn_options.condition=self.not_given_and_nat_gateway_per_subnet_condition
self.public_subnet2_eip.override_logical_id(f"{id}PublicSubnet2EIP")
self.public_subnet2_nat_gateway = aws_ec2.CfnNatGateway(
self,
"PublicSubnet2NATGateway",
allocation_id=self.public_subnet2_eip.attr_allocation_id,
            subnet_id=self.public_subnet2.ref,
tags=[core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PublicSubnet2")]
)
self.public_subnet2_nat_gateway.cfn_options.condition=self.not_given_and_nat_gateway_per_subnet_condition
self.public_subnet2_nat_gateway.override_logical_id(f"{id}PublicSubnet2NATGateway")
self.private_subnet1 = aws_ec2.CfnSubnet(
self,
"PrivateSubnet1",
cidr_block=self.private_subnet1_cidr_param.value_as_string,
vpc_id=self.vpc.ref,
assign_ipv6_address_on_creation=None,
availability_zone=core.Fn.select(0, core.Fn.get_azs()),
map_public_ip_on_launch=False,
tags=[
core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PrivateSubnet1")
]
)
self.private_subnet1.cfn_options.condition=self.not_given_condition
self.private_subnet1.override_logical_id(f"{id}PrivateSubnet1")
self.private_subnet1_route_table = aws_ec2.CfnRouteTable(
self,
"PrivateSubnet1RouteTable",
vpc_id=self.vpc.ref,
tags=[core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PrivateSubnet1")]
)
self.private_subnet1_route_table.cfn_options.condition=self.not_given_condition
self.private_subnet1_route_table.override_logical_id(f"{id}PrivateSubnet1RouteTable")
self.private_subnet1_route_table_association = aws_ec2.CfnSubnetRouteTableAssociation(
self,
"PrivateSubnet1RouteTableAssociation",
route_table_id=self.private_subnet1_route_table.ref,
subnet_id=self.private_subnet1.ref
)
self.private_subnet1_route_table_association.cfn_options.condition=self.not_given_condition
self.private_subnet1_route_table_association.override_logical_id(f"{id}PrivateSubnet1RouteTableAssociation")
self.private_subnet1_default_route = aws_ec2.CfnRoute(
self,
"PrivateSubnet1DefaultRoute",
route_table_id=self.private_subnet1_route_table.ref,
destination_cidr_block="0.0.0.0/0",
nat_gateway_id=self.public_subnet1_nat_gateway.ref
)
self.private_subnet1_default_route.cfn_options.condition=self.not_given_condition
self.private_subnet1_default_route.override_logical_id(f"{id}PrivateSubnet1DefaultRoute")
self.private_subnet2 = aws_ec2.CfnSubnet(
self,
"PrivateSubnet2",
cidr_block=self.private_subnet2_cidr_param.value_as_string,
vpc_id=self.vpc.ref,
assign_ipv6_address_on_creation=None,
availability_zone=core.Fn.select(1, core.Fn.get_azs()),
map_public_ip_on_launch=False,
tags=[
core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PrivateSubnet2")
]
)
self.private_subnet2.cfn_options.condition=self.not_given_condition
self.private_subnet2.override_logical_id(f"{id}PrivateSubnet2")
self.private_subnet2_route_table = aws_ec2.CfnRouteTable(
self,
"PrivateSubnet2RouteTable",
vpc_id=self.vpc.ref,
tags=[core.CfnTag(key="Name", value=f"{core.Aws.STACK_NAME}/{id}/PrivateSubnet2")]
)
self.private_subnet2_route_table.cfn_options.condition=self.not_given_condition
self.private_subnet2_route_table.override_logical_id(f"{id}PrivateSubnet2RouteTable")
self.private_subnet2_route_table_association = aws_ec2.CfnSubnetRouteTableAssociation(
self,
"PrivateSubnet2RouteTableAssociation",
route_table_id=self.private_subnet2_route_table.ref,
subnet_id=self.private_subnet2.ref
)
self.private_subnet2_route_table_association.cfn_options.condition=self.not_given_condition
self.private_subnet2_route_table_association.override_logical_id(f"{id}PrivateSubnet2RouteTableAssociation")
self.private_subnet2_default_route = aws_ec2.CfnRoute(
self,
"PrivateSubnet2DefaultRoute",
route_table_id=self.private_subnet2_route_table.ref,
destination_cidr_block="0.0.0.0/0",
nat_gateway_id=core.Token.as_string(
core.Fn.condition_if(
self.not_given_and_nat_gateway_per_subnet_condition.logical_id,
self.public_subnet2_nat_gateway.ref,
self.public_subnet1_nat_gateway.ref
)
)
)
self.private_subnet2_default_route.cfn_options.condition=self.not_given_condition
self.private_subnet2_default_route.override_logical_id(f"{id}PrivateSubnet2DefaultRoute")
#
# OUTPUTS
#
self.id_output = core.CfnOutput(
self,
"IdOutput",
description="The ID of the VPC.",
value=self.id()
)
self.id_output.override_logical_id(f"{id}IdOutput")
self.private_subnet1_id_output = core.CfnOutput(
self,
"PrivateSubnet1IdOutput",
description="The ID of the first private VPC subnet.",
value=self.private_subnet1_id()
)
self.private_subnet1_id_output.override_logical_id(f"{id}PrivateSubnet1IdOutput")
self.private_subnet2_id_output = core.CfnOutput(
self,
"PrivateSubnet2IdOutput",
description="The ID of the second private VPC subnet.",
value=self.private_subnet2_id()
)
self.private_subnet2_id_output.override_logical_id(f"{id}PrivateSubnet2IdOutput")
self.public_subnet1_id_output = core.CfnOutput(
self,
"PublicSubnet1IdOutput",
description="The ID of the first public VPC subnet.",
value=self.public_subnet1_id()
)
self.public_subnet1_id_output.override_logical_id(f"{id}PublicSubnet1IdOutput")
self.public_subnet2_id_output = core.CfnOutput(
self,
"PublicSubnet2IdOutput",
description="The ID of the second public VPC subnet.",
value=self.public_subnet2_id()
)
self.public_subnet2_id_output.override_logical_id(f"{id}PublicSubnet2IdOutput")
#
# HELPERS
#
def id(self):
return core.Token.as_string(
core.Fn.condition_if(
self.not_given_condition.logical_id,
self.vpc.ref,
self.id_param.value_as_string
)
)
def metadata_parameter_group(self):
return [
{
"Label": {
"default": "VPC: Use Existing"
},
"Parameters": [
self.id_param.logical_id,
self.private_subnet1_id_param.logical_id,
self.private_subnet2_id_param.logical_id,
self.public_subnet1_id_param.logical_id,
self.public_subnet2_id_param.logical_id
],
},
{
"Label": {
"default": "VPC: Create New"
},
"Parameters": [
self.cidr_param.logical_id,
self.nat_gateway_per_subnet_param.logical_id,
self.private_subnet1_cidr_param.logical_id,
self.private_subnet2_cidr_param.logical_id,
self.public_subnet1_cidr_param.logical_id,
self.public_subnet2_cidr_param.logical_id
]
}
]
def metadata_parameter_labels(self):
return {
self.cidr_param.logical_id: {
"default": "VPC IPv4 CIDR"
},
self.id_param.logical_id: {
"default": "VPC ID"
},
self.nat_gateway_per_subnet_param.logical_id: {
"default": "Provision NAT Gateways Per Public Subnet (for HA but with higher cost)"
},
self.private_subnet1_cidr_param.logical_id: {
"default": "Private Subnet 1 IPv4 CIDR"
},
self.private_subnet1_id_param.logical_id: {
"default": "Private Subnet 1 ID"
},
self.private_subnet2_cidr_param.logical_id: {
"default": "Private Subnet 2 IPv4 CIDR"
},
self.private_subnet2_id_param.logical_id: {
"default": "Private Subnet 2 ID"
},
self.public_subnet1_id_param.logical_id: {
"default": "Public Subnet 1 ID"
},
self.public_subnet1_cidr_param.logical_id: {
"default": "Public Subnet 1 IPv4 CIDR"
},
self.public_subnet2_id_param.logical_id: {
"default": "Public Subnet 2 ID"
},
self.public_subnet2_cidr_param.logical_id: {
"default": "Public Subnet 2 IPv4 CIDR"
}
}
def private_subnet1_id(self):
return core.Token.as_string(
core.Fn.condition_if(
self.not_given_condition.logical_id,
self.private_subnet1.ref,
self.private_subnet1_id_param.value_as_string
)
)
def private_subnet2_id(self):
return core.Token.as_string(
core.Fn.condition_if(
self.not_given_condition.logical_id,
self.private_subnet2.ref,
self.private_subnet2_id_param.value_as_string
)
)
def private_subnet_ids(self):
return core.Token.as_list(
core.Fn.condition_if(
self.not_given_condition.logical_id,
[
self.private_subnet1.ref,
self.private_subnet2.ref
],
[
self.private_subnet1_id_param.value_as_string,
self.private_subnet2_id_param.value_as_string
]
)
)
def public_subnet1_id(self):
return core.Token.as_string(
core.Fn.condition_if(
self.not_given_condition.logical_id,
self.public_subnet1.ref,
self.public_subnet1_id_param.value_as_string
)
)
def public_subnet2_id(self):
return core.Token.as_string(
core.Fn.condition_if(
self.not_given_condition.logical_id,
self.public_subnet2.ref,
self.public_subnet2_id_param.value_as_string
)
)
def public_subnet_ids(self):
return core.Token.as_list(
core.Fn.condition_if(
self.not_given_condition.logical_id,
[
self.public_subnet1.ref,
self.public_subnet2.ref
],
[
self.public_subnet1_id_param.value_as_string,
self.public_subnet2_id_param.value_as_string
]
)
)
|
StarcoderdataPython
|
11315212
|
import serial
from struct import pack
from random import randint, uniform
from secrets import token_bytes
import math
ser = serial.Serial('COM3')
while True:
strg = bytes(f'Hello, World! {randint(0, 100)} \0', 'ascii')
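    # Pack layout (little-endian): two int16 values, a 10-byte Pascal-style
    # string of random bytes, the NUL-terminated message bytes, an unsigned
    # 64-bit integer, and a double-precision float.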
data = pack(f'<2h10p{len(strg)}sQd', randint(0, 10), 0, token_bytes(10), strg, randint(0, 10000000), math.pi)
ser.write(data)
|
StarcoderdataPython
|
11275865
|
<filename>leads/models.py<gh_stars>1-10
from django.db import models
# Create your models here.
class Lead(models.Model):
name = models.CharField(max_length=100,verbose_name="lead名字", help_text="lead名字")
email = models.EmailField(verbose_name="邮箱", help_text="邮箱")
message = models.CharField(max_length=300, verbose_name="信息", help_text="信息")
created_at = models.DateTimeField(auto_now_add=True,verbose_name="创建时间", help_text="创建时间")
|
StarcoderdataPython
|
1672391
|
<filename>app/database.py<gh_stars>0
'''
Created on May 13, 2019
@author: <NAME>
'''
from flask import request
from flask_restful import Resource
from flask import current_app as app
from app import utils_common
from app import utils_db
# delete server from database
class DatabaseHandler(Resource):
def delete(self):
# Track actions through different webservices.
uuidcode = request.headers.get('uuidcode', '<no uuidcode>')
app.log.info("uuidcode={} - Delete Database entry".format(uuidcode))
app.log.trace("uuidcode={} - Headers: {}".format(uuidcode, request.headers))
# Check for the J4J intern token
utils_common.validate_auth(app.log,
uuidcode,
request.headers.get('intern-authorization', None))
if request.headers.get('servername'):
servername = request.headers.get('servername')
infos = utils_db.get_entry_infos(app.log,
uuidcode,
servername,
app.database)
if len(infos) == 0:
return '', 204
utils_db.remove_entrys(app.log,
uuidcode,
servername,
app.database)
return '', 200
return '', 422
|
StarcoderdataPython
|
11256641
|
<reponame>dkBrazz/zserio
import unittest
import zserio
from testutils import getZserioApi
class OptionalMemberAlignmentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "alignment.zs").optional_member_alignment
def testBitSizeOfWithOptional(self):
optionalMemberAlignment = self.api.OptionalMemberAlignment(True, 0x4433, 0x1122)
self.assertEqual(self.WITH_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE, optionalMemberAlignment.bitsizeof())
def testBitSizeOfWithoutOptional(self):
optionalMemberAlignment = self.api.OptionalMemberAlignment(False, None, 0x7624)
self.assertEqual(self.WITHOUT_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE, optionalMemberAlignment.bitsizeof())
def testInitializeOffsetsWithOptional(self):
optionalMemberAlignment = self.api.OptionalMemberAlignment(True, 0x1111, 0x3333)
for bitPosition in range(32):
self.assertEqual(self.WITH_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE,
optionalMemberAlignment.initialize_offsets(bitPosition))
bitPosition = 32
self.assertEqual(self.WITH_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE + bitPosition,
optionalMemberAlignment.initialize_offsets(bitPosition))
def testInitializeOffsetsWithoutOptional(self):
optionalMemberAlignment = self.api.OptionalMemberAlignment(field_=0x3334)
bitPosition = 1
self.assertEqual(self.WITHOUT_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE + bitPosition,
optionalMemberAlignment.initialize_offsets(bitPosition))
def testReadWithOptional(self):
hasOptional = True
optionalField = 0x1234
field = 0x7654
writer = zserio.BitStreamWriter()
OptionalMemberAlignmentTest._writeOptionalMemberAlignmentToStream(writer, hasOptional, optionalField,
field)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
optionalMemberAlignment = self.api.OptionalMemberAlignment.from_reader(reader)
self._checkOptionalMemberAlignment(optionalMemberAlignment, hasOptional, optionalField, field)
def testReadWithoutOptional(self):
hasOptional = False
field = 0x2222
writer = zserio.BitStreamWriter()
OptionalMemberAlignmentTest._writeOptionalMemberAlignmentToStream(writer, hasOptional, None, field)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
optionalMemberAlignment = self.api.OptionalMemberAlignment.from_reader(reader)
self._checkOptionalMemberAlignment(optionalMemberAlignment, hasOptional, None, field)
def testWriteWithOptional(self):
hasOptional = True
optionalField = 0x9ADB
field = 0x8ACD
optionalMemberAlignment = self.api.OptionalMemberAlignment(hasOptional, optionalField, field)
bitBuffer = zserio.serialize(optionalMemberAlignment)
readOptionalMemberAlignment = zserio.deserialize(self.api.OptionalMemberAlignment, bitBuffer)
self._checkOptionalMemberAlignment(readOptionalMemberAlignment, hasOptional, optionalField, field)
self.assertTrue(optionalMemberAlignment == readOptionalMemberAlignment)
def testWriteWithoutOptional(self):
hasOptional = False
field = 0x7ACF
optionalMemberAlignment = self.api.OptionalMemberAlignment(has_optional_=hasOptional, field_=field)
bitBuffer = zserio.serialize(optionalMemberAlignment)
readOptionalMemberAlignment = zserio.deserialize(self.api.OptionalMemberAlignment, bitBuffer)
self._checkOptionalMemberAlignment(readOptionalMemberAlignment, hasOptional, None, field)
self.assertTrue(optionalMemberAlignment == readOptionalMemberAlignment)
@staticmethod
def _writeOptionalMemberAlignmentToStream(writer, hasOptional, optionalField, field):
writer.write_bool(hasOptional)
if hasOptional:
writer.write_bits(0, 31)
writer.write_bits(optionalField, 32)
writer.write_bits(field, 32)
def _checkOptionalMemberAlignment(self, optionalMemberAlignment, hasOptional, optionalField, field):
self.assertEqual(hasOptional, optionalMemberAlignment.has_optional)
if hasOptional:
self.assertTrue(optionalMemberAlignment.is_optional_field_used())
self.assertEqual(optionalField, optionalMemberAlignment.optional_field)
else:
self.assertFalse(optionalMemberAlignment.is_optional_field_used())
self.assertEqual(field, optionalMemberAlignment.field)
WITH_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE = 96
WITHOUT_OPTIONAL_MEMBER_ALIGNMENT_BIT_SIZE = 33
|
StarcoderdataPython
|
11361650
|
import logging
import os
import subprocess
from enum import Enum, auto
from typing import List
def find_files(path: str, prefix: str, extension: str = ''):
return [filename for filename in next(os.walk(path))[2]
if filename.startswith(prefix) and filename.endswith(extension)]
def find_youngest_file(path: str, prefix: str, extension: str = ''):
filenames = find_files(path, prefix, extension)
return max(filenames, key=lambda fn: os.stat(os.path.join(path, fn)).st_mtime) if filenames else None
def find_oldest_file(path: str, prefix: str, extension: str = ''):
filenames = find_files(path, prefix, extension)
return min(filenames, key=lambda fn: os.stat(os.path.join(path, fn)).st_mtime) if filenames else None
def to_human_readable(num: float, prefix: str = '', suffix: str = 'B'):
start = False
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
start = start or prefix == unit
if not start:
continue
if abs(num) < 1024.0:
return "%3.2f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.3f %s%s" % (num, 'Yi', suffix)
class FSStats:
def __init__(self, total_size: float, available: float):
self.total_size = total_size
self.available = available
def __str__(self):
return "FSStats: size:{}, avail:{}".format(self.total_size, self.available)
@property
def used(self):
return self.total_size - self.available
def get_fs_stats(path: str):
stats = os.statvfs(path)
total_size = stats.f_blocks * stats.f_bsize
available = stats.f_bavail * stats.f_bsize
return FSStats(total_size, available)
def get_total_size(path: str, log: logging.Logger = None):
command_list = ['du', '-s', path]
if log:
log.debug("Getting dir size: {}".format(" ".join(command_list)))
run_out = subprocess.run(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if run_out.returncode == 0:
size = int(run_out.stdout.split()[0])
if log:
log.debug("Got {} kilobytes for {}".format(size, path))
return size
else:
if log:
log.warning("Failed to get dir size! {}".format(path))
log.warning(run_out.stderr)
return False
class PoolState(Enum):
HEALTHY = auto()
DEGRADED = auto()
DOWN = auto()
ERROR = auto()
class DriveStatus:
def __init__(self, drive: str, state: PoolState):
self.drive = drive
self.state = state
def __str__(self):
return "{} {}".format(self.drive, self.state.name)
class PoolStatus:
def __init__(self, pool: str, path: str, state: PoolState, drive_status: List[DriveStatus],
total_size: float, used: float, message: str = None):
self.pool = pool
self.path = path
self.state = state
self.message = message
self.drive_status = drive_status
self.total_size = total_size
self.used = used
def __str__(self):
return "PoolStatus: {} ({}) -{}- {:.2%} used\n\t{}{}".format(self.pool, self.path, self.state.name,
self.utilization,
"\n\t".join([str(ds) for ds in self.drive_status]),
"\n\t" + self.message if self.message else "")
@property
def available(self):
return self.total_size - self.used
@property
def utilization(self):
return self.used / self.total_size
|
StarcoderdataPython
|
8147358
|
<filename>tests/version_parsing.py
import re
find_version = re.compile(r"^(?P<namever>(?P<name>.+?)-(?P<version>(\d*)(\.(a|b|rc)?\d+)?(\.(post|dev)?\d+)?))$").match
# find_version = re.compile(r"^(?P<namever>(?P<name>.+?)-(?P<version>\d.*?))$").match
print(find_version('ddvla-14.rc2.dev0').groupdict())
try:
# This should fail. Must be rc
print(find_version('ddvla-14.cr.dev0').groupdict())
except AttributeError:
pass
|
StarcoderdataPython
|
239168
|
"""
Examples for error propagation
------------------------------
Here we use the same config as in `particle_amplitude.py`.
"""
config_str = """
decay:
A:
- [R1, B]
- [R2, C]
- [R3, D]
R1: [C, D]
R2: [B, D]
R3: [B, C]
particle:
$top:
A: { mass: 1.86, J: 0, P: -1}
$finals:
B: { mass: 0.494, J: 0, P: -1}
C: { mass: 0.139, J: 0, P: -1}
D: { mass: 0.139, J: 0, P: -1}
R1: [ R1_a, R1_b ]
R1_a: { mass: 0.7, width: 0.05, J: 1, P: -1}
R1_b: { mass: 0.5, width: 0.05, J: 0, P: +1}
R2: { mass: 0.824, width: 0.05, J: 0, P: +1}
R3: { mass: 0.824, width: 0.05, J: 0, P: +1}
"""
import matplotlib.pyplot as plt
import yaml
from tf_pwa.config_loader import ConfigLoader
from tf_pwa.histogram import Hist1D
config = ConfigLoader(yaml.full_load(config_str))
input_params = {
"A->R1_a.BR1_a->C.D_total_0r": 6.0,
"A->R1_b.BR1_b->C.D_total_0r": 1.0,
"A->R2.CR2->B.D_total_0r": 2.0,
"A->R3.DR3->B.C_total_0r": 1.0,
}
config.set_params(input_params)
data = config.generate_toy(1000)
phsp = config.generate_phsp(10000)
# %%
# After the parameter errors have been calculated, an error matrix `config.inv_he` is available (computed from the inverse Hessian).
# Such a matrix can be saved directly with `numpy.save` and loaded back with `numpy.load`.
config.get_params_error(data=[data], phsp=[phsp])
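# %%
# For example, the matrix can be stored and restored with ``numpy`` (a minimal
# sketch; ``error_matrix.npy`` is only an illustrative file name):
import numpy as np
np.save("error_matrix.npy", config.inv_he)  # illustrative file name
config.inv_he = np.load("error_matrix.npy")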
# %%
# We can use the following method to perform the error propagation
#
# .. math::
# \sigma_{f} = \sqrt{\frac{\partial f}{\partial x_i} V_{ij} \frac{\partial f}{\partial x_j }}
#
# by adding some calculation here. We need to use `tensorflow` functions instead of those of `math` or `numpy`.
#
import tensorflow as tf
with config.params_trans() as pt:
a2_r = pt["A->R2.CR2->B.D_total_0r"]
    a2_i = pt["A->R2.CR2->B.D_total_0i"]
a2_x = a2_r * tf.cos(a2_i)
# %%
# We can then calculate the error we need as follows:
print(a2_x.numpy(), pt.get_error(a2_x).numpy())
# %%
# We can also calculate more complex quantities, such as the ratio of the intensity in the mass range (0.75, 0.85) to that over the full phase space.
# Going even further, we can obtain the error of the error, in the sense of error propagation.
from tf_pwa.data import data_mask
m_R2 = phsp.get_mass("(B, D)")
cut_cond = (m_R2 < 0.85) & (m_R2 > 0.75)
amp = config.get_amplitude()
with config.params_trans() as pt1:
with config.params_trans() as pt:
int_mc = tf.reduce_sum(amp(phsp))
cut_phsp = data_mask(phsp, cut_cond)
cut_int_mc = tf.reduce_sum(amp(cut_phsp))
ratio = cut_int_mc / int_mc
error = pt.get_error(ratio)
print(ratio.numpy(), "+/-", error.numpy())
print(error.numpy(), "+/-", pt1.get_error(error).numpy())
|
StarcoderdataPython
|
8132538
|
<filename>leetcode.com/python/130_Surrounded_Regions.py
from collections import deque
# Source: https://tinyurl.com/yx859xh6
# TODO: Solve using Union-Find
class Solution(object):
def solve(self, board):
"""
:type board: List[List[str]]
:rtype: None Do not return anything, modify board in-place instead.
"""
queue = deque()
for r in range(len(board)):
for c in range(len(board[0])):
if (r in [0, len(board) - 1] or c in [0, len(board[0]) - 1]) and board[r][c] == "O":
queue.append((r, c))
while queue:
r, c = queue.popleft()
if 0<=r<len(board) and 0<=c<len(board[0]) and board[r][c] == "O":
board[r][c] = "S"
queue.append((r - 1, c))
queue.append((r + 1, c))
queue.append((r, c - 1))
queue.append((r, c + 1))
for r in range(len(board)):
for c in range(len(board[0])):
if board[r][c] == "O":
board[r][c] = "X"
elif board[r][c] == "S":
board[r][c] = "O"
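# Example usage (illustrative): the inner "O" region is captured while the
# border-connected "O" survives.
#   board = [["X", "X", "X", "X"],
#            ["X", "O", "O", "X"],
#            ["X", "X", "O", "X"],
#            ["X", "O", "X", "X"]]
#   Solution().solve(board)
#   # board is now [["X", "X", "X", "X"],
#   #               ["X", "X", "X", "X"],
#   #               ["X", "X", "X", "X"],
#   #               ["X", "O", "X", "X"]]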
|
StarcoderdataPython
|
4986685
|
<reponame>DanilooSilva/Cursos_de_Python
from dataclasses import dataclass
@dataclass
class Pessoa:
nome: str
sobrenome: str
def __post_init__(self):
self.nome_completo = f'{self.nome} {self.sobrenome}'
p1 = Pessoa('Danilo', 'Gomes')
print(p1.nome_completo)
|
StarcoderdataPython
|
200121
|
#!/usr/bin/env python3
r"""
Overview
________
This scripts converts bibliographies from RIS to BibTeX format, preserving only
relevant information. The RIS input file can be generated, for example, using
Zotero's "Quick Copy" feature (Edit > Preferences > Export). It may contain
certain Unicode characters, e.g., subscript numbers. The BibTeX output file
ought to contain ASCII characters only. Uppercase letters in acronyms, chemical
formulas, and common physicist's names are protected by automatic insertion of
curly braces.
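For instance, a chemical formula such as NaCl appearing in a title is written
as {NaCl} in the BibTeX output, so that BibTeX does not alter its capitalization.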
Usage
_____
ris2bib.py <input file> <output file>
[--sub=<format string>] [--super=<format string>] [--colcap=<0 or 1>]
[--short-year=<0 or 1>] [--skip-a=<0 or 1>] [--arxiv=<0 or 1>]
[--nature=<0 or 1>] [--scipost=<0 or 1>]
The optional arguments --sub and --super specify the markup used to convert
sub- and superscript Unicode sequences in titles to LaTeX code. The default
values are --sub='\textsubscript{X}' and --super='\textsuperscript{X}', where X
is the placeholder for the replaced sequence. Possible alternative values are
--sub='$_{X}$' and --super='$^{X}$'.
If --colcap=1, words following a colon, e.g., at the beginning of subtitles,
are capitalized. This is the default.
If --short-year=1, only the last two digits of the year are used for the article
identifier. The default is --short-year=0.
If --skip-a=1, sublabels "a" are omitted. The default is --skip-a=0.
If --arxiv=1, eprint identifiers are included even if an article has already
been published. This is the default.
If --nature=1, DOIs and eprint identifiers are provided via the URL entry. The
default is --nature=0.
If --scipost=1, eprints are provided with a full URL rather than an archive
prefix and identifier, and the entry type "misc" instead of "unpublished" is
used. The default is --scipost=0.
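Example with illustrative file names:
    ris2bib.py library.ris library.bib --sub='$_{X}$' --super='$^{X}$' --short-year=1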
"""
import re
import sys
# Read input (.ris) and output (.bib) file given as command-line arguments:
try:
ris, bib = [argument for argument in sys.argv[1:]
if not argument.startswith('-')]
except:
raise SystemExit(__doc__)
# Read optional command-line arguments:
sup = r'\textsuperscript{X}'
sub = r'\textsubscript{X}'
colcap = True
short_year = False
skip_a = False
arxiv = True
nature = False
scipost = False
for argument in sys.argv[1:]:
if argument.startswith('-') and '=' in argument:
key, value = argument.split('=')
if key == '--sub':
sub = value
print('Subscript format: %s' % sub)
elif key == '--super':
sup = value
print('Superscript format: %s' % sup)
        elif key == '--colcap':
colcap = bool(int(value))
print('Capitalize after colon: %s' % colcap)
elif key == '--short-year':
short_year = bool(int(value))
print('Use short year in identifiers: %s' % short_year)
elif key == '--skip-a':
skip_a = bool(int(value))
print('Omit sublabel a: %s' % skip_a)
elif key == '--arxiv':
arxiv = bool(int(value))
print('Include eprint identifiers: %s' % arxiv)
elif key == '--nature':
nature = bool(int(value))
print('Nature DOI style: %s' % nature)
elif key == '--scipost':
scipost = bool(int(value))
print('SciPost eprint style: %s' % scipost)
else:
print('Unknown argument: %s' % key)
sup = sup.replace('\\', '\\\\').replace('X', '\\1')
sub = sub.replace('\\', '\\\\').replace('X', '\\1')
# Data for text replacements:
accents = {
'\u00c1': r"\'A",
'\u00d6': r'\"{O}',
'\u00dc': r'\"{U}',
'\u00df': r'{\ss}',
'\u00e0': r'\`a',
'\u00e1': r"\'a",
'\u00e4': r'\"a',
'\u00e7': r'\c{c}',
'\u00e8': r'\`e',
'\u00e9': r"\'e",
'\u00ed': r"\'i",
'\u00ef': r'\"i',
'\u00f1': r'\~n',
'\u00f2': r'\`o',
'\u00f3': r"\'o",
'\u00f4': r'\^o',
'\u00f6': r'\"o',
'\u00f8': r'{\o}',
'\u00fa': r"\'u",
'\u00fc': r'\"u',
'\u0107': r"\'c",
'\u010c': r'\v{C}',
'\u010d': r'\v{c}',
'\u0119': r'\k{e}',
'\u011b': r'\v{e}',
'\u011f': r'\u{g}',
'\u012d': r'{\u\i}',
'\u0130': r'\.I',
'\u0131': r'{\i}',
'\u0144': r"\'n",
'\u0159': r'\v{r}',
'\u015b': r"\'s",
'\u015e': r'\c{S}',
'\u015f': r'\c{s}',
'\u0160': r'\v{S}',
'\u0161': r'\v{s}',
'\u0163': r'\c{t}',
    '\u017c': r'\.z',
'\u017e': r'\v{z}',
'\u01e7': r'\v{g}',
'\u2018': "`",
'\u2019': "'",
'\u201c': "``",
'\u201d': "''",
}
dashes = {
'\u2010': '-', # unbreakable
'\u2013': '--',
'\u2014': '---',
}
accents.update(dashes)
simplifications = {
key: value.replace('}', '')[-1]
for key, value in accents.items()
}
spaces = {
'\u00a0': '~',
'\u2009': '\,',
}
quotes = {
'\u00ab': r'{\guillemotleft}',
'\u00bb': r'{\guillemotright}',
}
superscripts = {
'\u00b2': '2',
'\u00b3': '3',
'\u00b9': '1',
'\u2070': '0',
'\u2071': 'i',
'\u2074': '4',
'\u2075': '5',
'\u2076': '6',
'\u2077': '7',
'\u2078': '8',
'\u2079': '9',
'\u207a': '+',
'\u207b': r'\ensuremath-',
'\u207c': '=',
'\u207d': '(',
'\u207e': ')',
'\u207f': 'n',
}
superscripts_any = ''.join(superscripts.keys())
superscripts_range = '([{0}]+)'.format(superscripts_any)
subscripts = {
'\u1d62': 'i',
'\u2080': '0',
'\u2081': '1',
'\u2082': '2',
'\u2083': '3',
'\u2084': '4',
'\u2085': '5',
'\u2086': '6',
'\u2087': '7',
'\u2088': '8',
'\u2089': '9',
'\u208a': '+',
'\u208b': r'\ensuremath-',
'\u208c': '=',
'\u208d': '(',
'\u208e': ')',
'\u2090': 'a',
'\u2091': 'e',
'\u2092': 'o',
'\u2093': 'x',
'\u2095': 'h',
'\u2096': 'k',
'\u2097': 'l',
'\u2098': 'm',
'\u2099': 'n',
'\u209a': 'p',
'\u209b': 's',
'\u209c': 't',
}
subscripts_any = ''.join(subscripts.keys())
subscripts_range = '([{0}]+([{0}.,]+[{0}])?)'.format(subscripts_any)
math = {
'\u00d7': r'\times',
'\u0393': r'\Gamma',
'\u0394': r'\Delta',
'\u0398': r'\Theta',
'\u039b': r'\Lambda',
'\u039e': r'\Xi',
'\u03a0': r'\Pi',
'\u03a3': r'\Sigma',
'\u03a5': r'\Upsilon',
'\u03a6': r'\Phi',
'\u03a8': r'\Psi',
'\u03a9': r'\Omega',
'\u03b1': r'\alpha',
'\u03b2': r'\beta',
'\u03b3': r'\gamma',
'\u03b4': r'\delta',
'\u03b5': r'\varepsilon',
'\u03b6': r'\zeta',
'\u03b7': r'\eta',
'\u03b8': r'\theta',
'\u03b9': r'\iota',
'\u03ba': r'\kappa',
'\u03bb': r'\lambda',
'\u03bc': r'\mu',
'\u03bd': r'\nu',
'\u03be': r'\xi',
'\u03c0': r'\pi',
'\u03c1': r'\rho',
'\u03c2': r'\varsigma',
'\u03c3': r'\sigma',
'\u03c4': r'\tau',
'\u03c5': r'\upsilon',
'\u03c6': r'\varphi',
'\u03c7': r'\chi',
'\u03c8': r'\psi',
'\u03c9': r'\omega',
'\u03d1': r'\vartheta',
'\u03d5': r'\phi',
'\u03d6': r'\varpi',
'\u03f1': r'\varrho',
'\u03f5': r'\epsilon',
'\u2032': r"'",
'\u2033': r"''",
'\u2202': r'\partial',
'\u2212': r'-',
'\u221a': r'\sqrt',
'\u221e': r'\infty',
'\u2264': r'\leq',
}
math_any = ''.join(math.keys())
math_range = '(([{0}\d][{0}\d\sx]*)?[{0}]+([{0}\d\sx]*[{0}\d])?)'.format(math_any)
others = {
'&': r'\&',
'\u00ad': r'\-',
}
names = {
'Bethe',
'Born',
'Bose',
'Brillouin',
'Burke',
'Carlo',
'Cooper',
'Coulomb',
'Dirac',
'Eliashberg',
'Ernzerhof',
'Fermi',
'Feynman',
'Fock',
'Fr\u00f6hlich',
'Gauss',
'Goldstone',
'Green',
'Haeckel',
'Hall',
'Hamilton',
'Hartree',
'Heeger',
'Heisenberg',
'Hove',
'Huang',
'Hubbard',
'Hund',
'Ising',
'Jahn',
'Kasuya',
'Kittel',
'Kohn',
'Lifshitz',
'Luttinger',
'Matsubara',
'Migdal',
'Monte',
'Mott',
'Oppenheimer',
'Pad\u00e9',
'Pariser',
'Parr',
'Peierls',
'Perdew',
'Pople',
'Python',
'Raman',
'Ruderman',
'Salpeter',
'Schrieffer',
'Schwinger',
'Stark',
'Sternheimer',
'Su',
'Teller',
'Tomonaga',
'Van',
'Vanderbilt',
'Waals',
'Wagner',
'Wannier',
'Ward',
'Weyl',
'Wick',
'Wigner',
'Yosida',
}
elements = {
'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg',
'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr',
'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr',
'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd',
'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd',
'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf',
'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po',
'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm',
'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs',
'Mt', 'Ds', 'Rg', 'Cn', 'Nh', 'Fl', 'Mc', 'Lv', 'Ts', 'Og',
}
elements -= {'Bi', 'In'}
# Considered entry types:
types = dict(
article = [
('author', 'AU'),
('title', 'TI'),
('journal', 'J2'),
('volume', 'VL'),
('pages', 'SP'),
('year', 'PY'),
],
unpublished = [
('author', 'AU'),
('title', 'TI'),
('year', 'PY'),
],
book = [
('author', 'AU'),
('title', 'TI'),
('edition', 'ET'),
('publisher', 'PB'),
('address', 'CY'),
('year', 'PY'),
],
electronic = [
('author', 'AU'),
('title', 'TI'),
('urldate', 'Y2'),
],
incollection = [
('author', 'AU'),
('title', 'TI'),
('editor', 'A2'),
('booktitle', 'J2'),
('edition', 'ET'),
('publisher', 'PB'),
('address', 'CY'),
('year', 'PY'),
],
phdthesis = [
('author', 'AU'),
('title', 'TI'),
('type', 'M3'),
('school', 'PB'),
('year', 'PY'),
],
misc = [
('author', 'AU'),
('title', 'TI'),
('howpublished', 'HP'),
('year', 'PY'),
],
techreport = [
('author', 'AU'),
('title', 'TI'),
('institution', 'PB'),
('year', 'PY'),
],
)
for key, value in types.items():
value.extend([
('url', 'UR'),
('doi', 'DO'),
])
if arxiv or key in {'misc', 'unpublished'}:
value.extend([
('archiveprefix', 'AP'),
('eprint', 'AR'),
])
search_keys = set(ris_key
for value in types.values()
for bib_key, ris_key in value)
# Long journal name (T2) and link to PDF (L1 etc.) are read complementarily:
search_keys |= {'T2', 'L1', 'L2', 'L3', 'L4'}
def simplify(name):
"""Simplify author names for reference identifier."""
# Remove LaTeX commands:
name = re.sub(r'\\\w+', '', name)
# Remove accents etc.:
for a, b in simplifications.items():
name = name.replace(a, b)
# Keep only ASCII alphanumericals:
name = ''.join([c for c in name
if 65 <= ord(c) <= 90 or 97 <= ord(c) <= 122])
return name
def fragile(token, previous=None):
"""Check if case of token/word must be protected using curly braces."""
upper = re.search('[A-Z]', token)
if upper:
# Token contains at least two uppercase characters or one uppercase
# character and a number, e.g., "NaCl" or "W90":
if len(re.findall('[A-Z0-9]', token)) > 1:
return True
# Token contains an uppercase character following a lowercase character,
# e.g., "eV":
lower = re.search('[a-z]', token)
if lower and lower.start() < upper.start():
return True
# Token starts with an uppercase letter that must be protected
# (unnecessary at the beginning of the entry):
if previous is None:
return False
# The first letter after ": " is protected by BibTeX by default:
if previous == ': ':
return False
# Token is a single uppercase letter (except "A"), e.g., "T":
if len(token) == 1 and token != 'A':
return True
# Token stars with/derives from famous name, e.g., "Gaussian":
for name in names:
if token == name:
return True
if len(name) > 3 and token.startswith(name):
if not re.match('i?ons?$', token[len(name):]):
return True
# Literal part of token is symbol for chemical element, e.g., "Li":
letters = ''.join(re.findall('[a-zA-Z]+', token))
for element in elements:
if letters == element:
return True
# Token follows on period:
if re.search('\.', previous):
return True
return False
def protect(s):
"""Protect certain uppercase characters using curly braces."""
print('%s...' % s[:50])
# Identify groups that are already enclosed in curly braces and substitute
# them with placeholders:
groups = []
while '{' in s:
for group in re.findall(r'\{[^{]*?\}', s):
groups.append(group)
replacement = '<#%d>' % len(groups)
s = s.replace(group, replacement)
print('Group: %s = %s' % (replacement, group))
# Also substitute inline math with placeholders and protect them if they
# contain an uppercase letter.
for group in re.findall(r'\$.+?\$', s):
groups.append(group)
replacement = '<#%d>' % len(groups)
s = s.replace(group, replacement)
if re.search('[A-Z]', group):
groups[-1] = '{%s}' % group
print('Math: %s = %s' % (replacement, group))
# Split string into tokens:
separator = ' \\-.:,;()\[\]/' + ''.join(spaces) + ''.join(dashes)
tokens = re.findall('[{0}]+|[^{0}]+'.format(separator), s)
# Protect tokens where necessary:
for n, token in enumerate(tokens):
if fragile(token, tokens[n - 1] if n > 0 else None):
tokens[n] = '{%s}' % token.replace('<#', '<$')
print('Protect: %s' % token)
# Join tokens into one string:
s = ''.join(tokens)
# Substitute groups and inline math back:
for n, group in reversed(list(enumerate(groups, 1))):
s = s.replace('<#%d>' % n, group)
s = s.replace('<$%d>' % n, group.strip('{}'))
return s
def escape(s):
"""Replace non-ASCII Unicode characters by LaTeX escape sequences."""
# Add markup to ranges of certain characters:
s = re.sub(superscripts_range, sup, s)
s = re.sub( subscripts_range, sub, s)
s = re.sub( math_range, r'$\1$', s)
# Replace certain Unicode characters by LaTeX commands:
for key, value in accents.items():
s = s.replace(key, value)
for key, value in spaces.items():
s = s.replace(key, value)
for key, value in quotes.items():
s = s.replace(key, value)
for key, value in superscripts.items():
s = s.replace(key, value)
for key, value in subscripts.items():
s = s.replace(key, value)
for key, value in math.items():
s = s.replace(key, value)
for key, value in others.items():
s = s.replace(key, value)
# Remove unnecessary curly braces and commands:
s = re.sub(r'\{(\d)\}', r'\1', s)
s = re.sub(r'_\{(\w)\}', r'_\1', s)
s = re.sub(r'(\$.+?\$)', lambda x: x.group().replace(r'\ensuremath', ''), s)
# Avoid line breaks at equals signs:
s = re.sub(' = ', '~=~', s)
# Remove redundant spaces:
s = re.sub(' +', ' ', s)
return s
# Read RIS input file:
entries = []
with open(ris) as infile:
text = infile.read()
for block in re.split('\n{2,}', text):
entry = dict()
for line in re.split('\n', block):
parts = re.split('\s*-\s*', line, maxsplit=1)
if len(parts) == 2:
key, value = parts
else:
continue
if key == 'TY':
if value == 'JOUR':
entry['TY'] = 'article'
elif value == 'BOOK':
entry['TY'] = 'book'
elif value == 'ELEC':
entry['TY'] = 'electronic'
elif value == 'CHAP':
entry['TY'] = 'incollection'
if value == 'THES':
entry['TY'] = 'phdthesis'
elif value == 'COMP':
entry['TY'] = 'misc'
elif value == 'RPRT':
entry['TY'] = 'techreport'
if key in {'AU', 'A2'} and key in entry:
entry[key] += ' and ' + value
elif key in search_keys:
entry[key] = value
if entry:
# Generate entry identifier from first author and year:
entry['ID'] = entry.get('AU', 'Unknown').split(',', 1)[0]
entry['ID'] = simplify(entry['ID'])
if short_year:
entry['ID'] += entry.get('PY', 'XX')[-2:]
else:
entry['ID'] += entry.get('PY', 'XXXX')
# Protect (and change) capitalization of titles:
entry['TI'] = protect(entry['TI'])
if colcap:
entry['TI'] = re.sub('(: [^A-Z0-9\s]*?[a-z])',
lambda x: x.group().upper(), entry['TI'])
# Remove special spaces from authors and editors:
for key in 'AU', 'A2':
if key in entry:
for space in spaces:
entry[key] = entry[key].replace(space, ' ')
# Distinguish different types of thesis:
if 'M3' in entry:
first = entry.pop('M3')[0].lower()
if first == 'b':
entry['M3'] = "Bachelor's thesis"
elif first == 'm':
entry['M3'] = "Master's thesis"
# Replace non-ASCII Unicode characters by LaTeX escape sequences:
for key in entry:
if key not in ('AR', 'DO', 'UR'):
entry[key] = escape(entry[key])
# Use long journal name (T2) if short journal name (J2) not given:
if 'J2' not in entry and 'T2' in entry:
entry['J2'] = entry.pop('T2')
# Use type "unpublished" for articles with "arXiv:..." as journal:
if 'J2' in entry and entry['J2'].startswith('arXiv'):
entry['TY'] = 'unpublished'
entry['AP'] = 'arXiv'
entry['AR'] = entry.pop('J2').split()[0].split(':')[1]
# Try to extract arXiv identifier or DOI from links:
for key in 'UR', 'L1', 'L2', 'L3', 'L4':
if key in entry:
link = entry[key].lower()
if not 'AR' in entry and 'arxiv' in link:
entry['AP'] = 'arXiv'
entry['AR'] = re.search('(abs|pdf)/(.+?)(.pdf|$)',
entry.pop(key)).group(2)
if not 'DO' in entry and 'doi.org' in link:
entry['DO'] = re.search('doi\.org/(.+?)/?$',
entry.pop(key)).group(1)
# Strip protocol/scheme from URL shown as "howpublished":
if entry.get('TY') == 'misc' and 'UR' in entry:
entry['HP'] = re.sub('^.*?//', '', entry['UR'])
entry['HP'] = entry['HP'].replace('/', r'/\allowbreak ')
# Prefer DOI or e-print identifier over URL:
if 'UR' in entry and ('DO' in entry or 'AR' in entry):
entry.pop('UR')
# Consider journal-specific bibliography style files:
if nature:
if 'DO' in entry:
entry['UR'] = 'https://doi.org/%s' % entry.pop('DO')
elif entry.get('AP') == 'arXiv':
entry['UR'] = 'https://arxiv.org/abs/%s' % entry.pop('AR')
entry.pop('AP')
elif scipost:
if entry.get('AP') == 'arXiv':
entry['AR'] = 'https://arxiv.org/abs/%s' % entry['AR']
entry.pop('AP')
if entry.get('TY') == 'unpublished':
entry['TY'] = 'misc'
entries.append(entry)
def parseInt(string):
"""Parse string as integer, ignoring all non-numbers."""
number = ''.join(c for c in string if 48 <= ord(c) <= 57)
if number:
return int(number)
else:
return 0
# Sort entries:
entries = sorted(entries, key=lambda entry: (
parseInt(entry.get('PY', '')),
entry['ID'],
entry.get('J2', ''),
parseInt(entry.get('VL', '')),
parseInt(entry.get('SP', '')),
entry.get('TI', ''),
))
# Add suffixes to non-unique identifiers:
labels = 'abcdefghijklmnopqrstuvwxyz'
if skip_a:
labels = [''] + [label for label in labels[1:]]
n = 0
while n < len(entries):
n0 = n
while n < len(entries) and entries[n]['ID'] == entries[n0]['ID']:
n += 1
if len(entries[n0:n]) > 1:
for label, entry in zip(labels, entries[n0:n]):
entry['ID'] += label
print('Sublabel: %s' % entry['ID'])
# Write BibTeX output file:
with open(bib, 'w') as outfile:
for entry in entries:
if 'TY' not in entry:
entry['TY'] = 'article'
print('Unknown type (set to "article"): %s' % entry['ID'])
length = max(len(name) for name, key in types[entry['TY']] if key in entry)
form = "%%%ds = {%%s},\n" % length
outfile.write("@%s{%s,\n" % (entry['TY'], entry['ID']))
for name, key in types[entry['TY']]:
if key in entry:
outfile.write(form % (name, entry[key]))
outfile.write("}\n")
|
StarcoderdataPython
|
1776853
|
translate = {
'English': {
'main_title': 'Sahm',
'sub_title': 'Intelligent financial system',
'Insert_Data': 'Insert Data',
'Symbol': 'Symbol',
'Prediction_days': 'Prediction days',
'Date_Range': 'Date Range',
'Select_a_range_of_Date': 'Select a range of Date',
'From': 'From',
'until_now': 'until now',
'Period': 'Period',
'Current_price': 'Current price',
'SVM_Accuracy': 'SVM Accuracy',
'SVM_Prediction': 'SVM Prediction',
'LR_Accuracy': 'LR Accuracy',
'LR_Prediction': 'LR Prediction',
'TrendLines': 'TrendLines',
'SMA': 'SMA',
'MACD': 'MACD',
'Month': 'Month',
'Months': 'Months',
'Languages': 'Languages',
'RSI': 'RSI',
'Deep_Learning': 'Deep Learning',
'Deep_Learning_Prediction': 'Deep Learning Prediction'
},
'Persian': {
'main_title': 'سهم',
'sub_title': 'سامانه هوشمند مالی',
'Insert_Data': 'اضافه کردن اطلاعات',
'Symbol': 'نماد',
'Prediction_days': 'روز های پیشبینی',
'Date_Range': 'محدوده زمانی',
'Select_a_range_of_Date': 'محدوده ای از تاریخ را انتخاب کنید',
'From': 'از',
'until_now': 'تا امروز',
'Period': 'دوره',
'Current_price': 'قیمت فعلی',
'SVM_Accuracy': 'SVM دقت',
'SVM_Prediction': 'SVM پیشبینی',
'LR_Accuracy': 'LR دقت',
'LR_Prediction': 'LR پیشبینی',
'TrendLines': 'خطوط روند',
'SMA': 'میانگین متحرک',
'MACD': 'مکدی',
'Month': 'ماه',
'Months': 'ماه ها',
'Languages': 'زبان ها',
'RSI': 'آراسآی',
'Deep_Learning': 'یادگیری عمیق',
'Deep_Learning_Prediction': 'پیشبینی یادگیری عمیق'
}
}
|
StarcoderdataPython
|
12858087
|
<reponame>kingspp/epyodbc<gh_stars>0
# -*- coding: utf-8 -*-
"""
| **@created on:** 9/4/20,
| **@author:** prathyushsp,
| **@version:** v0.0.1
|
| **Description:**
|
|
| **Sphinx Documentation Status:**
"""
from abc import ABCMeta, abstractmethod
import json
class BaseClass(metaclass=ABCMeta):
@abstractmethod
def pretty(self):
pass
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=2)
|
StarcoderdataPython
|
330715
|
# -*- coding: utf-8 -*-
# @Time : 2019/5/30 14:49
# @Author : <EMAIL>
# @Description :
# @File : orcl2Mongo.py
from fetchData import get_gps_data
from pymongo import MongoClient
from datetime import datetime, timedelta
from time import clock
def debug_time(func):
def wrapper(*args, **kwargs):
bt = clock()
a = func(*args, **kwargs)
et = clock()
print "mongo.py", func.__name__, "cost", round(et - bt, 2), "secs"
return a
return wrapper
@debug_time
def trans2mongo():
bt = datetime(2018, 5, 1, 1)
et = bt + timedelta(hours=4)
trace_dict = get_gps_data(True, bt, et)
client = MongoClient("mongodb://192.168.11.88:27017/")
db = client['taxi']
col = db['gps']
col.delete_many({})
data_list = []
for veh, trace in trace_dict.items():
for data in trace:
data = data.__dict__
stime = data['stime'].strftime("%Y-%m-%d %H:%M:%S")
data['stime'] = stime
data_list.append(data)
col.insert_many(data_list)
@debug_time
def get_mongodb():
client = MongoClient("mongodb://192.168.11.88:27017/")
db = client['taxi']
col = db['gps']
data_list = []
for data in col.find():
data_list.append(data)
print len(data_list)
return data_list
get_mongodb()
|
StarcoderdataPython
|
3365038
|
tx1 = 139
tx2 = 187
ty1 = -148
ty2 = -89
min_dx = 17
max_dx = 188
min_dy = -148
max_dy = 147
max_y = 0
solutions = []
for dx in range(min_dx, max_dx + 1):
for dy in range(min_dy, max_dy + 1):
x = 0
y = 0
ndx = dx
ndy = dy
while x <= tx2 and y >= ty1:
x = x + ndx
y = y + ndy
if y > max_y:
max_y = y
if ndx > 0:
ndx = ndx - 1
ndy = ndy - 1
if x >= tx1 and x <= tx2 and y >= ty1 and y <= ty2:
solutions.append((dx, dy))
break
print("Part 1:", max_y)
print("Part 2:", len(solutions))
|
StarcoderdataPython
|
1663289
|
import random #Imported random Library
print("------------------------------------------INSTRUCTIONS------------------------------------------ \n1. Choose ODD or EVEN \n2. Enter your input for Toss between 1 to 10 \n3. If you win the toss, Opt Bat or Ball \n4. If Number matches then it's OUT \n------------------------------------------------------------------------------------------------")
def playagain(): #Playagain
print("------------------------------------------------------------------------------------------------ \n1. Enter \"Yes\" to Play again. \n2. Enter \"No\" to Exit. \n------------------------------------------------------------------------------------------------ ")
PLAYAGAIN = input('Play Again..?: ')
if PLAYAGAIN.upper() == 'YES':
main()
elif PLAYAGAIN.upper() == 'NO':
print("------------------------------------------------------------------------------------------------ \nThanks for playing..!! \nSee you soon.. \n-------------------------------------------------------------------------------------------------")
exit()
else:
print('------Invalid input------\n-----Please try again-----')
playagain()
def main():#Start of the game
def firstbatting(): #If user or AI won Toss and opted to bat first
print("You\'re all set Bat now..")
score = 0
while (True):
AI = random.randint(1, 10)
MINE = int(input("Bat: ")) #User Batting Runs Input
if MINE > 10:
print('----- INVALID INPUT ------ \n----- GAME RESTARTING -----')
toss()
print("AI Bowls: ", AI) #AI Bowling Input
score = score + MINE #Score Count
if (AI == MINE): #Out
print("Out!!!..Your Innings comes to an end..")
print("Score: ", score)
break
print('AI require ', score + 1, 'runs to win..!!') #Target
print('AI is ready to chase down the target..!!')
ai_score = 0
while (True):
AI = random.randint(1, 10)
MINE = int(input("Bowl: "))#User Bowling input
if MINE > 10:
print('----- INVALID INPUT ------ \n----- GAME RESTARTING -----')
toss()
print("AI Bats: ", AI) #AI Batting Input
ai_score = ai_score + AI
if ai_score > score:
print("AI Score", ai_score)
print('Hard Luck Buddy, YOU LOSE..!! AI sealed the WIN with great knock')
break
if (AI == MINE):
print("Gone...!! You dismissed AI...Out...!!")
print("AI Score: ", ai_score)
if score > ai_score:
print('Superb Victory..Won by ', ((score)-(ai_score)), 'runs, Congratulations..!!')
else:
print('----- MATCH TIED -----')
break
def secondbatting(): #If user or AI won Toss and opted to bowl first
print('AI is ready to start the innings..!!')
ai_score = 0
while (True):
AI = random.randint(1, 10)
MINE = int(input("Bowl: ")) #User Bowling Input
if MINE > 10:
print('----- INVALID INPUT ------ \n----- GAME RESTARTING -----')
toss()
print("AI Bats: ", AI) #AI Batting
ai_score = ai_score + AI
if (AI == MINE): #Out
print("Gone...!! You dismissed AI...Out...!!")
print("AI Score: ", ai_score)
break
print('AI is ready to bowl, Most exciting match coming soon.. ALL THE BEST...')
score = 0
while (True):
AI = random.randint(1, 10)
MINE = int(input("Bat: ")) #User Batting Input
if MINE > 10:
print('----- INVALID INPUT ------ \n----- GAME RESTARTING -----')
toss()
print("AI Bowls: ", AI)
score = score + MINE
if ai_score < score:
print("Score", score)
print('OMG..!! You Win..What a win that is..!! Chased down the target perfectly..Unbelievable knock..')
break
if (AI == MINE):
print("Gone...!! AI dismissed you...Out...!!")
print("Score", score)
if ai_score > score:
print('Hard Luck Buddy..!! AI sealed the win by',(ai_score - score) ,'runs...A spectacular performance..')
else:
print('----- TIE MATCH -----')
break
def toss():
TOSS = input('Toss time..What you want \"ODD or EVEN \": ')
My_input = int(input('Please, Enter any number between 1 to 10: '))
if My_input > 10:
print('----- INVALID INPUT ------ \n----- GAME RESTARTING -----')
toss()
AI_input = random.randint(1, 10)
print('Input of AI: ', AI_input)
if TOSS.upper() == 'EVEN':
if ((My_input + AI_input) % 2) == 0: #User Toss even
print('Great..!!, You won the Toss..What you wanna choose ?')
                YOURTOSS = input('BAT or BOWL: ')
if YOURTOSS.upper() == 'BAT':
firstbatting()
elif YOURTOSS.upper() == 'BOWL':
secondbatting()
else:
print('----- INVALID INPUT ------ \n----- GAME RESTARTING -----') #Invalid
toss()
else:
AITOSS = random.choice(['bat', 'bowl'])
if AITOSS == 'bat':
print('AI opted Bat first..')
secondbatting()
elif AITOSS == 'bowl':
print('AI opted Bowl first..')
firstbatting()
elif TOSS.upper() == 'ODD':
if ((My_input + AI_input) % 2) == 0:
AITOSS = random.choice(['bat', 'bowl'])
if AITOSS == 'bat':
print('AI opted Bat first..')
secondbatting()
elif AITOSS == 'bowl':
print('AI opted Bowl first..')
firstbatting()
else:
print('Great..!!, You won the Toss..What you wanna choose ?')
YOURTOSS = input('BAT or BOWL: ')
if YOURTOSS.upper() == 'BAT':
firstbatting()
elif YOURTOSS.upper() == 'BOWL':
secondbatting()
else:
print('----- INVALID INPUT ------ \n----- GAME RESTARTING -----')
toss()
else:
print('----- INVALID INPUT ------ \n----- GAME RESTARTING -----')
toss()
toss()
playagain()
main()
|
StarcoderdataPython
|
9735094
|
<reponame>in-rolls/indian-politician-bios
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import traceback
import argparse
import logging
import time
import re
from collections import defaultdict
import requests
from bs4 import BeautifulSoup
from csv import DictWriter
logging.getLogger("requests").setLevel(logging.WARNING)
def setup_logger():
""" Set up logging
"""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='india_mps_women.log',
filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def get_html(url):
# FIXME: should have maximum retry count
while True:
try:
r = requests.get(url)
return r
except Exception as e:
logging.error('{0}'.format(e))
time.sleep(5)
def get_state_assemblies():
links = {}
r = get_html('http://www.myneta.info/')
soup = BeautifulSoup(r.text, "lxml")
for div in soup.find_all('div', {'class': 'item'}):
for a in div.find_all('a'):
href = a['href']
m = re.match('.*state_assembly\.php\?state\=(.*)', href)
if m:
state = m.group(1)
links[state] = href
return links
def get_elections(url):
links = {}
r = get_html(url)
soup = BeautifulSoup(r.text, "lxml")
for div in soup.select('h3.title.yellow'):
election = div.text
for a in div.parent.select('a'):
if a.text.lower().strip() == 'all candidates':
href = a['href']
links[election] = href
return links
def get_nation_elections(url):
links = {}
r = get_html(url)
soup = BeautifulSoup(r.text, "lxml")
for div in soup.select('h3.title.blue'):
election = div.text
if election == 'Lok Sabha Election':
for div2 in div.parent.select('div.item'):
election_year = div2.contents[0].strip()
for a in div2.select('a'):
if a.text.lower().strip() == 'all candidates':
href = a['href']
links[election_year] = href
break
return links
def get_local_elections(url):
links = {}
r = get_html(url)
soup = BeautifulSoup(r.text, "lxml")
for div in soup.select('h3.title.blue'):
election = div.text
if election == 'Local Body Elections':
for div2 in div.parent.select('div.item'):
election_year = div2.contents[0].strip()
for a in div2.select('a'):
if a.text.lower().strip() == 'all candidates':
href = a['href']
links[election_year] = href
break
return links
def get_women_candidates(url):
cans = []
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
a = soup.find_all('a', href=re.compile('.*women_candidate.*'))
if len(a):
m = re.match(r'.*=\s+(\d+)\s+.*', a[0].text)
if m:
total = int(m.group(1))
else:
total = 0
women_url = url + a[0].attrs['href']
r = requests.get(women_url)
soup = BeautifulSoup(r.text, 'html.parser')
count = 0
for a in soup.find_all('a', href=re.compile(r'.*candidate_id=.*')):
pol_url = url + a.attrs['href']
cans.append({'politician_url': pol_url})
count += 1
logging.info("Count = {0}, Total = {1}".format(count, total))
        if count != total:
            raise ValueError("Women candidate count mismatch: expected {0}, got {1}".format(total, count))
return cans
if __name__ == "__main__":
setup_logger()
parser = argparse.ArgumentParser(description='India MPs Women candidates to CSV')
parser.add_argument('-o', '--output', default='output-women.csv',
help='Output CSV file name')
parser.add_argument('-t', '--type', default='all',
help='Type (all|state|nation|local)')
parser.add_argument('--no-header', dest='header', action='store_false',
help='Output without header at the first row')
parser.set_defaults(header=True)
args = parser.parse_args()
logging.info('Output file: {0}'.format(args.output))
of = open(args.output, 'wb')
headers = ['politician_url']
writer = DictWriter(of, fieldnames=headers)
if args.header:
writer.writeheader()
count = 0
if args.type in ['all', 'nation']:
# Get Nation
elections = get_nation_elections('http://www.myneta.info/')
for e in sorted(elections):
logging.info(e)
year = e.split()[-1]
logging.info("Year: {0}".format(year))
url = elections[e]
cans = get_women_candidates(url)
writer.writerows(cans)
count += len(cans)
#break
if args.type in ['all', 'local']:
# Get Local
elections = get_local_elections('http://www.myneta.info/')
for e in sorted(elections):
            logging.info(e)
year = e.split()[-1]
logging.info("Year: {0}".format(year))
url = elections[e]
cans = get_women_candidates(url)
writer.writerows(cans)
count += len(cans)
#break
if args.type in ['all', 'state']:
# Get state
states = get_state_assemblies()
for s in sorted(states):
logging.info("State: '{0}'".format(s))
href = states[s]
elections = get_elections(href)
for e in sorted(elections):
year = e.split()[-1]
logging.info("Year: {0}".format(year))
url = elections[e]
cans = get_women_candidates(url)
writer.writerows(cans)
count += len(cans)
#break
#break
of.close()
logging.info("Total: {0}".format(count))
|
StarcoderdataPython
|
366878
|
import argparse
import json
import os
from glob import glob
from tqdm import tqdm
def convert_annotations_to_one_json_file_per_image(dataset_directory: str):
if len(glob(f'{dataset_directory}/**/dataset.json', recursive=True)) == 0:
print(f"Could not find json files in {dataset_directory} directory.")
for dataset in tqdm(glob(f'{dataset_directory}/**/dataset.json', recursive=True), desc="Converting annotations"):
with open(dataset, "r") as file:
dataset_annotations = json.load(file)
root_directory = dataset_annotations["root_dir"]
for name, source in dataset_annotations["sources"].items():
source_directory = source["root_dir"]
engraving = source["type"]
for page in source["pages"]:
system_measures = []
width = page["width"]
height = page["height"]
for measure in page["annotations"]["measures"]:
left = int(measure["bbox"]["x"])
top = int(measure["bbox"]["y"])
right = int(measure["bbox"]["x"] + measure["bbox"]["width"])
bottom = int(measure["bbox"]["y"] + measure["bbox"]["height"])
data = {'left': left, 'top': top, 'right': right, 'bottom': bottom}
system_measures.append(data)
# Currently, the dataset only has system measure annotation, so we leave the other two types empty
stave_measures = []
staves = []
json_filename = os.path.splitext(page["image"])[0] + ".json"
json_path = os.path.join(dataset_directory, root_directory, source_directory, json_filename)
with open(json_path, 'w') as file:
json.dump({'width': width, 'height': height, 'engraving': engraving, 'system_measures': system_measures,
'stave_measures': stave_measures, 'staves': staves}, file, indent=4)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Prepare single-file annotations. Searches the given directory '
                                                 'recursively for dataset.json files that will be extracted into '
                                                 'plain json annotation files, one file per image.')
parser.add_argument("--dataset_directory", type=str, default="data/weber",
help="The directory, where the extracted dataset resides")
flags = parser.parse_args()
convert_annotations_to_one_json_file_per_image(flags.dataset_directory)
|
StarcoderdataPython
|
3438139
|
<filename>guillotina_volto/directives.py
from guillotina.directives import Directive
class MetadataDictMergeDirective(Directive):
"""Store a dict value in the tagged value under the key.
"""
key: str
def store(self, tags, value):
tags.setdefault(self.key, {})
for key in value.keys():
if key in tags[self.key]:
tags[self.key][key].append(value[key])
else:
tags[self.key][key] = [value[key]]
class fieldset_field(MetadataDictMergeDirective): # noqa: N801
"""
Directive used to set fieldset attributes.
"""
key = "guillotina_volto.directives.fieldset"
def factory(self, name, fieldset):
return {fieldset: name}
def merged_tagged_value_dict_merged(schema, name):
"""Look up the tagged value 'name' in schema and all its bases, assuming
that the value under 'name' is a dict. Return a dict that consists of
all dict items, with those from more-specific interfaces overriding those
from more-general ones.
"""
tv = {}
for iface in reversed(schema.__iro__):
value = iface.queryTaggedValue(name, {})
for key, item in value.items():
if key in tv:
tv[key].extend(item)
tv[key] = list(set(tv[key]))
else:
tv[key] = item.copy()
return tv
fieldset = fieldset_field # b/w compat
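# Illustrative usage sketch (hypothetical; assumes a guillotina content schema --
# the interface and field names below are assumptions, not part of this module):
#
#   from zope.interface import Interface
#   from guillotina import schema
#
#   class IArticle(Interface):
#       fieldset_field("summary", "default")
#       summary = schema.Text(title="Summary", required=False)
#
#   # merged_tagged_value_dict_merged(IArticle, fieldset_field.key) would then
#   # return {"default": ["summary"]}, merging values declared on base interfaces.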
|
StarcoderdataPython
|
1680568
|
<gh_stars>0
#!/usr/bin/env python3
import http
import database
import os
import re
database = database.Database()
err = ""
redirect = ""
sql = ""
show_form = ""
nameList = []
varList = []
varLength = []
print("Content-Type: text/html\r\n\r\n");
redirect = database.check_cookie(os.environ)
if os.environ['REQUEST_METHOD'] == 'POST':
#Selects all column names
if 'table' in http.post:
sql = "SHOW columns FROM %s" % http.post['table'].value
database.cur.execute(sql)
show_form += """<form action="edit.py" method="POST">"""
if database.cur.rowcount > 0:
query = database.cur.fetchall()
#filters out primary keys and timestamps and
#hashes and salts
#Decides the correct input type for each column
for row in query:
name = [row[0]]
if name[0].find('pass') >= 0:
continue
if name[0].find('salt') >= 0:
continue
nameList += name
vartype = row[1]
varLength += re.findall(r'\d+', vartype)
if row[3] == 'PRI':
varList += ['hidden']
elif vartype.find('timestamp') >= 0:
varList += ['hidden']
varLength += ['10']
elif vartype.find('int') >= 0:
varList += ['number']
elif vartype.find('char') >= 0:
varList += ['text']
elif vartype.find('varchar') >= 0:
varList += ['text']
elif vartype == 'date':
varList += ['date']
varLength += ['10']
else:
varList += ['text']
varLength += ['10']
#Retrieves data from columns
sql = "SELECT"
for name in nameList:
sql += " %s," % name
sql = sql[:-1]
sql += " FROM %s WHERE %s = %s" % (http.post['table'].value, nameList[0], http.post['edit'].value)
if 'second' in http.post:
sql += " AND WHERE %s = %s" % (nameList[1], http.post['key'].value)
database.cur.execute(sql)
query = database.cur.fetchone();
#Creates header for form
show_form += "<table>"
show_form += "<tr>"
for name in nameList:
show_form += "<th>%s</th>" % name
show_form += "</tr><tr>"
#Generates form
for i in range(len(varList)):
show_form += """<td><input name="%s" type="%s" maxlength="%s" value="%s"></td>""" % (nameList[i], varList[i], varLength[i], query[i])
show_form += """</td><input type="hidden" name="insert_table" value="%s"></td>""" % http.post['table'].value
show_form += """<td><input type="submit" name="makeedit" value="submit"></td>"""
show_form += """</form>"""
show_form += "</table>"
#Updates table with new values
if 'insert_table' in http.post:
sql = "SHOW COLUMNS FROM %s" % http.post['insert_table'].value
database.cur.execute(sql)
query = database.cur.fetchall()
sql = "UPDATE %s SET" % http.post['insert_table'].value
for row in query:
if row[0].find('pass') >= 0:
continue
if row[0].find('salt') >= 0:
continue
if row[3] != 'PRI':
if row[1].find('char') >= 0:
sql += " %s = '%s'," % (row[0], http.post[row[0]].value)
elif row[1].find('date') >= 0:
sql += " %s = '%s'," % (row[0], http.post[row[0]].value)
elif row[1].find('timestamp') >= 0:
pass
else:
sql += " %s = %s," % (row[0], http.post[row[0]].value)
sql = sql[:-1]
sql += " WHERE %s=%s" % (query[0][0], http.post[query[0][0]].value)
if query[1][3] == 'PRI':
sql += " AND %s=%s" % (query[1][0], http.post[query[1][0]].value)
database.cur.execute(sql)
if database.cur.rowcount > 0:
print("Successfully Updated<br>")
else:
print("No changes/Failed to update<br>")
database.close()
print(""" <html>
<head>
%s
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Edit</title>
<style type="text/css">
body
{
color:#444;
padding:0
10px
}
h1,h2,h3{line-height:1.2}
table
{
border-collapse: collapse;
width: 100%%;
}
th, td
{
text-align: left;
padding: 8px;
}
tr:nth-child(even){background-color: #f2f2f2}
th
{
background-color: #4564a8;
color: white;
}
</style>
</head>
</body>
<a href="home.py">Back to Home</a>
%s
</body>
</html>""" % (redirect, show_form))
|
StarcoderdataPython
|
8155735
|
<filename>pipeline/main.py
from __future__ import print_function
from importlib import import_module
class StopProcessing(Exception):
pass
class Pipeline(object):
""" processes nodes passing the results of one to the next """
def __init__(self, nodes=None):
self._nodes = []
if nodes:
self.load(nodes)
def import_function(self, path):
path, last = path.rsplit(".", 1)
return getattr(import_module(path), last)
def load(self, modules):
for module in modules:
self._nodes.append(self.import_function(module))
def process(self, **kwargs):
try:
for node in self._nodes:
result = node(**kwargs)
if result:
kwargs = result
except StopProcessing:
pass
return kwargs
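# Illustrative usage sketch (the step functions below are local stand-ins; in real
# use, load() receives dotted import paths such as "mypkg.steps.normalize", which
# are hypothetical names here):
if __name__ == "__main__":
    def add_one(**kwargs):
        # Each node receives the previous node's result as keyword arguments.
        return {"value": kwargs["value"] + 1}

    def double(**kwargs):
        return {"value": kwargs["value"] * 2}

    demo = Pipeline()
    demo._nodes = [add_one, double]  # bypass load() because these are local functions
    print(demo.process(value=3))     # -> {'value': 8}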
|
StarcoderdataPython
|
12853994
|
<reponame>Robinson04/inoft_vocal_framework<filename>platforms_handlers/dialogflow/request.py
from typing import Optional, List
from pydantic import Field
from pydantic.main import BaseModel
from inoft_vocal_framework.utils.formatters import normalize_intent_name
class Intent(BaseModel):
name: str
displayName: str
class User(BaseModel):
_VERIFICATION_NAME_GUEST = "GUEST"
_VERIFICATION_NAME_VERIFIED = "VERIFIED"
_PERMISSION_UPDATE_TYPE = "UPDATE"
permissions: Optional[list] = None
locale: Optional[str] = None
lastSeen: Optional[str] = None
userStorage: Optional[str] = None
userVerificationStatus: Optional[str] = None
class Payload(BaseModel):
_INPUT_TYPE_OPTION = "OPTION"
user: User = Field(default_factory=User)
class Conversation(BaseModel):
conversationId: str
type: str
conversation: Optional[Conversation] = None
isInSandbox: bool
requestType: str
class InputsCustomList(list):
# todo: make the check that the current device has the capabilities to use an interactive list
class InputItem(BaseModel):
intent: str
rawInputs: list
class ArgumentItemsCustomList(list):
class ArgumentItem(BaseModel):
name: str
textValue: str
rawText: str
def append(self, item: dict) -> None:
if isinstance(item, dict):
argument_item_object = self.ArgumentItem(**item)
super().append(argument_item_object)
def custom_set_from(self, list_object: list) -> None:
for item in list_object:
self.append(item=item)
arguments: Optional[ArgumentItemsCustomList] = Field(default_factory=ArgumentItemsCustomList)
def append(self, item: dict) -> None:
if isinstance(item, dict):
input_item_object = self.InputItem(**item)
super().append(input_item_object)
def custom_set_from(self, list_object: list) -> None:
for item in list_object:
self.append(item=item)
inputs: InputsCustomList = Field(default_factory=InputsCustomList)
class Surface(BaseModel):
capabilities: list = Field(default_factory=list)
surface: Surface = Field(default_factory=Surface)
class AvailableSurfaceItem(BaseModel):
capabilities: list = Field(default_factory=list)
availableSurfaces: List[AvailableSurfaceItem] = Field(default_factory=list)
def get_first_input_of_type(self, type_name: str) -> Optional[dict]:
for input_item in self.inputs:
for argument_item in input_item.arguments:
if argument_item.name == type_name:
return argument_item
return None
class OriginalDetectIntentRequest(BaseModel):
source: str
version: str
payload: Payload
class QueryResult(BaseModel):
queryText: str
action: str
parameters: dict
allRequiredParamsPresent: bool
fulfillmentText: Optional[str] = None
fulfillmentMessages: Optional[List[str]] = None
outputContexts: List[dict]
intent: Intent
intentDetectionConfidence: Optional[int] = None
diagnosticInfo: Optional[dict] = None
LanguageModel: str
class Request(BaseModel):
# General for LaunchRequest, IntentRequest and SessionEndedRequest
responseId: str
queryResult: QueryResult
originalDetectIntentRequest: OriginalDetectIntentRequest
session: str
def is_option_select_request(self) -> bool:
return self.queryResult.queryText == "actions_intent_OPTION"
def get_updates_user_id_if_present(self) -> Optional[str]:
for output_context in self.queryResult.outputContexts:
context_parameters: Optional[dict] = output_context.get('parameters', None)
if context_parameters is not None:
context_parameters_permission: Optional[bool] = context_parameters.get('PERMISSION')
if context_parameters_permission is True:
context_parameters_updates_user_id: Optional[str] = context_parameters.get('UPDATES_USER_ID', None)
if context_parameters_updates_user_id is not None:
return context_parameters_updates_user_id
return None
def selected_option_identifier(self) -> str:
argument_item = self.originalDetectIntentRequest.payload.get_first_input_of_type(self.originalDetectIntentRequest.payload._INPUT_TYPE_OPTION)
if isinstance(argument_item, self.originalDetectIntentRequest.payload.InputsCustomList.InputItem.ArgumentItemsCustomList.ArgumentItem):
return argument_item.textValue
def is_launch_request(self) -> bool:
return self.queryResult.queryText == "GOOGLE_ASSISTANT_WELCOME"
def active_intent_name(self) -> str:
return normalize_intent_name(intent_name=self.queryResult.intent.displayName)
def is_in_intent_names(self, intent_names_list: List[str] or str) -> bool:
intent_name: str = self.active_intent_name()
if isinstance(intent_names_list, list):
return intent_name in [normalize_intent_name(intent_name=name) for name in intent_names_list]
elif isinstance(intent_names_list, str):
return intent_name == normalize_intent_name(intent_name=intent_names_list)
else:
raise Exception(f"intent_names_list type not supported : {type(intent_names_list)}")
    def get_intent_parameter_value(self, parameter_key: str, default=None):
        return self.queryResult.parameters.get(parameter_key, default)
    def is_not_usable(self):
        # The checks that follow are unreachable legacy code from an earlier
        # request model; this method currently always reports the request as usable.
        return False
if self.type is not None and self.type not in [self.LaunchRequestKeyName, self.IntentRequestKeyName, self.SessionEndedRequestKeyName]:
raise Exception(f"The request type '{self.type}' is not None or any of the supported types.")
return False
if (self._type == str()
or self._requestId == str()
or self._timestamp == str()
or self._locale == str()):
return True
else:
return False
def to_dict(self) -> dict:
return self.dict()
|
StarcoderdataPython
|
3223224
|
<reponame>eriktews/canvasapi
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import requests_mock
from canvasapi import Canvas
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestContentExport(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
requires = {
"course": ["get_by_id", "single_content_export"],
"group": ["get_by_id", "single_content_export"],
"user": ["get_by_id", "single_content_export"],
}
register_uris(requires, m)
self.course = self.canvas.get_course(1)
self.group = self.canvas.get_group(1)
self.user = self.canvas.get_user(1)
self.content_export_course = self.course.get_content_export(11)
self.content_export_group = self.group.get_content_export(11)
self.content_export_user = self.user.get_content_export(11)
# __str__()
def test__str__(self, m):
string = str(self.content_export_course)
self.assertIsInstance(string, str)
|
StarcoderdataPython
|
6582357
|
<reponame>dehort/insights-core
import os
import tempfile
import uuid
import insights.client.utilities as util
from insights.client.constants import InsightsConstants as constants
import re
from mock.mock import patch
machine_id = str(uuid.uuid4())
remove_file_content = """
[remove]
foo = bar
potato = pancake
""".strip().encode("utf-8")
def test_display_name():
assert util.determine_hostname(display_name='foo') == 'foo'
def test_determine_hostname():
import socket
hostname = socket.gethostname()
fqdn = socket.getfqdn()
assert util.determine_hostname() in (hostname, fqdn)
assert util.determine_hostname() != 'foo'
def test_get_time():
    time_regex = re.match(r'\d{4}-\d{2}-\d{2}\D\d{2}:\d{2}:\d{2}\.\d+',
                          util.get_time())
assert time_regex.group(0) is not None
def test_write_to_disk():
content = 'boop'
filename = '/tmp/testing'
util.write_to_disk(filename, content=content)
assert os.path.isfile(filename)
with open(filename, 'r') as f:
result = f.read()
assert result == 'boop'
    assert util.write_to_disk(filename, delete=True) is None
def test_generate_machine_id():
orig_dir = constants.insights_ansible_facts_dir
constants.insights_ansible_facts_dir = tempfile.mkdtemp()
constants.insights_ansible_machine_id_file = os.path.join(
constants.insights_ansible_facts_dir, "ansible_machine_id.fact")
    machine_id_regex = re.match(r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}',
                                util.generate_machine_id(destination_file='/tmp/testmachineid'))
assert machine_id_regex.group(0) is not None
with open('/tmp/testmachineid', 'r') as _file:
machine_id = _file.read()
assert util.generate_machine_id(destination_file='/tmp/testmachineid') == machine_id
os.remove('/tmp/testmachineid')
os.remove(constants.insights_ansible_machine_id_file)
os.rmdir(constants.insights_ansible_facts_dir)
constants.insights_ansible_facts_dir = orig_dir
constants.insights_ansible_machine_id_file = os.path.join(
constants.insights_ansible_facts_dir, "ansible_machine_id.fact")
def test_expand_paths():
assert util._expand_paths('/tmp') == ['/tmp']
def test_magic_plan_b():
tf = tempfile.NamedTemporaryFile()
with open(tf.name, 'w') as f:
f.write('testing stuff')
assert util.magic_plan_b(tf.name) == 'text/plain; charset=us-ascii'
def test_run_command_get_output():
cmd = 'echo hello'
assert util.run_command_get_output(cmd) == {'status': 0, 'output': u'hello\n'}
def test_validate_remove_file():
tf = '/tmp/remove.cfg'
with open(tf, 'wb') as f:
f.write(remove_file_content)
assert util.validate_remove_file(remove_file='/tmp/boop') is False
os.chmod(tf, 0o644)
assert util.validate_remove_file(remove_file=tf) is False
os.chmod(tf, 0o600)
assert util.validate_remove_file(remove_file=tf) is not False
# TODO: DRY
@patch('insights.client.utilities.constants.registered_files',
['/tmp/insights-client.registered',
'/tmp/redhat-access-insights.registered'])
@patch('insights.client.utilities.constants.unregistered_files',
['/tmp/insights-client.unregistered',
'/tmp/redhat-access-insights.unregistered'])
def test_write_registered_file():
util.write_registered_file()
for r in constants.registered_files:
assert os.path.isfile(r) is True
for u in constants.unregistered_files:
assert os.path.isfile(u) is False
@patch('insights.client.utilities.constants.registered_files',
['/tmp/insights-client.registered',
'/tmp/redhat-access-insights.registered'])
def test_delete_registered_file():
util.write_registered_file()
util.delete_registered_file()
for r in constants.registered_files:
assert os.path.isfile(r) is False
@patch('insights.client.utilities.constants.registered_files',
['/tmp/insights-client.registered',
'/tmp/redhat-access-insights.registered'])
@patch('insights.client.utilities.constants.unregistered_files',
['/tmp/insights-client.unregistered',
'/tmp/redhat-access-insights.unregistered'])
def test_write_unregistered_file():
util.write_unregistered_file()
for r in constants.registered_files:
assert os.path.isfile(r) is False
for u in constants.unregistered_files:
assert os.path.isfile(u) is True
@patch('insights.client.utilities.constants.unregistered_files',
['/tmp/insights-client.unregistered',
'/tmp/redhat-access-insights.unregistered'])
def test_delete_unregistered_file():
util.write_unregistered_file()
util.delete_unregistered_file()
for u in constants.unregistered_files:
assert os.path.isfile(u) is False
|
StarcoderdataPython
|
3244405
|
"""Tests for backend module."""
import pytest
from unittest import mock
from ghmonitor.backend import (
get_backend_by_name, LoggerBackend, SelinonBackend, InvalidBackendClass
)
@mock.patch('ghmonitor.backend.init_celery')
@mock.patch('ghmonitor.backend.init_selinon')
def test_get_backend_by_name(init_selinon_mock, init_celery_mock):
"""Test get_backend_by_name() function."""
assert init_celery_mock is not None
assert init_selinon_mock is not None
backend = get_backend_by_name('LoggerBackend')
assert isinstance(backend, LoggerBackend)
backend = get_backend_by_name('SelinonBackend')
assert isinstance(backend, SelinonBackend)
def test_get_backend_by_name_invalid():
"""Test get_backend_by_name() with invalid backend class name."""
with pytest.raises(InvalidBackendClass):
get_backend_by_name('InvalidBackendName')
|
StarcoderdataPython
|
9670164
|
<filename>ratracer/simple.py
#%%
import numpy as np
from ratracer import radio
isotropic_rp = lambda a: 1.0
dipole_rp = lambda a: np.abs(np.cos(np.pi/2 * np.sin(a)) / np.cos(a))
constant_r = lambda a: -1.0
w2db = lambda w: 10.0 * np.log10(w)
class Node:
def __init__(self, height, angle=None, rp=None):
self.height = height
self.angle = angle if angle is not None else np.pi/2
self.rp = rp if rp else dipole_rp
def G(self, theta):
return self.rp(self.angle - theta)
class Pathloss:
def __init__(self, freq=860e6):
self.freq = freq
self.wavelen = radio.LIGHT_SPEED / freq
self.K = self.wavelen / (4 * np.pi)
def los(self, distance, node_a, node_p):
d0 = np.sqrt((node_a.height - node_p.height) ** 2 + distance ** 2)
a0 = np.arctan(distance / (node_a.height - node_p.height))
g0 = node_a.G(a0) * node_p.G(a0)
return (self.K*g0/d0)**2
def tworay(self, distance, node_a, node_p, gr=constant_r):
d0 = np.sqrt((node_a.height - node_p.height) ** 2 + distance ** 2)
d1 = np.sqrt((node_a.height + node_p.height) ** 2 + distance ** 2)
a0 = np.arctan(distance / (node_a.height - node_p.height))
a1 = np.arctan(distance / (node_a.height + node_p.height))
g0 = node_a.G(a0) * node_p.G(a0)
g1 = node_a.G(a1) * node_p.G(a1)
gd = gr(a1)
# print('tworay', g0, g1, gd)
return self.K**2 * ((g0/d0)**2 + (g1*gd/d1)**2 +
2*(g0*g1*gd) / (d0*d1) * np.cos((d1-d0)/(2*self.K)))
if __name__ == '__main__':
import sys
if sys.stdin.isatty():
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
try:
from jupyterthemes import jtplot
jtplot.style(theme='monokai', fscale=0.9, figsize=(8,6))
except:
pass
iso_reader = Node(height=5, rp=isotropic_rp)
iso_tag = Node(height=0.5, rp=isotropic_rp)
dip_reader = Node(height=5, angle=np.pi/2)
dip_tag = Node(height=.5, angle=np.pi/2)
pl = Pathloss()
ox = np.linspace(0.1, 20, 1000)
pl_los_iso = w2db(pl.los(ox, iso_reader, iso_tag))
pl_2ray_iso = w2db(pl.tworay(ox, iso_reader, iso_tag))
pl_2ray_dip = w2db(pl.tworay(ox, dip_reader, dip_tag))
# print(w2db(pl.tworay(ox, dip_reader, dip_tag)))
fig = plt.figure()
ax = plt.subplot(111)
plt.plot(ox, pl_los_iso, 'y--', label='FSPL for isotropic antennas')
plt.plot(ox, pl_2ray_iso, 'r', label='2-Ray PL for isotropic antennas')
plt.plot(ox, pl_2ray_dip, 'b', label='2-Ray PL for dipole antennas')
ax.set_ybound(lower=-90, upper=-30)
plt.legend()
plt.show()
#%%
|
StarcoderdataPython
|
11327272
|
<reponame>iammhk/AI_Poet
from lxml import html
import requests
import json
from bs4 import BeautifulSoup
import sys
global json_content
import sqlite3
conn = sqlite3.connect("khusrau.db")
cur = conn.cursor()
top100=["/ghazals/koii-din-gar-zindagaanii-aur-hai-mirza-ghalib-ghazals"]
url="https://rekhta.org"
for m in range(0,len(top100)):
net_url=url+top100[m]
page = requests.get(net_url)
soup = BeautifulSoup(page.content, 'lxml')
#tree = html.fromstring(page.content)
meaning_api="https://world.rekhta.org/api/v2/shayari/GetWordMeaning?lang=1"
x=0
poem_title=soup.find("title").get_text()
print(poem_title)
poem_author = soup.find(class_="ghazalAuthor")
print(poem_author.get_text())
word_id=[]
desc=[]
sentence1=""
sentence0=""
translate=""
list1=""
list0=""
poem_raw = soup.find(class_="pMC")
poem_couplet = poem_raw.find_all(class_="w")
cur.execute("INSERT INTO ghazal VALUES (?, ?, ?, ?, ?, ?);", (poem_title,poem_author.get_text(),net_url,None,None,None))
cur.execute("INSERT INTO author VALUES (?, ?, ?, ?, ?);", (poem_author.get_text(), None, None, None, None))
for y in range(0,len(poem_couplet)): #finds sher
print(y)
couplet = poem_couplet[y]
poem_line = couplet.find(class_="c")
#for g in range(0,len(poem_line)):
line = poem_line.find_all("p")
for h in range(0, len(line)): #finds line
single_line=line[h]
#print(h)
poem_text = single_line.find_all("span")
for x in range(0, len(poem_text)): #finds words
poem_word=poem_text[x]
word=poem_word.get_text()
word_id=poem_word['data-m']
#print(word_id)
l = {'Word':word_id}
url = 'https://world.rekhta.org/api/v2/shayari/GetWordMeaning?lang=1'
json_content = requests.post(url, l)
word_json=json_content.json()
#word=word_json['R']
#eng=word['E']
word_json = json.dumps(word_json)
                if h == 0:
sentence0 += word
sentence0 += " "
list0 += word_id
list0 += " "
else:
sentence1+=word
sentence1+=" "
list1 += word_id
list1 += " "
#print(word, end=' ')
cur.execute("INSERT INTO words VALUES (?, ?, ?);", (word_id, word, word_json)) # word inserter
#conn.commit()
cur.execute("INSERT INTO sher2ghazal VALUES (?, ?, ?);", (poem_title,y,sentence0)) #sher2ghazal inserter
trans_couplet = couplet.find(class_="t")
if (trans_couplet is not None):
trans = trans_couplet.find_all("p")
for h in range(0, len(line)):
single_line = trans[h]
# print(h)
poem_text = single_line.find_all("span")
for x in range(0, len(poem_text)):
poem_word = poem_text[x]
desc = poem_word.get_text()
translate += desc
translate += " "
translate+="+"
print(sentence0)
print(list0)
print(sentence1)
print(list1)
cur.execute("INSERT INTO sher VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);",(sentence0, sentence0, list0, sentence1, list1, poem_author.get_text(), None, translate, None, None)) #sher inserter
sentence1 = ""
sentence0 = ""
translate = ""
list0 = ""
list1 = ""
page=None
conn.commit()
conn.close()
#meaning_url= meaning_api + word_id + "&lang=0"
#print(meaning_url)
#meaning_page = requests.get(meaning_url)
#json_content= meaning_page.json()
#print(json_content['Hi'], end=' ')
#f.writerow([json_content['En'],json_content['Hi'],json_content['Ur'],json_content['Enm'],json_content['Him'],json_content['Urm']])
#f = csv.writer(open("test.csv"))
# with open('poem.csv', 'w') as csvfile:
# fieldnames = ["En", "Hi", "Ur", "Enm", "Him" , "Urm"]
#f = csv.DictWriter(csvfile, fieldnames=fieldnames)
# Write CSV Header, If you dont need that, remove this line
#f.writerow(["En", "Hi", "Ur", "Enm", "Him" , "Urm"])
#for json_content in json_content:
#f.writerow([json_content['En'],json_content['Hi'],json_content['Ur'],json_content['Enm'],json_content['Him'],json_content['Urm']])
#data= json.loads(json_content.decode("utf8"))
#print(data)
#word_meaning= data['En']['Hi']['Ur']['Enm']
#print(data)
#print(word_id)
#soup = BeautifulSoup(page.content)
#print(soup.find_all('li'))
|
StarcoderdataPython
|
338666
|
<gh_stars>0
# Translation API - https://azure.microsoft.com/en-gb/services/cognitive-services/translator-text-api/
# Example Request - http://localhost:3001/?lang=fr&text=hello
import http.server
import socketserver
import urllib.parse
import urllib.request
import ssl
from xml.etree import ElementTree
import http.client, urllib.request, urllib.parse, json
from helpers import *
ssl._create_default_https_context = ssl._create_unverified_context
subscription_key = get_environment_variable('TRANSLATOR_TEXT_KEY')
class Handler(http.server.SimpleHTTPRequestHandler):
__data = "";
# This helper function formats parts of our response properly
def write(self,text):
self.wfile.write(str.encode(text))
def set_headers(self):
self.send_response(200) # 200 means everything is OK
self.send_header('Content-type', 'text/html') #Our response contains text
self.end_headers()
def do_GET(self):
self.set_headers()
self.write('Translation server is listening.')
"""
Handles the GET request, sends to translate method and returns a response formatted
for Chatfuel
"""
def do_POST(self):
print('Responding to POST request')
print(self.path)
description = self.translate()
self.send_response(200)
self.send_header("Content-type", "text/json")
self.end_headers()
self.wfile.write(str.encode("{\"user_id\" : \"" + self.get_param_from_url("user_id") +"\","))
self.wfile.write(str.encode("\"bot_id\" : \"" + self.get_param_from_url("bot_id") +"\","))
self.wfile.write(str.encode("\"module_id\" : \"" + self.get_param_from_url("module_id") +"\","))
self.wfile.write(str.encode("\"message\" : \"" + str(description[0]['translations'][0]['text']) +"\"}"))
return
"""
Gets the image path from the url and makes an api request, parses
JSON response and returns
"""
def translate(self):
print("We're doing translation")
text = self.get_param_from_url("incoming_message")
lang = "es"
returned = self.make_api_request(text, lang)
return returned
"""
Method to send an API Request to Azure
"""
def make_api_request(self, text, lang):
host = 'api.cognitive.microsofttranslator.com'
path = '/translate?api-version=3.0'
params = '&to=' + lang
headers = {'Ocp-Apim-Subscription-Key': subscription_key, 'Content-Type' : 'application/json'}
conn = http.client.HTTPSConnection(host)
body = "[{'Text':'" + text + "'}]" #urllib.parse.quote(text)
conn.request ("POST", path + params, body, headers)
response = conn.getresponse ()
data = response.read()
parsed = json.loads(data)
conn.close()
return parsed
def get_param_from_url(self, param_name):
queryStarts = self.path.find("?") + 1
if self.__data == "":
self.__data = self.rfile.read(int(self.headers['Content-Length'])).decode("utf-8")
from urllib.parse import parse_qs
parsed = parse_qs(self.path[queryStarts:])
parsed = parse_qs(self.__data)
return parsed[param_name][0]
# Get the server ready and start listening
port = 3001
httpd = socketserver.TCPServer(('', port), Handler)
print('The server is now listening on port ' + str(port) + '. Visit localhost:' + str(port) +' in your browser!')
httpd.serve_forever()
|
StarcoderdataPython
|
9757741
|
<reponame>panoptes-organization/panoptes
from flask import jsonify, request, Response
from panoptes.server_utilities.db_queries import get_db_workflows_by_id, get_db_workflows, get_db_jobs, get_db_job_by_id, delete_db_wf, get_db_workflows_by_status, delete_whole_db, get_db_table_is_empty, rename_db_wf, rename_db_job
from . import routes
'''
/api/workflows
/api/workflow/<workflow_id>
/api/workflow/<workflow_id>/jobs
/api/workflow<workflow_id>/job/<job_id>
/api/workflows/all
'''
@routes.route('/api/service-info', methods=['GET'])
def get_service_info():
return jsonify({'status': "running"}), 200
@routes.route('/api/workflows', methods=['GET'])
def get_workflows():
workflows = [wf.get_workflow() for wf in get_db_workflows()]
return jsonify({'workflows': workflows,
'count': len(workflows)}), 200
@routes.route('/api/workflow/<workflow_id>', methods=['GET'])
def get_workflow_by_id(workflow_id):
workflows = get_db_workflows_by_id(workflow_id)
if workflows:
return jsonify({'workflow': workflows.get_workflow()}), 200
else:
response = Response(status=404)
return response
@routes.route('/api/workflow/<workflow_id>/jobs', methods=['GET'])
def get_jobs_of_workflow(workflow_id):
workflows = get_db_workflows_by_id(workflow_id)
if workflows:
jobs = [j.get_job_json() for j in get_db_jobs(workflows.id)]
return jsonify({'jobs': jobs,
'count': len(jobs)}), 200
else:
response = Response(status=404)
return response
def get_jobs(wf_id):
return [j.get_job_json() for j in get_db_jobs(wf_id)]
def get_job(wf_id, job_id):
return get_db_job_by_id(wf_id, job_id).get_job_json()
@routes.route('/api/workflow/<workflow_id>/job/<job_id>', methods=['GET'])
def get_job_of_workflow(workflow_id, job_id):
workflows = get_db_workflows_by_id(workflow_id)
if workflows:
job = get_db_job_by_id(workflows.id, job_id)
if job:
return jsonify({'jobs': job.get_job_json()}), 200
else:
response = Response(status=404)
return response
else:
response = Response(status=404)
return response
@routes.route('/api/workflow/<workflow_id>', methods=['PUT'])
def rename_workflow_by_id(workflow_id):
data = request.json
if data is None or 'name' not in data or len(data['name']) < 1 or data['name'].isspace():
response = Response(status=400)
return response
workflows = get_db_workflows_by_id(workflow_id)
if workflows:
if rename_db_wf(workflow_id, data['name']):
return jsonify({'workflow': workflows.get_workflow()}), 200
else:
return jsonify({'msg': 'Database error'}), 500
else:
response = Response(status=404)
return response
@routes.route('/api/workflow/<workflow_id>', methods=['DELETE'])
def set_db_delete(workflow_id):
if(get_db_workflows_by_id(workflow_id) is None):
response = Response(status=404)
return response
elif(get_db_workflows_by_status(workflow_id) == 'Running'):
return jsonify({'msg': 'You cannot delete Running Workflow '}), 403
else:
delete = delete_db_wf(workflow_id)
if delete:
response = Response(status=204)
return response
else:
return jsonify({'msg': 'The server is unable to store the '
'representation needed to complete the delete request.'}), 507
@routes.route('/api/workflows/all', methods=['DELETE'])
def set_whole_db_delete():
if get_db_table_is_empty('Workflows'):
response = Response(status=410)
return response
else:
delete = delete_whole_db()
if delete:
return jsonify({'msg': 'Database clean up is complete'}), 200
else:
return jsonify({'msg': 'The server is unable to store the '
'representation needed to complete the delete request.'}), 507
|
StarcoderdataPython
|
12810752
|
from main import normaliseAndScale, readData, binariseLabels, getTrueLabels, buildDataMatrix, removePacketsAfterChange, printMetrics
from voting import Voting
import pickle
from sklearn import metrics
from matplotlib import pyplot as plt
import matplotlib2tikz
# models = {
# "boost": dict(),
# "logistic": dict(),
# "SVM_linear": dict(),
# "SVM_RBF": dict(),
# "SVM_poly": dict(),
# "SVM_sigmoid": dict()
# }
test_set = 2
number_of_models = {
"boost": 4,
"logistic": 4,
}
print "Test set:", (test_set+1)
print "Used models:", number_of_models
test_data = normaliseAndScale(readData("..\\data\\test5_results_" + str(test_set + 1) + "_all.csv"))
test_labels = readData("..\\data\\test5_targets_" + str(test_set + 1) + ".csv")
labels = getTrueLabels(test_labels)
binarised_labels = [binariseLabels(labels, target) for target in [1,2,3]]
data_matrix = [buildDataMatrix(test_data, target) for target in [0,1,2]]
for target in [0,1,2]:
data_matrix[target], binarised_labels[target] =\
removePacketsAfterChange(data_matrix[target], binarised_labels[target], test_labels, 256)
print "The data has been read in!"
estimators = {
1: [],
2: [],
3: []
}
for target in [1, 2, 3]:
for dataset in [0, 1, 2]:
if dataset != test_set:
for model_name in number_of_models:
for k in range(number_of_models[model_name]):
file_name = model_name + "_dataset_" + str(dataset) + "_target_" + str(target) + "_" + str(k) + ".pkl"
estimators[target].append((file_name, pickle.load(file('../pickle/models/' + file_name))))
type = "soft"
voters = {
1: Voting(estimators[1], voting=type), #, weights={True: 0.8, False: 0.2}
2: Voting(estimators[2], voting=type),
3: Voting(estimators[3], voting=type)
}
# voters = { # Estimators used in DM project
# 1: Voting(estimators[1][8:10]+estimators[1][23:24], voting=type), #, weights={True: 0.8, False: 0.2}
# 2: Voting(estimators[2][0:8]+estimators[2][21:22], voting=type),
# 3: Voting(estimators[3][0:8]+estimators[3][23:24], voting=type)
# }
print "Models have been read in!"
for target in [1, 2, 3]:
decision = voters[target].transform(data_matrix[target-1])
if type == "soft":
decision = sum(decision).transpose()[0]
elif type == "hard":
decision = sum(decision.transpose())
fpr, tpr, threshold = metrics.roc_curve(binarised_labels[target-1], decision, pos_label=True)
# printMetrics(fpr, tpr, threshold, 0.99, decision[0], binarised_labels[target-1])
# printMetrics(fpr, tpr, threshold, 1, decision[0], binarised_labels[target-1])
prediction = printMetrics(fpr, tpr, threshold, 0.01, decision, binarised_labels[target-1])
printMetrics(fpr, tpr, threshold, 0, decision, binarised_labels[target-1])
plt.subplot(2, 2, 1)
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.plot((0, 1), (0, 1))
plt.subplot(2, 2, target+1)
axes = plt.gca()
axes.set_ylim([-0.1, 1.1])
plt.plot(map(lambda x: x, prediction))
plt.plot(binarised_labels[target-1], "--")
matplotlib2tikz.save("roc.tex")
plt.show()
|
StarcoderdataPython
|
5064317
|
<gh_stars>0
import datetime
import requests
class Skywalking(object):
def __init__(self, base_url):
self.base_url = base_url
self.summary_result = {}
"""
{
"service_name": {
"count": 1,
"endpoint_name":{
"count": 1,
"duration_min": 1000,
"duration_max": 1000,
"duration_avg": 1000,
"trace_id": "c105d6f8909d454497d41ee6a042bb52.151.16327400972690297"
}
}
}
        When displaying the summary:
        first list services ordered by their "count" value at the service level,
        then, within each service, list endpoints ordered by their "count" value.
"""
def do_query(self, data):
"""
核心执行查询
:param data:
:return:
"""
headers = {'content-type': 'application/json'}
r = requests.post(self.base_url, data=data, headers=headers)
return r.json()
def push_2_summary_result(self, service_code, endpoint_name, duration, trace_id):
if not self.summary_result.__contains__(service_code):
self.summary_result[service_code] = {}
self.summary_result[service_code]["count"] = 1
else:
self.summary_result[service_code]["count"] += 1
if not self.summary_result[service_code].__contains__(endpoint_name):
self.summary_result[service_code][endpoint_name] = {}
self.summary_result[service_code][endpoint_name]["count"] = 1
self.summary_result[service_code][endpoint_name]["duration_min"] = duration
self.summary_result[service_code][endpoint_name]["duration_max"] = duration
self.summary_result[service_code][endpoint_name]["duration_avg"] = duration
self.summary_result[service_code][endpoint_name]["trace_id"] = trace_id
else:
self.summary_result[service_code][endpoint_name]["count"] += 1
if self.summary_result[service_code][endpoint_name]["duration_min"] > duration:
self.summary_result[service_code][endpoint_name]["duration_min"] = duration
if self.summary_result[service_code][endpoint_name]["duration_max"] < duration:
self.summary_result[service_code][endpoint_name]["duration_max"] = duration
duration_avg = int((self.summary_result[service_code][endpoint_name]["duration_avg"] + duration) / 2)
self.summary_result[service_code][endpoint_name]["duration_avg"] = duration_avg
def query_service_code__by_trace_id(self, trace_id):
"""
        Query the service code by trace_id.
:param trace_id:
:return:
"""
query = """
{"query":"query queryTrace($traceId: ID!) {
trace: queryTrace(traceId: $traceId) {
spans {
traceId
segmentId
spanId
parentSpanId
refs {
traceId
parentSegmentId
parentSpanId
type
}
serviceCode
serviceInstanceName
startTime
endTime
endpointName
type
peer
component
isError
layer
tags {
key
value
}
logs {
time
data {
key
value
}
} } }
}"
,"variables":{"traceId":"%s"}}
""" % trace_id
query_result = self.do_query(query)
service_code = query_result["data"]["trace"]["spans"][0]["serviceCode"]
return service_code
def query_slow_endpoints(self, query_time_start, query_time_end, duration_threshold):
        # Query all slow endpoints
query = """{"query":"query queryTraces($condition: TraceQueryCondition) {
data: queryBasicTraces(condition: $condition) {
traces {
key: segmentId
endpointNames
duration
start
isError
traceIds
}
total
}}"
,"variables":{"condition":{"queryDuration":{"start":"%s","end":"%s","step":"SECOND"}
,"traceState":"ALL","paging":{"pageNum":1,"pageSize":100,"needTotal":true}
,"queryOrder":"BY_DURATION","minTraceDuration":"%s","tags":[]}}}
""" % (query_time_start, query_time_end, duration_threshold)
return self.do_query(query)
def get_slow_endpoints(self, query_time_start, query_time_end, duration_threshold, ignore_endpoints,
ignore_services, query_compensate_timezone):
result = []
        # Add the compensation timezone to the query time range and convert it to the time format SkyWalking expects
query_time_start = (datetime.datetime.fromtimestamp(query_time_start) + datetime.timedelta(
hours=query_compensate_timezone)).strftime("%Y-%m-%d %H%M%S")
query_time_end = (datetime.datetime.fromtimestamp(query_time_end) + datetime.timedelta(
hours=query_compensate_timezone)).strftime("%Y-%m-%d %H%M%S")
        # Query the base trace data
slow_endpoints = self.query_slow_endpoints(query_time_start, query_time_end, duration_threshold)
for item in slow_endpoints["data"]["data"]["traces"]:
endpoint_name = item["endpointNames"][0]
if endpoint_name in ignore_endpoints:
continue
if item["isError"]:
continue
trace_id = item["traceIds"][0]
duration = item["duration"]
service_code = self.query_service_code__by_trace_id(trace_id)
if service_code in ignore_services:
continue
self.push_2_summary_result(service_code, endpoint_name, duration, trace_id)
        # Build the sorted results
summary_service_result = []
for key in self.summary_result:
summary_service_result.append({"service": key, "count": self.summary_result[key]["count"]})
summary_service_result.sort(key=lambda x: x["count"], reverse=True)
# print(summary_service_result)
for service in summary_service_result:
service_name = service["service"]
# print("-" * 100, service_name, "(%s)" % str(self.summary_result[service_name]["count"]))
summary_endpoint_result = []
for key in self.summary_result[service_name]:
if "count" == key:
continue
summary_endpoint_result.append(
{"endpoint": key, "count": self.summary_result[service_name][key]["count"]})
summary_endpoint_result.sort(key=lambda x: x["count"], reverse=True)
for endpoint in summary_endpoint_result:
# print(endpoint["endpoint"], "(%s)" % str(endpoint["count"]), " -- ",
# self.summary_result[service_name][key]["trace_id"])
result.append({
"endpoint": service_name + ":" + endpoint["endpoint"],
"service": service_name,
"count": str(endpoint["count"]),
"trace_id": self.summary_result[service_name][key]["trace_id"],
"duration_min": self.summary_result[service_name][key]["duration_min"],
"duration_max": self.summary_result[service_name][key]["duration_max"],
"duration_avg": self.summary_result[service_name][key]["duration_avg"],
})
return result
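
# Illustrative usage sketch (the endpoint URL, time range and threshold below are
# hypothetical; get_slow_endpoints expects unix timestamps and a duration in ms):
#
#   sw = Skywalking("http://skywalking-oap:12800/graphql")
#   slow = sw.get_slow_endpoints(
#       query_time_start=1632740000, query_time_end=1632743600,
#       duration_threshold=1000,
#       ignore_endpoints=[], ignore_services=[],
#       query_compensate_timezone=8)
#   for item in slow:
#       print(item["endpoint"], item["count"], item["trace_id"])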
|
StarcoderdataPython
|
8128522
|
import inspect
import numpy as np
import statistics
from autogoal.ml.utils import LabelEncoder, check_number_of_labels
METRICS = []
def register_metric(func):
METRICS.append(func)
return func
def find_metric(*types):
    for metric_func in METRICS:
        signature = inspect.signature(metric_func)
        if len(types) != len(signature.parameters):
            continue
        for type_if, type_an in zip(types, signature.parameters.values()):
            if not conforms(type_an.annotation, type_if):
                break
        else:
            return metric_func
    raise ValueError("No metric found for types: %r" % types)
def supervised_fitness_fn(score_metric_fn):
def fitness_fn(
pipeline,
X,
y,
*args,
validation_split=0.3,
cross_validation_steps=3,
cross_validation="median",
**kwargs
):
scores = []
for _ in range(cross_validation_steps):
len_x = len(X) if isinstance(X, list) else X.shape[0]
indices = np.arange(0, len_x)
np.random.shuffle(indices)
split_index = int(validation_split * len(indices))
train_indices = indices[:-split_index]
test_indices = indices[-split_index:]
if isinstance(X, list):
X_train, y_train, X_test, y_test = (
[X[i] for i in train_indices],
y[train_indices],
[X[i] for i in test_indices],
y[test_indices],
)
else:
X_train, y_train, X_test, y_test = (
X[train_indices],
y[train_indices],
X[test_indices],
y[test_indices],
)
pipeline.send("train")
pipeline.run(X_train, y_train)
pipeline.send("eval")
y_pred = pipeline.run(X_test, None)
scores.append(score_metric_fn(y_test, y_pred))
return getattr(statistics, cross_validation)(scores)
return fitness_fn
def unsupervised_fitness_fn(score_metric_fn):
def fitness_fn(pipeline, X, *args, **kwargs):
scores = []
pipeline.send("train")
pipeline.run(X)
pipeline.send("eval")
y_pred = pipeline.run(X)
return score_metric_fn(X, y_pred)
return fitness_fn
@supervised_fitness_fn
def accuracy(ytrue, ypred) -> float:
return np.mean([1 if yt == yp else 0 for yt, yp in zip(ytrue, ypred)])
@unsupervised_fitness_fn
def calinski_harabasz_score(X, labels):
"""Compute the Calinski and Harabasz score.
It is also known as the Variance Ratio Criterion.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabasz_index>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabasz score.
References
----------
.. [1] `<NAME> and <NAME>, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<https://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
# X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0.0, 0.0
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (
1.0
if intra_disp == 0.0
else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.0))
)
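
# Illustrative sketch: any plain scoring function can be wrapped the same way
# `accuracy` is above, and register_metric adds it to METRICS so find_metric can
# locate it by signature (the metric below is a hypothetical example):
#
#   @register_metric
#   @supervised_fitness_fn
#   def zero_one_error(ytrue, ypred) -> float:
#       return np.mean([0 if yt == yp else 1 for yt, yp in zip(ytrue, ypred)])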
|
StarcoderdataPython
|
4811331
|
<gh_stars>0
# Generated by Django 3.2.5 on 2021-07-18 03:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0002_auto_20210717_2308'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
],
),
migrations.AddField(
model_name='lesson',
name='tags',
field=models.ManyToManyField(blank=True, null=True, to='courses.Tag'),
),
]
|
StarcoderdataPython
|
1793733
|
import os
import numpy as np
from pynwb import register_class, docval, get_class
from pynwb.core import VectorIndex, VectorData, DynamicTable, ElementIdentifiers
from hdmf.utils import call_docval_func
from pynwb import load_namespaces
name = 'ndx-simulation-output'
here = os.path.abspath(os.path.dirname(__file__))
ns_path = os.path.join(here, 'spec', name + '.namespace.yaml')
load_namespaces(ns_path)
def create_ragged_array(name, values):
"""
:param values: list of lists
:return:
"""
vector_data = VectorData(
name, 'indicates which compartments the data refers to',
[item for sublist in values for item in sublist])
vector_index = VectorIndex(
name + '_index', np.cumsum([len(x) for x in values]), target=vector_data)
return vector_data, vector_index
@register_class('Compartments', name)
class Compartments(DynamicTable):
__columns__ = (
{'name': 'number', 'index': True,
         'description': 'cell compartment ids corresponding to each column in the data'},
{'name': 'position', 'index': True,
'description': 'the observation intervals for each unit'},
{'name': 'label', 'description': 'the electrodes that each spike unit came from',
'index': True, 'table': True}
)
@docval({'name': 'name', 'type': str, 'doc': 'Name of this Compartments object',
'default': 'compartments'},
{'name': 'id', 'type': ('array_data', ElementIdentifiers),
'doc': 'the identifiers for the units stored in this interface', 'default': None},
{'name': 'columns', 'type': (tuple, list), 'doc': 'the columns in this table', 'default': None},
{'name': 'colnames', 'type': 'array_data', 'doc': 'the names of the columns in this table',
'default': None},
{'name': 'description', 'type': str, 'doc': 'a description of what is in this table', 'default': None},
)
def __init__(self, **kwargs):
if kwargs.get('description', None) is None:
kwargs['description'] = "data on spiking units"
call_docval_func(super(Compartments, self).__init__, kwargs)
@staticmethod
def _compartment_finder(cell_compartments, cond, dtype, start_ind):
cell_compartments = np.array(cell_compartments)
if isinstance(cond, dtype):
return start_ind + np.where(cell_compartments == cond)[0]
else:
return np.array([start_ind + np.where(cell_compartments == x)[0] for x in cond]).ravel()
def find_compartments(self, cell, compartment_numbers=None, compartment_labels=None):
"""
Parameters
----------
cell: int
find indices of compartments of this cell
compartment_numbers: int | Iterable(int) (optional)
where these are (this is) the compartment(s)
compartment_labels: str | Iterable(str) (optional)
or where these are (this is) the label(s)
Returns
-------
np.array(dtype=int)
"""
if compartment_numbers is not None and compartment_labels is not None:
raise ValueError('you cannot specify both compartments and compartment_labels')
if cell == 0:
start_ind = 0
else:
start_ind = self.compartments['number_index'].data[cell-1]
cell_compartments = self.compartments['number'][cell]
if compartment_numbers is not None:
return self._compartment_finder(cell_compartments, compartment_numbers, int, start_ind)
elif compartment_labels is not None:
return self._compartment_finder(cell_compartments, compartment_labels, str, start_ind)
else:
return np.arange(start_ind, start_ind + len(cell_compartments), dtype=int)
CompartmentSeries = get_class('CompartmentSeries', name)
CompartmentSeries._compartment_finder = _compartment_finder
CompartmentSeries.find_compartments = find_compartments
SimulationMetaData = get_class('SimulationMetaData', name)
|
StarcoderdataPython
|
11215521
|
<reponame>codacy-badger/pycayennelpp
import pytest
from cayennelpp.lpp_data import LppData
def test_temperature_from_bytes():
# 01 67 FF D7 = -4.1C
temp_buf = bytearray([0x01, 0x67, 0xFF, 0xD7])
temp_dat = LppData.from_bytes(temp_buf)
assert temp_buf == temp_dat.bytes()
def test_accelerometer_from_bytes():
# 06 71 04 D2 FB 2E 00 00
acc_buf = bytearray([0x06, 0x71, 0x04, 0xD2, 0xFB, 0x2E, 0x00, 0x00])
acc_dat = LppData.from_bytes(acc_buf)
assert acc_buf == acc_dat.bytes()
def test_generic_from_bytes():
buff = bytearray([0x00, 0x64, 0xff, 0xff, 0xff, 0xfb])
data = LppData.from_bytes(buff)
assert buff == data.bytes()
assert int(data.type) == 100
assert data.value == (4294967291,)
def test_generic_from_bytes_invalid_size():
with pytest.raises(Exception):
buf = bytearray([0x00, 0x64, 0x00, 0x00, 0x00])
LppData.from_bytes(buf)
def test_gps_from_bytes():
# 01 88 06 76 5f f2 96 0a 00 03 e8
gps_buf = bytearray([0x01, 0x88, 0x06, 0x76,
0x5f, 0xf2, 0x96, 0x0a,
0x00, 0x03, 0xe8])
gps_dat = LppData.from_bytes(gps_buf)
assert gps_buf == gps_dat.bytes()
def test_voltage_from_bytes():
# 25V on channel 1
buff = bytearray([0x01, 0x74, 0x9, 0xc4])
data = LppData.from_bytes(buff)
assert buff == data.bytes()
assert data.value == (25,)
def test_load_from_bytes():
# 42.321kg on channel 0
buff = bytearray([0x00, 0x7A, 0x00, 0xA5, 0x51])
data = LppData.from_bytes(buff)
assert buff == data.bytes()
def test_unix_time_from_bytes():
    # 1970-01-01T00:00:00Z (i.e. unix time 0)
buff = bytearray([0x01, 0x85, 0x00, 0x00, 0x00, 0x00])
data = LppData.from_bytes(buff)
assert buff == data.bytes()
assert data.value == (0,)
def test_init_invalid_type():
with pytest.raises(Exception):
LppData(0, 4242, 0)
def test_init_data_none():
with pytest.raises(Exception):
LppData(0, 0, None)
def test_init_invalid_dimension():
with pytest.raises(Exception):
LppData(0, 136, 0)
def test_any_from_bytes_invalid_size():
with pytest.raises(Exception):
buf = bytearray([0x00, 0x00])
LppData.from_bytes(buf)
def test_gps_from_bytes_invalid_size():
with pytest.raises(Exception):
buf = bytearray([0x00, 0x88, 0x00])
LppData.from_bytes(buf)
def test_lpp_data_size():
assert LppData(0, 0, 0).size == 3
def test_lpp_data_str():
print(LppData(0, 0, 0))
|
StarcoderdataPython
|
3220693
|
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn import datasets
# Load the iris data
iris = datasets.load_iris()
data = iris['data']
# Train the model and generate clusters
model = cluster.KMeans(n_clusters=3)
model.fit(data)
# Get the labels produced by the trained model
labels = model.labels_
# Plot the chart
ldata = data[labels == 0]
plt.scatter(ldata[:, 2], ldata[:, 3],
c='black' ,alpha=0.3,s=100 ,marker="^")
ldata = data[labels == 1]
plt.scatter(ldata[:, 2], ldata[:, 3],
c='black' ,alpha=0.3,s=100 ,marker="*")
ldata = data[labels == 2]
plt.scatter(ldata[:, 2], ldata[:, 3],
c='black' ,alpha=0.3,s=100 ,marker="o")
# Set the axis labels
plt.xlabel(iris["feature_names"][2],fontsize='large')
plt.ylabel(iris["feature_names"][3],fontsize='large')
plt.show()
|
StarcoderdataPython
|
8033627
|
<filename>test/network/test_latencies.py<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Unit-e developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from network.latencies import (
ExponentiallyDistributedLatencyPolicy,
StaticLatencyPolicy
)
def test_static_latency_policy():
latency_policy = StaticLatencyPolicy(base_delay=42)
assert (latency_policy.get_delay(0, 1) == 42)
assert (latency_policy.get_delay(1, 0) == 42)
latency_policy.set_delay(src_node=3, dst_node=4, delay=15)
latency_policy.set_delay(src_node=4, dst_node=3, delay=12)
assert (latency_policy.get_delay(3, 4) == 15)
assert (latency_policy.get_delay(4, 3) == 12)
latency_policy.set_delay(src_node=3, dst_node=4, delay=None)
latency_policy.set_delay(src_node=4, dst_node=3, delay=None)
assert (latency_policy.get_delay(src_node=3, dst_node=4) == 42)
assert (latency_policy.get_delay(src_node=4, dst_node=3) == 42)
def test_exponentially_distributed_latency_policy():
# When set to zero, there's no randomness at all
latency_policy = ExponentiallyDistributedLatencyPolicy(avg_delay=0)
assert (latency_policy.get_delay(0, 1) == 0)
assert (latency_policy.get_delay(1, 0) == 0)
# Global Avg Delay ---------------------------------------------------------
latency_policy = ExponentiallyDistributedLatencyPolicy(avg_delay=3)
# Assert delays randomness
assert (100 == len({latency_policy.get_delay(0, 1) for _ in range(100)}))
assert (100 == len({latency_policy.get_delay(1, 0) for _ in range(100)}))
# We check that avg corresponds to exp. dist's avg.
n = 100000
assert (abs(
3 - (sum([latency_policy.get_delay(0, 1) for _ in range(n)]) / n)
) < 0.1)
assert (abs(
3 - (sum([latency_policy.get_delay(1, 0) for _ in range(n)]) / n)
) < 0.1)
# Edge Avg Delays ----------------------------------------------------------
latency_policy.set_avg_delay(src_node=3, dst_node=4, avg_delay=1)
latency_policy.set_avg_delay(src_node=4, dst_node=3, avg_delay=2)
# Assert delays randomness
assert (100 == len({latency_policy.get_delay(3, 4) for _ in range(100)}))
assert (100 == len({latency_policy.get_delay(4, 3) for _ in range(100)}))
# We check that avg corresponds to exp. dist's avg.
n = 100000
assert (abs(
1 - (sum([latency_policy.get_delay(3, 4) for _ in range(n)]) / n)
) < 0.05)
assert (abs(
2 - (sum([latency_policy.get_delay(4, 3) for _ in range(n)]) / n)
) < 0.05)
# Removing Edge-specific Avg Delays ----------------------------------------
latency_policy.set_avg_delay(src_node=3, dst_node=4, avg_delay=None)
latency_policy.set_avg_delay(src_node=4, dst_node=3, avg_delay=None)
# We check that avg corresponds to the global exp. dist's avg.
n = 100000
assert (abs(
3 - (sum([latency_policy.get_delay(3, 4) for _ in range(n)]) / n)
) < 0.1)
assert (abs(
3 - (sum([latency_policy.get_delay(4, 3) for _ in range(n)]) / n)
) < 0.1)
|
StarcoderdataPython
|
1914125
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from detector.config import Config
from detector.evaluation import plot_keras_history
from detector.features import ImageProcess
from detector.loader import ImagesLoader
from detector.model import SiameseNetworkModel
if __name__ == "__main__":
loader = ImagesLoader(file_ext="*.jpg")
df = loader.prepare_dataset(balance_targets=True)
ipo = ImageProcess(data=df)
train_df, val_df = ipo.train_test_split()
x_left_train, x_right_train, targs_train = ipo.get_augmented_images_arrays(train_data=train_df,
n_new=5)
x_left_val, x_right_val, targs_val = ipo.get_validation_images_arrays(val_data=val_df)
snm = SiameseNetworkModel()
# (Config.resize_width, Config.resize_height, 3)
snm.build_model_2(img_dimension=Config.img_width)
hist = snm.fit_model(x_left=x_left_train,
x_right=x_right_train,
y=targs_train,
x_val_left=x_left_val,
x_val_right=x_right_val,
y_val=targs_val,
e=Config.train_number_epochs,
add_callbacks=True)
plot_keras_history(hist)
|
StarcoderdataPython
|
282273
|
"""Interface class implementation for the x path data.
"""
from typing import Dict
from apysc._type.attr_linking_interface import AttrLinkingInterface
from apysc._type.int import Int
from apysc._type.revert_interface import RevertInterface
class PathXInterface(RevertInterface, AttrLinkingInterface):
_x: Int
def _initialize_x_if_not_initialized(self) -> None:
"""
Initialize the _x attribute if it hasn't been initialized yet.
"""
if hasattr(self, '_x'):
return
self._x = Int(0)
self._append_x_linking_setting()
def _append_x_linking_setting(self) -> None:
"""
        Append an x attribute linking setting.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_x_linking_setting,
locals_=locals(),
module_name=__name__, class_=PathXInterface):
self._append_applying_new_attr_val_exp(
new_attr=self._x, attr_name='x')
self._append_attr_to_linking_stack(
attr=self._x, attr_name='x')
@property
def x(self) -> Int:
"""
        Get the x-coordinate of the destination point.
Returns
-------
x : Int
            The x-coordinate of the destination point.
"""
import apysc as ap
with ap.DebugInfo(
callable_='x', locals_=locals(),
module_name=__name__, class_=PathXInterface):
self._initialize_x_if_not_initialized()
return self._x._copy()
@x.setter
def x(self, value: Int) -> None:
"""
        Set the x-coordinate of the destination point.
Parameters
----------
value : Int
X-coordinate of the destination point.
"""
import apysc as ap
with ap.DebugInfo(
callable_='x', locals_=locals(),
module_name=__name__, class_=PathXInterface):
self._initialize_x_if_not_initialized()
self._x.value = value
self._append_x_linking_setting()
_x_snapshots: Dict[str, int]
def _make_snapshot(self, *, snapshot_name: str) -> None:
"""
Make a value's snapshot.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
self._initialize_x_if_not_initialized()
self._set_single_snapshot_val_to_dict(
dict_name='_x_snapshots',
value=int(self._x._value), snapshot_name=snapshot_name)
def _revert(self, *, snapshot_name: str) -> None:
"""
Revert a value if snapshot exists.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
if not self._snapshot_exists(snapshot_name=snapshot_name):
return
self._initialize_x_if_not_initialized()
self._x._value = self._x_snapshots[snapshot_name]
|
StarcoderdataPython
|
346482
|
import sys
from enerctl import *
def usage():
print("""COMMAND [OPTION]...
Send COMMAND to a legacy Energenie 433 MHz radio controlled wall socket.
Commands:
cmd SEQUENCE Send a custom 4-bit binary code to the socket
off [SOCKET] Turn SOCKET off
on [SOCKET] Turn SOCKET on
Miscellaneous:
h, help Display this help message
q, quit Exit the console
Omitting the socket number means on/off commands will be accepted by all
sockets within range of the transmitter.
Examples:
on
cmd 1001
off 3""")
def cleanup():
transmitter_cleanup()
gpio_cleanup()
def main():
gpio_init()
transmitter_init()
try:
while True:
cmd = input("> ")
cmd = [item.lower() for item in cmd.split()]
if not cmd:
continue
elif ("help" in cmd) or ("h" in cmd):
usage()
continue
base_cmd = cmd.pop(0)
if base_cmd == "cmd":
if not cmd:
print("ERROR: Control sequence required")
continue
elif len(cmd) > 1:
print("ERROR: Too many arguments")
continue
code_str = cmd.pop(0)
if len(code_str) != 4:
print("ERROR: Invalid control sequence")
continue
try:
code = int(code_str, 2)
except ValueError:
print("ERROR: Invalid control sequence")
continue
k_0 = (code >> 0) & 1
k_1 = (code >> 1) & 1
k_2 = (code >> 2) & 1
k_3 = (code >> 3) & 1
send_code(k_3, k_2, k_1, k_0)
print(f"Control code {code_str} transmitted")
continue
            elif base_cmd in ("quit", "q"):
cleanup()
sys.exit(0)
# Default socket ID is 5 (for all)
sock_id = ALL_SOCKETS
if cmd:
try:
sock_id = int(cmd.pop(0))
except ValueError:
print("ERROR: Invalid socket ID")
continue
if cmd:
print("ERROR: Too many arguments")
continue
if sock_id != ALL_SOCKETS:
if not MINIMUM_SOCKET_ID <= sock_id <= MAXIMUM_SOCKET_ID:
print(f"ERROR: Socket ID ({sock_id}) out of range. Must be {MINIMUM_SOCKET_ID}-{MAXIMUM_SOCKET_ID}")
continue
if base_cmd == "off":
socket_off(sock_id)
if sock_id == ALL_SOCKETS:
print("All sockets powered off")
else:
print(f"Socket {sock_id} powered off")
elif base_cmd == "on":
socket_on(sock_id)
if sock_id == ALL_SOCKETS:
print("All sockets powered on")
else:
print(f"Socket {sock_id} powered on")
else:
print(f"ERROR: {base_cmd} is an invalid command")
except KeyboardInterrupt:
print("")
cleanup()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1758071
|
<reponame>migueldvb/ads-python
""" Beers for citations. The new underground currency. """
__author__ = "<NAME> <<EMAIL>>"
# Standard library
import httplib
import json
import os
import urllib
from collections import Counter
# Module specific
import ads
# Couple of mutable variables for the reader
author_query = "^Casey, <NAME>."
records_filename = "citations.json"
papers = ads.search(author_query)
# How many citations did we have last time this ran?
if not os.path.exists(records_filename):
all_citations_last_time = {
"total": 0
}
else:
with open(records_filename, "r") as fp:
all_citations_last_time = json.load(fp)
# Build a dictionary with all of our citations
bibcodes, citations = zip(*[(paper.bibcode, paper.citation_count) for paper in papers])
all_citations = dict(zip(bibcodes, citations))
all_citations["total"] = sum(citations)
# Check if we have more citations than last time, but only if we have run this script
# beforehand, too. Otherwise we'll get 1,000 notifications on the first time the script
# has been run
if (all_citations["total"] > all_citations_last_time["total"]
and len(all_citations_last_time) > 1):
# Someone has cited us since the last time we checked.
newly_cited_papers = {}
for bibcode, citation_count in zip(bibcodes, citations):
new_citations = citation_count - all_citations_last_time[bibcode]
if new_citations > 0:
# Who were the first authors for the new papers that cited us?
citing_papers = ads.search("citations(bibcode:{0})".format(bibcode), rows=new_citations)
newly_cited_papers[bibcode] = [paper.author[0] if paper.author[0] != papers[0].author[0]
else "self-citation" for paper in citing_papers]
# Ok, so now we have a dictionary (called 'newly_cited_papers') that contains the bibcodes and
# names of authors who we owe beers to. But instead, we would like to know how many beers we
# owe, and who we owe them to.
beers_owed = Counter(sum(newly_cited_papers.values(), []))
# Let's not buy ourself beers.
if "self-citation" in beers_owed:
del beers_owed["self-citation"]
for author, num_of_beers_owed in beers_owed.iteritems():
formatted_author = " ".join([name.strip() for name in author.split(",")[::-1]])
this_many_beers = "{0} beers".format(num_of_beers_owed) if num_of_beers_owed > 1 else "a beer"
message = "You owe {0} {1} because they just cited you!".format(formatted_author, this_many_beers)
print(message)
if not "PUSHOVER_TOKEN" in os.environ \
or not "PUSHOVER_USER" in os.environ:
print("No pushover.net notification sent because PUSHOVER_TOKEN or"
" PUSHOVER_USER environment variables not found.")
continue
conn = httplib.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.urlencode({
"token": os.environ["PUSHOVER_TOKEN"],
"user": os.environ["PUSHOVER_USER"],
"message": message
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse()
else:
print("No new citations!")
# Save these citations
with open(records_filename, "w") as fp:
json.dump(all_citations, fp)
|
StarcoderdataPython
|
3303538
|
<reponame>DarkShadow4/python<filename>wxPython/wxGlade-0.9.1/examples/matplotlib2/matplotlib_GUI.py
# -*- coding: UTF-8 -*-
#
# generated by wxGlade
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
import matplotlib_canvas
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.panel_1 = wx.Panel(self, wx.ID_ANY)
self.matplotlib_canvas = matplotlib_canvas.MatplotlibCanvas(self.panel_1, wx.ID_ANY)
self.text_function = wx.TextCtrl(self.panel_1, wx.ID_ANY, "sin(x)")
self.text_xmin = wx.TextCtrl(self.panel_1, wx.ID_ANY, "0")
self.text_max = wx.TextCtrl(self.panel_1, wx.ID_ANY, "10")
self.text_xstep = wx.TextCtrl(self.panel_1, wx.ID_ANY, "0.1")
self.button_plot = wx.Button(self.panel_1, wx.ID_ANY, "Plot")
self.button_clear = wx.Button(self.panel_1, wx.ID_ANY, "Clear")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.on_button_plot, self.button_plot)
self.Bind(wx.EVT_BUTTON, self.on_button_clear, self.button_clear)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("matplotlib canvas example")
self.text_xmin.SetMinSize((40, -1))
self.text_max.SetMinSize((40, -1))
self.text_xstep.SetMinSize((40, -1))
self.button_plot.SetDefault()
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2.Add(self.matplotlib_canvas, 1, wx.ALL | wx.EXPAND, 3)
label_4 = wx.StaticText(self.panel_1, wx.ID_ANY, "f(x) = ")
sizer_4.Add(label_4, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
sizer_4.Add(self.text_function, 1, 0, 0)
sizer_2.Add(sizer_4, 0, wx.ALL | wx.EXPAND, 5)
label_1 = wx.StaticText(self.panel_1, wx.ID_ANY, "xmin")
sizer_3.Add(label_1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add(self.text_xmin, 0, 0, 0)
label_2 = wx.StaticText(self.panel_1, wx.ID_ANY, "xmax")
sizer_3.Add(label_2, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add(self.text_max, 0, 0, 0)
label_3 = wx.StaticText(self.panel_1, wx.ID_ANY, "step")
sizer_3.Add(label_3, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add(self.text_xstep, 0, 0, 0)
sizer_3.Add((20, 20), 1, 0, 0)
sizer_3.Add(self.button_plot, 0, 0, 0)
sizer_3.Add(self.button_clear, 0, wx.LEFT, 8)
sizer_2.Add(sizer_3, 0, wx.ALL | wx.EXPAND, 5)
self.panel_1.SetSizer(sizer_2)
sizer_1.Add(self.panel_1, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
self.SetSize((614, 505))
# end wxGlade
def on_button_plot(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_plot' not implemented!")
event.Skip()
def on_button_clear(self, event): # wxGlade: MyFrame.<event_handler>
print("Event handler 'on_button_clear' not implemented!")
event.Skip()
# end of class MyFrame
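# Minimal launcher sketch: wxGlade normally generates the wx.App subclass in a
# separate file, so this stand-alone entry point is illustrative only.
if __name__ == "__main__":
    app = wx.App(False)
    frame = MyFrame(None, wx.ID_ANY, "")
    app.SetTopWindow(frame)
    frame.Show()
    app.MainLoop()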
|
StarcoderdataPython
|
4802120
|
from django.conf.global_settings import DATETIME_INPUT_FORMATS, DATE_INPUT_FORMATS
# Suppress warnings about star import
from locales.default.formats import * # noqa: F401,F403 pylint: disable=wildcard-import
FIRST_DAY_OF_WEEK = 1 # Monday
MONTH_DAY_FORMAT = 'D, j F' # Mon, 25 October
DATE_FORMAT = 'j F Y' # '25 October 2006'
TIME_FORMAT = 'H:i:s'  # '14:30:59'
DATETIME_FORMAT = 'D, j F Y H:i:s' # Mon, 25 October 2006 14:30:59
SHORT_DATE_FORMAT = 'd.m.y'  # '25.10.06'
SHORT_DATETIME_FORMAT = 'd.m.y H:i:s'  # '25.10.06 14:30:59'
DATE_INPUT_FORMATS += (
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y' # '25.10.06'
)
DATETIME_INPUT_FORMATS += (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
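# Standalone sanity sketch (not part of Django's settings machinery): the
# *_INPUT_FORMATS entries above are plain strftime patterns, so they can be
# exercised directly with datetime.strptime.
if __name__ == '__main__':
    from datetime import datetime
    print(datetime.strptime('25.10.2006', '%d.%m.%Y'))
    print(datetime.strptime('25.10.06 14:30', '%d.%m.%y %H:%M'))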
|
StarcoderdataPython
|
11364649
|
# Copyright 2017-2020 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from .db_connector import *
from .pg_db_connector import *
__all__ = (db_connector.__all__ +
pg_db_connector.__all__)
|
StarcoderdataPython
|
4825159
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='A discriminative generative network to generate art images. Trained with the BAM dataset',
author='jiwidi',
license='MIT',
)
|
StarcoderdataPython
|
4989424
|
<reponame>huaweicloud/trace_generation_rnn<gh_stars>10-100
"""Run the forward pass to evaluate the flavor LSTM.
License:
MIT License
Copyright (c) 2021 HUAWEI CLOUD
"""
import argparse
import logging
import torch
from torch.utils.data import DataLoader
from tracegen_rnn.constants import ExampleKeys
from tracegen_rnn.evaluator import Evaluator
from tracegen_rnn.flav_dataset import FlavDataset, IGNORE_INDEX
from tracegen_rnn.loss_functions import FlavLossFunctions
from tracegen_rnn.trace_lstm import TraceLSTM
from tracegen_rnn.utils.collate_utils import CollateUtils
from tracegen_rnn.utils.common_args import add_common_args
from tracegen_rnn.utils.logging_utils import init_console_logger
logger = logging.getLogger("tracegen_rnn.evaluate_flav_lstm")
def make_flav_dataloaders(args):
try:
trainset = FlavDataset(args.flav_map_fn, args.seq_len,
args.train_flavs, args.range_start,
args.range_stop)
trainloader = DataLoader(trainset, batch_size=args.batch_size,
collate_fn=CollateUtils.batching_collator,
shuffle=True)
except AttributeError:
# No train_flavs provided:
trainloader = None
testset = FlavDataset(args.flav_map_fn, args.seq_len,
args.test_flavs, args.range_start,
args.range_stop)
testloader = DataLoader(testset, batch_size=args.batch_size,
collate_fn=CollateUtils.batching_collator,
shuffle=False)
return trainloader, testloader
class EvaluateFlavLSTM(Evaluator):
"""Class to help with testing of a flavor LSTM."""
def batch_forward(self, batch, criterion):
"""Run the forward pass and get the number of examples and the loss.
"""
inputs = batch[ExampleKeys.INPUT]
targets = batch[ExampleKeys.TARGET]
num = targets[targets != IGNORE_INDEX].numel()
inputs, targets = (inputs.to(self.device),
targets.to(self.device))
batch_size = inputs.shape[1]
self.net.hidden = self.net.init_hidden(self.device, batch_size)
outputs = self.net(inputs)
outputs = outputs.reshape(-1, outputs.shape[-1])
targets = targets.reshape(-1)
loss = criterion(outputs, targets)
return num, loss
def main(args):
"""Run the evaluation of the saved model.
"""
# Init a logger that writes to console:
logger_levels = [("tracegen_rnn", logging.DEBUG)]
init_console_logger(logger_levels)
_, testloader = make_flav_dataloaders(args)
net = TraceLSTM.create_from_path(args.lstm_model, args.device)
eval_lstm = EvaluateFlavLSTM(net, args.device, testloader)
criterions = [torch.nn.CrossEntropyLoss(), FlavLossFunctions.next_step_err]
labels = ["NLL", "Err%"]
for criterion, label in zip(criterions, labels):
logger.info(label)
eval_lstm.get_test_score(None, criterion)
def parse_arguments():
"""Helper function to parse the command-line arguments, return as an
'args' object.
"""
parser = argparse.ArgumentParser(
description="Eval of flavor LSTM.")
add_common_args(parser)
parser.add_argument(
'--lstm_model', type=str, required=False,
help="The trained model for the LSTM.")
args = parser.parse_args()
return args
if __name__ == "__main__":
MY_ARGS = parse_arguments()
main(MY_ARGS)
|
StarcoderdataPython
|
345002
|
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestVectorAssemblerBatchOp(unittest.TestCase):
def test_vectorassemblerbatchop(self):
df = pd.DataFrame([
[2, 1, 1],
[3, 2, 1],
[4, 3, 2],
[2, 4, 1],
[2, 2, 1],
[4, 3, 2],
[1, 2, 1],
[5, 3, 3]
])
data = BatchOperator.fromDataframe(df, schemaStr="f0 int, f1 int, f2 int")
colnames = ["f0","f1","f2"]
VectorAssemblerBatchOp().setSelectedCols(colnames)\
.setOutputCol("out").linkFrom(data).print()
pass
|
StarcoderdataPython
|
9717144
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
GROUPS_INDEX_URL = 'horizon:identity:groups:index'
GROUPS_INDEX_VIEW_TEMPLATE = 'identity/groups/index.html'
GROUPS_CREATE_URL = 'horizon:identity:groups:create'
GROUPS_CREATE_VIEW_TEMPLATE = 'identity/groups/create.html'
GROUPS_UPDATE_URL = 'horizon:identity:groups:update'
GROUPS_UPDATE_VIEW_TEMPLATE = 'identity/groups/update.html'
GROUPS_MANAGE_URL = 'horizon:identity:groups:manage_members'
GROUPS_MANAGE_VIEW_TEMPLATE = 'identity/groups/manage.html'
GROUPS_ADD_MEMBER_URL = 'horizon:identity:groups:add_members'
GROUPS_ADD_MEMBER_VIEW_TEMPLATE = 'identity/groups/add_non_member.html'
GROUPS_ADD_MEMBER_AJAX_VIEW_TEMPLATE = 'identity/groups/_add_non_member.html'
|
StarcoderdataPython
|
6593941
|
<filename>experiments/run_randomized_experiment.py
import json
import os
import numpy as np
import run_experiment
if __name__ == '__main__':
#
# Loop
run_per_architecture = 10
for i in range(run_per_architecture):
experiment_dir = "./random_jsons/random_architectures_with_loss"
print("")
print(f'''Launching experiment number {i}...''')
filenames = [f for f in os.listdir(experiment_dir) if os.path.isfile(os.path.join(experiment_dir, f))]
for filename in filenames:
full_path = os.path.join(experiment_dir, filename)
#
# Run architecture
print(" |- Running experiment ", full_path)
path = run_experiment.run_as_module(full_path)
#path = "random_Fri_Oct_15_09-02-15_2021_Fri_Oct_15_09-42-33_2021"
train_path = os.path.join(path, "learning_curve_train.npy")
test_path = os.path.join(path, "learning_curve_test.npy")
learning_curve_train = np.load(train_path)
learning_curve_test = np.load(test_path)
print(" Final loss (train):", learning_curve_train[-1])
print(" Final loss (test ):", learning_curve_test[-1] )
#
# Record:
config = json.load( open( full_path) )
if not 'Results' in config:
config['Results'] = {
'learning_curve_train': [],
'learning_curve_test': []
}
config['Results']['learning_curve_train'].append( float(learning_curve_train[-1]) )
config['Results']['learning_curve_test' ].append( float(learning_curve_test[-1] ) )
#
# Dump
with open( full_path, 'w') as outfile:
json.dump(config, outfile, indent=4)
outfile.close()
|
StarcoderdataPython
|
9645709
|
<gh_stars>1-10
#!/usr/bin/env python
from .model import LPWriter, LPSVariableType, LPSWriter, MPSWriter, ModelWriter
from .solution import CBCSolutionReader, CplexSolutionReader, GLPKSolutionReader, LPSolveSolutionReader, SolutionReader
|
StarcoderdataPython
|
12810656
|
<gh_stars>10-100
from java.io import File
from java.io import FileInputStream
from java.util import Properties
#Load properties file in java.util.Properties
def loadPropsFil(propsFil):
properties={}
propFil = Properties()
propFil.load(FileInputStream(propsFil))
properties.update(propFil)
return properties
propertyfile='/opt/data/myfile.properties'
properties = loadPropsFil(propertyfile)
for key, value in properties.iteritems():
print "%s=%s" % (key, value)
dictName="MyDictDev2"
dictId='Environments/%s'%dictName
if repository.exists(dictId):
print "update dictionary '%s'" % dictId
dict=repository.read(dictId)
dict.values['entries'].putAll(properties)
repository.update(dict)
else:
print "new dictionary created '%s'" % dictId
dict=repository.create(factory.configurationItem(dictId, 'udm.Dictionary', {'entries':properties}))
|
StarcoderdataPython
|
9662821
|
<filename>ogcore/utils.py
'''
------------------------------------------------------------------------
Miscellaneous functions used in the OG-Core model.
------------------------------------------------------------------------
'''
# Packages
import os
import sys
import requests
from zipfile import ZipFile
import urllib.request
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
import numpy as np
import pandas as pd
import pickle
from pkg_resources import resource_stream, Requirement
EPSILON = 1e-10 # tolerance or comparison functions
def mkdirs(path):
'''
Makes directories to save output.
Args:
path (str): path name for new directory
Returns:
None
'''
try:
os.makedirs(path)
except OSError as oe:
if oe.errno == 17: # 17 is an error code if can't make path
pass
def pct_diff_func(simul, data):
'''
Used to calculate the absolute percent difference between data
moments and model moments.
Args:
simul (array_like): any shape, model moments
data (array_like): same shape as simul, data moments
Functions called: None
Returns:
output (array_like): percentage differences between model and
data moments
'''
frac = (simul - data) / data
output = np.abs(frac)
return output
def convex_combo(var1, var2, nu):
'''
Takes the convex combination of two variables, where nu is in [0,1].
Args:
var1 (array_like): any shape, variable 1
var2 (array_like): same shape as var1, variable 2
nu (scalar): weight on var1 in convex combination, in [0, 1]
Returns:
combo (array_like): same shape as var1, convex combination of
var1 and var2
'''
combo = nu * var1 + (1 - nu) * var2
return combo
def read_file(path, fname):
'''
Read the contents of 'path'. If it does not exist, assume the file
is installed in a .egg file, and adjust accordingly.
Args:
path (str): path name for new directory
fname (str): filename
Returns:
file contents (str)
'''
if not os.path.exists(os.path.join(path, fname)):
path_in_egg = os.path.join("ogcore", fname)
buf = resource_stream(Requirement.parse("ogcore"), path_in_egg)
_bytes = buf.read()
return StringIO(_bytes.decode("utf-8"))
else:
return open(os.path.join(path, fname))
def pickle_file_compare(fname1, fname2, tol=1e-3, exceptions={},
relative=False):
'''
Read two pickle files and unpickle each. We assume that each
resulting object is a dictionary. The values of each dict are either
numpy arrays or else types that are comparable with the == operator.
Args:
fname1 (str): file name of file 1
fname2 (str): file name of file 2
tol (scalar): tolerance
exceptions (dict): exceptions
relative (bool): whether comparison compares relative values
Returns:
        comparison (bool): whether the two dictionaries are the same
'''
pkl1 = safe_read_pickle(fname1)
pkl2 = safe_read_pickle(fname2)
comparison = dict_compare(fname1, pkl1, fname2, pkl2, tol=tol,
exceptions=exceptions, relative=relative)
return comparison
def comp_array(name, a, b, tol, unequal, exceptions={}, relative=False):
'''
    Compare two arrays in the L infinity norm
Return True if | a - b | < tol, False otherwise
If not equal, add items to the unequal list
name: the name of the value being compared
Args:
name (str): name of variable being compared
a (array_like): first array to compare
b (array_like): second array to compare
tol (scalar): tolerance used for comparison
unequal (dict): dict of variables that are not equal
exceptions (dict): exceptions
relative (bool): whether comparison compares relative values
Returns:
(bool): whether two arrays are the same or not
'''
if name in exceptions:
tol = exceptions[name]
if not a.shape == b.shape:
print("unequal shapes for {0} comparison ".format(str(name)))
unequal.append((str(name), a, b))
return False
else:
if np.all(a < EPSILON) and np.all(b < EPSILON):
return True
if relative:
err = abs(a - b)
mn = np.mean(b)
err = np.max(err / mn)
else:
err = np.max(abs(a - b))
if not err < tol:
print("diff for {0} is {1} which is NOT OK".format(str(name), err))
unequal.append((str(name), a, b))
return False
else:
print("err is {0} which is OK".format(err))
return True
def comp_scalar(name, a, b, tol, unequal, exceptions={}, relative=False):
'''
    Compare two scalars in the L infinity norm
Return True if abs(a - b) < tol, False otherwise
If not equal, add items to the unequal list
Args:
name (str): name of variable being compared
a (scalar): first scalar to compare
        b (scalar): second scalar to compare
tol (scalar): tolerance used for comparison
unequal (list): list of variables that are not equal
exceptions (dict): exceptions
relative (bool): whether comparison compares relative values
Returns:
(bool): whether two arrays are the same or not
'''
if name in exceptions:
tol = exceptions[name]
if (a < EPSILON) and (b < EPSILON):
return True
if relative:
err = float(abs(a - b)) / float(b)
else:
err = abs(a - b)
if not err < tol:
print("err for {0} is {1} which is NOT OK".format(str(name), err))
unequal.append((str(name), str(a), str(b)))
return False
else:
print("err is {0} which is OK".format(err))
return True
def dict_compare(name1, dict1, name2, dict2, tol, verbose=False,
exceptions={}, relative=False):
r'''
Compare two dictionaries. The values of each dict are either
numpy arrays or else types that are comparable with the `==` operator.
For arrays, they are considered the same if `|x - y| < tol` in
the L_inf norm. For scalars, they are considered the same if
`x - y < tol`.
Args:
name1 (str): name of dictionary 1
dict1 (dict): first dictionary to compare
name2 (str): name of dictionary 2
dict2 (dict): second dictionary to compare
tol (scalar): tolerance used for comparison
verbose (bool): whether print messages
exceptions (dict): exceptions
relative (bool): whether comparison compares relative values
Returns:
(bool): whether two dictionaries are the same or not
'''
keys1 = set(dict1.keys())
keys2 = set(dict2.keys())
check = True
if keys1 != keys2:
if len(keys1) == len(keys2):
extra1 = keys1 - keys2
extra2 = keys2 - keys1
msg1 = "extra items in {0}: {1}"
print(msg1.format(name1, extra1))
print(msg1.format(name2, extra2))
return False
elif len(keys1) > len(keys2):
bigger = keys1
bigger_file = name1
smaller = keys2
else:
bigger = keys2
bigger_file = name2
smaller = keys1
res = bigger - smaller
msg = "more items in {0}: {1}"
print(msg.format(bigger_file, res))
return False
else:
unequal_items = []
for k, v in dict1.items():
if type(v) == np.ndarray:
check &= comp_array(k, v, dict2[k], tol, unequal_items,
exceptions=exceptions,
relative=relative)
else:
try:
check &= comp_scalar(
k, v, dict2[k], tol, unequal_items,
exceptions=exceptions, relative=relative)
except TypeError:
check &= comp_array(
k, np.array(v), np.array(dict2[k]), tol,
unequal_items, exceptions=exceptions,
relative=relative)
if verbose and unequal_items:
frmt = "Name {0}"
res = [frmt.format(x[0]) for x in unequal_items]
print("Different arrays: ", res)
return False
return check
def to_timepath_shape(some_array):
'''
This function takes an vector of length T and tiles it to fill a
Tx1x1 array for time path computations.
Args:
some_array (Numpy array): array to reshape
Returns:
tp_array (Numpy array): reshaped array
'''
tp_array = some_array.reshape(some_array.shape[0], 1, 1)
return tp_array
def get_initial_path(x1, xT, p, shape):
r'''
This function generates a path from point x1 to point xT such that
that the path x is a linear or quadratic function of time t.
* linear: `x = d*t + e`
* quadratic: `x = a*t^2 + b*t + c`
Args:
x1 (scalar): initial value of the function x(t) at t=0
xT (scalar): value of the function x(t) at t=T-1
        p (object): model parameters object; its attributes p.T, p.S, and
            p.J set the number of periods and the path dimensions
shape (str): shape of guess for time path, "linear", "ratio",
or "quadratic"
Returns:
xpath (Numpy array): guess of variable over the time path
Notes:
The identifying assumptions for quadratic are the following:
        1. `x1` is the value at time `t=0`: `x1 = c`
        2. `xT` is the value at time `t=T-1`: `xT = a*(T-1)^2 + b*(T-1) + c`
        3. the slope of the path at `t=T-1` is 0: `0 = 2*a*(T-1) + b`
'''
if shape == "linear":
xpath = np.linspace(x1, xT, p.T)
elif shape == "ratio":
domain = np.linspace(0, p.T, p.T)
domain2 = np.tile(domain.reshape(p.T, 1, 1), (1, p.S, p.J))
xpath = (-1 / (domain2 + 1)) * (xT - x1) + xT
elif shape == "quadratic":
cc = x1
bb = 2 * (xT - x1) / (p.T - 1)
aa = (x1 - xT) / ((p.T - 1) ** 2)
xpath = (aa * (np.arange(0, p.T).reshape(p.T, 1, 1) ** 2) +
(bb * np.arange(0, p.T).reshape(p.T, 1, 1)) + cc)
ending_x_tail = np.tile(xT.reshape(1, p.S, p.J),
(p.S, 1, 1))
xpath_full = np.append(xpath, ending_x_tail, axis=0)
return xpath_full
def safe_read_pickle(file_path):
'''
This function reads a pickle from Python 2 into Python 2 or Python 3
Args:
file_path (str): path to pickle file
Returns:
obj (object): object saved in pickle file
'''
with open(file_path, 'rb') as f:
try:
obj = pickle.load(f, encoding='latin1')
except TypeError: # pragma no cover
obj = pickle.load(f) # pragma no cover
return obj
def rate_conversion(annual_rate, start_age, end_age, S):
'''
    This function converts annual rates to model period rates.
Args:
annual_rate (array_like): annualized rates
start_age (int): age at which agents become economically active
end_age (int): maximum age of agents
S (int): number of model periods in agents life
Returns:
rate (array_like): model period rates
'''
rate = (1 + annual_rate) ** ((end_age - start_age) / S) - 1
return rate
def save_return_table(table_df, output_type, path, precision=2):
'''
Function to save or return a table of data.
Args:
table_df (Pandas DataFrame): table
output_type (string): specifies the type of file to save
table to: 'csv', 'tex', 'excel', 'json'
path (string): specifies path to save file with table to
precision (integer): number of significant digits to print.
            Defaults to 2.
Returns:
table_df (Pandas DataFrame): table
'''
pd.options.display.float_format = (
'{:,.' + str(precision) + 'f}').format
if path is None:
if output_type == 'tex':
tab_str = table_df.to_latex(index=False, na_rep='')
return tab_str
elif output_type == 'json':
tab_str = table_df.to_json(double_precision=precision)
return tab_str
elif output_type == 'html':
tab_html = table_df.to_html(
classes="table table-striped table-hover"
).replace('\n', '')
tab_html.replace('\n', '')
return tab_html
else:
return table_df
else:
if output_type == 'tex':
table_df.to_latex(buf=path, index=False, na_rep='')
elif output_type == 'csv':
table_df.to_csv(path_or_buf=path, index=False, na_rep='')
elif output_type == 'json':
table_df.to_json(path_or_buf=path,
double_precision=precision)
elif output_type == 'excel':
table_df.to_excel(excel_writer=path, index=False, na_rep='')
else:
print('Please enter a valid output format') # pragma no cover
assert(False) # pragma no cover
class Inequality():
'''
A class with methods to compute different measures of inequality.
'''
def __init__(self, dist, pop_weights, ability_weights, S, J):
'''
Args:
dist (Numpy array): distribution of endogenous variables
over age and lifetime income group, size, SxJ
pop_weights (Numpy array): fraction of population by each
age, length S
ability_weights (Numpy array): fraction of population for
each lifetime income group, length J
S (int): number of economically active periods in lifetime
J (int): number of ability types
Returns:
None
'''
self.dist = dist
self.pop_weights = pop_weights
self.ability_weights = ability_weights
weights = (np.tile(pop_weights.reshape(S, 1), (1, J)) *
ability_weights.reshape(1, J))
flattened_dist = dist.flatten()
flattened_weights = weights.flatten()
idx = np.argsort(flattened_dist)
self.sort_dist = flattened_dist[idx]
self.sort_weights = flattened_weights[idx]
self.cum_weights = np.cumsum(self.sort_weights)
def gini(self, type='overall'):
'''
Compute the Gini coefficient
Args:
None
Returns:
gini_coeff (scalar): Gini coefficient
'''
if type == 'overall':
p = np.cumsum(self.sort_weights)
nu = np.cumsum(self.sort_dist * self.sort_weights)
elif type == 'age':
flattened_dist = self.dist.sum(axis=1).flatten()
flattened_weights = self.pop_weights.flatten()
idx = np.argsort(flattened_dist)
sort_dist = flattened_dist[idx]
sort_weights = flattened_weights[idx]/flattened_weights.sum()
p = np.cumsum(sort_weights)
nu = np.cumsum(sort_dist*sort_weights)
elif type == 'ability':
flattened_dist = self.dist.sum(axis=0).flatten()
flattened_weights = self.ability_weights.flatten()
idx = np.argsort(flattened_dist)
sort_dist = flattened_dist[idx]
sort_weights = flattened_weights[idx]/flattened_weights.sum()
p = np.cumsum(sort_weights)
nu = np.cumsum(sort_dist*sort_weights)
nu = nu / nu[-1]
gini_coeff = (nu[1:] * p[:-1]).sum() - (nu[:-1] * p[1:]).sum()
return gini_coeff
def var_of_logs(self):
'''
Compute the variance of logs
Args:
None
Returns:
var_ln_dist (scalar): variance of logs
'''
ln_dist = np.log(self.sort_dist)
weight_mean = ((
ln_dist * self.sort_weights).sum() / self.sort_weights.sum())
var_ln_dist = ((
(self.sort_weights * ((ln_dist - weight_mean) ** 2)).sum())
* (1. / (self.sort_weights.sum())))
return var_ln_dist
def ratio_pct1_pct2(self, pct1, pct2):
'''
Compute the pct1/pct2 percentile ratio
Args:
pct1 (scalar): percentile to compute the top pctile% for,
in (0, 1).
pct2 (scalar): percentile to compute the top pctile% for,
in (0, 1)
Returns:
pct_ratio (scalar): ratio of pct1 to pct2
Notes:
usually pct1 > pct 2
'''
assert pct1 > 0
assert pct1 < 1
assert pct2 > 0
assert pct2 < 1
loc_pct1 = np.argmin(np.abs(self.cum_weights - pct1))
loc_pct2 = np.argmin(np.abs(self.cum_weights - pct2))
pct_ratio = self.sort_dist[loc_pct1] / self.sort_dist[loc_pct2]
return pct_ratio
def pct(self, pct):
'''
Returns value at given percentile
Args:
pct1 (scalar): percentile to compute the value at,
in (0, 1).
Returns:
value (scalar): value of variable at pct
'''
assert pct > 0
assert pct < 1
loc_pct = np.argmin(np.abs(self.cum_weights - pct))
value = self.sort_dist[loc_pct]
return value
def top_share(self, pctile):
'''
Compute the top X% share
Args:
pctile (scalar): percentile to compute the top pctile% for,
in (0, 1).
Returns:
pctile_share (scalar): share of variable attributed to the
top pctile group
'''
assert pctile > 0
assert pctile < 1
loc_pctile = np.argmin(np.abs(self.cum_weights - (1 - pctile)))
pctile_share = ((
self.sort_dist[loc_pctile:] *
self.sort_weights[loc_pctile:]).sum() /
(self.sort_dist * self.sort_weights).sum())
return pctile_share
def read_cbo_forecast():
'''
This function reads the CBO Long-Term Budget Projections document
from https://www.cbo.gov/about/products/budget-economic-data#1
and then formats the relevant data for use with OG-Core
'''
CBO_LT_URL = (
'https://www.cbo.gov/system/files/2020-09/51119-2020-09-ltbo_0.xlsx'
)
# Read in data
df = pd.read_excel(CBO_LT_URL, sheet_name='3. Economic Vars',
skiprows=7, nrows=45)
df.drop(columns=['Unnamed: 3', 'Unnamed: 4'], inplace=True)
df[~((pd.isnull(df['Unnamed: 0'])) & (pd.isnull(df['Unnamed: 1'])) &
(pd.isnull(df['Unnamed: 2'])))]
df.fillna(value='', inplace=True)
df['full_var_name'] = (df['Unnamed: 0'] + df['Unnamed: 1'] +
df['Unnamed: 2'])
CBO_VAR_NAMES = {
'Real GDP (Billions of 2019 dollars) ': 'Y',
'On 10-year Treasury notes and the OASDI trust funds': 'r',
'Growth of Real Earnings per Worker': 'w_growth',
'Growth of Total Hours Worked': 'L_growth',
'Hours of All Persons (Nonfarm Business Sector)': 'L',
'Personal Consumption Expenditures': 'C',
'Gross Private Domestic Investment': 'I_total',
'Government Consumption Expenditures and Gross Investment': 'G',
'Old-Age and Survivors Insurance': 'agg_pension_outlays',
'Individual income taxes': 'iit_revenue',
'Payroll taxes': 'payroll_tax_revenue',
'Corporate income taxes': 'business_tax_revenue',
'Wages and Salaries': 'wL'}
df['var_name'] = df['full_var_name'].replace(CBO_VAR_NAMES)
# keep just variables of interest
df.drop(columns=[
'Unnamed: 0', 'Unnamed: 1', 'Unnamed: 2', 'full_var_name'],
inplace=True)
df = df[df['var_name'].isin(CBO_VAR_NAMES.values())]
# Keep just real interest rate (not nominal)
# Note that real interest rate comes first in table
df.drop_duplicates(subset='var_name', inplace=True)
# reshape so that variable names down column
df = pd.melt(df, id_vars='var_name',
value_vars=[i for i in range(1990, 2051)])
df = df.pivot(index='variable', columns='var_name', values='value')
df.reset_index(inplace=True)
df.rename(columns={'variable': 'year'}, inplace=True)
# add debt forcast
df_fiscal = pd.read_excel(CBO_LT_URL,
sheet_name='1. Summary Extended Baseline',
skiprows=9, nrows=32)
df_fiscal = df_fiscal[['Fiscal Year', 'Revenues',
'Federal Debt Held by the Public']]
df_lt = df.merge(df_fiscal, left_on='year', right_on='Fiscal Year',
how='left')
df_lt.rename(columns={'Federal Debt Held by the Public': 'D/Y'},
inplace=True)
df_lt['D'] = df_lt['Y'] * df_lt['D/Y']
CBO_10yr_budget_URL = (
'https://www.cbo.gov/system/files/2021-02/51118-2021-02-11-' +
'budgetprojections.xlsx')
df = pd.read_excel(CBO_10yr_budget_URL, sheet_name='Table 1-1',
skiprows=8, nrows=7)
df.rename(
columns={'Unnamed: 0': 'variable', 'Actual, \n2020': 2020},
inplace=True)
df.drop(columns=['Unnamed: 15', 'Unnamed: 16',
'2026.1', '2031.1'], inplace=True)
df1 = df[~((pd.isnull(df.variable)) | (df.variable == 'Other'))]
df = pd.read_excel(CBO_10yr_budget_URL, sheet_name='Table 1-3',
skiprows=9, nrows=22)
df.rename(columns={'Unnamed: 0': 'variable'}, inplace=True)
df.drop(columns=['2026.1', '2031.1'],
inplace=True)
df.drop_duplicates(subset='variable', keep='last', inplace=True)
df2 = df[~pd.isnull(df.variable)]
CBO_10yr_macro_URL = (
'https://www.cbo.gov/system/files/2021-02/51135-2021-02-' +
'economicprojections.xlsx')
df = pd.read_excel(CBO_10yr_macro_URL,
sheet_name='2. Calendar Year', skiprows=6,
nrows=131)
df.rename(columns={'Unnamed: 1': 'variable'}, inplace=True)
df.drop(columns=[
'Unnamed: 0', 'Unnamed: 2', 'Units', 'Unnamed: 19',
'Unnamed: 20', 'Unnamed: 21', 'Unnamed: 22', 'Unnamed: 23',
'Unnamed: 24'], inplace=True)
# Note that real values come second (after nominal values)
df.drop_duplicates(subset='variable', keep='last', inplace=True)
df3 = df[~pd.isnull(df.variable)]
df_st = df1.append(df2, sort=False, ignore_index=True).append(
df3, sort=False, ignore_index=True)
df_st['var_name'] = df_st['variable'].replace(CBO_VAR_NAMES)
df_st = df_st[~pd.isnull(df_st.var_name)]
df_st.drop(columns=['variable'], inplace=True)
# reshape so each row a year
df_st = pd.melt(df_st, id_vars='var_name',
value_vars=[i for i in range(2017, 2031)])
df_st = df_st.pivot(index='variable', columns='var_name',
values='value').reset_index()
df_st.rename(columns={'variable': 'year'}, inplace=True)
# merge with long term data
df_cbo = df_lt.merge(df_st, how='outer', on='year',
suffixes=('_lt', '_st'))
# replace * with 0
df_cbo.replace(to_replace='*', value=0.0, inplace=True)
return df_cbo
def print_progress(iteration, total, source_name='', prefix='Progress:',
suffix='Complete', decimals=1, bar_length=50):
'''
Prints a progress bar to the terminal when completing small tasks
of a larger job.
Args:
iteration (int>=1): which task the job is currently doing
total (int>=1): how many tasks are in the job
source_name (string): name of source data
prefix (string): what to print before the progress bar
suffix (string): what to print after the progress bar
decimals (int>=0): how many decimals in the percentage
bar_length (int>=3): how many boxes in the progress bar
Functions called: None
Objects created within function:
status (string): status of download
str_format (string): string containing percentage completed
percents (string): percentage completed
filled_length (int): number of boxes in the progress bar to fill
bar (string): progress bar
Returns: status
'''
status = 'Incomplete'
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
if iteration == 0:
if source_name == '':
sys.stdout.write('Accessing data files...\n')
else:
sys.stdout.write('Accessing ' + source_name +
' data files...\n')
sys.stdout.write('\r%s |%s| %s%s %s' %
(prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.write('Computing...\n')
status = 'Complete'
sys.stdout.flush()
return status
def fetch_files_from_web(file_urls):
'''
Fetches zip files from respective web addresses and saves them as
temporary files. Prints progress bar as it downloads the files.
Args:
file_urls (list of strings): list of URLs of respective data zip
files
Functions called:
print_progress()
Objects created within function:
local_paths = list, local paths for teporary files
iteration = int, the number of files that have been downloaded
total = total, the total number of files to download
f = temporary file of monthly CPS survey
path = string, local path for temporary file
zipped_file = ZipFile object, opened zipfile
Files created by this function:
.dta file for each year of SCF data
Returns:
local_paths (list of strings): local paths of temporary data
files
'''
local_paths = []
iteration = 0
total = len(file_urls)
_ = print_progress(iteration, total, source_name='SCF')
for file_url in file_urls:
# url = requests.get(file_url) (if using reuests package)
url = urllib.request.urlopen(file_url)
f = NamedTemporaryFile(delete=False)
path = f.name
# url.content (if using requests package)
with ZipFile(BytesIO(url.read())) as zipped_file:
for contained_file in zipped_file.namelist():
f.write(zipped_file.open(contained_file).read())
# for line in zipped_file.open(contained_file).readlines():
# f.write(line)
local_paths.append(path)
f.close()
iteration += 1
_ = print_progress(iteration, total, source_name='SCF')
return local_paths
def not_connected(url='http://www.google.com/', timeout=5):
'''
Checks for internet connection status of machine.
Args:
url (string): url used to check connectivity
timeout (float>0): time to wait for timeout
Functions called: None
Returns:
Boolean singleton: =True if connection was made within timeout
Raises:
ConnectionError: If no response from url withing timeout
'''
try:
_ = requests.get(url, timeout=timeout)
return False
except requests.ConnectionError:
return True
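# Small self-check sketch (not part of the OG-Core API): exercise the
# Inequality helper on a synthetic S-by-J distribution. The shapes follow the
# class docstring; the numbers themselves are arbitrary.
if __name__ == '__main__':  # pragma no cover
    S, J = 4, 3
    rng = np.random.default_rng(0)
    synthetic_dist = rng.lognormal(mean=0.0, sigma=1.0, size=(S, J))
    pop_weights = np.ones(S) / S
    ability_weights = np.ones(J) / J
    ineq = Inequality(synthetic_dist, pop_weights, ability_weights, S, J)
    print('Gini coefficient:', ineq.gini())
    print('Variance of logs:', ineq.var_of_logs())
    print('Top 10% share:', ineq.top_share(0.10))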
|
StarcoderdataPython
|
9689344
|
"""
Copyright (c) 2018 NSF Center for Space, High-performance, and Resilient Computing (SHREC)
University of Pittsburgh. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
"""
from time import sleep
from ..error import DrSEUsError
from .jtag import jtag
class bdi(jtag):
error_messages = ['syntax error in command',
'timeout while waiting for halt',
'wrong state for requested command', 'read access failed']
def __init__(self, database, options):
self.prompts = ['P2020>']
self.port = 23
super().__init__(database, options)
self.set_targets()
self.open()
def __str__(self):
string = 'BDI3000 at {} port {}'.format(
self.options.debugger_ip_address, self.port)
return string
def set_targets(self):
super().set_targets('p2020')
def close(self):
self.telnet.write(bytes('quit\r', encoding='utf-8'))
super().close()
def reset_bdi(self):
event = self.db.log_event(
'Warning', 'Debugger', 'Reset BDI', success=False)
self.telnet.write(bytes('boot\r\n', encoding='utf-8'))
self.telnet.close()
if self.db.result is None:
self.db.campaign.debugger_output += 'boot\n'
else:
self.db.result.debugger_output += 'boot\n'
sleep(1)
self.connect_telnet()
sleep(1)
self.command(None, error_message='', log_event=False)
event.success = True
event.save()
def reset_dut(self, attempts=5):
expected_output = [
'- TARGET: processing user reset request',
'- BDI asserts HRESET',
'- Reset JTAG controller passed',
'- JTAG exists check passed',
'- BDI removes HRESET',
'- TARGET: resetting target passed',
            r'- TARGET: processing target startup \.\.\.\.',
'- TARGET: processing target startup passed']
try:
super().reset_dut(expected_output, 1)
except DrSEUsError:
self.reset_bdi()
super().reset_dut(expected_output, max(attempts-1, 1))
def halt_dut(self):
super().halt_dut('halt 0 1', [
'- TARGET: core #0 has entered debug mode',
'- TARGET: core #1 has entered debug mode'])
def continue_dut(self):
super().continue_dut('go 0 1')
def select_core(self, core):
self.command('select {}'.format(core), ['Target CPU', 'Core state'],
'Error selecting core')
def get_mode(self):
msr = int(self.command(
'rd msr', [':'], 'Error getting register value'
).split('\r')[0].split(':')[1].split()[0], base=16)
supervisor = not bool(msr & (1 << 14))
return 'supervisor' if supervisor else 'user'
def set_mode(self, mode='supervisor'):
msr = list(bin(int(self.command(
'rd msr', [':'], 'Error getting register value'
).split('\r')[0].split(':')[1].split()[0], base=16)))
if mode == 'supervisor':
msr[-15] = '0'
else:
msr[-15] = '1'
msr = hex(int(''.join(msr), base=2))
self.command('rm msr {}'.format(msr),
error_message='Error setting register value')
self.db.log_event(
'Information', 'Debugger', 'Set processor mode', mode)
def command(self, command, expected_output=[], error_message=None,
log_event=True):
return super().command(command, expected_output, error_message,
log_event, '\r\n', False)
def get_register_value(self, register_info):
target = self.targets[register_info.target]
if register_info.target_index is None:
target_index = 0
else:
target_index = register_info.target_index
if register_info.register_alias is None:
register_name = register_info.register
else:
register_name = register_info.register_alias
register = target['registers'][register_info.register]
if 'type' in target and target['type'] == 'memory_mapped':
command = 'md'
if 'bits' in register:
bits = register['bits']
if bits == 8:
command = 'mdb'
elif bits == 16:
command = 'mdh'
elif bits == 64:
command = 'mdd'
address = target['base'][target_index] + register['offset']
buff = self.command('{} {:#x} 1'.format(command, address),
[':'], 'Error getting register value')
elif 'SPR' in register:
buff = self.command('rdspr {}'.format(register['SPR']), [':'],
'Error getting register value')
elif 'PMR' in register:
buff = self.command('rdpmr {}'.format(register['PMR']), [':'],
'Error getting register value')
else:
buff = self.command('rd {}'.format(register_name), [':'],
'Error getting register value')
return buff.split('\r')[0].split(':')[1].split()[0]
def set_register_value(self, register_info):
target = self.targets[register_info.target]
if register_info.target_index is None:
target_index = 0
else:
target_index = register_info.target_index
if register_info.register_alias is None:
register_name = register_info.register
else:
register_name = register_info.register_alias
register = target['registers'][register_info.register]
value = register_info.injected_value
if 'type' in target and target['type'] == 'memory_mapped':
command = 'mm'
if 'bits' in register:
bits = register['bits']
if bits == 8:
command = 'mmb'
elif bits == 16:
command = 'mmh'
elif bits == 64:
command = 'mmd'
address = target['base'][target_index] + register['offset']
self.command('{} {:#x} {} 1'.format(command, address, value),
error_message='Error getting register value')
elif 'SPR' in register:
self.command('rmspr {} {}'.format(register['SPR'], value),
error_message='Error setting register value')
elif 'PMR' in register:
self.command('rmpmr {} {}'.format(register['PMR'], value),
error_message='Error setting register value')
else:
self.command('rm {} {}'.format(register_name, value),
error_message='Error setting register value')
|
StarcoderdataPython
|
6422291
|
import pytest
import os
from bai_kafka_utils.kafka_service import KafkaServiceConfig
BOOTSTRAP_SERVERS = [os.environ["KAFKA_BOOTSTRAP_SERVERS"]]
@pytest.fixture
def kafka_service_config():
return KafkaServiceConfig(
consumer_group_id="CONSUMER_GROUP_ID",
producer_topic="BAI_APP_FETCHER",
consumer_topic="BAI_APP_EXECUTOR",
bootstrap_servers=BOOTSTRAP_SERVERS,
logging_level="INFO",
cmd_submit_topic="CMD_SUBMIT",
cmd_return_topic="CMD_RETURN",
status_topic="BAI_APP_STATUS",
)
|
StarcoderdataPython
|
9743358
|
import pytest
def test_str_ok(deserialized_document):
try:
str(deserialized_document)
    except Exception:
pytest.fail()
def test_repr_ok(deserialized_document):
try:
repr(deserialized_document)
    except Exception:
pytest.fail()
|
StarcoderdataPython
|
9652136
|
import requests
import re
import csv
BASE_URL = "https://dnsadm.nepustil.net/index.cgi"
user = ""
password = ""
"""Get PTR Records"""
def getRecords():
action = "readzone"
data = {
'domain': user,
'password': password,
'action': action,
'resrec': ''
}
request = requests.post(BASE_URL, data=data)
read = False
res = []
for line in request.text.splitlines():
if(line.startswith("254")):
read = True
continue
if(read):
            cleaned = re.sub(r"\s+", ",", line.strip())
            reader = csv.reader(cleaned.split('\n'), delimiter=',')
for row in reader:
res.append(row)
return res
"""Check if Record Exists"""
def existsRecord(ip):
res = getRecords()
for record in res:
if(record[0] == ip):
return True
return False
"""Add PTR Record"""
def addRecord(domain, ip):
action = "doaddrr"
data = {
'domain': user,
'password': password,
'action': action,
'resrec': ip,
'restype': 'PTR',
'resval': domain+".",
'resttl': 86400
}
requests.post(BASE_URL, data=data)
"""Remove PTR Record"""
def removeRecord(ip):
res = getRecords()
for record in res:
if(record[0] == ip):
domain = record[4]
action = "dodelrr"
data = {
'domain': user,
'password': password,
'action': action,
'resrec': ip,
'restype': 'PTR',
'resval': domain,
'resttl': 86400
}
requests.post(BASE_URL, data=data)
|
StarcoderdataPython
|
122718
|
<reponame>RobotCodeLab/atlas_ros
#!/usr/bin/python3
import smbus
import serial
import sys
import time
class I2Cbus:
def __init__(self, timeout=1, **kwargs):
self.__timeout = timeout
self.bus = smbus.SMBus(1)
def read_line(self, address):
try:
response = self.bus.read_i2c_block_data(address, 0x00)
except IOError as e:
print ("[Quality] Error %s occurred while reading on address %d" % (e.strerror, address))
return None
response = [i for i in response if not i == '\00']
char_list = list(map(lambda x: chr(x & ~0x80), list(response[1:])))
char_list = ''.join(char_list).strip('\x00').split(',')
return [float(x) for x in char_list]
def convert_string_to_bytes(self, cmd):
converted = []
for b in cmd:
converted.append(ord(b))
return converted
def send_cmd(self, cmd, address):
start = ord(cmd[0])
end = cmd[1:] + "\00"
end = self.convert_string_to_bytes(end)
try:
self.bus.write_i2c_block_data(address, start, end)
time.sleep(self.__timeout) # Wait 1s for response to arrive
return True
except IOError as e:
print ("[Quality] Error %s occurred while writing on address %d" % (e.strerror, address))
return None
def get_data(self, address):
"""
Gets a single reading from sensor in selected port
"""
# Send request for data
self.send_cmd("R", address)
line = self.read_line(address)
return line
|
StarcoderdataPython
|
225537
|
"""
Q701
Insert into a Binary Search Tree
Medium
Given the root node of a binary search tree (BST) and a value
to be inserted into the tree, insert the value into the BST.
Return the root node of the BST after the insertion. It is
guaranteed that the new value does not exist in the original
BST.
Note that there may exist multiple valid ways for the insertion,
as long as the tree remains a BST after insertion. You can
return any of them.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
def insert_(root, val):
if root.left is None and root.right is None:
if root.val > val:
root.left = TreeNode(val)
else:
root.right = TreeNode(val)
return None
if root.val > val:
if root.left is not None:
insert_(root.left, val)
else:
root.left = TreeNode(val)
else:
if root.right is not None:
insert_(root.right, val)
else:
root.right = TreeNode(val)
return None
insert_(root, val)
return root
a1 = TreeNode(1)
a2 = TreeNode(2)
a3 = TreeNode(3)
a2.left = a1
a2.right = a3
sol = Solution()
def preorder(tree):
if tree is not None:
print(tree.val)
preorder(tree.left)
preorder(tree.right)
sol.insertIntoBST(a2, 4)
preorder(a2)
|
StarcoderdataPython
|
1937999
|
import pickle
import os
import shutil
class File_Operation:
"""This class shall be used to save the model after training and load the saved model for prediction."""
def __init__(self,file_object,logger_object):
self.file_object = file_object
self.logger_object = logger_object
self.model_directory='models/'
def save_model(self,model,filename):
"""
Method Name: save_model
Description: Save the model file to directory
Outcome: File gets saved
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the save_model method of the File_Operation class')
try:
            path = os.path.join(self.model_directory, filename)  # create separate directory for each cluster
if os.path.isdir(path): #remove previously existing models for each clusters
shutil.rmtree(self.model_directory)
os.makedirs(path)
else:
os.makedirs(path) #
with open(path +'/' + filename+'.sav',
'wb') as f:
pickle.dump(model, f) # save the model to file
            self.logger_object.log(self.file_object,
                                   'Model File '+filename+' saved. Exited the save_model method of the File_Operation class')
            return 'success'
        except Exception as e:
            self.logger_object.log(self.file_object,'Exception occurred in save_model method of the File_Operation class. Exception message: ' + str(e))
            self.logger_object.log(self.file_object,
                                   'Model File '+filename+' could not be saved. Exited the save_model method of the File_Operation class')
raise Exception()
def load_model(self,filename):
"""
Method Name: load_model
Description: load the model file to memory
Output: The Model file loaded in memory
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the load_model method of the File_Operation class')
try:
with open(self.model_directory + filename + '/' + filename + '.sav',
'rb') as f:
                self.logger_object.log(self.file_object,
                                       'Model File ' + filename + ' loaded. Exited the load_model method of the File_Operation class')
                return pickle.load(f)
        except Exception as e:
            self.logger_object.log(self.file_object,
                                   'Exception occurred in load_model method of the File_Operation class. Exception message: ' + str(
                                       e))
            self.logger_object.log(self.file_object,
                                   'Model File ' + filename + ' could not be loaded. Exited the load_model method of the File_Operation class')
raise Exception()
def find_correct_model_file(self,cluster_number):
"""
Method Name: find_correct_model_file
Description: Select the correct model based on cluster number
Output: The Model file
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the find_correct_model_file method of the File_Operation class')
try:
self.cluster_number= cluster_number
self.folder_name=self.model_directory
self.list_of_model_files = []
self.list_of_files = os.listdir(self.folder_name)
for self.file in self.list_of_files:
try:
if (self.file.index(str( self.cluster_number))!=-1):
self.model_name=self.file
except:
continue
self.model_name=self.model_name.split('.')[0]
self.logger_object.log(self.file_object,
'Exited the find_correct_model_file method of the Model_Finder class.')
return self.model_name
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occurred in find_correct_model_file method of the Model_Finder class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'Exited the find_correct_model_file method of the Model_Finder class with Failure')
raise Exception()
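# --- Minimal usage sketch (not part of the original module) ---
# The logger below is a stand-in assumption; the real project supplies its own
# logger object exposing log(file_object, message).
if __name__ == "__main__":
    class _DemoLogger:
        def log(self, file_object, message):
            file_object.write(message + "\n")

    with open("demo_file_operation.log", "a+") as log_file:
        file_op = File_Operation(log_file, _DemoLogger())
        # Any picklable object works as a "model"; a dict stands in for a trained estimator.
        file_op.save_model({"weights": [0.1, 0.2]}, "KMeans2")   # writes models/KMeans2/KMeans2.sav
        restored = file_op.load_model("KMeans2")                 # reads it back with pickle
        print(restored)
        print(file_op.find_correct_model_file(2))                # -> 'KMeans2'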
|
StarcoderdataPython
|
6558227
|
<reponame>LuukHenk/motif_database<gh_stars>0
from django.apps import AppConfig
class DatabaseConfig(AppConfig):
""" Motig database """
name = "motif_database"
|
StarcoderdataPython
|
174411
|
<reponame>bdharang/AWS_SHELL<gh_stars>0
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
import sys
from prompt_toolkit.key_binding.input_processor import KeyPress
from prompt_toolkit.keys import Keys
from prompt_toolkit.interface import CommandLineInterface
from tests.compat import unittest
from awsshell.app import AWSShell, InputInterrupt
class KeysTest(unittest.TestCase):
def setUp(self):
self.aws_shell = AWSShell(None, mock.Mock(), mock.Mock())
self.processor = self.aws_shell.cli.input_processor
def test_F2(self):
match_fuzzy = self.aws_shell.model_completer.match_fuzzy
self.processor.feed_key(KeyPress(Keys.F2, ''))
assert match_fuzzy != self.aws_shell.model_completer.match_fuzzy
def test_F3(self):
enable_vi_bindings = self.aws_shell.enable_vi_bindings
with self.assertRaises(InputInterrupt):
self.processor.feed_key(KeyPress(Keys.F3, ''))
assert enable_vi_bindings != self.aws_shell.enable_vi_bindings
def test_F4(self):
show_completion_columns = self.aws_shell.show_completion_columns
with self.assertRaises(InputInterrupt):
self.processor.feed_key(KeyPress(Keys.F4, ''))
assert show_completion_columns != \
self.aws_shell.show_completion_columns
def test_F5(self):
show_help = self.aws_shell.show_help
with self.assertRaises(InputInterrupt):
self.processor.feed_key(KeyPress(Keys.F5, ''))
assert show_help != self.aws_shell.show_help
def test_F10(self):
# Exiting from the test in this mock test environment will throw:
# IOError: [Errno 25] Inappropriate ioctl for device
# In a non-mock test environment it would throw an EOFError.
# TODO: Probably better to mock the call to event.cli.set_exit().
with self.assertRaises(IOError) as e:
self.processor.feed_key(KeyPress(Keys.F10, ''))
|
StarcoderdataPython
|
8000178
|
#!/usr/bin/env python
import re
import sys
# pgstrip strips the Project Gutenberg header & footer from text.
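# Usage (assumed invocation): python pgstrip.py book1.txt book2.txt   -> cleaned paragraphs on stdout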
def pgstrip(text):
m = re.search("\*\*\* *START OF.*?\*\*\*", text)
if m:
text = text[m.end():]
m = re.search("\*\*\* *END OF", text)
if m:
text = text[:m.start()]
return text
def fixspace(text):
return re.sub("\s+", " ", text.strip())
# Eliminate parenthetical statements.
def fixparens(text):
return re.sub(" \(.*?\)", "", text)
def ispara(text):
if len(text) < 1 or text[-1].isalnum():
return False
return True
def paragraphs(text):
paras = [""]
for line in text.split("\n"):
line = line.rstrip()
if line == "":
paras.append("")
else:
paras[-1] += line + " "
paras = map(fixspace, paras)
paras = map(fixparens, paras)
return "\n".join(filter(ispara, paras))
for file in sys.argv[1:]:
with open(file, "r") as fd:
print paragraphs(pgstrip(fd.read()))
|
StarcoderdataPython
|
1933786
|
from django.test.testcases import SimpleTestCase
from ..models import ClaUser
class ClaUserTestCase(SimpleTestCase):
def setUp(self):
self.token = "<PASSWORD>"
def test_pk_as_token(self):
user = ClaUser(token=self.token, zone_name="zone_name")
self.assertEqual(user.pk, self.token)
def test_save_doesnt_do_anything(self):
user = ClaUser(token=self.token, zone_name="zone_name")
user.save()
def test_is_authenticated_always_returns_true(self):
user = ClaUser(token=self.token, zone_name="zone_name")
self.assertTrue(user.is_authenticated())
|
StarcoderdataPython
|
124505
|
import numpy as np
a1 = np.ones((2, 3), int)
print(a1)
# [[1 1 1]
# [1 1 1]]
a2 = np.full((2, 3), 2)
print(a2)
# [[2 2 2]
# [2 2 2]]
print(np.block([a1, a2]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]]
print(np.block([[a1], [a2]]))
# [[1 1 1]
# [1 1 1]
# [2 2 2]
# [2 2 2]]
print(np.block([[a1, a2], [a2, a1]]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]
# [2 2 2 1 1 1]
# [2 2 2 1 1 1]]
print(np.block([[[a1]], [[a2]]]))
# [[[1 1 1]
# [1 1 1]]
#
# [[2 2 2]
# [2 2 2]]]
print(np.block([[[a1]], [[a2]]]).shape)
# (2, 2, 3)
a3 = np.full(6, 3)
print(a3)
# [3 3 3 3 3 3]
print(np.block([[a1, a2], [a3]]))
# [[1 1 1 2 2 2]
# [1 1 1 2 2 2]
# [3 3 3 3 3 3]]
# print(np.block([[a1, a2], a3]))
# ValueError: List depths are mismatched. First element was at depth 2, but there is an element at depth 1 (arrays[1])
# print(np.block([[a1, a2, a3]]))
# ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
StarcoderdataPython
|
6682558
|
<reponame>vyshakTs/STORE_MANAGEMENT_SYSTEM
import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.db.models import F
from phonenumber_field.modelfields import PhoneNumberField
from helpers import current_datetime
from sms.apps.storemaster.models import WebStore
# Create your models here.
class TimeStampedModel(models.Model):
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class User(AbstractUser):
user_id = models.UUIDField(null=True)
STORE_OWNER = 1
EMPLOYEE = 2
CUSTOMER = 3
USER_TYPE_CHOICES = (
(CUSTOMER, 'customer'),
(STORE_OWNER, 'store owner'),
(EMPLOYEE, 'employee'),
)
user_type = models.PositiveSmallIntegerField(
choices=USER_TYPE_CHOICES, default=CUSTOMER
)
is_first_login = models.BooleanField(default=True)
mobile = PhoneNumberField(unique=True, null=True, blank=True)
def __str__(self):
return self.username
@property
def is_CUSTOMER(self):
return self.user_type == self.CUSTOMER
@property
def is_EMPLOYEE(self):
return self.user_type == self.EMPLOYEE
@property
def is_SO(self):
return self.user_type == self.STORE_OWNER
# def save(self, *args, **kwargs):
# created = self.pk is None
# super(User, self).save(*args, **kwargs)
# if created and self.is_SO:
# WebStore.objects.create(user=self)
class PostCode(models.Model):
city = models.ForeignKey('City', on_delete=models.CASCADE, default=None)
pin_code = models.CharField(max_length=50)
class City(models.Model):
city_id = models.UUIDField(default=uuid.uuid4)
state = models.ForeignKey('State', on_delete=models.CASCADE,)
name = models.CharField(max_length=50)
city_code = models.CharField(max_length=10, unique=True)
class State(models.Model):
state_id = models.UUIDField(default=uuid.uuid4)
country = models.ForeignKey('Country', on_delete=models.CASCADE,) # many-to-one relationship
name = models.CharField(max_length=50)
state_code = models.CharField(max_length=10, unique=True)
def __str__(self):
return self.name
class Country(models.Model):
country_id = models.UUIDField(default=uuid.uuid4)
name = models.CharField(max_length=50)
country_code = models.CharField(max_length=10, unique=True)
def __str__(self):
return self.name
class Language(models.Model):
pass
|
StarcoderdataPython
|
187381
|
<reponame>ExLeonem/master-thesis-code
import functools
import numpy as np
class Pipeline:
"""
Execute a transformation pipeline to prepare a dataset
for benchmarking.
Transformers need to be of form "def fn(data, **kwargs)".
Parameters:
*args (fn): Transformers to execute in sequence.
"""
def __init__(self, *args):
self.transformers = args
def __call__(self, data, **kwargs):
output = data
for transformer in self.transformers:
output = transformer(output, **kwargs)
return output
def select_classes(data, classes=None, **kwargs):
"""
Select datapoints corresponding to the first n labels or to a specific list of classes.
Ignoring the classes parameter skips the transformation.
Parameter:
data (tuple(numpy.ndarray, numpy.ndarray)): The input value and targets
classes (int|list): An integer to extract first n classes or a list of class labels. (default: None)
Returns:
((numpy.ndarray, numpy.ndarray)) The transformed inputs and targets
"""
# Skip class limitation
if classes is None:
return data
# Select only specific classes
inputs, targets = data
# Use n-classes first classes
if isinstance(classes, int):
target_unique_values = np.unique(targets)
if classes < 1 :
raise ValueError("Can't select {} labels. Positive number of classes expected.".format(classes))
if classes <= len(target_unique_values):
selected_labels = target_unique_values[:classes]
selector = functools.reduce(lambda a, b: a | b, [targets == label for label in selected_labels])
new_targets = targets[selector]
new_inputs = inputs[selector]
return new_inputs, new_targets
else:
# Less available unique classes than to select
raise ValueError("Can't select {} labels from {} available unique labels. ".format(classes, len(target_unique_values)))
# Select specific labels
if isinstance(classes, list):
if len(classes) == 0:
raise ValueError("Can't labels out of an empty list. Set the class parameter with a non empty list.")
selector = functools.reduce(lambda a, b: a | b, [targets == label for label in classes])
new_targets = targets[selector]
new_inputs = inputs[selector]
return new_inputs, new_targets
raise ValueError("Error in transformer.select_class. Could not use classes parameter. Pass either nothing, an integer or a list of labels for the classes kwarg.")
def image_channels(data, **kwargs):
"""
Check whether the image data has an explicit channel dimension.
If the data is grayscale and the color channel dimension is missing, an additional dimension is appended.
Parameter:
data ((numpy.ndarray, numpy.ndarray)): Inputs and targets of the dataset.
Returns:
((numpy.ndarray, numpy.ndarray)) The transformed data
"""
inputs, targets = data
# Grayscale image with missing dimension? shape: (batch, height, width)
if len(inputs.shape) == 3:
inputs = np.expand_dims(inputs, axis=-1)
if len(inputs.shape) != 4:
raise ValueError("Error in image_channels/2. Expected image data to have 3 or 4 dimensions. Got {}.".format(len(inputs.shape)))
return inputs, targets
def inputs_to_type(data, dtype=None, **kwargs):
"""
"""
if dtype is None:
return data
# Transform inputs to given type
inputs, targets = data
return inputs.astype(dtype), targets
|
StarcoderdataPython
|
1881862
|
# -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,re,json,urllib,urlparse,datetime
import re
import base64
try: action = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))['action']
except: action = None
from resources.lib.libraries import trakt
from resources.lib.libraries import control
from resources.lib.libraries import client
from resources.lib.libraries import cache
from resources.lib.libraries import metacache
from resources.lib.libraries import favourites
from resources.lib.libraries import workers
from resources.lib.libraries import views
from resources.lib.libraries import playcount
from resources.lib.libraries import cleangenre
class movies:
def __init__(self):
self.list = []
self.en_headers = {'Accept-Language': 'en-US'}
self.trakt_link = 'http://api-v2launch.trakt.tv'
self.imdb_link = 'http://www.imdb.com'
self.fanarttv_key = control.fanarttv_key
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.month_date = (self.datetime - datetime.timedelta(days = 30)).strftime('%Y-%m-%d')
self.month2_date = (self.datetime - datetime.timedelta(days = 60)).strftime('%Y-%m-%d')
self.year_date = (self.datetime - datetime.timedelta(days = 365)).strftime('%Y-%m-%d')
self.year_date10 = (self.datetime - datetime.timedelta(days = 3650)).strftime('%Y-%m-%d')
self.trakt_user = control.setting('trakt.user').strip()
self.imdb_user = control.setting('imdb_user').replace('ur', '')
self.info_lang = control.info_lang or 'en'
self.imdb_info_link = 'http://www.omdbapi.com/?i=%s&plot=full&r=json'
self.imdb_by_query = 'http://www.omdbapi.com/?t=%s&y=%s'
self.tmdb_image = 'http://image.tmdb.org/t/p/original'
self.tmdb_poster = 'http://image.tmdb.org/t/p/w500'
self.persons_link = 'http://www.imdb.com/search/name?count=100&name=%s'
self.personlist_link = 'http://www.imdb.com/search/name?count=100&gender=male,female'
#self.genres_tab = [('Action', 'action'), ('Adventure', 'adventure'), ('Animation', 'animation'),('Biography', 'biography'),
# ('Comedy', 'comedy'), ('Crime', 'crime'), ('Drama', 'drama'),('Family', 'family'), ('Fantasy', 'fantasy'),
# ('History', 'history'), ('Horror', 'horror'),('Music ', 'music'), ('Musical', 'musical'), ('Mystery', 'mystery'),
# ('Romance', 'romance'),('Science Fiction', 'sci_fi'), ('Sport', 'sport'), ('Thriller', 'thriller'), ('War', 'war'),('Western', 'western')]
self.popular_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&groups=top_1000&sort=moviemeter,asc&count=20&start=1'
self.featured_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&release_date=date[365],date[60]&sort=moviemeter,asc&count=20&start=1'
self.boxoffice_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&sort=boxoffice_gross_us,desc&count=20&start=1'
self.oscars_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&groups=oscar_best_picture_winners&sort=year,desc&count=20&start=1'
self.trending_link = 'http://api-v2launch.trakt.tv/movies/trending?limit=20&page=1'
self.views_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&sort=num_votes,desc&count=20&start=1'
self.theaters_link = 'http://www.imdb.com/search/title?title_type=feature&languages=en&num_votes=200,&release_date=%s,%s&sort=release_date_us,desc&count=20&start=1' % (self.year_date, self.today_date)
self.search_link = 'http://api-v2launch.trakt.tv/search?type=movie&query=%s&limit=20'
self.genre_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie,documentary&languages=en&num_votes=100,&genres=%s&sort=moviemeter,asc&count=20&start=1'
self.year_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=100,&production_status=released&year=%s&sort=moviemeter,asc&count=20&start=1'
self.person_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&role=%s&sort=year,desc&count=40&start=1'
self.certification_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&certification=%s&certification_country=US&primary_release_date.lte=%s&page=1' % ('%s', '%s', self.today_date)
self.scn_link = 'http://predb.me'
self.scn_page = 'http://predb.me/?search=%s+720p+tag:-foreign&cats=movies-hd&page=%s'
#self.added_link = 'http://predb.me?start=1'
self.added_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=500,&production_status=released&release_date=%s,%s&sort=release_date,desc&count=20&start=1' % (self.year_date, self.today_date)
self.traktlists_link = 'http://api-v2launch.trakt.tv/users/me/lists'
self.traktlikedlists_link = 'http://api-v2launch.trakt.tv/users/likes/lists?limit=1000000'
self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items'
self.traktcollection_link = 'http://api-v2launch.trakt.tv/users/me/collection/movies'
self.traktwatchlist_link = 'http://api-v2launch.trakt.tv/users/me/watchlist/movies'
self.traktfeatured_link = 'http://api-v2launch.trakt.tv/recommendations/movies?limit=40'
self.trakthistory_link = 'http://api-v2launch.trakt.tv/users/me/history/movies?limit=40&page=1'
self.imdblists_link = 'http://www.imdb.com/user/ur%s/lists?tab=all&sort=modified:desc&filter=titles' % self.imdb_user
self.imdblist_link = 'http://www.imdb.com/list/%s/?view=detail&sort=title:asc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdbwatchlist_link = 'http://www.imdb.com/user/ur%s/watchlist' % self.imdb_user
self.trakt_lang_link = 'http://api-v2launch.trakt.tv/movies/%s/translations/%s'
def get(self, url, idx=True):
try:
try: url = getattr(self, url + '_link')
except: pass
try: u = urlparse.urlparse(url).netloc.lower()
except: pass
if u in self.trakt_link and '/users/' in url:
try:
if url == self.trakthistory_link: raise Exception()
if not '/users/me/' in url: raise Exception()
if trakt.getActivity() > cache.timeout(self.trakt_list, url, self.trakt_user): raise Exception()
self.list = cache.get(self.trakt_list, 72, url, self.trakt_user)
except:
self.list = cache.get(self.trakt_list, 2, url, self.trakt_user)
if '/users/me/' in url:
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['title'].lower()))
if idx == True: self.worker()
elif u in self.trakt_link:
self.list = cache.get(self.trakt_list, 24, url, self.trakt_user)
if idx == True: self.worker()
elif u in self.imdb_link and ('/user/' in url or '/list/' in url):
self.list = cache.get(self.imdb_list, 2, url, idx)
if idx == True: self.worker()
elif u in self.imdb_link:
self.list = cache.get(self.imdb_list, 24, url)
if idx == True: self.worker()
elif u in self.scn_link:
self.list = cache.get(self.scn_list, 24, url)
if idx == True: self.worker()
if idx == True: self.movieDirectory(self.list)
return self.list
except Exception as e:
control.log("movies get e:%s" % e)
pass
def widget(self):
setting = control.setting('movie_widget')
if setting == '2':
self.get(self.featured_link)
elif setting == '3':
self.get(self.trending_link)
else:
self.get(self.added_link)
def favourites(self):
try:
items = favourites.getFavourites('movies')
self.list = [i[1] for i in items]
for i in self.list:
if not 'name' in i: i['name'] = '%s (%s)' % (i['title'], i['year'])
try: i['title'] = i['title'].encode('utf-8')
except: pass
try: i['originaltitle'] = i['originaltitle'].encode('utf-8')
except: pass
try: i['name'] = i['name'].encode('utf-8')
except: pass
if not 'duration' in i: i['duration'] = '0'
if not 'imdb' in i: i['imdb'] = '0'
if not 'tmdb' in i: i['tmdb'] = '0'
if not 'tvdb' in i: i['tvdb'] = '0'
if not 'tvrage' in i: i['tvrage'] = '0'
if not 'poster' in i: i['poster'] = '0'
if not 'banner' in i: i['banner'] = '0'
if not 'fanart' in i: i['fanart'] = '0'
self.worker()
self.list = sorted(self.list, key=lambda k: k['title'])
self.movieDirectory(self.list)
except:
return
def search(self, query=None):
#try:
if query == None:
t = control.lang(30201).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
self.query = k.getText() if k.isConfirmed() else None
else:
self.query = query
if (self.query == None or self.query == ''): return
url = self.search_link % (urllib.quote_plus(self.query))
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
self.worker()
self.movieDirectory(self.list)
return self.list
#except:
# return
def person(self, query=None):
try:
if query == None:
t = control.lang(30201).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
self.query = k.getText() if k.isConfirmed() else None
else:
self.query = query
if (self.query == None or self.query == ''): return
url = self.persons_link % urllib.quote_plus(self.query)
self.list = cache.get(self.imdb_person_list, 0, url)
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
except:
return
def genres(self):
genres = [
('Action', 'action'),
('Adventure', 'adventure'),
('Animation', 'animation'),
('Biography', 'biography'),
('Comedy', 'comedy'),
('Crime', 'crime'),
('Drama', 'drama'),
('Documentary','documentary'),
('Family', 'family'),
('Fantasy', 'fantasy'),
('History', 'history'),
('Horror', 'horror'),
('Music ', 'music'),
('Musical', 'musical'),
('Mystery', 'mystery'),
('Romance', 'romance'),
('Science Fiction', 'sci_fi'),
('Sport', 'sport'),
('Thriller', 'thriller'),
('War', 'war'),
('Western', 'western')
]
for i in genres: self.list.append({'name': cleangenre.lang(i[0], self.info_lang), 'url': self.genre_link % i[1], 'image': 'genres.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def certifications(self):
try:
url = self.certifications_link
self.list = cache.get(self.tmdb_certification_list, 24, url)
for i in range(0, len(self.list)): self.list[i].update({'image': 'movieCertificates.jpg', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
except:
return
def years(self):
year = (self.datetime.strftime('%Y'))
for i in range(int(year)-0, int(year)-50, -1): self.list.append({'name': str(i), 'url': self.year_link % str(i), 'image': 'movieYears.jpg', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def persons(self):
self.list = cache.get(self.imdb_person_list, 24, self.personlist_link)
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
activity = trakt.getActivity()
except:
pass
#control.log('@@ TRAKT LIST %s - %s' %(userlists,activity))
try:
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlists_link,
self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
except:
pass
try:
self.list = []
if self.imdb_user == '': raise Exception()
userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
except:
pass
try:
self.list = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link,
self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'movies'})
#self.addDirectory(self.list, queue=True)
self.addDirectory(self.list)
return self.list
def trakt_list(self, url, user):
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full,images'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
result = trakt.getTrakt(u)
result = json.loads(result)
items = []
for i in result:
try: items.append(i['movie'])
except: pass
if len(items) == 0:
items = result
except:
return
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
p = str(int(q['page']) + 1)
if p == '5': raise Exception()
q.update({'page': p})
q = (urllib.urlencode(q)).replace('%2C', ',')
next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = item['title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = item['ids']['imdb']
if imdb == None or imdb == '': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
poster = 'http://films4u.org/poster/'+base64.b64encode(imdb)+'.png'
poster = poster.encode('utf-8')
banner = 'http://films4u.org/banner/'+base64.b64encode(imdb)+'.png'
banner = banner.encode('utf-8')
fanart = 'http://films4u.org/fanart/'+base64.b64encode(imdb)+'.png'
fanart = fanart.encode('utf-8')
try:
premiered = item['released']
premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
try:
genre = item['genres']
genre = [i.title() for i in genre]
except: genre = '0'
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = str(item['runtime'])
except: duration = '0'
if duration == None: duration = '0'
duration = duration.encode('utf-8')
try: rating = str(item['rating'])
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
try: votes = str(item['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None: votes = '0'
votes = votes.encode('utf-8')
try: mpaa = item['certification']
except: mpaa = '0'
if mpaa == None: mpaa = '0'
mpaa = mpaa.encode('utf-8')
plot = item['overview']
if plot == None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
try: tagline = item['tagline']
except: tagline = None
if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
elif tagline == None: tagline = '0'
tagline = client.replaceHTMLCodes(tagline)
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'tagline': tagline, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': banner, 'fanart': fanart, 'next': next})
except:
pass
return self.list
def trakt_user_list(self, url, user):
try:
result = trakt.getTrakt(url)
items = json.loads(result)
except:
pass
for item in items:
try:
try: name = item['list']['name']
except: name = item['name']
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
except: url = ('me', item['ids']['slug'])
url = self.traktlist_link % url
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def imdb_list(self, url, idx=True):
try:
if url == self.imdbwatchlist_link:
def imdb_watchlist_id(url):
return re.compile('/export[?]list_id=(ls\d*)').findall(client.request(url))[0]
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist_link % url
result = str(client.request(url,headers=self.en_headers))
try:
if idx == True: raise Exception()
pages = client.parseDOM(result, 'div', attrs = {'class': 'desc'})[0]
pages = re.compile('Page \d+? of (\d*)').findall(pages)[0]
for i in range(1, int(pages)):
u = url.replace('&start=1', '&start=%s' % str(i*100+1))
result += str(client.request(u, headers=self.en_headers))
except:
pass
result = result.replace('\n','')
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'})
items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
except:
return
try:
next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})
if len(next) == 0:
next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
next = [i[0] for i in next if 'Next' in i[1]]
next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
next = client.replaceHTMLCodes(next)
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
try: title = client.parseDOM(item, 'a')[1]
except: pass
try: title = client.parseDOM(item, 'a', attrs = {'onclick': '.+?'})[-1]
except: pass
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
year = re.findall('(\d{4})', year[0])[0]
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = client.parseDOM(item, 'a', ret='href')[0]
imdb = re.findall('(tt\d*)', imdb)[0]
imdb = imdb.encode('utf-8')
#control.log('[imdb_list] Title: %s ID:%s' %(title,imdb))
try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
except: poster = '0'
poster = re.sub('(?:_SX\d+?|)(?:_SY\d+?|)(?:_UX\d+?|)_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
except: genre = '0'
genre = ' / '.join([i.strip() for i in genre.split(',')])
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
except: duration = '0'
duration = duration.encode('utf-8')
rating = '0'
try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
except: pass
try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
except: rating = '0'
try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
except: pass
if rating == '' or rating == '-': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
except: votes = '0'
try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
except: mpaa = '0'
if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
mpaa = mpaa.replace('_', '-')
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
except: director = '0'
director = client.parseDOM(director, 'a')
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0]
except: cast = '0'
cast = client.replaceHTMLCodes(cast)
cast = cast.encode('utf-8')
cast = client.parseDOM(cast, 'a')
if cast == []: cast = '0'
plot = '0'
try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
except: pass
try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
except: pass
plot = plot.rsplit('<span>', 1)[0].strip()
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
fanart = 'http://films4u.org/fanart/'+base64.b64encode(imdb)+'.png'
fanart = fanart.encode('utf-8')
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': '0', 'cast': cast, 'plot': plot, 'tagline': tagline, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': '0', 'fanart': fanart, 'next': next})
except:
pass
return self.list
def imdb_user_list(self, url):
print("Items", url)
try:
result = client.request(url, headers=self.en_headers)
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
#control.log("##################><><><><> trakt_list item %s" % item)
print("Items",items)
except:
pass
for item in items:
try:
name = client.parseDOM(item, 'a')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = url.split('/list/', 1)[-1].replace('/', '')
url = self.imdblist_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def imdb_person_list(self, url):
try:
result = client.request(url)
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
except:
return
for item in items:
try:
name = client.parseDOM(item, 'a', ret='title')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = re.findall('(nm\d*)', url, re.I)[0]
url = self.person_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
image = client.parseDOM(item, 'img', ret='src')[0]
if not ('._SX' in image or '._SY' in image): raise Exception()
image = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', image)
image = client.replaceHTMLCodes(image)
image = image.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image})
except:
pass
return self.list
def scn_list(self, url):
def predb_items():
try:
years = [(self.datetime).strftime('%Y'), (self.datetime - datetime.timedelta(days = 365)).strftime('%Y')]
months = (self.datetime - datetime.timedelta(days = 180)).strftime('%Y%m%d')
result = ''
for i in years:
result += client.request(self.scn_page % (str(i), '1'))
result += client.request(self.scn_page % (str(i), '2'))
items = client.parseDOM(result, 'div', attrs = {'class': 'post'})
items = [(client.parseDOM(i, 'a', attrs = {'class': 'p-title'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in items]
items = [(i[0][0], i[1][0]) for i in items if len(i[0]) > 0 and len(i[1]) > 0]
items = [(re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)', '', i[0]), re.compile('[\.|\(|\[|\s](\d{4})[\.|\)|\]|\s]').findall(i[0]), re.sub('[^0-9]', '', i[1])) for i in items]
items = [(i[0], i[1][-1], i[2]) for i in items if len(i[1]) > 0]
items = [i for i in items if int(months) <= int(i[2])]
items = sorted(items,key=lambda x: x[2])[::-1]
items = [(re.sub('(\.|\(|\[|LIMITED|UNCUT)', ' ', i[0]).strip(), i[1]) for i in items]
items = [x for y,x in enumerate(items) if x not in items[:y]]
items = items[:150]
print items
return items
except:
return
def predb_list(i):
try:
url = self.imdb_by_query % (urllib.quote_plus(i[0]), i[1])
item = client.request(url, headers=self.en_headers ,timeout='10')
item = json.loads(item)
title = item['Title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['Year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
name = '%s (%s)' % (title, year)
try: name = name.encode('utf-8')
except: pass
imdb = item['imdbID']
if imdb == None or imdb == '' or imdb == 'N/A': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
#poster = 'http://films4u.org/poster/'+base64.b64encode(imdb)+'.png'
#poster = poster.encode('utf-8')
poster = item['Poster']
if poster == None or poster == '' or poster == 'N/A': poster = '0'
if not ('_SX' in poster or '_SY' in poster): poster = '0'
poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
poster = poster.encode('utf-8')
fanart = 'http://films4u.org/fanart/'+base64.b64encode(imdb)+'.png'
fanart = fanart.encode('utf-8')
genre = item['Genre']
if genre == None or genre == '' or genre == 'N/A': genre = '0'
genre = genre.replace(', ', ' / ')
genre = genre.encode('utf-8')
duration = item['Runtime']
if duration == None or duration == '' or duration == 'N/A': duration = '0'
duration = re.sub('[^0-9]', '', str(duration))
duration = duration.encode('utf-8')
rating = item['imdbRating']
if rating == None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
votes = item['imdbVotes']
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None or votes == '' or votes == 'N/A': votes = '0'
votes = votes.encode('utf-8')
mpaa = item['Rated']
if mpaa == None or mpaa == '' or mpaa == 'N/A': mpaa = '0'
mpaa = mpaa.encode('utf-8')
director = item['Director']
if director == None or director == '' or director == 'N/A': director = '0'
director = director.replace(', ', ' / ')
director = re.sub(r'\(.*?\)', '', director)
director = ' '.join(director.split())
director = director.encode('utf-8')
writer = item['Writer']
if writer == None or writer == '' or writer == 'N/A': writer = '0'
writer = writer.replace(', ', ' / ')
writer = re.sub(r'\(.*?\)', '', writer)
writer = ' '.join(writer.split())
writer = writer.encode('utf-8')
cast = item['Actors']
if cast == None or cast == '' or cast == 'N/A': cast = '0'
cast = [x.strip() for x in cast.split(',') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
if cast == []: cast = '0'
plot = item['Plot']
if plot == None or plot == '' or plot == 'N/A': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
try: tagline = tagline.encode('utf-8')
except: pass
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': '0', 'fanart': fanart})
except:
pass
try:
items = cache.get(predb_items, 24)
start = re.compile('start=(\d*)').findall(url)[-1]
start = int(start)
if len(items) > (start + 30): next = self.scn_link + '?start=%s' % (start + 30)
else: next = ''
except:
return
threads = []
for i in range(start - 1, start + 29):
try: threads.append(workers.Thread(predb_list, items[i]))
except: pass
[i.start() for i in threads]
[i.join() for i in threads]
for i in range(0, len(self.list)): self.list[i].update({'next': next})
return self.list
def worker(self):
self.meta = []
total = len(self.list)
#control.log("##################><><><><> WORKER TOTAL %s" % total)
for i in range(0, total): self.list[i].update({'metacache': False})
self.list = metacache.fetch(self.list, self.info_lang)
for r in range(0, total, 20):
threads = []
for i in range(r, r+20):
if i <= total: threads.append(workers.Thread(self.super_info, i))
[i.start() for i in threads]
[i.join() for i in threads]
if len(self.meta) > 0: metacache.insert(self.meta)
self.list = [i for i in self.list if not i['imdb'] == '0']
def super_info(self, i):
try:
#control.log('[super_info] ID:%s' % (str(i)))
zero ='0'.encode('utf-8')
if self.list[i]['metacache'] == True: raise Exception()
try: imdb = self.list[i]['imdb']
except: imdb = '0'
if not imdb == '0': url = self.imdb_info_link % imdb
else: raise Exception()
item = client.request(url, timeout='10')
item = json.loads(item)
title = item['Title']
title = title.encode('utf-8')
if not title == '0':
self.list[i].update({'title': title})
self.list[i].update({'originaltitle': title})
originaltitle = title
year = item['Year']
year = year.encode('utf-8')
if not year == '0': self.list[i].update({'year': year})
imdb = item['imdbID']
if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'
imdb = imdb.encode('utf-8')
if not imdb == '0': self.list[i].update({'imdb': imdb, 'code': imdb})
#control.log('[super_info] Title: %s ID:%s' % (title, imdb))
try:
poster = item['Poster']
if poster == '' or poster == None: poster = '0'
#if not poster == '0': poster = '%s%s' % (self.tmdb_poster, poster)
poster = poster.encode('utf-8')
if not poster == '0': self.list[i].update({'poster': poster})
except:
poster = zero
try:
if not imdb == '0':
fanart = 'http://films4u.org/fanart/'+base64.b64encode(imdb)+'.png'
fanart= fanart.encode('utf-8')
else:
fanart = zero
except:
fanart = zero
# http://fanart.filmkodi.com/tt0006333.jpg
try:
premiered = item['Released']
premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
if premiered == '' or premiered == None: premiered = '0'
premiered = premiered.encode('utf-8')
if not premiered == '0': self.list[i].update({'premiered': premiered})
#studio = item['production_companies']
#try: studio = [x['name'] for x in studio][0]
#except:
studio = '0'
#if studio == '' or studio == None: studio = '0'
studio = studio.encode('utf-8')
#if not studio == '0': self.list[i].update({'studio': studio})
try: genre = item['Genre']
except: genre = '0'
if genre == '' or genre == None or genre == []: genre = '0'
genre = genre.encode('utf-8')
if not genre == '0': self.list[i].update({'genre': genre})
try: duration = str(item['Runtime'].replace(' min',''))
except: duration = '0'
if duration == '' or duration == None: duration = '0'
duration = duration.encode('utf-8')
if not duration == '0': self.list[i].update({'duration': duration})
try: rating = str(item['imdbRating'])
except: rating = '0'
if rating == '' or rating == None: rating = '0'
rating = rating.encode('utf-8')
if not rating == '0': self.list[i].update({'rating': rating})
try:
votes = str(item['imdbVotes'])
votes = str(format(int(votes),',d'))
except:
votes = '0'
if votes == '' or votes == None: votes = '0'
votes = votes.encode('utf-8')
if not votes == '0': self.list[i].update({'votes': votes})
try:
mpaa = item['Country']
except:
mpaa = '0'
if mpaa == '' or mpaa == None: mpaa = '0'
mpaa = mpaa.encode('utf-8')
if not mpaa == '0': self.list[i].update({'mpaa': mpaa})
try: cast = item['Actors']
except: cast = '0'
if cast == None or cast == '' or cast == 'N/A': cast = '0'
cast = [x.strip() for x in cast.split(',') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
if cast == []: cast = '0'
if not cast == '0': self.list[i].update({'cast': cast})
try: writer = item['Writer']
except: writer = '0'
if writer == '' or writer == None: writer= '0'
writer = writer.encode('utf-8').replace(', ', ' / ')
if len(writer) > 0: self.list[i].update({'writer': writer})
plot = item['Plot']
if plot == '' or plot == None: plot = '0'
plot = plot.encode('utf-8')
if not plot == '0': self.list[i].update({'plot': plot})
director = item['Director']
if director == '' or director == None or director == []: director = '0'
director = director.encode('utf-8')
if not director == '0': self.list[i].update({'director': director})
if not self.info_lang == 'en':
url = self.trakt_lang_link % (imdb, self.info_lang)
try:
item = trakt.getTrakt(url)
item = json.loads(item)[0]
t = item['title']
if not (t == None or t == ''): title = t
try: title = title.encode('utf-8')
except: pass
if not title == '0': self.list[i].update({'title': title})
t = item['overview']
if not (t == None or t == ''): plot = t
try: plot = plot.encode('utf-8')
except: pass
if not plot == '0': self.list[i].update({'plot': plot})
except:
pass
#self.meta.append({'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'lang': self.info_lang, 'item': {'code': imdb, 'imdb': imdb, 'tmdb': '0', 'poster': poster, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': zero}})
self.meta.append({'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'lang': self.info_lang, 'item': {'title': title, 'originaltitle': originaltitle, 'year': year, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'poster': poster, 'banner': zero, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot}})
#control.log("><><><><> ITEM META IMDB %s" % imdb)
except Exception as e:
control.log('$Super error: %s' % e)
pass
def movieDirectory(self, items):
if items == None or len(items) == 0: return
isFolder = True if control.setting('autoplay') == 'false' and control.setting('host_select') == '1' else False
isFolder = False if control.window.getProperty('PseudoTVRunning') == 'True' else isFolder
playbackMenu = control.lang(30204).encode('utf-8') if control.setting('autoplay') == 'true' else control.lang(30203).encode('utf-8')
traktMode = False if trakt.getTraktCredentials() == False else True
cacheToDisc = False if not action == 'movieSearch' else True
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
sysaddon = sys.argv[0]
indicators = playcount.getMovieIndicators(refresh=True) if action == 'movies' else playcount.getMovieIndicators()
watchedMenu = control.lang(30206).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(30206).encode('utf-8')
unwatchedMenu = control.lang(30207).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(30207).encode('utf-8')
try:
favitems = favourites.getFavourites('movies')
favitems = [i[0] for i in favitems]
except:
pass
for i in items:
try:
label = '%s (%s)' % (i['title'], i['year'])
imdb, title, year = i['imdb'], i['originaltitle'], i['year']
#label = i['name']
sysname = urllib.quote_plus('%s (%s)' % (title, year))
systitle = urllib.quote_plus(title)
tmdb = i['tmdb']
poster, banner, fanart = i['poster'], i['banner'], i['fanart']
if poster == '0': poster = addonPoster
if banner == '0' and poster == '0': banner = addonBanner
elif banner == '0': banner = poster
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
if i['duration'] == '0': meta.update({'duration': '120'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.info_lang)})
except: pass
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s&meta=%s&t=%s' % (sysaddon, sysname, systitle, year, imdb, tmdb, sysmeta, self.systime)
sysurl = urllib.quote_plus(url)
if isFolder == True:
url = '%s?action=sources&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s&meta=%s' % (sysaddon, sysname, systitle, year, imdb, tmdb, sysmeta)
cm = []
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
cm.append((control.lang(30205).encode('utf-8'), 'Action(Info)'))
#if not action == 'movieSearch':
# cm.append((control.lang(30206).encode('utf-8'), 'RunPlugin(%s?action=moviePlaycount&title=%s&year=%s&imdb=%s&query=7)' % (sysaddon, systitle, year, imdb)))
# cm.append((control.lang(30207).encode('utf-8'), 'RunPlugin(%s?action=moviePlaycount&title=%s&year=%s&imdb=%s&query=6)' % (sysaddon, systitle, year, imdb)))
try:
overlay = int(playcount.getMovieOverlay(indicators, imdb))
#control.log('# %s' % overlay)
if overlay == 7:
cm.append((unwatchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=6)' % (sysaddon, imdb)))
meta.update({'playcount': 1, 'overlay': 7})
else:
cm.append((watchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=7)' % (sysaddon, imdb)))
meta.update({'playcount': 0, 'overlay': 6})
except Exception as e:
control.log('#Overlay e %s' % e)
pass
if traktMode == True:
cm.append((control.lang(30208).encode('utf-8'), 'RunPlugin(%s?action=traktManager&name=%s&imdb=%s&content=movie)' % (sysaddon, sysname, imdb)))
if action == 'movieFavourites':
cm.append((control.lang(30210).encode('utf-8'), 'RunPlugin(%s?action=deleteFavourite&meta=%s&content=movies)' % (sysaddon, sysmeta)))
elif action == 'movieSearch':
cm.append((control.lang(30209).encode('utf-8'), 'RunPlugin(%s?action=addFavourite&meta=%s&query=0&content=movies)' % (sysaddon, sysmeta)))
else:
if not imdb in favitems: cm.append((control.lang(30209).encode('utf-8'), 'RunPlugin(%s?action=addFavourite&meta=%s&content=movies)' % (sysaddon, sysmeta)))
else: cm.append((control.lang(30210).encode('utf-8'), 'RunPlugin(%s?action=deleteFavourite&meta=%s&content=movies)' % (sysaddon, sysmeta)))
cm.append((control.lang(30211).encode('utf-8'), 'RunPlugin(%s?action=movieToLibrary&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s)' % (sysaddon, sysname, systitle, year, imdb, tmdb)))
cm.append((control.lang(30212).encode('utf-8'), 'RunPlugin(%s?action=addView&content=movies)' % sysaddon))
#Trailer
cm.append((control.lang(33003).encode('utf-8'),'RunPlugin(%s?action=trailer&name=%s)' % (sysaddon, sysname)))
item = control.item(label=label, iconImage=poster, thumbnailImage=poster)
try: item.setArt({'poster': poster, 'banner': banner})
except: pass
if settingFanart == 'true' and not fanart == '0':
item.setProperty('Fanart_Image', fanart)
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setInfo(type='Video', infoLabels = meta)
item.setProperty('Video', 'true')
#item.setProperty('IsPlayable', 'true')
item.addContextMenuItems(cm, replaceItems=True)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=isFolder)
except:
pass
try:
url = items[0]['next']
if url == '': raise Exception()
url = '%s?action=movies&url=%s' % (sysaddon, urllib.quote_plus(url))
addonNext = control.addonNext()
item = control.item(label=control.lang(30213).encode('utf-8'), iconImage=addonNext, thumbnailImage=addonNext)
item.addContextMenuItems([], replaceItems=False)
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=True)
except:
pass
control.content(int(sys.argv[1]), 'movies')
control.directory(int(sys.argv[1]), cacheToDisc=cacheToDisc)
views.setView('movies', {'skin.confluence': 500})
def addDirectory(self, items):
if items == None or len(items) == 0: return
sysaddon = sys.argv[0]
addonFanart = control.addonFanart()
addonThumb = control.addonThumb()
artPath = control.artPath()
for i in items:
try:
try: name = control.lang(i['name']).encode('utf-8')
except: name = i['name']
if i['image'].startswith('http://'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
try: cm.append((control.lang(30211).encode('utf-8'), 'RunPlugin(%s?action=moviesToLibrary&url=%s)' % (sysaddon, urllib.quote_plus(i['context']))))
except: pass
item = control.item(label=name, iconImage=thumb, thumbnailImage=thumb)
item.addContextMenuItems(cm, replaceItems=False)
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=True)
except:
pass
control.directory(int(sys.argv[1]), cacheToDisc=True)
|
StarcoderdataPython
|
4964316
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import math
import datetime
import pickle
## from node000:
## mpiexec -machinefile ~/hostfile -n <numavgs*numodors+1> ~/Python-2.6.4/bin/python2.6 odor_morphs.py
## nohup mpiexec -machinefile ~/hostfile -n 57 ~/Python-2.6.4/bin/python2.6 odor_morphs.py < /dev/null &
## typical value for numavgs = 8
## (depends on number of available processing nodes and number of odorfiles generated)
## typical value for numodors = len(inputList) = 7.
## OR for a single odor run; from any node (or laptop -- can use system python (2.7)):
## python2.6 odor_morphs.py [SAVEFORMOVIE]
## Set various option like NO_PGs or ONLY_TWO_MITS in simset_odor
sys.path.extend(["..","../networks","../generators","../simulations"])
from moose_utils import * # imports moose
from data_utils import * # has mpi import and variables also
from OBNetwork import *
from sim_utils import *
from stimuliConstants import * # has SETTLETIME, inputList and pulseList, GLOMS_ODOR, GLOMS_NIL
from simset_odor import * # has REALRUNTIME, NUMBINS
RUNTIME = REALRUNTIME + SETTLETIME
from pylab import * # part of matplotlib that depends on numpy but not scipy
from plot_odor_morphs import *
#-----------------------------------------------------------
class odorResponse:
def __init__(self,mpirank=mpirank): # mpirank is defined in data_utils.py
self.mpirank = mpirank
self.context = moose.PyMooseBase.getContext()
def setupStim(self,network,args,avgnum):
odorA, odorB = args[0]
self.setupOdor(network, odorA, odorB, avgnum)
print "Setup odorA =",odorA,"odorB =",odorB,"at",self.mpirank
def setupOdor(self, network, odorA, odorB, avgnum):
### first figure out which PG belongs to which glom
### PG_glom_map[pgname] returns the glom num of the PG: needed for ORN to PG connections.
PG_glom_map = {}
for projname in network.projectionDict.keys():
if 'PG_mitral' in projname:
for i,proj in enumerate(network.projectionDict[projname][2]):
# get the glomnum from the post path proj[2]
# name of the mitral cell from '/mitrals_2/...'
mitname = string.split(proj[2],'/')[1]
# glomerulus number from 'mitrals_2' by integer division i.e. 2/2 = 1
glomnum = int(string.split(mitname,'_')[1]) / 2
# name of the PG cell from '/PGs_2/...'
pgname = string.split(proj[1],'/')[1]
PG_glom_map[pgname] = glomnum
### Now connect the ORNs
for projname in network.projectionDict.keys():
#### Calling attach_spikes() for each projection,
#### would reconnect files to the same segment multiple times.
#### But attach_files_uniquely() checks whether timetable.tableSize is zero or not
#### i.e. files already attached or not.
### connect ORNs to mitrals
if 'ORN_mitral' in projname:
print "connecting ORN files to mitrals"
for i,proj in enumerate(network.projectionDict[projname][2]):
# get the glomnum from the post path proj[2]
mitname = string.split(proj[2],'/')[1] # name of the mitral cell from '/mitrals_2/...'
glomnum = int(string.split(mitname,'_')[1]) / 2 # glomerulus number from 'mitrals_2' by integer division i.e. 2/2 = 1
filebase = ORNpathseedstr+'firetimes_2sgm_glom_'+str(glomnum)
self.attach_files_uniquely(filebase,proj[0],proj[2],odorA,odorB,avgnum)
### connect ORNs to PG
if 'ORN_PG' in projname:
print "connecting ORN files to PGs"
for i,proj in enumerate(network.projectionDict[projname][2]):
pgname = string.split(proj[2],'/')[1] # name of the PG cell from '/PGs_2/...'
glomnum = PG_glom_map[pgname]
filebase = ORNpathseedstr+'firetimes_2sgm_glom_'+str(glomnum)
self.attach_files_uniquely(filebase,proj[0],proj[2],odorA,odorB,avgnum)
### connect SAs to PG
if 'SA_PG' in projname:
print "connecting SA files to PGs"
for i,proj in enumerate(network.projectionDict[projname][2]):
pgname = string.split(proj[2],'/')[1] # name of the PG cell from '/PGs_2/...'
glomnum = PG_glom_map[pgname]
filebase = ORNpathseedstr+'firetimes_SA'
self.attach_files_uniquely(filebase,proj[0],proj[2],odorA,odorB)
###### I am back to 'extra-connecting' modelled mitral as extra sister mitrals excitation to granules
###### Previously, as below, I was connecting ORNs of the glom to granules
###### which caused inhibition even when the sister mitrals were not even firing!
### connect unmodelled extra sister mitrals as files to granules
#if 'mitral_granule_extra' in projname:
# print "Connecting unmodelled sister excitation files to granules"
# for i,proj in enumerate(network.projectionDict[projname][2]):
# granulename = string.split(proj[2],'/')[1] # name of the granule cell from '/granules_singles_2/...'
# # glomnum from pre_path = proj[1] = 'file[+<glomnum>]_<filenumber1>[_<filenumber2>...]'
# glomstr = proj[1].split('+')[1].split('_',1)[0]
# filebase = ORNpathseedstr+'firetimes_2sgm_glom_'+glomstr
# self.attach_files_uniquely(filebase,proj[0]+'_'+glomstr,proj[2],odorA,odorB,avgnum)
def attach_files_uniquely(self,filebase,synname,postsegpath,odorA,odorB,avgnum=None):
ttpath = postsegpath+'/'+synname+'_tt'
if self.context.exists(ttpath):
# timetable already created by networkML reader - just wrap it below.
tt = moose.TimeTable(ttpath) # post_segment_path+'/'+syn_name+'_tt'
else:
## if timetable was not already created by networkML reader,
## it means that the synaptic weights must be zero!
## (no extra inhibition - only main inhibition)
## hence do not attach spikefiles
return
if tt.tableSize != 0: return # if files are already attached, do nothing!
filebase += '_odor_'+str(odorA)+'_'+str(odorB)
if avgnum is not None: filebase += '_avgnum'+str(avgnum)
        ## attach_spikes() reads the file numbers for this segment
        ## from the 'fileNumbers' field of the MOOSE timetable object,
        ## which is created while reading in the networkML.
attach_spikes(filebase, tt, uniquestr+str(self.mpirank))
def run(self,network, binned):
print "Resetting MOOSE."
# from moose_utils.py sets clocks and resets
resetSim(network.context, SIMDT, PLOTDT)
print "Running at",self.mpirank
network.context.step(RUNTIME)
mitral_responses = []
mitral_responses_binned = []
if ONLY_TWO_MITS or NO_LATERAL: num_mits = MIT_SISTERS
else: num_mits = NUM_GLOMS*MIT_SISTERS
for mitnum in range(num_mits):
mitral = network.mitralTable[mitnum]
## only the last respiration cycle is taken
if binned: mitral_responses_binned.append(
plotBins(mitral._vmTableSoma, NUMBINS, RUNTIME,\
(NUM_RESPS-1)*RESPIRATION+SETTLETIME) )
## need to convert to numpy's array(),
## else MOOSE table cannot be pickled for mpi4py send()
mitral_responses.append(array(mitral._vmTableSoma))
return (mitral_responses,mitral_responses_binned)
#----------------------------------------------------------------
if __name__ == "__main__":
## uniquestr to put in every temp filename to avoid clashing with other processes
if len(sys.argv)>2: uniquestr = sys.argv[2]+'_' # _ necessary, else say 'morphs2'+mpirank is screwed
else: uniquestr = 'morphs_'
numodors = len(inputList)
#### if only one process is called, plot one odor directly
if mpisize == 1:
## for ref: inputList = [ (0.0,1.0), (0.2,0.8), (0.4,0.6), (0.6,0.4), (0.8,0.2), (1.0,0.0), (0.0,0.0) ]
odorA = 0.0
odorB = 1.0
avgnum = 0
## OBNetwork.py uses the proxympirank passed here
proxympirank = avgnum*numodors + 0 + 1 # avgnum*numodors + odornum + 1
sim = odorResponse(proxympirank)
## 'PG' includes 'ORN_PG', 'PG_mitral', 'mitral_PG' and 'SA_PG'
if ONLY_TWO_MITS and not NO_PGS: includeProjections = ['PG']
else: includeProjections = []
tweaks = build_tweaks(CLUB_MITRALS, NO_SPINE_INH, NO_SINGLES,\
NO_JOINTS, NO_MULTIS, NO_PGS, ONLY_TWO_MITS,\
includeProjections=includeProjections, nolateral=NO_LATERAL)
BINNED = True#False # for mitrals only
## if not BINNED, save the full mitral Vm-s
## and not just their spiketimes by setting spiketable = False below.
network = OBNetwork(OBNet_file, synchan_activation_correction, tweaks,\
proxympirank, 'morphs', granfilebase, spiketable=BINNED)
#printNetTree() # from moose_utils.py
## monitor those interneurons that are connected to mitral indices 0 and 1
## save only spiketimes by setting extras_spikes_only=True
extras_spikes_only = True # for interneurons
## choose one of the below for only interneurons connected to mits0/1 vs all.
interneurons_args = {}
#extras_args = {'mitrals':[0,1]}
tables = setupTables(network, NO_PGS, NO_SINGLES, NO_JOINTS, NO_MULTIS,\
interneurons_args, spikes=extras_spikes_only)
## To watch the pre compartment of mit2 that inhibits soma of mit 1
#mit2 = moose.Cell('/mitrals_2')
#mit2.precomp = moose.Compartment(get_matching_children(mit2, ['Seg0_sec_dendd4_4_278'])[0])
#mit2._vmTablePrecomp = setupTable("vmTablePrecomp",mit2.precomp,'Vm')
sim.setupStim(network, ((odorA,odorB),), avgnum=avgnum)
## widely different resting potentials of mit0 and mit1
if VARY_MITS_RMP:
tweak_field('/mitrals_0/##[TYPE=Compartment]', 'Em', '-58e-3')
tweak_field('/mitrals_1/##[TYPE=Compartment]', 'Em', '-70e-3')
if "SAVEFORMOVIE" in sys.argv:
SAVEFORMOVIE = True
mitTables = setupMitralTables(network,BINNED)
else: SAVEFORMOVIE = False
mitral_responses,mitral_responses_binned = sim.run(network,BINNED)
## Save data for movie
if SAVEFORMOVIE:
moviedatafile = 'movie_data_netseed'+netseedstr+'_stimseed'+rateseedstr+'_directed'+str(frac_directed)
timevec = arange(0.0,RUNTIME+1e-12,PLOTDT)
## number of colours in mitcolours must match number of mitral cells
## Each colour-entry below is a tuple of (baseline/initial colour, spiking/peak colour, colourmap)
## Each colour is a tuple of (r,g,b,a)
## Set in Moogli's config file, whether to change color, and/or alpha, or use colourmap.
mitcolours = [
((0.3,0,0,0.3),(1,0,0,1),'jet'), ((0.3,0,0.3,0.3),(1,0,1,1),'jet'),
((0,0,0.3,0.3),(0,0,1,1),'jet'), ((0,0,0.3,0.3),(0,0,0.3,0.3),'jet'),
((0,0.3,0,0.3),(0,1,0,1),'jet'), ((0,0.3,0,0.3),(0,0.3,0,0.3),'jet') ]
dataTables = exportTables(network, \
NO_PGS, NO_SINGLES, NO_JOINTS, NO_MULTIS, BINNED, \
interneurons_args,mitcolours) ## args tells: take only interneurons connected to mits 0,1
mitDataTables = exportMitralTables(mitTables,mitcolours,BINNED)
### Save colour info -- not implemented yet
#f = open(moviedatafile+'_colours.pickle','w')
#pickle.dump( colourTable, f )
#f.close()
## Save movie data
if BINNED: moviedatafile += '_mitspiketimes.pickle'
else: moviedatafile += '_mitVms.pickle'
f = open(moviedatafile,'w')
pickle.dump( {'projections':network.projectionDict,\
'sim_data':(timevec,dataTables,mitDataTables)}, f )
f.close()
print "Saved output file",moviedatafile
sys.exit(0)
## plot and display
if not extras_spikes_only:
timevec = arange(0.0,RUNTIME+1e-12,PLOTDT)
plot_extras(timevec, tables, NO_PGS, NO_SINGLES, NO_JOINTS, NO_MULTIS)
else:
deltabin = RESPIRATION/NUMBINS
## Only the last respiration cycle
timevec = arange(SETTLETIME+(NUM_RESPS-1)*RESPIRATION+deltabin/2,RUNTIME,deltabin)
plot_extras_spikes(timevec, tables, NO_PGS, NO_SINGLES, NO_JOINTS,\
NO_MULTIS, NUMBINS, RUNTIME, SETTLETIME)
figure()
title('Glomerulus 0')
if BINNED:
deltabin = RESPIRATION/NUMBINS
# Take only the last respiration cycle
timevec = arange(SETTLETIME+(NUM_RESPS-1)*RESPIRATION+deltabin/2,RUNTIME,deltabin)
mitral_responses = mitral_responses_binned
else:
timevec = arange(0.0,RUNTIME+1e-12,PLOTDT)
plot(timevec,mitral_responses[0],color=(0.0,1.0,0.0))
plot(timevec,mitral_responses[1],color=(0.0,1.0,0.5))
## plot soma; and precompartment of mit2 that inhibits mit0.
#figure()
#title('mitral 2')
#plot(timevec,mitral_responses[2],color=(1,0,0))
#plot(timevec,mit2._vmTablePrecomp,color=(0,0,0))
show()
#### if multiple processes are called, average over odor morphs
else:
## construct the results filename
today = datetime.date.today()
if NO_SINGLES: singles_str = '_NOSINGLES'
else: singles_str = '_SINGLES'
if NO_JOINTS: joints_str = '_NOJOINTS'
else: joints_str = '_JOINTS'
if NO_PGS: pgs_str = '_NOPGS'
else: pgs_str = '_PGS'
if NO_LATERAL: lat_str = '_NOLAT'
else: lat_str = '_LAT'
if VARY_MITS_RMP: varmitstr = '_VARMIT'
else: varmitstr = '_NOVARMIT'
## stable enough that time tags are not needed
now = ''#datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")+'_'
outfilename = '../results/odor_morphs/'+now+'odormorph'+\
'_netseed'+netseedstr+'_stimseed'+rateseedstr
if NONLINEAR_ORNS: outfilename += '_NL'+NONLINEAR_TYPE
outfilename += singles_str+joints_str+pgs_str+lat_str+varmitstr+\
'_numgloms'+str(NUM_GLOMS)
if DIRECTED: outfilename += '_directed'+str(FRAC_DIRECTED)
outfilename += '.pickle'
## if NOSHOW, then check if resultfile exists, proceed only if non-existent.
if 'NOSHOW' in sys.argv:
NOSHOW = True
## If NOSHOW, then automatic mode, hence don't overwrite resultfile, if exists beforehand.
if os.path.exists(outfilename):
## activdep_inhibition_repeats.py searches for Wrote in first word,
## and filename in second word. so output that even if not simulating.
if mpirank==boss:
for procnum in range(1,mpisize):
mpicomm.recv(source=procnum,tag=10)
print "ExistsSoNotWrote",outfilename
else:
mpicomm.send('done',dest=boss,tag=10)
sys.exit()
else: NOSHOW = False
if mpirank == boss:
#### collate at boss process
mitral_responses_list = []
mitral_responses_binned_list = []
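            ## rank 0 (the boss) only collates; ranks 1..mpisize-1 each simulate one
            ## (average number, odour) pair, hence the number of averages below.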
numavgs = (mpisize-1)/numodors
for avgnum in range(numavgs):
response_odorset = []
response_odorset_binned = []
for odornum in range(numodors):
procnum = avgnum*numodors + odornum + 1
print 'waiting for process '+str(procnum)+'.'
#### you get a numpy array of rows=NUM_GLOMS*MIT_SISTERS and cols=NUMBINS
#### mitral responses has spike times, mitral_responses_binned has binned firing rates
mitral_responses,mitral_responses_binned = mpicomm.recv(source=procnum, tag=0)
response_odorset.append( mitral_responses )
response_odorset_binned.append( mitral_responses_binned )
mitral_responses_list.append(response_odorset)
mitral_responses_binned_list.append(response_odorset_binned)
## write results to a file
f = open(outfilename,'w')
pickle.dump((mitral_responses_list,mitral_responses_binned_list), f)
f.close()
print "Wrote", outfilename
if not NOSHOW:
plot_morphs(outfilename)
show()
else:
#### run the slave processes
sim = odorResponse()
avgnum = (mpirank-1)/numodors
odorset = inputList[(mpirank-1)%numodors]
odorA, odorB = odorset
## If CLUB_MITRAL=False, then extra exc from mitral sisters
## (to certain connected granules as proxy for unmodelled sisters) does NOT get used.
## Instead, here I connect extra baseline excitation to ALL granules if odor is non-zero.
if not CLUB_MITRALS and not (odorA==0.0 and odorB==0.0):
granfilebase += '_extra'
## includeProjections gets used only if ONLY_TWO_MITS is True:
## Keep below projections to 'second order cells'
## i.e. to cells (granules) connected to mits0&1.
## The connections between second order cell
## and mits0&1 are automatically retained of course.
## 'PG' includes 'ORN_PG', 'PG_mitral', 'mitral_PG' and 'SA_PG'
includeProjections = ['PG','granule_baseline']
tweaks = build_tweaks(CLUB_MITRALS, NO_SPINE_INH, NO_SINGLES,\
NO_JOINTS, NO_MULTIS, NO_PGS, ONLY_TWO_MITS,\
includeProjections=includeProjections, nolateral=NO_LATERAL)
## unique str = 'morphs_', etc so that temp files of morphs and pulses etc do not overlap
network = OBNetwork(OBNet_file, synchan_activation_correction, tweaks,\
mpirank, uniquestr, granfilebase, spiketable=True)
## widely different resting potentials of mit0 and mit1
if VARY_MITS_RMP:
tweak_field('/mitrals_0/##[TYPE=Compartment]', 'Em', '-58e-3')
tweak_field('/mitrals_1/##[TYPE=Compartment]', 'Em', '-70e-3')
#printNetTree() # from moose_utils.py
sim.setupStim(network, (odorset,), avgnum)
mitral_responses_both = sim.run(network, binned=True)
mpicomm.send( mitral_responses_both, dest=boss, tag=0 )
print 'sent from process',mpirank
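## ------------------------------------------------------------------
## Hedged usage note (not part of the original script): this file is meant to
## run under MPI, with rank 0 as the boss and one slave rank per
## (average, odour) pair, i.e. numavgs*len(inputList)+1 processes in total.
## A plausible launch line (the script name and positional arguments other
## than sys.argv[2] are assumptions) would be:
##   mpiexec -n 22 python2 odor_morphs.py <netfile> morphs2 NOSHOW
## Only sys.argv[2] (the uniquestr) and the NOSHOW / SAVEFORMOVIE flags are
## actually read in the __main__ block above.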
StarcoderdataPython

11229908
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
from typing import Any, Optional
from astroid import nodes
from pylint.interfaces import UNDEFINED, Confidence
from pylint.testutils.global_test_linter import linter
from pylint.testutils.output_line import MessageTest
from pylint.utils import LinterStats
class UnittestLinter:
"""A fake linter class to capture checker messages."""
# pylint: disable=unused-argument
def __init__(self):
self._messages = []
self.stats = LinterStats()
def release_messages(self):
try:
return self._messages
finally:
self._messages = []
def add_message(
self,
msg_id: str,
line: Optional[int] = None,
node: Optional[nodes.NodeNG] = None,
args: Any = None,
confidence: Optional[Confidence] = None,
col_offset: Optional[int] = None,
end_lineno: Optional[int] = None,
end_col_offset: Optional[int] = None,
) -> None:
"""Add a MessageTest to the _messages attribute of the linter class."""
# If confidence is None we set it to UNDEFINED as well in PyLinter
if confidence is None:
confidence = UNDEFINED
# pylint: disable=fixme
# TODO: Test col_offset
# pylint: disable=fixme
# TODO: Initialize col_offset on every node (can be None) -> astroid
# if col_offset is None and hasattr(node, "col_offset"):
# col_offset = node.col_offset
# pylint: disable=fixme
# TODO: Test end_lineno and end_col_offset :)
self._messages.append(
MessageTest(msg_id, line, node, args, confidence, col_offset)
)
@staticmethod
def is_message_enabled(*unused_args, **unused_kwargs):
return True
@property
def options_providers(self):
return linter.options_providers
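if __name__ == "__main__":
    # Hedged demo (not part of the original module): exercise the fake linter
    # directly. Real checker tests normally subclass
    # pylint.testutils.CheckerTestCase rather than calling add_message by hand.
    fake_linter = UnittestLinter()
    fake_linter.add_message("missing-docstring", line=1)
    captured = fake_linter.release_messages()
    assert len(captured) == 1 and captured[0].msg_id == "missing-docstring"
    # release_messages() empties the internal buffer
    assert fake_linter.release_messages() == []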
StarcoderdataPython

1884058
#!/usr/bin/python
#-*- coding: utf-8 -*-
#===========================================================
# File Name: BenchmarkTemplate.py
# Author: <NAME>, Columbia University
# Creation Date: 01-26-2019
# Last Modified: Tue Mar 5 21:45:20 2019
#
# Description: Standard benchmark template
#
# Copyright (C) 2018 <NAME>
# All rights reserved.
#
# This file is made available under
# the terms of the BSD license (see the COPYING file).
#===========================================================
"""
This module describes the benchmark template.
A benchmark is given a detector/descriptor and a dataset, and it defines how the evaluation is performed.
"""
import numpy as np
from abc import ABCMeta, abstractmethod
import os
from tqdm import tqdm
import pickle as pkl
class Benchmark():
__metaclass__ = ABCMeta
"""Benchmark Template
Attributes
----------
name: str
        Name of the benchmark
tmp_feature_dir: str
Directory for saving the feature
result_dir: str
Directory for saving the final result
"""
def __init__(self, name, tmp_feature_dir='./data/features/',
result_dir='./python_scores/'):
self.name = name
self.tmp_feature_dir = tmp_feature_dir
self.result_dir = result_dir
def detect_feature(self, dataset, detector,
use_cache=True, save_feature=True):
"""
        Extract features from every image in the dataset.
:param dataset: Dataset to extract the feature
:type dataset: SequenceDataset
:param detector: Detector used to extract the feature
:type detector: DetectorAndDescriptor
:param use_cache: Load cached feature and result or not
:type use_cache: boolean
        :param save_feature: Save computed feature or not
:type save_feature: boolean
:returns: feature
:rtype: dict
"""
feature_dict = {}
try:
os.makedirs('{}{}/{}/'.format(self.tmp_feature_dir,
dataset.name, detector.name))
except BaseException:
pass
pbar = tqdm(dataset)
for sequence in pbar:
pbar.set_description(
"Extract feature for {} in {} with {}".format(
sequence.name, dataset.name, detector.name))
for image in sequence.images():
image = image[1]
feature_file_name = '{}{}/{}/{}_{}_frame'.format(self.tmp_feature_dir, dataset.name,
detector.name, sequence.name, image.idx)
get_feature_flag = False
if use_cache:
try:
feature = np.load(feature_file_name + '.npy')
get_feature_flag = True
except BaseException:
get_feature_flag = False
if not get_feature_flag:
if detector.csv_flag:
feature_csv_name = '{}{}/{}/{}-{}.frames.csv'.format(self.tmp_feature_dir, dataset.name,
detector.name, sequence.name, image.idx)
feature = self.load_csv_feature(feature_csv_name)
# pdb.set_trace()
else:
feature = detector.detect_feature(image.image_data)
# print(feature.shape)
if save_feature:
np.save(feature_file_name, feature)
feature_dict['{}_{}'.format(
sequence.name, image.idx)] = feature
return feature_dict
def extract_descriptor(self, dataset, detector,
use_cache=False, save_feature=True):
"""
        Extract features and descriptors from every image in the dataset.
:param dataset: Dataset to extract the descriptor
:type dataset: SequenceDataset
:param detector: Detector used to extract the descriptor
:type detector: DetectorAndDescriptor
:param use_cache: Load cached feature and result or not
:type use_cache: boolean
:param save_feature: Save computated feature or not
:type save_feature: boolean
:returns: feature, descriptor
:rtype: dict, dict
"""
feature_dict = {}
descriptor_dict = {}
try:
os.makedirs('{}{}/{}/'.format(self.tmp_feature_dir,
dataset.name, detector.name))
except BaseException:
pass
pbar = tqdm(dataset)
for sequence in pbar:
pbar.set_description(
"Extract feature for {} in {} with {}".format(
sequence.name, dataset.name, detector.name))
for image in sequence.images():
image = image[1]
feature_file_name = '{}{}/{}/{}_{}_frame'.format(self.tmp_feature_dir,
dataset.name, detector.name, sequence.name, image.idx)
descriptor_file_name = '{}{}/{}/{}_{}_descriptor'.format(self.tmp_feature_dir,
dataset.name, detector.name, sequence.name, image.idx)
get_feature_flag = False
if use_cache:
try:
feature = np.load(feature_file_name + '.npy')
descriptor = np.load(descriptor_file_name + '.npy')
get_feature_flag = True
except BaseException:
get_feature_flag = False
if not get_feature_flag:
if detector.csv_flag:
feature_csv_name = '{}{}/{}/{}-{}.frames.csv'.format(self.tmp_feature_dir, dataset.name,
detector.name, sequence.name, image.idx)
feature = self.load_csv_feature(feature_csv_name)
descriptor_csv_name = '{}{}/{}/{}-{}.descs.csv'.format(self.tmp_feature_dir, dataset.name,
detector.name, sequence.name, image.idx)
descriptor = self.load_csv_feature(descriptor_csv_name)
else:
if detector.is_both:
feature, descriptor = detector.extract_all(
image.image_data)
else:
feature = detector.detect_feature(image.image_data)
descriptor = detector.extract_descriptor(
image.image_data, feature=feature)
if save_feature:
np.save(feature_file_name, feature)
np.save(descriptor_file_name, descriptor)
feature_dict['{}_{}'.format(
sequence.name, image.idx)] = feature
descriptor_dict['{}_{}'.format(
sequence.name, image.idx)] = descriptor
return feature_dict, descriptor_dict
def load_csv_feature(self, csv_feature_file):
"""
Load feature from csvfile.
:param csv_feature_file: csv file to load feature
:type csv_feature_file: str
:returns: feature
:rtype: array
"""
feature = []
with open(csv_feature_file) as f:
for line in f:
tmp_list = line.split(';')
float_list = [float(i) for i in tmp_list]
feature.append(float_list)
return np.asarray(feature)
def load_feature(self, dataset_name, sequence_name, image, detector):
"""
Load feature from cached file. If failed, extract feature from image
:param dataset_name: Name of the dataset
:type dataset_name: str
:param sequence_name: Name of the sequence
:type sequence_name: str
:param image: Image
:type image: Image
:param detector: Detector used to extract the descriptor
:type detector: DetectorAndDescriptor
:returns: feature
:rtype: array
"""
feature_file_name = '{}{}/{}/{}_{}_frame'.format(self.tmp_feature_dir, dataset_name,
detector.name, sequence_name, image.idx)
try:
            feature = np.load(feature_file_name + '.npy')
except BaseException:
feature = detector.detect_feature(image.image_data)
np.save(feature_file_name, feature)
return feature
def load_descriptor(self, dataset_name, sequence_name, image, detector):
"""
Load descriptor from cached file. If failed, extract descriptor from image
:param dataset_name: Name of the dataset
:type dataset_name: str
:param sequence_name: Name of the sequence
:type sequence_name: str
:param image: Image
:type image: Image
:param detector: Detector used to extract the descriptor
:type detector: DetectorAndDescriptor
:returns: descriptor
:rtype: array
"""
descriptor_file_name = '{}{}/{}/{}_{}_descriptor'.format(self.tmp_feature_dir, dataset_name,
detector.name, sequence_name, image.idx)
try:
            descriptor = np.load(descriptor_file_name + '.npy')
except BaseException:
feature = detector.detect_feature(image.image_data)
descriptor = detector.extract_descriptor(
image.image_data, feature=feature)
np.save(descriptor_file_name, descriptor)
return descriptor
    # Evaluation wrapper
def evaluate_warpper(self, dataset, detector, result_list, extract_descriptor=False,
use_cache=True, save_result=True, custom_extraction=False):
"""
        Run the full evaluation over a dataset: extract features (and optionally descriptors), evaluate every link in every sequence, and cache/save the aggregated result.
**Structure of the result:**
result['dataset_name']: name of the dataset
result['result_term_list']: list of metrics for evaluation
result['task_name']: name of the task
        result['detector_name']: name of the detector
result['sequence_result']: a list for result from each sequence
result['ave_{}']: average value for each metric over all sequences
**Structure of the sequence result:**
sequence_result['sequence_name']: name of the sequence
sequence_result[result_name]: list of list of metrics over each link
sequence_result['result_label_list']: label of each link in sequence_result (Same order)
sequence_result['result_link_id_list']: ID of each link in sequence_result (Same order)
:param dataset: Dataset to extract the feature
:type dataset: SequenceDataset
:param detector: Detector used to extract the feature
:type detector: DetectorAndDescriptor
:param result_list: Metric to calculate
:type result_list: list
:param extract_descriptor: Extract descriptor or not
:type extract_descriptor: boolean
:param use_cache: Load cached feature and result or not
:type use_cache: boolean
:param save_result: Save result or not
:type save_result: boolean
        :param custom_extraction: Use custom extraction methods or not. See also detect_feature_custom and extract_descriptor_custom
:type custom_extraction: boolean
:returns: result
:rtype: dict
See Also
--------
detect_feature_custom: Extract feature with customized method (special evaluation).
extract_descriptor_custom: Extract descriptor with customized (special evaluation).
"""
if custom_extraction:
if extract_descriptor:
feature_dict, descriptor_dict = self.extract_descriptor_custom(
dataset, detector, use_cache=use_cache, save_feature=save_result)
else:
feature_dict = self.detect_feature_custom(
dataset, detector, use_cache=use_cache, save_feature=save_result)
else:
if extract_descriptor:
feature_dict, descriptor_dict = self.extract_descriptor(
dataset, detector, use_cache=use_cache, save_feature=save_result)
else:
feature_dict = self.detect_feature(
dataset, detector, use_cache=use_cache, save_feature=save_result)
try:
os.stat('{}{}/{}/{}/'.format(self.result_dir,
self.bench_name, dataset.name, detector.name))
except BaseException:
os.makedirs('{}{}/{}/{}/'.format(self.result_dir,
self.bench_name, dataset.name, detector.name))
get_result_flag = False
result_file_name = '{}{}/{}/{}/{}.pkl'.format(
self.result_dir, self.bench_name, dataset.name, detector.name, self.test_name)
if use_cache:
try:
result = pkl.load(open(result_file_name, 'rb'))
print('Get cached result from {}'.format(result_file_name))
get_result_flag = True
except BaseException:
get_result_flag = False
if not get_result_flag:
result = {}
result['dataset_name'] = dataset.name
result['result_term_list'] = result_list
result['task_name'] = self.name
result['detector_name'] = detector.name
result['sequence_result'] = []
for result_name in result_list:
result['ave_{}'.format(result_name)] = 0.0
# work with each sequence
pbar = tqdm(dataset)
for sequence in pbar:
pbar.set_description(
"Processing {} in {} for {}".format(
sequence.name, dataset.name, detector.name))
sequence_result = {}
sequence_result['sequence_name'] = sequence.name
for result_name in result_list:
sequence_result[result_name] = []
sequence_result['result_label_list'] = []
sequence_result['result_link_id_list'] = []
try:
result['label'] = sequence.label
except BaseException:
pass
# for each link
for link in sequence.links():
link = link[1]
try:
task = link.task
except BaseException:
task = None
feature_1 = feature_dict['{}_{}'.format(
sequence.name, link.source)]
feature_2 = feature_dict['{}_{}'.format(
sequence.name, link.target)]
if extract_descriptor:
descriptor_1 = descriptor_dict['{}_{}'.format(
sequence.name, link.source)]
descriptor_2 = descriptor_dict['{}_{}'.format(
sequence.name, link.target)]
sequence_result['result_link_id_list'].append(
"{}_{}".format(link.source, link.target))
sequence_result['result_label_list'].append(
dataset.get_image(sequence.name, link.target))
# for debug
#print("{}: {}_{}".format(sequence.name, link.source, link.target))
# if sequence.name == 'wall' and link.source=='1' and link.target == '2':
# pdb.set_trace()
# simple evaluation function for each test
if extract_descriptor:
result_number_list = self.evaluate_unit(
(feature_1, descriptor_1), (feature_2, descriptor_2), task)
else:
result_number_list = self.evaluate_unit(
feature_1, feature_2, task)
for result_name, result_number in zip(
result_list, result_number_list):
# for debug
#print('{}: {}'.format(result_name, result_number))
sequence_result[result_name].append(result_number)
for result_name in result_list:
sequence_result['ave_{}'.format(result_name)] = np.mean(
np.array(sequence_result['{}'.format(result_name)]))
result['ave_{}'.format(result_name)] = result['ave_{}'.format(
result_name)] + sequence_result['ave_{}'.format(result_name)]
result['sequence_result'].append(sequence_result)
# get average result
for result_name in result_list:
result['ave_{}'.format(result_name)] = result['ave_{}'.format(
result_name)] / len(result['sequence_result'])
# for debug
#print('ave {} {}'.format(result_name,result['ave_{}'.format(result_name)]))
if save_result:
with open(result_file_name, "wb") as output_file:
pkl.dump(result, output_file)
return result
def print_and_save_result(self, results):
"""
Print and save result.
:param results: Result to show
:type results: dict
"""
self.print_result(results)
self.save_result(results)
@abstractmethod
def evaluate(self, dataset, detector):
"""
        Main function that runs the evaluation wrapper. It can differ between evaluation types.
:param dataset: Dataset to extract the feature
:type dataset: SequenceDataset
:param detector: Detector used to extract the feature
:type detector: DetectorAndDescriptor
See Also
--------
evaluate_warpper:
"""
pass
@abstractmethod
def evaluate_unit(self, feature_1, feature_2, task):
"""
        Single evaluation unit. Given two features, return the result. This differs between benchmarks.
:param feature_1: Feature to run. It can be feature or descriptor.
:type feature_1: array
:param feature_2: Feature to run. It can be feature or descriptor.
:type feature_2: array
:param task: What to run
:type task: dict
See Also
--------
evaluate_warpper: How to run the unit.
dset.dataset.Link: definition of task.
"""
pass
@abstractmethod
def detect_feature_custom(self, dataset, detector,
use_cache=False, save_feature=True):
"""
        Customized feature extraction method, for special tasks.
:param dataset: Dataset to extract the feature
:type dataset: SequenceDataset
:param detector: Detector used to extract the feature
:type detector: DetectorAndDescriptor
:param use_cache: Load cached feature and result or not
:type use_cache: boolean
        :param save_feature: Save computed feature or not
:type save_feature: boolean
:returns: feature
:rtype: dict
See Also
--------
evaluate_warpper:
extract_descriptor_custom:
"""
pass
@abstractmethod
def extract_descriptor_custom(
self, dataset, detector, use_cache=False, save_feature=True):
"""
        Customized descriptor extraction method, for special tasks.
:param dataset: Dataset to extract the descriptor
:type dataset: SequenceDataset
:param detector: Detector used to extract the descriptor
:type detector: DetectorAndDescriptor
:param use_cache: Load cached feature and result or not
:type use_cache: boolean
        :param save_feature: Save computed feature or not
:type save_feature: boolean
:returns: feature
:rtype: dict
See Also
--------
evaluate_warpper:
extract_feature_custom:
"""
pass
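
# ------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a minimal concrete
# benchmark built on the template above. The metric name 'num_feature', the
# bench_name/test_name values and the evaluate() signature are illustrative
# assumptions; real benchmarks in this repository may differ.
class FeatureCountBenchmarkSketch(Benchmark):
    def __init__(self, tmp_feature_dir='./data/features/',
                 result_dir='./python_scores/'):
        super(FeatureCountBenchmarkSketch, self).__init__(
            'feature_count_sketch', tmp_feature_dir, result_dir)
        # evaluate_warpper() expects these two attributes on the subclass
        self.bench_name = 'feature_count_sketch'
        self.test_name = 'feature_count'

    def evaluate(self, dataset, detector, use_cache=True, save_result=True):
        # delegate to the generic wrapper with a single metric
        return self.evaluate_warpper(dataset, detector, ['num_feature'],
                                     extract_descriptor=False,
                                     use_cache=use_cache, save_result=save_result)

    def evaluate_unit(self, feature_1, feature_2, task):
        # one number per metric in result_list: the smaller keypoint count
        return [min(len(feature_1), len(feature_2))]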
StarcoderdataPython

4968495
<reponame>cmltaWt0/edx-load-tests<filename>loadtests/teams_discussion/__init__.py
from locustfile import TeamsDiscussionLocust
|
StarcoderdataPython
|
5034964
|
<filename>pdmreader/unicode_formatter.py
import string
import unicodedata
import math
import itertools
# https://stackoverflow.com/a/44237289/3128576
class UnicodeFormatter(string.Formatter):
def format_field(self, value, format_spec):
if not isinstance(value, str) or not value or not format_spec:
return super().format_field(value, format_spec)
print_length = self.get_print_width(value)
if len(value) == print_length:
return format(value, format_spec)
fill, align, width, format_spec = UnicodeFormatter.parse_align(format_spec)
if width == 0:
return value
formatted_value = format(value, format_spec)
pad_len = width - print_length
if pad_len <= 0:
return formatted_value
left_pad = ''
right_pad = ''
if align in '<=':
right_pad = fill * pad_len
elif align == '>':
left_pad = fill * pad_len
elif align == '^':
left_pad = fill * math.floor(pad_len/2)
right_pad = fill * math.ceil(pad_len/2)
return ''.join((left_pad, formatted_value, right_pad))
@staticmethod
def get_print_width(s: str):
width = 0
for c in s:
# https://bugs.python.org/issue12568#msg145523
width_type = unicodedata.east_asian_width(c)
if width_type == 'F' or width_type == 'W':
width += 2
else:
width += 1
return width
@staticmethod
def parse_align(format_spec):
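        # Consume the leading [[fill]align][sign][#][0][width] part of a standard
        # format spec and return (fill, align, width, remaining_spec).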
format_chars = '=<>^'
align = '<'
fill = None
        if len(format_spec) > 1 and format_spec[1] in format_chars:
align = format_spec[1]
fill = format_spec[0]
format_spec = format_spec[2:]
elif format_spec[0] in format_chars:
align = format_spec[0]
format_spec = format_spec[1:]
if align == '=':
raise ValueError("'=' alignment not allowed in string format specifier")
        if format_spec and format_spec[0] in '+- ':
            raise ValueError('Sign not allowed in string format specifier')
        if format_spec and format_spec[0] == '#':
            raise ValueError('Alternate form (#) not allowed in string format specifier')
        if format_spec and format_spec[0] == '0':
if fill is None:
fill = '0'
format_spec = format_spec[1:]
if fill is None:
fill = ' '
width_str = ''.join(itertools.takewhile(str.isdigit, format_spec))
width_len = len(width_str)
format_spec = format_spec[width_len:]
if width_len > 0:
width = int(width_str)
else:
width = 0
return fill, align, width, format_spec
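
if __name__ == '__main__':
    # Hedged demo (not part of the original module): pad strings containing
    # full-width CJK characters to a common print width of 10 columns.
    formatter = UnicodeFormatter()
    print(formatter.format('|{:<10}|', '数据'))  # 2 wide chars + 6 spaces of padding
    print(formatter.format('|{:<10}|', 'data'))  # 4 narrow chars + 6 spaces of padding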
StarcoderdataPython