seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
407659418
|
import copy
import random
import torch
from .base_model import BaseModel
from .utils import (
get_G_heap,
G_Net,
combine_mapping_networks,
categorize_mappings,
coshuffle,
)
from .optimizers import get_optimizer
from models.networks import networks
from models.networks.loss import GANLoss, cal_gradient_penalty
from models.networks.utils import get_prior
from util.util import one_hot
class GAGANModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
if is_train:
parser.add_argument(
'--g_loss_mode',
nargs='*',
default=['vanilla'],
help='lsgan | nsgan | vanilla | wgan | hinge | rsgan',
)
parser.add_argument(
'--d_loss_mode',
type=str,
default='vanilla',
help='lsgan | nsgan | vanilla | wgan | hinge | rsgan',
)
parser.add_argument('--which_D', type=str, default='S', help='Standard(S) | Relativistic_average (Ra)')
parser.add_argument('--lambda_f', type=float, default=0.1, help='the hyper-parameter that balance Fq and Fd')
parser.add_argument('--candi_num', type=int, default=2,
help='# of survived candidatures in each evolutionary iteration.')
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
self.output = None
self.loss_names = ['D_real', 'D_fake', 'D_gp', 'G', 'D']
self.visual_names = ['real_visual', 'gen_visual']
if self.isTrain: # only defined during training time
self.model_names = ['G', 'D']
else:
self.model_names = ['G']
# define networks
self.netG = networks.define_G(opt, self.gpu_ids)
if self.isTrain: # only defined during training time
self.netD = networks.define_D(opt, self.gpu_ids)
# define loss functions
self.criterionD = GANLoss(opt.d_loss_mode, 'D', opt.which_D).to(self.device)
# define G mutations
self.G_mutations = [
GANLoss(g_loss, 'G', opt.which_D).to(self.device)
for g_loss in opt.g_loss_mode
]
# initialize optimizers
self.optimizer_G = get_optimizer(opt.optim_type)(self.netG.parameters(), lr=opt.lr_g)
self.optimizer_D = get_optimizer(opt.optim_type)(self.netD.parameters(), lr=opt.lr_d)
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
# Evolutionary candidatures setting (init)
self.G_candis = [copy.deepcopy(self.netG.state_dict())] * opt.candi_num
self.optG_candis = [copy.deepcopy(self.optimizer_G.state_dict())] * opt.candi_num
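        # The evolutionary population starts as candi_num identical copies of
        # netG and its optimizer state; Evo_G later mutates each copy with a
        # different generator loss and keeps only the fittest survivors.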
def forward(self) -> dict:
batch_size = self.opt.batch_size
if self.opt.gan_mode == "conditional":
z = get_prior(self.opt.batch_size, self.opt.z_dim, self.opt.z_type, self.device)
y = self.CatDis.sample([batch_size])
y = one_hot(y, [batch_size, self.opt.cat_num])
gen_data = self.netG(z, y)
self.set_output(gen_data)
return {'data': gen_data, 'condition': y}
elif self.opt.gan_mode == 'unconditional':
gen_data = self.netG(self.inputs)
self.set_output(gen_data)
return {'data': gen_data}
elif self.opt.gan_mode == 'unconditional-z':
z = get_prior(self.opt.batch_size, self.opt.z_dim, self.opt.z_type, self.device)
gen_data = self.netG(z)
self.set_output(gen_data)
return {'data': gen_data}
else:
raise ValueError(f'unsupported gan_mode {self.opt.gan_mode}')
def set_output(self, x):
self.output = x
def get_output(self):
return self.output
def backward_G(self, gen_data, criterion) -> dict:
# pass D
real_out = self.netD(self.inputs)
fake_out = self.netD(gen_data)
loss_G_fake, loss_G_real = criterion(fake_out, real_out)
if self.opt.dataset_mode == 'embedding' and not self.opt.exact_orthogonal:
embedding_dim = gen_data['data'].shape[1]
weight = self.netG.module.layer.data
loss_G_orthogonal = 0.001 / 2 * (
(weight.T @ weight) - torch.eye(embedding_dim, device=self.device)
).norm()
else:
loss_G_orthogonal = 0.
loss_G = loss_G_fake + loss_G_real + loss_G_orthogonal
loss_G.backward()
return {
'': loss_G,
'fake': loss_G_fake,
'real': loss_G_real,
'orthogonal': loss_G_orthogonal,
'mode': criterion.loss_mode,
}
def backward_D(self, gen_data):
# pass D
real_out = self.netD(self.inputs)
fake_out = self.netD(gen_data)
self.loss_D_fake, self.loss_D_real = self.criterionD(fake_out, real_out)
        if self.opt.use_gp:
self.loss_D_gp = cal_gradient_penalty(
self.netD,
self.inputs['data'],
gen_data['data'],
self.device,
type='mixed',
constant=1.0,
lambda_gp=10.0,
)[0]
else:
self.loss_D_gp = 0.
self.loss_D = self.loss_D_fake + self.loss_D_real + self.loss_D_gp
self.loss_D.backward()
def optimize_parameters(self):
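        # One evolutionary generator update (Evo_G + crossover) for every
        # D_iters discriminator updates: steps that are multiples of
        # (D_iters + 1) go to G, all others train D.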
if self.step % (self.opt.D_iters + 1) == 0:
self.set_requires_grad(self.netD, False)
            self.G_candis, self.optG_candis, self.loss_G = self.Evo_G(self.G_candis, self.optG_candis)
            self.G_candis, self.optG_candis, xo_success_rate = self.crossover(self.G_candis, self.optG_candis)
self.loss_G = {'xo_success_rate': xo_success_rate, **self.loss_G}
else:
gen_data = self.forward()
self.set_requires_grad(self.netD, True)
self.optimizer_D.zero_grad()
self.backward_D(gen_data)
self.optimizer_D.step()
self.step += 1
def Evo_G(self, G_candis, optG_candis):
"""
Enumerate candi_num*G_mutations to find the top
candi_num network for fitness_score, self.netG will
be updated using the best network.
"""
G_heap = get_G_heap(self.opt.candi_num)
# variation-evaluation-selection
for G_candi, optG_candi in zip(G_candis, optG_candis):
for criterionG in self.G_mutations:
# Variation
self.netG.load_state_dict(G_candi)
self.optimizer_G.load_state_dict(optG_candi)
self.optimizer_G.zero_grad()
gen_data = self.forward()
G_losses = self.backward_G(gen_data, criterionG)
self.optimizer_G.step()
# Evaluation
fitness = self.fitness_score()
# Selection
if fitness > G_heap.top().fitness:
netG_dict = copy.deepcopy(self.netG.state_dict())
optimizerG_dict = copy.deepcopy(self.optimizer_G.state_dict())
G_heap.replace(
G_Net(fitness=fitness, G_candis=netG_dict, optG_candis=optimizerG_dict, losses=G_losses)
)
G_candis = [net.G_candis for net in G_heap.array]
optG_candis = [net.optG_candis for net in G_heap.array]
max_idx = G_heap.argmax()
self.netG.load_state_dict(G_candis[max_idx])
# self.optimizer_G.load_state_dict(optG_candis[max_idx]) # not sure if loading is necessary
loss_G = G_heap.array[max_idx].losses
return G_candis, optG_candis, loss_G
def crossover(self, G_candis: list, optG_candis: list):
"""
crossover nets
"""
G_candis, optG_candis = coshuffle(G_candis, optG_candis)
G_heap = get_G_heap(self.opt.candi_num)
for G_candi, optG_candi in zip(G_candis, optG_candis):
self.netG.load_state_dict(G_candi)
fitness = self.fitness_score()
if fitness > G_heap.top().fitness:
G_heap.replace(
G_Net(fitness=fitness, G_candis=G_candi, optG_candis=optG_candi, losses=None)
)
SO_mappings, non_SO_mappings, SO_optG, non_SO_optG = categorize_mappings(G_candis, optG_candis)
xo_total_count, xo_success_count = 0, 0
for networks, optimizers, is_SO in zip(
[SO_mappings, non_SO_mappings],
[SO_optG, non_SO_optG],
[True, False]
):
for (G_candi_1, G_candi_2), optGs in zip(
zip(networks[::2], networks[1::2]),
zip(optimizers[::2], optimizers[1::2]),
):
G_child = combine_mapping_networks(G_candi_1, G_candi_2, is_SO=is_SO)
optG_child = random.choice(optGs)
self.netG.load_state_dict(G_child)
fitness = self.fitness_score()
if fitness > G_heap.top().fitness:
G_heap.replace(
G_Net(fitness=fitness, G_candis=G_child, optG_candis=optG_child, losses=None)
)
xo_success_count += 1
xo_total_count += 1
G_candis = [net.G_candis for net in G_heap.array]
optG_candis = [net.optG_candis for net in G_heap.array]
max_idx = G_heap.argmax()
self.netG.load_state_dict(G_candis[max_idx])
self.optimizer_G.load_state_dict(optG_candis[max_idx]) # not sure if loading is necessary
return G_candis, optG_candis, xo_success_count / xo_total_count
def fitness_score(self):
"""
Evaluate netG based on netD
"""
with torch.no_grad():
eval_data = self.forward()
eval_fake = self.netD(eval_data)
# Quality fitness score
Fq = eval_fake.data.mean().item()
return Fq
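# A minimal usage sketch (hedged: the `opt` namespace and `set_input` come
# from this project's option parser and BaseModel, so treat the exact names
# as assumptions rather than a confirmed API):
#
#   model = GAGANModel(opt)           # builds netG, netD, optimizers, candidates
#   for batch in loader:
#       model.set_input(batch)        # expected to populate self.inputs
#       model.optimize_parameters()   # alternates D steps with one Evo_G step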
| null |
models/gagan_model.py
|
gagan_model.py
|
py
| 10,159 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "base_model.BaseModel",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "base_model.BaseModel.__init__",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "base_model.BaseModel",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "models.networks.networks.define_G",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.networks.networks",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "models.networks.networks.define_D",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "models.networks.networks",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "models.networks.loss.GANLoss",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "models.networks.loss.GANLoss",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "optimizers.get_optimizer",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "optimizers.get_optimizer",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "models.networks.utils.get_prior",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "util.util.one_hot",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "models.networks.utils.get_prior",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "models.networks.loss.cal_gradient_penalty",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "utils.get_G_heap",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "utils.G_Net",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "utils.coshuffle",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "utils.get_G_heap",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "utils.G_Net",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "utils.categorize_mappings",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "models.networks.networks",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "models.networks.networks",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "utils.combine_mapping_networks",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "utils.G_Net",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 256,
"usage_type": "call"
}
] |
190917490
|
import numpy as np
from importlib import import_module, reload
from zfit_constants import *
from lmfit import minimize, Parameters
from scipy.stats import gmean # geometric mean
import csv
class CliInterface():
def __init__(self):
        self.checkBoxLogMag = None  # stub for the GUI checkbox when running headless
class DoModel:
"""
Encapsulate all operations for modeling. This class is instantiated
only once, and the instance is used to interface to the remaining
program and to perform the actual modeling.
"""
    def __init__(self, cli=False):
        # Interface to the main program which instantiates this class.
        # In CLI mode a stub object stands in for the GUI main window.
        self.amw = CliInterface() if cli else None
        self.ya = None
        self.ya_list = None
        self.range = None
        self.range_list = None
        self.print_results = None
        self.draw_formatted = None
        self.exc_handler = None
# Local fitting functions ============================
def _fcn2min(self, params, w, Z, weight, **kwargs):
"""
This is the function to minimize. It is the difference between model
and target (aka residuals) with modeling and penalty weights applied.
:param params:
:param w: radian frequency array
:param Z: complex impedances corresponding to frequencies w
:param weight: array of weights corresponding to frequencies w
:param **kwargs: keyword arguments
:return: must return array for leastsq method, optional for others.
"""
if self.amw.checkBoxLogMag.isChecked():
diff = np.log10(
Z) - np.log10(self.model.model(w, params, **kwargs))
else:
diff = Z - self.model.model(w, params, **kwargs)
diff *= weight
# Flatten complex impedance into re/im adjacent floats
residuals = diff.view('double')
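        # (A complex128 array viewed as 'double' reinterprets its buffer as
        # interleaved re/im float64 pairs, so leastsq sees 2N real residuals.)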
if LIMITS == "zfit":
# Get mean-square float of entire difference array for weight scaling
mean_sq = (residuals**2).mean()
# Append penalties to residuals
bounded = np.hstack(
(residuals, self._bound_penalties(params, mean_sq)))
return bounded
else:
return residuals
def _bound_penalties(self, params, weight):
"""
This function is only used when zfit_constants.LIMITS == 'zfit'.
Return a list of numbers each of which increases rapidly when the min or max for
a param is approached. This represents boundary penalties as a parameter goes
out of bounds.
:param params:
:param weight:
:return: a list of N elements where N is the total number of min or max bounds
in the set of params. Each element is a number which increases rapidly when
the min or max bound is approached. Append penalty elements if params are out of bounds.
"""
penalties = []
# Custom limiting is done here:
# This is an exponent which controls the abruptness of penalty increase as
# a min or max limit is approached:
PENALTY_WALL = 6
full_penalty = 1e4 * weight
for p in self.model.PARAMS:
name = p['name']
val = params[name].value
# max and min must be >0 or None.
# Use min/max limits from model.PARAMS here and set them to None
# for lmfit (except for diff evo method).
            if p['max'] is None:
                max_pen = 0
            else:
                max_pen = full_penalty if val >= p['max'] else \
                    full_penalty * np.power(val/p['max'], PENALTY_WALL)
            if p['min'] is None:
                min_pen = 0
            else:
                min_pen = full_penalty if val <= p['min'] else \
                    full_penalty * np.power(p['min']/val, PENALTY_WALL)
penalty = np.maximum(np.abs(max_pen), np.abs(min_pen))
penalties.append(penalty)
return penalties
def _min_max_set(self, min_max, method, scaled_val):
"""
Set min or max for the minimization function being used, and
whether limits are handled by lmfit or zfit.
:param min_max:
:param method:
:param scaled_val:
:return:
"""
if method == "differential_evolution":
# Diff Evo requires finite min & max values
            return scaled_val if min_max is None else min_max
elif LIMITS == "zfit":
# lmfit doesn't do the limiting
return None
elif LIMITS == "lmfit":
# lmfit gets spec'd limit
return min_max
def _prog_bar_update(self, x1, x2, x3, *x4, **x5):
"""
Update the progress bar in the main app.
(Could look for abort here too.)
        :param xn: Dummy args matching the actual arguments sent by the minimize() callback
:return: nothing
"""
self.amw.prog_bar_tick += 1
if self.amw.prog_bar_tick >= 100:
self.amw.prog_bar_tick = 0
            nxt = (self.amw.progressBar.value() + 1) % 100
            self.amw.progressBar.setValue(nxt)
def _find_sf(self, model_list):
"""
Find scale factors for frequency (FSF) and impedance (ZSF).
Ignore component if it doesn't begin with R, G, C, L, or M
:param model_list: PARAM list from model script
:return: FSF and ZSF
"""
R, G, C, L = [], [], [], []
for comp in model_list:
type = comp['name'][0].upper()
if type == 'R':
R.append(comp['init'])
elif type == 'G':
G.append(comp['init'])
elif type == 'C':
C.append(comp['init'])
elif type == 'L' or type == 'M':
L.append(comp['init'])
# Get zsf depending on whether Rs and/or Gs are present
if R == []:
if G == []:
# No Rs or Gs
zsf = 1.0
else:
# Gs but no Rs
zsf = 1.0 / gmean(G)
else:
if G == []:
# Rs but no Gs
zsf = gmean(R)
else:
# Both Rs and Gs. Use the geometric mean of the
# preferred zsf for each
Rzsf = gmean(R)
Gzsf = 1.0 / gmean(G)
zsf = gmean([Rzsf, Gzsf])
# Get fsf depending on whether Ls and/or Cs are present
if L == []:
if C == []:
# No Ls or Cs
fsf = 1.0
else:
# No Ls, but Cs
fsf = 1.0 / (gmean(C) * zsf)
else:
if C == []:
# No Cs, but Ls
fsf = zsf / gmean(L)
else:
# Both Ls and Cs. Use the geometric mean of the
# preferred fsf for each
Lfsf = zsf / gmean(L)
Cfsf = 1.0 / (gmean(C) * zsf)
fsf = gmean([Lfsf, Cfsf])
return fsf, zsf
def _normalize(self, val, min, max, comp_type, fsf, zsf):
# Scale val, min, and max by the appropriate factor depending
# on its type and return the scaled values. Default scale is 1.0.
scale = {
'R': 1.0/zsf,
'G': zsf,
'C': fsf*zsf,
'L': fsf/zsf,
'M': fsf/zsf
}
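        # Why these factors: with w' = w/fsf and Z' = Z/zsf, a capacitor's
        # impedance 1/(w C) keeps its normalized value only if C' = C*fsf*zsf;
        # likewise Z_L = w L gives L' = L*fsf/zsf, and R' = R/zsf directly.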
s = scale.get(comp_type, 1.0)
min_ret = None if min is None else min * s
max_ret = None if max is None else max * s
return {'init': val*s, 'min': min_ret, 'max': max_ret}
def _denormalize(self, val, comp_type, fsf, zsf):
# Scale val by the appropriate factor depending on its type
# and return the scaled value. Default scale is 1.0.
scale = {
'R': zsf,
'G': 1.0/zsf,
'C': 1.0/(fsf*zsf),
'L': zsf/fsf,
'M': zsf/fsf
}
s = scale.get(comp_type, 1.0)
return val * s
def do_model(self):
# Clear status and params label boxes
self.amw.labelParams.setText("")
self.amw.labelParams.repaint()
self.amw.labelStatus.setText("Modeling...")
self.amw.labelStatus.repaint()
# Import the model script, or reload it if already imported
self.model = import_module("Models." + self.amw.lineEditModel.text())
self.model = reload(self.model)
# Clear any previous modeling, M and P axes
for line in self.ya.ax[M].get_lines() + self.ya.ax[P].get_lines():
if line.get_label() == "modeledZPlot":
line.remove()
# Create local concatenated arrays for freq, mag, phase, and load.
# Overwrites data in ya class
m = np.array([])
p = np.array([])
f = np.array([])
l = np.array([])
for y, r in zip(self.ya_list, self.range_list):
# Copy data from lists to working objects
self.ya.data_unbundle(y)
self.range.data_unbundle(r)
# Append data to form full set for modeling
m = np.append(m, self.ya.inputData[M])
p = np.append(p, np.radians(self.ya.inputData[P]))
f = np.append(f, self.range.xa["Hz"])
l = np.append(l, self.range.load_array)
# Radian frequency
w = 2*np.pi*f
# Use drawn curves if they exist
if self.ya.drawnData[M] is not None:
# Drawn data exists for magnitude, use it instead
m = self.ya.drawnData[M]
if self.ya.drawnData[P] is not None:
# Drawn data exists for phase, use it instead
p = np.radians(self.ya.drawnData[P])
if len(self.ya_list) > 1:
# Multiple data segments.
# Create null weighting array, same size as m but full of 1's
weight = m.copy()
weight.fill(1.0)
else:
weight = self.ya.drawnData[W]
# Complex impedance target
z = m*np.exp(1j*p)
# Instantiate clean class for lmfit fitter
params = Parameters()
params.clear()
# Init list of name/value tuples
values = []
# Get selected fitting method
method = METHODS[self.amw.comboBoxMethod.currentIndex()][1]
if self.amw.checkBoxLocked.isChecked():
# Read last saved or edited params data (denormalized)
with open(PARAM_FILE, mode='r', encoding='utf-8', newline='') as f:
reader = csv.reader(f)
next(f) # skip header line
for line in reader:
v = (line[0], float(line[1]))
# Build a list of name/value tuples
values.append(v)
else:
# Do actual modeling.
# Make working copy of PARAMS list from model
param_list = list(self.model.PARAMS)
# Adjust min and max if necessary
for p in param_list:
p["min"] = self._min_max_set(p["min"], method, p["init"] / 1e2)
p["max"] = self._min_max_set(p["max"], method, p["init"] * 1e2)
if self.amw.do_norm_denorm:
# Normalize component, frequency, and impedance values
# Determine frequency and Z scaling factors from initial values
fsf, zsf = self._find_sf(param_list)
# Normalize each component value, min, and max
for p in param_list:
type = p["name"][0].upper()
norm = self._normalize(
p["init"], p["min"], p["max"], type, fsf, zsf)
p["init"] = norm["init"]
p["min"] = norm["min"]
p["max"] = norm["max"]
# Normalize frequency, target Z, and load
w = w / fsf
z = z / zsf
l = l / zsf
else:
fsf, zsf = 1.0, 1.0
# Add modified params to lmfit Parameter class
# .add converts min/max of None to -/+inf
for p in param_list:
params.add(p["name"], value=p["init"],
vary=p["vary"], min=p["min"], max=p["max"])
# Perform weighted model optimization.
# Errors will be caught and displayed by zfit_excepthook() in main window.
kw_args = {"load": l, "fsf": fsf, "zsf": zsf}
result = minimize(self._fcn2min, params, args=(w, z, weight), kws=kw_args, method=method,
iter_cb=self._prog_bar_update)
# Don't use params class after minimize -- some values are scrambled or changed.
# Populate values[] with modeling results, denormalized if necessary
for p in param_list:
name = p["name"]
val = result.params[name].value
if self.amw.do_norm_denorm:
comp_type = name[0]
val = self._denormalize(val, comp_type, fsf, zsf)
v = (name, val)
values.append(v)
if self.amw.do_norm_denorm:
# Denormalize frequency, target Z, and load
w = w * fsf
z = z * zsf
l = l * zsf
# Write denormalized modeling results to file
with open(PARAM_FILE, mode='w', encoding='utf-8') as f:
print('name, value', file=f)
for p in values:
print('{}, {}'.format(p[0], p[1]), file=f)
self.amw.progressBar.setValue(0)
# Convert list of tuples to a single dict for the model, to be compatible
# with the way minimize() uses the model
values_d = {p[0]: p[1] for p in values}
# Get complex impedance of model using modeled or locked parameters
# Use denormalized values
kw_args = {"load": l, "fsf": 1.0, "zsf": 1.0}
zfit = self.model.model(w, values_d, **kw_args)
# Break into magnitude and degree phase
magfit = np.abs(zfit)
phasefit = np.angle(zfit, deg=True)
# Split into segments as required, add to data list
a, b, i = 0, 0, 0
for y in self.ya_list:
self.ya.data_unbundle(y)
seg_length = len(self.ya.inputData[M])
b += seg_length
self.ya.modeledData[M] = magfit[a:b]
self.ya.modeledData[P] = phasefit[a:b]
a += seg_length
self.ya_list[i] = self.ya.data_bundle()
i += 1
# Refresh working classes with currently indexed segment data
self.ya.data_unbundle(self.ya_list[self.range.segment_index])
self.range.data_unbundle(self.range_list[self.range.segment_index])
# Add to plot
self.ya.ax[M].plot(self.range.xa["Hz"], self.ya.modeledData[M], self.ya.modeledLinePlot[M],
ls=self.ya.modeledLineStyle[M], lw=1, label="modeledZPlot")
self.ya.ax[P].plot(self.range.xa["Hz"], self.ya.modeledData[P], self.ya.modeledLinePlot[P],
ls=self.ya.modeledLineStyle[P], lw=1, label="modeledZPlot")
# Update results text box
self.print_results(values)
if self.amw.checkBoxLocked.isChecked():
self.amw.labelStatus.setText("")
else:
# Append "written to" text to results box
outstr = self.amw.labelParams.text()
outstr += "<br>Written to<br>" + PARAM_FILE
self.amw.labelParams.setText(outstr)
# Print optimization info
status = "Number of function calls: " + str(result.nfev) + "<br>"
if result.aborted:
status = RICH_TEXT_RED + "Process aborted:<br>"
#status += result.lmdif_message
self.amw.labelStatus.setText(status)
self.draw_formatted()
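# Summary of do_model's flow: build the complex target Z = m*exp(j*p) from the
# measured magnitude/phase, optionally normalize (w, Z, load) by (fsf, zsf),
# fit with lmfit.minimize against _fcn2min, denormalize the fitted component
# values, then evaluate the model and plot the fitted |Z| and phase curves.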
| null |
pyZfit/zfit_modelcore.py
|
zfit_modelcore.py
|
py
| 16,304 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.log10",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "scipy.stats.gmean",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "importlib.reload",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "numpy.append",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "numpy.radians",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "lmfit.Parameters",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "lmfit.minimize",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 381,
"usage_type": "call"
},
{
"api_name": "numpy.angle",
"line_number": 382,
"usage_type": "call"
}
] |
93733382
|
import datetime
import json
from django.db.models import Q
from django.views.generic import View
# Create your views here.
from IHome.utils.response_code import RET
from IHome.utils.views import LoginRequiredJSONMixin
from django import http
import logging
from house.models import House
from order.models import Order
logger = logging.getLogger('django')
class HouseOrder(LoginRequiredJSONMixin, View):
    '''House orders'''
def get(self, request):
"""
1. 去订单的表中查询当前登录用户下的订单
2. 返回数据
"""
user = request.user
# 取当前角色的标识:房客:custom,房东:landlord
role = request.GET.get("role")
if role is None:
return http.JsonResponse({'errno': RET.PARAMERR, 'errmsg': "参数错误"})
# 判断 role 是否是指定的值
if role not in ("custom", "landlord"):
return http.JsonResponse({'errno': RET.PARAMERR, 'errmsg': "参数错误"})
try:
if "custom" == role: # 房客订单查询
orders = Order.objects.filter(user=user).order_by('-create_time')
elif "landlord" == role: # 房东订单查询
# 1. 先查出当前登录用户的所有的房屋, House
houses = House.objects.filter(user=user)
# 2. 取到所有的房屋id
houses_ids = [house.id for house in houses]
# 3. 从订单表中查询出房屋id在第2步取出来的列表中的房屋
orders = Order.objects.filter(house_id__in=houses_ids).order_by('-create_time')
except Exception as e:
logger.error(e)
return http.JsonResponse({'errno': RET.DBERR, 'errmsg': "数据查询错误"})
orders_dict_li = []
for order in orders:
orders_dict_li.append({
"order_id": order.id,
"title": order.house.title,
"img_url": order.house.index_image_url if order.house.index_image_url else "",
"start_date": order.begin_date.strftime("%Y-%m-%d"),
"end_date": order.end_date.strftime("%Y-%m-%d"),
"ctime": order.create_time.strftime("%Y-%m-%d %H:%M:%S"),
"days": order.days,
"amount": order.amount,
"status": order.status,
"comment": order.comment if order.comment else ""
})
return http.JsonResponse({'errno': RET.OK, 'errmsg': "OK", 'data': {"orders": orders_dict_li}})
def post(self, request):
"""
下单
1. 获取参数
2. 校验参数
3. 查询指定房屋是否存在
4. 判断当前房屋的房主是否是登录用户
5. 查询当前预订时间是否存在冲突
6. 生成订单模型,进行下单
7. 返回下单结果
"""
# 获取到当前用户
user = request.user
# 1. 获取到传入的参数
params = json.loads(request.body.decode())
house_id = params.get('house_id')
start_date_str = params.get('start_date')
end_date_str = params.get('end_date')
        # 2. Validate the parameters
        if not all([house_id, start_date_str, end_date_str]):
            return http.JsonResponse({'errno': RET.PARAMERR, 'errmsg': 'Invalid parameters'})
try:
start_date = datetime.datetime.strptime(start_date_str, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date_str, '%Y-%m-%d')
            assert start_date < end_date, "start date must be earlier than end date"
            # Calculate the number of days of the stay
            days = (end_date - start_date).days
except Exception as e:
logger.error(e)
            return http.JsonResponse({'errno': RET.PARAMERR, 'errmsg': "Invalid parameters"})
        # 3. Check whether the given house exists
try:
house = House.objects.get(id=house_id)
except Exception as e:
logger.error(e)
            return http.JsonResponse({'errno': RET.NODATA, 'errmsg': "House does not exist"})
        # 4. If the current user owns the house, booking is not allowed
        if house.user_id == user.id:
            return http.JsonResponse({'errno': RET.ROLEERR, 'errmsg': "You cannot book your own house"})
        # 5. Check whether the house has conflicting orders
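        # (Two bookings [s1, e1) and [s2, e2) overlap exactly when s1 < e2 and
        # e1 > s2, which is what begin_date__lt / end_date__gt encode below.)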
count = Order.objects.filter(house_id=house_id, begin_date__lt=end_date, end_date__gt=start_date).count()
if count > 0:
            return http.JsonResponse({'errno': RET.DATAERR, 'errmsg': "This house is already booked"})
        try:
            # 6. Build the order model and place the order
order = Order.objects.create(user=user, house=house, begin_date=start_date, end_date=end_date,
days=days, house_price=house.price, amount=days * house.price)
except Exception as e:
logger.error(e)
            return http.JsonResponse({'errno': RET.DBERR, 'errmsg': "Failed to create the order"})
        # 7. Return the result
return http.JsonResponse({'errno': RET.OK, 'errmsg': "OK", 'data': {"order_id": order.id}})
def put(self, request):
        '''Accept or reject an order
        1. Receive the parameter: order_id
        2. Find the order by order_id (condition: status == "WAIT_ACCEPT")
        3. Update the order status
        4. Save to the database
        5. Return
        '''
user = request.user
data_json = json.loads(request.body.decode())
        # Get the order id
        order_id = data_json.get("order_id")
action = data_json.get("action")
if not all([order_id, action]):
            return http.JsonResponse({'errno': RET.PARAMERR, 'errmsg': "Invalid parameters"})
# accept / reject
if action not in ("accept", "reject"):
            return http.JsonResponse({'errno': RET.PARAMERR, 'errmsg': "Invalid parameters"})
        # 2. Query the order
try:
order = Order.objects.get(id=order_id, status='WAIT_ACCEPT')
except Exception as e:
logger.error(e)
            return http.JsonResponse({'errno': RET.NODATA, 'errmsg': "Order not found"})
        # Only the landlord of this order's house may act on it
        if user != order.house.user:
            return http.JsonResponse({'errno': RET.ROLEERR, 'errmsg': "Operation not allowed"})
        # 3. Update the order status
        if "accept" == action:
            # Accept the order
            order.status = "WAIT_COMMENT"
        elif "reject" == action:
            order.status = "REJECTED"
            # Get the rejection reason
            reason = data_json.get("reason")
            if not reason:
                return http.JsonResponse({'errno': RET.PARAMERR, 'errmsg': "Please provide a reason for rejecting the order"})
            # Save the rejection reason
            order.comment = reason
        # Commit to the database
try:
order.save()
except Exception as e:
logger.error(e)
            return http.JsonResponse({'errno': RET.DBERR, 'errmsg': "Failed to save data"})
return http.JsonResponse({'errno': RET.OK, 'errmsg': "OK"})
class HouseOrderComment(LoginRequiredJSONMixin, View):
    '''Order comments'''
def put(self, request):
"""
订单评价
1. 获取参数
2. 校验参数
3. 修改模型
"""
# 1. 获取参数
data_json = json.loads(request.body.decode())
order_id = data_json.get("order_id")
comment = data_json.get("comment")
        # 2. Validate the parameters
        if not all([order_id, comment]):
            return http.JsonResponse({'errno': RET.PARAMERR, 'errmsg': "Invalid parameters"})
try:
order = Order.objects.get(id=order_id, status="WAIT_COMMENT")
except Exception as e:
logger.error(e)
            return http.JsonResponse({'errno': RET.DBERR, 'errmsg': "Order does not exist"})
        # 3. Update the model and save to the database
order.comment = comment
order.status = "COMPLETE"
try:
order.save()
except Exception as e:
logger.error(e)
            return http.JsonResponse({'errno': RET.DBERR, 'errmsg': "Failed to save data"})
        # 4. Return the result
return http.JsonResponse({'errno': RET.OK, 'errmsg': "ok"})
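# Example JSON payloads (illustrative values only; URL routing lives elsewhere):
#   HouseOrder.post:       {"house_id": 1, "start_date": "2019-08-01", "end_date": "2019-08-03"}
#   HouseOrder.put:        {"order_id": 1, "action": "reject", "reason": "dates unavailable"}
#   HouseOrderComment.put: {"order_id": 1, "comment": "Great stay"}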
| null |
apps/order/views.py
|
views.py
|
py
| 8,441 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "IHome.utils.views.LoginRequiredJSONMixin",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.PARAMERR",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.PARAMERR",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "order.models.Order.objects.filter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "order.models.Order.objects",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "order.models.Order",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "house.models.House.objects.filter",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "house.models.House.objects",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "house.models.House",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "house.models.id",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "house.models",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "order.models.Order.objects.filter",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "order.models.Order.objects",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "order.models.Order",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.DBERR",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "order.models",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "order.models.id",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "order.models.house",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "order.models.house",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "order.models.begin_date.strftime",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "order.models.begin_date",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "order.models.end_date.strftime",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "order.models.end_date",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "order.models.create_time.strftime",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "order.models.create_time",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "order.models.days",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "order.models.amount",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "order.models.status",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "order.models.comment",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.OK",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.PARAMERR",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.PARAMERR",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "house.models",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "house.models.House.objects.get",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "house.models.House.objects",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "house.models.House",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.NODATA",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "house.models.user_id",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "house.models",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.ROLEERR",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "order.models.Order.objects.filter",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "order.models.Order.objects",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "order.models.Order",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.DATAERR",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "order.models",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "order.models.Order.objects.create",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "order.models.Order.objects",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "order.models.Order",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "house.models",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "house.models.price",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "house.models",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.DBERR",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.OK",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "order.models.id",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.PARAMERR",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.PARAMERR",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "order.models",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "order.models.Order.objects.get",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "order.models.Order.objects",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "order.models.Order",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.NODATA",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "order.models.house",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.ROLEERR",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "order.models.status",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "order.models.status",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.PARAMERR",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "order.models.comment",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "order.models.save",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "order.models",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.DBERR",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.OK",
"line_number": 181,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "IHome.utils.views.LoginRequiredJSONMixin",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.PARAMERR",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "order.models",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "order.models.Order.objects.get",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "order.models.Order.objects",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "order.models.Order",
"line_number": 205,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.DBERR",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "order.models.comment",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 211,
"usage_type": "name"
},
{
"api_name": "order.models.status",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "order.models",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "order.models.save",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "order.models",
"line_number": 215,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.DBERR",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "django.http",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "IHome.utils.response_code.RET.OK",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "IHome.utils.response_code.RET",
"line_number": 221,
"usage_type": "name"
}
] |
362079957
|
'''
@Description:
@Version: 1.0.0
@Author: louishsu
@E-mail: [email protected]
@Date: 2019-08-09 17:11:31
@LastEditTime: 2019-08-12 10:26:07
@Update:
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from utils import eig
def plot_embedding(X, y, images, title=None, t=6e-3, figsize=(12, 9)):
""" Plot embedding
Params:
X: {ndarray(N, n_features)}
y: {ndarray(N)}
images: {ndarray(N, H, W)}
title: {str}
t: {float} threshold
"""
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure(figsize=figsize)
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(y[i]), color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < t:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(
images[i], cmap=plt.cm.gray_r), X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
plt.show()
class NeighborhoodPreservingEmbedding():
""" Neighborhood Preserving Embedding
Attributes:
n_neighbors: {int}
n_components: {int}
W_: {ndarray}
components_: {ndarray(n_samples, n_components)}
"""
def __init__(self, n_neighbors, n_components=2, k_skip=2, tol=1e-12):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.k_skip = k_skip
self.tol = tol
self.W_ = None
self.components_ = None
def fit(self, X):
"""
Params:
X: {ndarray(n_samples, n_features)}
"""
from sklearn.neighbors import KDTree
kdtree = KDTree(X, metric='euclidean')
n_samples, n_features = X.shape
self.W_ = np.zeros((n_samples, n_samples))
for i in range(n_samples):
            ## Get the nearest-neighbor sample points
x = X[i]
idx = kdtree.query(x.reshape(1, -1), self.n_neighbors + 1, return_distance=False)[0][1: ]
            ## Compute the matrix Z = (x - N).dot((x - N).T)
N = X[idx]
Z = (x - N).dot((x - N).T)
            ## Compute the weights w_i
            # Z_inv = np.linalg.inv(Z + self.tol * np.eye(self.n_neighbors))
            # w = np.sum(Z_inv, axis=1) / np.sum(Z_inv)
            # The two lines above are replaced by:
Z = Z + np.eye(self.n_neighbors) * np.trace(Z) * self.tol
w = np.linalg.pinv(Z).dot(np.ones(self.n_neighbors))
w = w / np.sum(w)
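            # (The trace-scaled ridge added to Z regularizes the local Gram
            # matrix, which is singular whenever n_neighbors > n_features;
            # normalizing w to sum to 1 follows standard LLE practice.)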
            ## Store into W
for j in range(self.n_neighbors):
self.W_[idx[j], i] = w[j]
        ## Compute the matrix M = (I - W)(I - W)^T
I = np.eye(n_samples)
M = (I - self.W_).dot((I - self.W_).T)
        ## Solve X M X^T \alpha = \lambda X X^T \alpha
# A1 = X.T.M.dot(X)
# A2 = X.T.dot(X)
# eigval, eigvec = eig(A1, A2)
# eigvec = eigvec[:, np.argsort(eigval)]
        ## Solve X (I - M) X^T \alpha = \lambda X X^T \alpha
A1 = X.T.dot(I - M).dot(X)
A2 = X.T.dot(X)
eigval, eigvec = eig(A1, A2)
eigvec = eigvec[:, np.argsort(eigval)[::-1]]
        ## Select D dimensions
self.components_ = eigvec[:, self.k_skip: self.n_components + self.k_skip]
def transform(self, X):
"""
Params:
X: {ndarray(n_samples, n_features)}
Returns:
Y: {ndarray(n_samples, n_components)}
"""
Y = X.dot(self.components_)
return Y
def fit_transform(self, X):
"""
Params:
X: {ndarray(n_samples, n_features)}
Returns:
Y: {ndarray(n_samples, n_components)}
"""
self.fit(X)
Y = self.transform(X)
return Y
if __name__ == "__main__":
from sklearn import datasets
# -----------------------------------------------------------------------------
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
images = digits.images
npe = NeighborhoodPreservingEmbedding(30, 2, k_skip=3)
X_npe = npe.fit_transform(X)
plot_embedding(X_npe, y, images, title=None, t=2e-3, figsize=(12, 9))
# -----------------------------------------------------------------------------
X, color = datasets.samples_generator.make_s_curve(1000, random_state=0)
npe = NeighborhoodPreservingEmbedding(10, 2)
X_npe = npe.fit_transform(X)
plt.figure()
plt.scatter(X_npe[:, 0], X_npe[:, 1], c=color)
plt.show()
| null |
Algorithms/neighborhood_preserving_embedding.py
|
neighborhood_preserving_embedding.py
|
py
| 5,187 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.min",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm.Set1",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.offsetbox",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.r_",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.offsetbox.AnnotationBbox",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "matplotlib.offsetbox",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "matplotlib.offsetbox.OffsetImage",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.offsetbox",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.cm",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "sklearn.neighbors.KDTree",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.trace",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.pinv",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "utils.eig",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.load_digits",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "{'KDTree': 'sklearn.neighbors.KDTree'}",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.samples_generator.make_s_curve",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.samples_generator",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "sklearn.datasets",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "{'KDTree': 'sklearn.neighbors.KDTree'}",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 173,
"usage_type": "name"
}
] |
279139142
|
import numpy as np
import os
from .base import Base
from .trace import Trace
from .dispersion import Dispersion
from .sensitivity import Sensitivity
from ..utils import h5Attr
class Beam(Base):
__INT__=np.uint64
def __init__(self,h5,clip,xr=[-np.inf,np.inf],yr=[-np.inf,np.inf]):
Base.__init__(self,h5)
# the dispersion order
self.order=h5Attr(h5,'order')
        # load the beam primitives
self.trace=Trace(h5)
self.dispersion=Dispersion(h5)
self.sensitivity=Sensitivity(h5)
# save the clipper and the ranges of the detector
self.polyclip=clip
self.xr=xr
self.yr=yr
# record this
self.naxis=self.polyclip.naxis
def __str__(self):
msg='Grism beam object:\n(beam,order)=({},{})'
return msg.format(self.beam,self.order)
def specDrizzle(self,xd,yd,lamb,ignore='average'):
''' run the polyclip to get the fractional pixel areas '''
# output data products
xyg=[]
lam=[]
val=[]
# ignore the pixel if it is outside the bounding box.
ignore=ignore.lower()
if ignore=='average':
# if average of pixel is in bounding box
xave=np.average(xd)
yave=np.average(yd)
if (xave<self.xr[0]) or (xave>self.xr[1]) or \
(yave<self.yr[0]) or (yave>self.yr[1]):
return xyg,lam,val
elif ignore=='minmax':
# test min/max in range
x0,x1=np.amin(xd),np.amax(xd)
y0,y1=np.amin(yd),np.amax(yd)
if (x1<self.xr[0]) or (x0>self.xr[1]) or \
(y1<self.yr[0]) or (y0>self.yr[1]):
return xyg,lam,val
else:
pass
# convert from (x0,y0) & lamb to (xg,yg,lamb) triplets
xg,yg=self.xyd2xyg(xd,yd,lamb)
# could put pixfrac here
# clip against the edge
xg=np.clip(xg,0,self.naxis[0])
yg=np.clip(yg,0,self.naxis[1])
# run the polygon clipper
x,y,area,indices=self.polyclip(xg,yg)
        # only continue if there are drizzled pixels
if len(x) != 0:
pix=x.astype(self.__INT__)+self.naxis[0]*y.astype(self.__INT__)
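            # Flatten (x, y) grism coordinates into 1-D pixel indices so each
            # drizzled pixel is addressed by a single integer.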
# process each wavelength
for j,l in enumerate(lamb):
j0,j1=indices[j],indices[j+1]
if j1 > j0:
xyg.extend(pix[j0:j1])
lam.extend(list(np.full(j1-j0,j)))
val.extend(area[j0:j1])
return xyg,lam,val
def wavelengths(self,x,y,nsub):
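        # Build a wavelength grid spanning the sensitivity curve, sampled at
        # nsub steps per native dispersion element at (x, y).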
disp=np.abs(self.dispersion(x,y))/nsub
delta=self.sensitivity.wmax-self.sensitivity.wmin
nwave=int(delta/disp)+2
dwave=delta/(nwave-1.)
wave=np.arange(nwave)*dwave+self.sensitivity.wmin
return wave
def xyd2xyg(self,xd,yd,lamb):
''' convert an (x,y) pair in the equivalent direct-image FLT to a
collection of (x,y) pairs on a grism image at a collection of
wavelengths '''
# compute the arclength from the dispersion model
s=self.dispersion.arclength(lamb,xd,yd)
# compute the grism (x,y) pairs along the trace
xg,yg=self.trace(s,xd,yd)
return xg,yg
| null |
h5axeconfig/grism/beam.py
|
beam.py
|
py
| 3,452 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "base.Base",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.uint64",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.inf",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "base.Base.__init__",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "base.Base",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "utils.h5Attr",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "trace.Trace",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "dispersion.Dispersion",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sensitivity.Sensitivity",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.amin",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 99,
"usage_type": "call"
}
] |
315655171
|
from selenium import webdriver
from bs4 import BeautifulSoup
import os
chromedriver = "C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe"
os.environ["webdriver.chrome.driver"] = chromedriver
browser = webdriver.Chrome(chromedriver)
#设置浏览器打开url
url = "http://www.baidu.com"
browser.get(url)
#在百度搜索框输入关键字"python"
browser.find_element_by_id("kw").send_keys("python")
#单机搜索按钮
browser.find_element_by_id("su").click()
html = browser.page_source
browser.quit()
| null |
info.py
|
info.py
|
py
| 525 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 7,
"usage_type": "name"
}
] |
136835603
|
from selenium import webdriver
import math
import time
import os
try:
start_time = time.time()
link = "http://suninjuly.github.io/redirect_accept.html"
browser = webdriver.Chrome()
browser.get(link)
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
button = browser.find_element_by_xpath("//button")
button.click()
time.sleep(1)
# get the handle of the new tab
new_window = browser.window_handles[1]
# switch to the new tab
browser.switch_to.window(new_window)
# new_window = browser.window_handles[1]
# read the value of the variable x:
read_x = browser.find_element_by_xpath("//span[@id='input_value']")
# compute the mathematical function of x:
x = read_x.text
y = calc(x)
# find the answer input field
input1 = browser.find_element_by_xpath("//input[@id='answer']")
# enter the answer y:
input1.send_keys(y)
# click the Submit button:
submit = browser.find_element_by_xpath("//button[@type='submit']")
submit.click()
finally:
# report the test run time
print("Test run time: %f seconds.\n" % (time.time() - start_time))
# print the alert text to the console
alert = browser.switch_to.alert
print (alert.text)
# pause 1 second to allow copying the code
time.sleep(1)
# close the browser after all the steps
browser.quit()
| null |
part_2/2.3.6.py
|
2.3.6.py
|
py
| 1,557 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "time.time",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "math.log",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 45,
"usage_type": "call"
}
] |
552418177
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import timm
import pretrainedmodels
from efficientnet_pytorch import EfficientNet  # assumed sources of the backbones used below
from base import BaseModel
import resnest.torch as resnest_torch
# from https://www.kaggle.com/kneroma/clean-fast-simple-bird-identifier-training-colab
class PretrainedModel():
def __init__(self, num_classes=397, name="resnest"):
"""
Loads a pretrained model.
Supports ResNest, ResNext-wsl, EfficientNet, ResNext and ResNet.
Arguments:
name {str} -- Name of the model to load
Keyword Arguments:
num_classes {int} -- Number of classes to use (default: {397})
Returns:
torch model -- Pretrained model
"""
if "resnest" in name:
model = getattr(resnest_torch, name)(pretrained=True)
elif "wsl" in name:
model = torch.hub.load("facebookresearch/WSL-Images", name)
elif name.startswith("resnext") or name.startswith("resnet"):
model = torch.hub.load("pytorch/vision:v0.6.0", name, pretrained=True)
elif name.startswith("tf_efficientnet_b"):
model = getattr(timm.models.efficientnet, name)(pretrained=True)
elif "efficientnet-b" in name:
model = EfficientNet.from_pretrained(name)
else:
model = pretrainedmodels.__dict__[name](pretrained='imagenet')
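# replace the model's final classification layer so its output matches num_classes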
if hasattr(model, "fc"):
nb_ft = model.fc.in_features
model.fc = nn.Linear(nb_ft, num_classes)
elif hasattr(model, "_fc"):
nb_ft = model._fc.in_features
model._fc = nn.Linear(nb_ft, num_classes)
elif hasattr(model, "classifier"):
nb_ft = model.classifier.in_features
model.classifier = nn.Linear(nb_ft, num_classes)
elif hasattr(model, "last_linear"):
nb_ft = model.last_linear.in_features
model.last_linear = nn.Linear(nb_ft, num_classes)
self.model = model
def get_model(self):
return self.model
class BirdsongModel2(BaseModel):
def __init__(self, num_classes=397):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, kernel_size=3, padding=1) # Input dimensions = output dimensions
self.conv2 = nn.Conv2d(6, 12, kernel_size=3, padding=1) # Input dimensions = output dimensions
self.conv3 = nn.Conv2d(12, 24, kernel_size=3, padding=1) # Input dimensions = output dimensions
self.conv4 = nn.Conv2d(24, 48, kernel_size=3, padding=1) # Input dimensions = output dimensions
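# fc1 expects 48*8*17 features: four 2x2 max-pools shrink each spatial dim by 16x (e.g. for ~128x272 inputs)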
self.fc1 = nn.Linear(48 * 8 * 17, 1024)
self.fc2 = nn.Linear(1024, num_classes)
self.sig = nn.Sigmoid()
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2, 2)) # max pool cuts each dimension in half
x = F.relu(F.max_pool2d(self.conv2(x), 2, 2)) # max pool cuts each dimension in half
x = F.relu(F.max_pool2d(self.conv3(x), 2, 2)) # max pool cuts each dimension in half
x = F.relu(F.max_pool2d(self.conv4(x), 2, 2)) # max pool cuts each dimension in half
x = x.view(-1, 48 * 8 * 17)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return self.sig(x)
class BirdsongModel(BaseModel):
def __init__(self, num_classes=398):
super().__init__()
self.conv1 = nn.Conv2d(1, 4, kernel_size=3, padding=1) # Input dimensions = output dimensions
self.conv2 = nn.Conv2d(4, 8, kernel_size=3, padding=1) # Input dimensions = output dimensions
self.conv3 = nn.Conv2d(8, 16, kernel_size=3, padding=1) # Input dimensions = output dimensions
self.conv4 = nn.Conv2d(16, 32, kernel_size=3, padding=1) # Input dimensions = output dimensions
self.conv5 = nn.Conv2d(32, 64, kernel_size=3, padding=1) # Input dimensions = output dimensions
self.fc1 = nn.Linear(64 * 8 * 8, 1024)
self.fc2 = nn.Linear(1024, num_classes)
self.m = nn.Sigmoid()
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2, 2)) # max pool cuts each dimension in half
x = F.relu(F.max_pool2d(self.conv2(x), 2, 2)) # max pool cuts each dimension in half
x = F.relu(F.max_pool2d(self.conv3(x), 2, 2)) # max pool cuts each dimension in half
x = F.relu(F.max_pool2d(self.conv4(x), 2, 2)) # max pool cuts each dimension in half
x = F.relu(F.max_pool2d(self.conv5(x), 2, 2)) # max pool cuts each dimension in half
x = x.view(-1, 64 * 8 * 8)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return self.m(x)
class MnistModel(BaseModel):
def __init__(self, num_classes=10):
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
| null |
model/model.py
|
model.py
|
py
| 5,130 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "resnest.torch",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "torch.nn.hub.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn.hub",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.hub.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.nn.hub",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "base.BaseModel",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "base.BaseModel",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "base.BaseModel",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout2d",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.max_pool2d",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.dropout",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 117,
"usage_type": "name"
}
] |
313382624
|
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404,render,redirect
from django.urls import reverse
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login,logout
from .models import Question, Choice
# forms
from .forms import ChoiceForm, QuestionForm, CreateQuestionForm #,UserForm
# Create your views here.
def pollhome(request):
questions_list = Question.objects.filter(active = True)#.all()[0:4]
template = 'polls/home.html'
context = {'questions':questions_list}
return render(request,template,context)
#return HttpResponse(output)
def results(request,slug):
question = get_object_or_404(Question, slug=slug)
return render(request, 'polls/results.html', {'question': question})
def detail(request, slug):
"""
Render the detail view for a single question.
"""
try:
question = Question.objects.get(slug=slug)
except Question.DoesNotExist:
raise Http404("Question does not exist")
return render(request, 'polls/detail.html', {'question': question})
def vote(request, slug):
question = get_object_or_404(Question, slug=slug)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:result', args=(question.slug,)))
def choice_form(request):
form = ChoiceForm()
template = 'polls/choiceview.html'
context = {
'form':form
}
return render(request,template,context = context)
def edit_question_form(request,id):
question = Question.objects.get(pk=id)
form = QuestionForm(instance = question)
template = 'polls/choiceview.html'
context = {
"form":form,
}
return render(request,template,context = context)
def create_new_question(request):
if request.method == 'POST':
q_text = request.POST['question_text']
new_question = Question(question_text = q_text)
form = CreateQuestionForm(request.POST,instance=new_question)
if form.is_valid():
form.save()
return HttpResponseRedirect('/polls/home.html')
else:
form = CreateQuestionForm()
template = 'polls/newquestion.html'
context = {
"form":form
}
return render(request = request, template_name=template, context = context)
def signup(request):
#template = 'registration/signup.html'
#context = {'form':form}
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect('polls:home')
else:
form = UserCreationForm()
return render(request,'polls/newuser.html',{'form':form})
| null |
polls/views.py
|
views.py
|
py
| 3,399 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "models.Question.objects.filter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Question.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.Question",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.Question.objects.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.Question.objects",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "models.Question.DoesNotExist",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.Question",
"line_number": 40,
"usage_type": "argument"
},
{
"api_name": "models.Choice.DoesNotExist",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "models.Choice",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "forms.ChoiceForm",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "models.Question.objects.get",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "models.Question.objects",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "models.Question",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "forms.QuestionForm",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "models.Question",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "forms.CreateQuestionForm",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "forms.CreateQuestionForm",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 113,
"usage_type": "call"
}
] |
431660562
|
import os
import time
import argparse
import torch
import pandas as pd
from sgm_train import train_document, train_image, train_tabular
from utils import count_run_time
class parameter(object):
def __init__(self):
parser = argparse.ArgumentParser()
# train parameter
parser.add_argument('--out_dir', type=str, default='./results/',
help="Output directory.")
parser.add_argument('--epochs', type=int, default=100,
help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=1e-4,
help='Initial learning rate.')
parser.add_argument('--early_stop', action='store_false', default=True,
help='Whether to early stop.')
parser.add_argument('--batch_size', type=int, default=1024,
help='Batch size.')
parser.add_argument('--run_num', type=int, default=10,
help='Number of experiments')
parser.add_argument('--cuda', type=str, default='0',
help='Choose cuda')
parser.add_argument('--seed', type=int, default=42, help="Random seed.")
# train information parameter
parser.add_argument('--verbose', action='store_false', default=True,
help='Whether to print training details')
parser.add_argument('--print_step', type=int, default=5,
help='Epoch steps to print training details')
parser.add_argument('--plot_logs', action='store_true', default=False,
help='Whether to plot training logs')
# data parameter
parser.add_argument('--data_name', type=str, default='market',
help='Dataset name')
parser.add_argument('--data_path', type=str, default=f'./data/',
help='Path to the dataset directory')
parser.add_argument('--inject_noise', type=bool, default=True,
help='Whether to inject noise to train data')
parser.add_argument('--cont_rate', type=float, default=0.01,
help='Inject noise to contamination rate')
parser.add_argument('--anomal_rate', type=str, default='default',
help='Adjust anomaly rate')
# model parameter
## General
parser.add_argument('--lam_out', type=float, default=20,
help='Parameter Lambda_outliers')
parser.add_argument('--lam_dist', type=float, default=0.01,
help='Parameter Lambda_DE')
parser.add_argument('--a', type=float, default=15,
help='Parameter a')
parser.add_argument('--epsilon', type=float, default=90,
help='Parameter epsilon')
# Specific
parser.add_argument('--model_name', type=str, default='SGM',
help='Choose model')
parser.add_argument('--hidden_dim', type=str, default='auto',
help='Hidden dimension of the model')
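# parse CLI arguments when run as a script; fall back to defaults when imported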
if __name__ == '__main__':
args = parser.parse_args()
else:
args = parser.parse_args([])
args.device = torch.device(f'cuda:{args.cuda}' if torch.cuda.is_available() else 'cpu')
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
# Specific design
self.__dict__.update(args.__dict__)
def update(self, update_dict):
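# merge update_dict into the stored parameters, logging every change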
logs = '==== Parameter Update \n'
origin_dict = self.__dict__
for key in update_dict.keys():
if key in origin_dict:
logs += f'{key} ({origin_dict[key]} -> {update_dict[key]}), '
origin_dict[key] = update_dict[key]
else:
logs += f'{key} ({update_dict[key]}), '
self.__dict__ = origin_dict
print(logs)
if __name__ == '__main__':
start_time = time.time()
# Total metrics
metrics = pd.DataFrame()
# Conduct one experiment
args = parameter()
print(f'Device is {args.device.type}-{args.cuda}')
an_metrics_dict = train_tabular(args)
metrics = pd.DataFrame(an_metrics_dict, index=[0])
# Conduct multiple experiments
# One parameters
# lam_out = [1, 2, 3]
# for param in lam_out:
# # init
# args = parameter()
# # update
# update_dict = {'lam_out': param}
# args.update(update_dict)
# an_metrics_dict = train_image(args)
# an_metrics = pd.DataFrame(an_metrics_dict, index=[f'{param}'])
# metrics = pd.concat([metrics, an_metrics])
# Two parameters
# Iterate parameters
# lam_out = [1, 2, 3, 4, 5]
# lam_dist = [0.06, 0.04, 0.02, 1e-2, 8e-3, 1e-3]
# print(f'lam_out: {lam_out}')
# print(f'lam_dist: {lam_dist}')
# args = parameter()
# count_time = count_run_time(5 * 6)
# count_time.path = f'{args.out_dir}{args.model_name}_{args.data_name}_{time.strftime("%M%S")}.txt'
# for param1 in lam_out:
# for param2 in lam_dist:
# # init
# args_tmp = parameter()
# args_tmp.update(args.__dict__)
# # update
# update_dict = {'lam_out': param1, 'lam_dist': param2}
# args_tmp.update(update_dict)
# print(f'Train Parameter: {args_tmp.__dict__}')
# an_metrics_dict = train_tabular(args_tmp)
# an_metrics = pd.DataFrame(an_metrics_dict, index=[f'{param1}_{param2}'])
# metrics = pd.concat([metrics, an_metrics])
# count_time.current_count()
# Three parameters
# epsilon = [80, 84, 86, 88, 90]
# lam_out = [3, 4, 5, 6, 7, 10, 18]
# lam_dist = [0.1, 0.05, 0.02, 1e-2, 5e-3, 1e-3, 1e-4, 1e-5]
# args = parameter()
# count_time = count_run_time(5 * 7 * 8)
# count_time.path = f'{args.out_dir}{args.model_name}_{args.data_name}_{str(time.time()).split(".")[0][-2:]}.txt'
# for param0 in epsilon:
# for param1 in lam_out:
# for param2 in lam_dist:
# # init
# args_tmp = parameter()
# args_tmp.update(args.__dict__)
# # update
# update_dict = {'epsilon': param0, 'lam_out': param1, 'lam_dist': param2}
# args_tmp.update(update_dict)
# # train
# print(f'Train Parameter: {args_tmp.__dict__}')
# an_metrics_dict = train_tabular(args_tmp)
# an_metrics = pd.DataFrame(an_metrics_dict, index=[f'{param0}_{param1}_{param2}'])
# metrics = pd.concat([metrics, an_metrics])
# count_time.current_count()
# # anomal_rate = [0.05, 0.1, 0.15, 0.20, 0.25]
# # lam_out = [2, 4, 5]
# # lam_dist = [8e-5, 5e-5, 1e-5, 5e-6]
# # count_time = count_run_time(5 * 3 * 4)
# # for param0 in anomal_rate:
# # for param1 in lam_out:
# # for param2 in lam_dist:
# # if param0 <= 0.1:
# # epsilon = 90
# # elif param0 <= 0.2:
# # epsilon = 80
# # elif param0 <= 0.3:
# # epsilon = 70
# # # init
# # args = parameter()
# # count_time.path = f'{args.out_dir}{args.model_name}_{args.data_name}_{time.strftime("%M%S")}.txt'
# # # update
# # update_dict = {'lam_out': param1, 'lam_dist': param2, 'anomal_rate': param0}
# # args.update(update_dict)
# # # specific
# # update_dict = {'batch_size': 32, 'epsilon': epsilon, 'hidden_dim': '[1024, 256, 80, 20]'} # For document
# # args.update(update_dict)
# # # train
# # print(f'Train Parameter: {args.__dict__}')
# # an_metrics_dict = train_document(args)
# # an_metrics = pd.DataFrame(an_metrics_dict, index=[f'{param0}_{param1}_{param2}'])
# # metrics = pd.concat([metrics, an_metrics])
# # count_time.current_count()
print(f'Finished!\nTotal time is {time.time()-start_time:.2f}s')
print(f'Current time is {time.strftime("%m%d_%H%M")}')
print(metrics.sort_values('AUC', ascending=False))
metrics.to_csv(f'{args.out_dir}{args.model_name}_{args.data_name}_{time.strftime("%m%d_%H%M")}.csv')
| null |
sgm_main.py
|
sgm_main.py
|
py
| 9,012 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "sgm_train.train_tabular",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 236,
"usage_type": "call"
}
] |
193780406
|
"""Component to manage the AIS Cloud."""
import asyncio
import logging
import requests
import json
import os
from homeassistant.ais_dom import ais_global
from homeassistant.const import EVENT_PLATFORM_DISCOVERED, EVENT_STATE_CHANGED
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.const import (CONF_NAME, CONF_IP_ADDRESS, CONF_MAC)
from homeassistant.util import slugify
DOMAIN = 'ais_cloud'
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['feedparser==5.2.1', 'readability-lxml', 'bs4']
CLOUD_APP_URL = "https://powiedz.co/ords/f?p=100:1&x01=TOKEN:"
CLOUD_WS_TOKEN = None
CLOUD_WS_HEADER = {}
GLOBAL_RSS_NEWS_TEXT = None
GLOBAL_RSS_HELP_TEXT = None
G_PLAYERS = []
def get_news_text():
return GLOBAL_RSS_NEWS_TEXT
def get_rss_help_text():
return GLOBAL_RSS_HELP_TEXT
def check_url(url_address):
# follow redirects (e.g. 301) and return the final URL
try:
r = requests.head(url_address, allow_redirects=True, timeout=1)
return r.url
except Exception:
return url_address
# get the player data dict by its friendly name
def get_player_data(player_name):
for player in G_PLAYERS:
if player["friendly_name"] == player_name:
return player
@asyncio.coroutine
def async_setup(hass, config):
"""Initialize the radio station list."""
_LOGGER.info("Initialize the radio station list.")
data = hass.data[DOMAIN] = AisColudData(hass)
yield from data.get_types_async()
# add "Console" panel to the menu list
my_ip = ais_global.get_my_global_ip()
yield from hass.components.frontend.async_register_built_in_panel(
'iframe', "Konsola", "mdi:console",
"console", {'url': 'http://' + my_ip + ':8888'})
def get_radio_types(call):
_LOGGER.info("get_radio_types ")
data.get_radio_types(call)
def get_radio_names(call):
_LOGGER.info("get_radio_names")
data.get_radio_names(call)
def select_radio_name(call):
_LOGGER.info("select_radio_name")
data.select_radio_name(call)
def get_players(call):
_LOGGER.info("get_players ")
data.get_players(call, hass)
def play_audio(call):
_LOGGER.info("play_audio ")
data.play_audio(call)
def get_podcast_types(call):
_LOGGER.info("get_podcast_types ")
data.get_podcast_types(call)
def get_podcast_names(call):
_LOGGER.info("get_podcast_names ")
data.get_podcast_names(call)
def get_podcast_tracks(call):
_LOGGER.info("get_podcast_tracks")
data.get_podcast_tracks(call)
def select_podcast_track(call):
_LOGGER.info("select_podcast_track")
data.select_podcast_track(call)
def select_media_player(call):
_LOGGER.info("select_media_player")
data.select_media_player(call)
def get_rss_news_category(call):
_LOGGER.info("get_rss_news_category")
data.get_rss_news_category(call)
def get_rss_news_channels(call):
_LOGGER.info("get_rss_news_channels")
data.get_rss_news_channels(call)
def get_rss_news_items(call):
_LOGGER.info("get_rss_news_items ")
data.get_rss_news_items(call)
def select_rss_news_item(call):
_LOGGER.info("select_rss_news_item")
data.select_rss_news_item(call)
def select_rss_help_item(call):
_LOGGER.info("select_rss_help_item")
data.select_rss_help_item(call)
# register services
hass.services.async_register(
DOMAIN, 'get_radio_types', get_radio_types)
hass.services.async_register(
DOMAIN, 'get_radio_names', get_radio_names)
hass.services.async_register(
DOMAIN, 'select_radio_name', select_radio_name)
hass.services.async_register(
DOMAIN, 'get_players', get_players)
hass.services.async_register(
DOMAIN, 'play_audio', play_audio)
hass.services.async_register(
DOMAIN, 'get_podcast_types', get_podcast_types)
hass.services.async_register(
DOMAIN, 'get_podcast_names', get_podcast_names)
hass.services.async_register(
DOMAIN, 'get_podcast_tracks', get_podcast_tracks)
hass.services.async_register(
DOMAIN, 'select_podcast_track', select_podcast_track)
hass.services.async_register(
DOMAIN, 'select_media_player', select_media_player)
hass.services.async_register(
DOMAIN, 'get_rss_news_category', get_rss_news_category)
hass.services.async_register(
DOMAIN, 'get_rss_news_channels', get_rss_news_channels)
hass.services.async_register(
DOMAIN, 'get_rss_news_items', get_rss_news_items)
hass.services.async_register(
DOMAIN, 'select_rss_news_item', select_rss_news_item)
hass.services.async_register(
DOMAIN, 'select_rss_help_item', select_rss_help_item)
def device_discovered(service):
""" Called when a device has been discovered. """
_LOGGER.info("Discovered a new device type: " + str(service.as_dict()))
try:
d = service.as_dict().get('data')
s = d.get('service')
p = d.get('platform')
if s == 'load_platform.sensor' and p == 'mqtt':
i = d.get('discovered')
uid = i.get('unique_id')
if uid is not None:
# search entity_id for this unique_id
# add sensor to group
hass.async_add_job(
hass.services.async_call(
'group',
'set', {
"object_id": "all_ais_sensors",
"add_entities": ["sensor." + uid]
}
)
)
elif s == 'load_platform.media_player':
hass.async_add_job(
hass.services.async_call('ais_cloud', 'get_players')
)
_LOGGER.info("Discovered device prepare remote menu!")
# prepare menu
hass.async_add_job(
hass.services.async_call(
'ais_ai_service',
'prepare_remote_menu'
)
)
except Exception as e:
_LOGGER.error("device_discovered: " + str(e))
hass.bus.async_listen(EVENT_PLATFORM_DISCOVERED, device_discovered)
def state_changed(state_event):
""" Called on state change """
entity_id = state_event.data.get('entity_id')
if entity_id.startswith('media_player.'):
_new = state_event.data['new_state'].attributes
if state_event.data['old_state'] is None:
_old = {}
_old['friendly_name'] = 'new ais dome device'
else:
_old = state_event.data['old_state'].attributes
# check if name was changed
if _new['friendly_name'] != _old['friendly_name']:
hass.async_add_job(
hass.services.async_call('ais_cloud', 'get_players')
)
elif entity_id == 'input_select.assistant_voice':
voice = hass.states.get(entity_id).state
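# map the selected voice name to the corresponding TTS voice identifier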
if voice == 'Jola online':
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda-network'
elif voice == 'Jola lokalnie':
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda-local'
elif voice == 'Celina':
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda#female_1-local'
elif voice == 'Anżela':
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda#female_2-local'
elif voice == 'Asia':
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda#female_3-local'
elif voice == 'Sebastian':
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda#male_1-local'
elif voice == 'Bartek':
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda#male_2-local'
elif voice == 'Andrzej':
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda#male_3-local'
else:
ais_global.GLOBAL_TTS_VOICE = 'pl-pl-x-oda-local'
elif entity_id == 'input_number.assistant_rate':
try:
ais_global.GLOBAL_TTS_RATE = float(hass.states.get(entity_id).state)
except Exception:
ais_global.GLOBAL_TTS_RATE = 1
elif entity_id == 'input_number.assistant_tone':
try:
ais_global.GLOBAL_TTS_PITCH = float(hass.states.get(entity_id).state)
except Exception:
ais_global.GLOBAL_TTS_PITCH = 1
hass.bus.async_listen(EVENT_STATE_CHANGED, state_changed)
return True
class AisCloudWS:
def __init__(self):
"""Initialize the cloud WS connections."""
self.url = "https://powiedz.co/ords/dom/dom/"
def setCloudToken(self):
# take the token from secrets
global CLOUD_WS_TOKEN, CLOUD_WS_HEADER
if CLOUD_WS_TOKEN is None:
CLOUD_WS_TOKEN = ais_global.get_sercure_android_id_dom()
CLOUD_WS_HEADER = {'Authorization': '{}'.format(CLOUD_WS_TOKEN)}
def ask(self, question, org_answer):
self.setCloudToken()
rest_url = self.url + 'ask?question=' + question + " "
rest_url += '&org_answer=' + org_answer
ws_resp = requests.get(rest_url, headers=CLOUD_WS_HEADER)
return ws_resp
def audio_type(self, nature):
self.setCloudToken()
try:
rest_url = self.url + "audio_type?nature=" + nature
ws_resp = requests.get(rest_url, headers=CLOUD_WS_HEADER, timeout=7)
return ws_resp
except Exception:
_LOGGER.error("Can't connect to AIS Cloud!!! " + rest_url)
ais_global.G_OFFLINE_MODE = True
def audio_name(self, nature, a_type):
self.setCloudToken()
rest_url = self.url + "audio_name?nature=" + nature
rest_url += "&type=" + a_type
ws_resp = requests.get(rest_url, headers=CLOUD_WS_HEADER)
return ws_resp
def audio(self, item, a_type, text_input):
self.setCloudToken()
rest_url = self.url + "audio?item=" + item + "&type="
rest_url += a_type + "&text_input=" + text_input
ws_resp = requests.get(rest_url, headers=CLOUD_WS_HEADER)
return ws_resp
def key(self, service):
self.setCloudToken()
rest_url = self.url + "key?service=" + service
ws_resp = requests.get(rest_url, headers=CLOUD_WS_HEADER)
return ws_resp
def delete_key(self, service):
self.setCloudToken()
rest_url = self.url + "key?service=" + service
ws_resp = requests.delete(rest_url, headers=CLOUD_WS_HEADER)
return ws_resp
class AisCacheData:
def __init__(self, hass):
"""Initialize the files cache"""
self.hass = hass
self.persistence_radio = '/dom/radio_stations.json'
self.persistence_podcast = '/dom/podcast.json'
self.persistence_news = '/dom/news_chanels.json'
def get_path(self, nature):
path = str(os.path.dirname(__file__))
if nature == ais_global.G_AN_RADIO:
path = path + self.persistence_radio
elif nature == ais_global.G_AN_PODCAST:
path = path + self.persistence_podcast
elif nature == ais_global.G_AN_NEWS:
path = path + self.persistence_news
return path
def audio_type(self, nature):
# get types from cache file
path = self.get_path(nature)
data = None
if not os.path.isfile(path):
return None
else:
with open(path) as file:
data = json.loads(file.read())
# items = data["data"]
# for item in items:
# # values.add(item['type'])
# # types = list(sorted(values))
# _LOGGER.error("item " + str(item))
return data
def store_audio_type(self, nature, json_data):
path = self.get_path(nature)
with open(path, 'w') as outfile:
json.dump(json_data, outfile)
def audio_name(self, nature, type):
# get names from cache file
return None
# names = [ais_global.G_EMPTY_OPTION]
# path = self.get_path(nature)
# if not os.path.isfile(path):
# return None
# else:
# return names
def audio(self, item, type, text_input):
return None
class AisColudData:
"""Class to hold radio stations data."""
def __init__(self, hass):
self.hass = hass
self.radio_names = []
self.podcast_names = []
self.podcast_tracks = []
self.audio_name = None
self.cloud = AisCloudWS()
self.cache = AisCacheData(hass)
self.news_channels = []
self.news_items = []
@asyncio.coroutine
def get_types_async(self):
def load():
# check if we have data stored in local files
# otherwise we should work in online mode and get data from cloud
# ----------------
# ----- RADIO ----
# ----------------
ws_resp = self.cloud.audio_type(ais_global.G_AN_RADIO)
if ws_resp is None:
json_ws_resp = self.cache.audio_type(ais_global.G_AN_RADIO)
else:
json_ws_resp = ws_resp.json()
self.cache.store_audio_type(ais_global.G_AN_RADIO, json_ws_resp)
types = [ais_global.G_EMPTY_OPTION]
for item in json_ws_resp["data"]:
types.append(item)
# populate list with all stations from selected type
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.radio_type",
"options": types})
# ----------------
# --- PODCASTS ---
# ----------------
ws_resp = self.cloud.audio_type(ais_global.G_AN_PODCAST)
if ws_resp is None:
json_ws_resp = self.cache.audio_type(ais_global.G_AN_PODCAST)
else:
json_ws_resp = ws_resp.json()
self.cache.store_audio_type(ais_global.G_AN_PODCAST, json_ws_resp)
types = [ais_global.G_EMPTY_OPTION]
for item in json_ws_resp["data"]:
types.append(item)
# populate list with all podcast from selected type
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.podcast_type",
"options": types})
# ----------------
# ----- NEWS -----
# ----------------
ws_resp = self.cloud.audio_type(ais_global.G_AN_NEWS)
if ws_resp is None:
json_ws_resp = self.cache.audio_type(ais_global.G_AN_NEWS)
else:
json_ws_resp = ws_resp.json()
self.cache.store_audio_type(ais_global.G_AN_NEWS, json_ws_resp)
types = [ais_global.G_EMPTY_OPTION]
for item in json_ws_resp["data"]:
types.append(item)
# populate list with all news types from selected type
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.rss_news_category",
"options": types})
yield from self.hass.async_add_job(load)
def get_radio_types(self, call):
ws_resp = self.cloud.audio_type(ais_global.G_AN_RADIO)
json_ws_resp = ws_resp.json()
types = [ais_global.G_EMPTY_OPTION]
for item in json_ws_resp["data"]:
types.append(item)
# populate list with all stations from selected type
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.radio_type",
"options": types})
def get_radio_names(self, call):
"""Load stations of the for the selected type."""
if "radio_type" not in call.data:
_LOGGER.error("No radio_type")
return []
if call.data["radio_type"] == ais_global.G_EMPTY_OPTION:
# reset status for item below
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.radio_station_name",
"options": [ais_global.G_EMPTY_OPTION]})
return
ws_resp = self.cache.audio_name(
ais_global.G_AN_RADIO, call.data["radio_type"])
if ws_resp is None:
ws_resp = self.cloud.audio_name(
ais_global.G_AN_RADIO, call.data["radio_type"])
json_ws_resp = ws_resp.json()
self.radio_names = []
names = [ais_global.G_EMPTY_OPTION]
for item in json_ws_resp["data"]:
names.append(item["NAME"])
self.radio_names.append(item)
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.radio_station_name",
"options": names})
# select the radio name
if self.audio_name is not None:
self.hass.block_till_done()
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.radio_station_name",
"option": self.audio_name})
# this name will be set after the list refresh
self.audio_name = None
# check if the change was made from the remote
import homeassistant.components.ais_ai_service as ais_ai
if (ais_ai.CURR_ENTITIE == 'input_select.radio_type'
and ais_ai.CURR_BUTTON_CODE == 23):
ais_ai.set_curr_entity(
self.hass,
'input_select.radio_station_name')
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Wybierz stację"
})
def select_radio_name(self, call):
"""Get station stream url for the selected name."""
if "radio_name" not in call.data:
_LOGGER.error("No radio_name")
return
# the station was selected from select list in app
# we need to find the url and play it
radio_name = call.data["radio_name"]
_url = None
_audio_info = {}
for audio in self.radio_names:
if audio["NAME"] == radio_name:
if "STREAM_URL" in audio:
_url = check_url(audio["STREAM_URL"])
_audio_info["NAME"] = audio["NAME"]
_audio_info["MEDIA_SOURCE"] = ais_global.G_AN_RADIO
_audio_info["IMAGE_URL"] = audio["IMAGE_URL"]
_audio_info = json.dumps(_audio_info)
if _url is not None:
# take the entity_id dynamically
# according to the input_select.radio_player LV
player_name = self.hass.states.get(
'input_select.radio_player').state
player = get_player_data(player_name)
self.hass.services.call(
'media_player',
'play_media', {
"entity_id": player["entity_id"],
"media_content_type": "audio/mp4",
"media_content_id": check_url(_url)
})
# set stream image and title only if the player is AIS dom player
if player["device_ip"] is not None:
self.hass.services.call(
'media_player',
'play_media', {
"entity_id": player["entity_id"],
"media_content_type": "ais_info",
"media_content_id": _audio_info
})
def get_podcast_types(self, call):
ws_resp = self.cloud.audio_type(ais_global.G_AN_PODCAST)
json_ws_resp = ws_resp.json()
types = [ais_global.G_EMPTY_OPTION]
for item in json_ws_resp["data"]:
types.append(item)
# populate list with all podcast types
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.podcast_type",
"options": types})
def get_podcast_names(self, call):
"""Load podcasts names for the selected type."""
if "podcast_type" not in call.data:
_LOGGER.error("No podcast_type")
return []
if call.data["podcast_type"] == ais_global.G_EMPTY_OPTION:
# reset status for item below
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.podcast_name",
"options": [ais_global.G_EMPTY_OPTION]})
return
ws_resp = self.cloud.audio_name(
ais_global.G_AN_PODCAST, call.data["podcast_type"])
json_ws_resp = ws_resp.json()
names = [ais_global.G_EMPTY_OPTION]
self.podcast_names = []
for item in json_ws_resp["data"]:
names.append(item["NAME"])
self.podcast_names.append(item)
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.podcast_name",
"options": names})
# check if the change was made from the remote
import homeassistant.components.ais_ai_service as ais_ai
if (ais_ai.CURR_ENTITIE == 'input_select.podcast_type'
and ais_ai.CURR_BUTTON_CODE == 23):
ais_ai.set_curr_entity(
self.hass,
'input_select.podcast_name')
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Wybierz audycję"
})
def get_podcast_tracks(self, call):
import feedparser
if "podcast_name" not in call.data:
_LOGGER.error("No podcast_name")
return
if call.data["podcast_name"] == ais_global.G_EMPTY_OPTION:
# reset status for item below
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.podcast_track",
"options": [ais_global.G_EMPTY_OPTION]})
return
podcast_name = call.data["podcast_name"]
if "lookup_url" in call.data:
_lookup_url = call.data["lookup_url"]
_image_url = call.data["image_url"]
selected_by_voice_command = True
else:
# the podcast was selected from select list in app
_lookup_url = None
_image_url = None
selected_by_voice_command = False
for podcast in self.podcast_names:
if podcast["NAME"] == podcast_name:
_lookup_url = podcast["LOOKUP_URL"]
_image_url = podcast["IMAGE_URL"]
if _lookup_url is not None:
# download the episodes
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Pobieram"
})
try:
d = feedparser.parse(check_url(_lookup_url))
tracks = [ais_global.G_EMPTY_OPTION]
self.podcast_tracks = []
for e in d.entries:
track = {'title': e.title, 'link': e.enclosures[0]}
try:
track['image_url'] = d.feed.image.href
except Exception:
track['image_url'] = _image_url
tracks.append(e.title)
self.podcast_tracks.append(track)
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.podcast_track",
"options": tracks})
if selected_by_voice_command:
track = self.podcast_tracks[0]
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Pobrano " + str(len(d.entries))
+ " odcinków"
+ ", audycji " + podcast_name
+ ", włączam najnowszy odcinek: " + track["title"]
})
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.podcast_track",
"option": track["title"]})
else:
# check if the change was made from the remote
import homeassistant.components.ais_ai_service as ais_ai
if (ais_ai.CURR_ENTITIE
== 'input_select.podcast_name'
and ais_ai.CURR_BUTTON_CODE == 23):
ais_ai.set_curr_entity(
self.hass,
'input_select.podcast_track')
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Pobrano " + str(len(d.entries))
+ " odcinków, wybierz odcinek"
})
else:
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Pobrano " + str(len(d.entries))
+ " odcinków"
+ ", audycji " + podcast_name
})
except Exception as e:
_LOGGER.error("Error: " + str(e))
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Nie można pobrać odcinków. " + podcast_name
})
def select_podcast_track(self, call):
"""Get track stream url for the selected name."""
if "podcast_track" not in call.data:
_LOGGER.error("No podcast_track")
return
if call.data["podcast_track"] == ais_global.G_EMPTY_OPTION:
# TODO stop selected player
pass
# the station was selected from select list in app
# we need to find the url and play it
podcast_track = call.data["podcast_track"]
_url = None
_audio_info = {}
for podcast in self.podcast_tracks:
if podcast["title"] == podcast_track:
if "link" in podcast:
_url = check_url(podcast["link"].href)
try:
_audio_info["IMAGE_URL"] = podcast["image_url"]
except Exception as e:
_audio_info["IMAGE_URL"] = ''
_audio_info["NAME"] = podcast["title"]
_audio_info["MEDIA_SOURCE"] = ais_global.G_AN_PODCAST
_audio_info = json.dumps(_audio_info)
if _url is not None:
# take the entity_id dynamically
# according to the input_select.radio_player LV
player_name = self.hass.states.get(
'input_select.podcast_player').state
player = get_player_data(player_name)
self.hass.services.call(
'media_player',
'play_media', {
"entity_id": player["entity_id"],
"media_content_type": "audio/mp4",
"media_content_id": check_url(_url)
})
# set stream image and title
if player["device_ip"] is not None:
self.hass.services.call(
'media_player',
'play_media', {
"entity_id": player["entity_id"],
"media_content_type": "ais_info",
"media_content_id": _audio_info
})
def play_audio(self, call):
audio_type = call.data["audio_type"]
if audio_type == ais_global.G_AN_RADIO:
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.radio_type",
"option": call.data["type"]})
self.hass.block_till_done()
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.radio_station_name",
"option": call.data["name"]})
# this name will be set after the list refresh
self.audio_name = call.data["name"]
self.hass.block_till_done()
player_name = self.hass.states.get(
'input_select.radio_player').state
player = get_player_data(player_name)
self.hass.services.call(
'media_player',
'play_media', {
"entity_id": player["entity_id"],
"media_content_type": "audio/mp4",
"media_content_id": check_url(call.data["stream_url"])
})
# set stream image and title
if player["device_ip"] is not None:
_audio_info = {"IMAGE_URL": call.data["image_url"], "NAME": call.data["name"],
"MEDIA_SOURCE": ais_global.G_AN_RADIO}
_audio_info = json.dumps(_audio_info)
self.hass.services.call(
'media_player',
'play_media', {
"entity_id": player["entity_id"],
"media_content_type": "ais_info",
"media_content_id": _audio_info
})
elif audio_type == ais_global.G_AN_PODCAST:
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.podcast_type",
"option": call.data["type"]})
self.hass.services.call(
'ais_cloud',
'get_podcast_tracks', {
"lookup_url": call.data["lookup_url"],
"podcast_name": call.data["name"],
"image_url": call.data["image_url"]
}
)
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.podcast_name",
"option": call.data["name"]})
elif audio_type == ais_global.G_AN_MUSIC:
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.ais_music_service",
"option": "YouTube"})
# self.hass.block_till_done()
self.hass.services.call(
'input_text',
'set_value', {
"entity_id": "input_text.ais_music_query",
"value": call.data["text"]})
elif audio_type == ais_global.G_AN_SPOTIFY:
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.ais_music_service",
"option": ais_global.G_AN_SPOTIFY})
# self.hass.block_till_done()
self.hass.services.call(
'input_text',
'set_value', {
"entity_id": "input_text.ais_music_query",
"value": call.data["text"] + " "})
def select_media_player(self, call):
if "media_player_type" not in call.data:
_LOGGER.error("No media_player_type")
return
player_name = None
_url = None
_audio_info = {}
media_player_type = call.data["media_player_type"]
if media_player_type == "Radio":
radio_name = self.hass.states.get(
'input_select.radio_station_name').state
if radio_name == ais_global.G_EMPTY_OPTION:
return
player_name = self.hass.states.get(
'input_select.radio_player').state
for radio in self.radio_names:
if radio["NAME"] == radio_name:
if "STREAM_URL" in radio:
_url = radio["STREAM_URL"]
_audio_info["NAME"] = radio["NAME"]
_audio_info["MEDIA_SOURCE"] = ais_global.G_AN_RADIO
_audio_info["IMAGE_URL"] = radio["IMAGE_URL"]
if media_player_type == "Podcast":
podcast_track = self.hass.states.get(
'input_select.podcast_track').state
if podcast_track == ais_global.G_EMPTY_OPTION:
return
player_name = self.hass.states.get(
'input_select.podcast_player').state
for track in self.podcast_tracks:
if track["title"] == podcast_track:
if "link" in track:
_url = track["link"].href
try:
_audio_info["IMAGE_URL"] = track["image_url"]
except Exception as e:
_audio_info["IMAGE_URL"] = ''
_audio_info["NAME"] = track["title"]
_audio_info["MEDIA_SOURCE"] = ais_global.G_AN_PODCAST
if media_player_type == "Music":
track_name = self.hass.states.get(
'input_select.ais_music_track_name').state
if track_name == ais_global.G_EMPTY_OPTION:
return
player_name = self.hass.states.get(
'input_select.ais_music_player').state
import homeassistant.components.ais_yt_service as yt
for music_track in yt.G_YT_FOUND:
if music_track["title"] == track_name:
_url = "https://www.youtube.com/watch?v="
_url += music_track["id"]
_audio_info["IMAGE_URL"] = music_track["thumbnail"]
_audio_info["NAME"] = music_track["title"]
_audio_info["MEDIA_SOURCE"] = ais_global.G_AN_MUSIC
if media_player_type == "Book":
chapter_name = self.hass.states.get(
'input_select.book_chapter').state
if chapter_name == ais_global.G_EMPTY_OPTION:
return
player_name = self.hass.states.get(
'input_select.book_player').state
import homeassistant.components.ais_gm_service as gm
for ch in gm.G_SELECTED_TRACKS:
if ch["name"] == chapter_name:
_url = gm.G_GM_MOBILE_CLIENT_API.get_stream_url(ch["id"])
_audio_info = {"IMAGE_URL": ch["image"], "NAME": ch["name"],
"MEDIA_SOURCE": ais_global.G_AN_AUDIOBOOK}
if player_name is not None:
player = get_player_data(player_name)
if _url is not None:
# play media on selected device
self.hass.services.call(
'media_player',
'play_media', {
"entity_id": player["entity_id"],
"media_content_type": "audio/mp4",
"media_content_id": check_url(_url)
})
if player["device_ip"] is not None:
# set stream image and title
self.hass.services.call(
'media_player',
'play_media', {
"entity_id": player["entity_id"],
"media_content_type": "ais_info",
"media_content_id": json.dumps(_audio_info)
})
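    # Hedged sketch, not part of the original component: the second
    # 'play_media' call above passes stream metadata to the AIS player via
    # the custom media_content_type "ais_info" and a JSON payload. A
    # hypothetical payload for a radio selection could look like this:
    def _example_audio_info_payload(self):
        # illustrative values only -- real ones come from the cloud catalog
        _audio_info = {"NAME": "Example Radio",
                       "MEDIA_SOURCE": ais_global.G_AN_RADIO,
                       "IMAGE_URL": "http://example.com/logo.png"}
        return json.dumps(_audio_info)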
def get_players(self, call, hass):
global G_PLAYERS
G_PLAYERS = []
players_lv = []
if "device_name" in call.data:
# check if this device already exists
name = slugify(call.data.get('device_name'))
m_player = hass.states.get('media_player.' + name)
if m_player is None:
_LOGGER.info("Adding new ais dom player " + name)
hass.async_run_job(
async_load_platform(
hass, 'media_player', 'ais_exo_player',
{
CONF_NAME: call.data.get('device_name'),
CONF_IP_ADDRESS: call.data.get(CONF_IP_ADDRESS),
CONF_MAC: call.data.get(CONF_MAC)
},
hass.config))
        # collect the info about the regular media players
entities = hass.states.async_all()
for entity in entities:
if entity.entity_id.startswith('media_player.'):
player = {}
friendly_name = entity.attributes.get('friendly_name')
device_ip = entity.attributes.get('device_ip')
player['friendly_name'] = friendly_name
player['entity_id'] = entity.entity_id
player['device_ip'] = device_ip
G_PLAYERS.append(player)
players_lv.append(friendly_name)
                # add the player to the group if it's not already added
hass.async_add_job(
hass.services.async_call(
'group',
'set', {
"object_id": "audio_player",
"add_entities": [entity.entity_id]}))
hass.async_add_job(
hass.services.async_call(
'input_select',
'set_options', {
"entity_id": "input_select.radio_player",
"options": players_lv}))
hass.async_add_job(
hass.services.async_call(
'input_select',
'set_options', {
"entity_id": "input_select.podcast_player",
"options": players_lv}))
hass.async_add_job(
hass.services.async_call(
'input_select',
'set_options', {
"entity_id": "input_select.ais_music_player",
"options": players_lv}))
hass.async_add_job(
hass.services.async_call(
'input_select',
'set_options', {
"entity_id": "input_select.book_player",
"options": players_lv}))
hass.async_add_job(
hass.services.async_call(
'input_select',
'set_options', {
"entity_id": "input_select.rss_news_player",
"options": players_lv}))
        # TODO remove "Podłączony głośnik" (connected speaker) from the list
hass.async_add_job(
hass.services.async_call(
'input_select',
'set_options', {
"entity_id": "input_select.tts_player",
"options": players_lv}))
# rebuild the groups
import homeassistant.components.ais_ai_service as ais_ai
ais_ai.get_groups(hass)
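    # Hedged sketch (illustration only): each entry appended to G_PLAYERS
    # above is a plain dict; select_media_player expects at least these keys.
    def _example_player_entry(self):
        # hypothetical values -- device_ip is None for non-AIS players
        return {"friendly_name": "Salon speaker",
                "entity_id": "media_player.salon_speaker",
                "device_ip": "192.168.1.20"}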
def get_rss_news_category(self, call):
ws_resp = self.cloud.audio_type(ais_global.G_AN_NEWS)
json_ws_resp = ws_resp.json()
self.cache.store_audio_type(ais_global.G_AN_NEWS, json_ws_resp)
types = [ais_global.G_EMPTY_OPTION]
for item in json_ws_resp["data"]:
types.append(item)
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.rss_news_category",
"options": types})
def get_rss_news_channels(self, call):
"""Load news channels of the for the selected category."""
if "rss_news_category" not in call.data:
_LOGGER.error("No rss_news_category")
return []
if call.data["rss_news_category"] == ais_global.G_EMPTY_OPTION:
# reset status for item below
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.rss_news_channel",
"options": [ais_global.G_EMPTY_OPTION]})
return
ws_resp = self.cloud.audio_name(
ais_global.G_AN_NEWS, call.data["rss_news_category"])
json_ws_resp = ws_resp.json()
names = [ais_global.G_EMPTY_OPTION]
self.news_channels = []
for item in json_ws_resp["data"]:
names.append(item["NAME"])
self.news_channels.append(item)
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.rss_news_channel",
"options": names})
        # check if the change was done from the remote control
import homeassistant.components.ais_ai_service as ais_ai
if (ais_ai.CURR_ENTITIE == 'input_select.rss_news_category'
and ais_ai.CURR_BUTTON_CODE == 23):
ais_ai.set_curr_entity(
self.hass,
'input_select.rss_news_channel')
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Wybierz kanał wiadomości"
})
def get_rss_news_items(self, call):
import feedparser
if "rss_news_channel" not in call.data:
_LOGGER.error("No rss_news_channel")
return
if call.data["rss_news_channel"] == ais_global.G_EMPTY_OPTION:
# reset status for item below
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.rss_news_item",
"options": [ais_global.G_EMPTY_OPTION]})
return
rss_news_channel = call.data["rss_news_channel"]
if "lookup_url" in call.data:
_lookup_url = call.data["lookup_url"]
_image_url = call.data["image_url"]
selected_by_voice_command = True
else:
            # the news was selected from the select list in the app
_lookup_url = None
_image_url = None
selected_by_voice_command = False
for channel in self.news_channels:
if channel["NAME"] == rss_news_channel:
_lookup_url = channel["LOOKUP_URL"]
_image_url = channel["IMAGE_URL"]
if _lookup_url is not None:
# download the episodes
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "pobieram"
})
try:
d = feedparser.parse(_lookup_url)
items = [ais_global.G_EMPTY_OPTION]
self.news_items = []
for e in d.entries:
item = {'title': e.title, 'link': e.link, 'image_url': _image_url, 'description': e.description}
if e.title not in items:
items.append(e.title)
self.news_items.append(item)
self.hass.services.call(
'input_select',
'set_options', {
"entity_id": "input_select.rss_news_item",
"options": items})
if selected_by_voice_command:
item = self.news_items[0]
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "mamy "
+ str(len(d.entries)) + " wiadomości z "
+ rss_news_channel
+ ", czytam najnowszy artykuł: " + item["title"]
})
self.hass.services.call(
'input_select',
'select_option', {
"entity_id": "input_select.rss_news_item",
"option": item["title"]})
else:
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "mamy "
+ str(len(d.entries))
+ " wiadomości, wybierz artykuł"
})
                # check if the change was done from the remote control
import homeassistant.components.ais_ai_service as ais_ai
if (ais_ai.CURR_ENTITIE
== 'input_select.rss_news_channel'
and ais_ai.CURR_BUTTON_CODE == 23):
ais_ai.set_curr_entity(
self.hass,
'input_select.rss_news_item')
except Exception as e:
_LOGGER.error("Error: " + str(e))
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": "Nie można pobrać wiadomości z: "
+ rss_news_channel
})
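    # Hedged sketch, assuming stock feedparser behaviour: every entry in
    # d.entries exposes title/link/description attributes, which the loop
    # above maps into self.news_items. Minimal stand-alone illustration:
    def _example_parse_feed(self, lookup_url):
        import feedparser
        d = feedparser.parse(lookup_url)  # lookup_url: any RSS/Atom URL
        return [{'title': e.title, 'link': e.link,
                 'description': e.description} for e in d.entries]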
def select_rss_news_item(self, call):
"""Get text for the selected item."""
global GLOBAL_RSS_NEWS_TEXT
if "rss_news_item" not in call.data:
_LOGGER.error("No rss_news_item")
return
if call.data["rss_news_item"] == ais_global.G_EMPTY_OPTION:
# reset status for item below
GLOBAL_RSS_NEWS_TEXT = ''
self.hass.states.async_set(
'sensor.rss_news_text', '-', {
'text': "" + GLOBAL_RSS_NEWS_TEXT,
'friendly_name': 'Tekst strony'
})
return
        # the news item was selected from the select list in the app
# we need to find the url and read the text
rss_news_item = call.data["rss_news_item"]
_url = None
for item in self.news_items:
if item["title"] == rss_news_item:
if "description" in item:
GLOBAL_RSS_NEWS_TEXT = item["description"]
if "link" in item:
_url = check_url(item["link"])
if _url is not None:
import requests
from readability import Document
response = requests.get(check_url(_url))
response.encoding = 'utf-8'
doc = Document(response.text)
GLOBAL_RSS_NEWS_TEXT += doc.summary()
from bs4 import BeautifulSoup
GLOBAL_RSS_NEWS_TEXT = BeautifulSoup(
GLOBAL_RSS_NEWS_TEXT, "lxml").text
text = "Czytam artykuł. " + GLOBAL_RSS_NEWS_TEXT
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": text
})
self.hass.states.async_set(
'sensor.rss_news_text', GLOBAL_RSS_NEWS_TEXT[:200], {
'text': "" + GLOBAL_RSS_NEWS_TEXT,
'friendly_name': 'Tekst strony'
})
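    # Hedged sketch of the extraction pipeline above: readability's Document
    # isolates the main article HTML and BeautifulSoup strips the tags, so
    # only plain text reaches the TTS service. Illustration only:
    def _example_article_text(self, url):
        import requests
        from readability import Document
        from bs4 import BeautifulSoup
        response = requests.get(url)
        response.encoding = 'utf-8'
        html = Document(response.text).summary()  # main-content HTML
        return BeautifulSoup(html, "lxml").text   # tag-free text for TTS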
def select_rss_help_item(self, call):
"""Get text for the selected item."""
global GLOBAL_RSS_HELP_TEXT
GLOBAL_RSS_HELP_TEXT = ''
if "rss_help_topic" not in call.data:
_LOGGER.error("No rss_help_topic")
return
if call.data["rss_help_topic"] == ais_global.G_EMPTY_OPTION:
# reset status for item below
self.hass.states.async_set(
'sensor.ais_rss_help_text', "-", {
'text': "" + GLOBAL_RSS_HELP_TEXT,
'friendly_name': "Tekst strony"
})
return
# we need to build the url and get the text to read
rss_help_topic = call.data["rss_help_topic"]
_url = check_url(
"https://raw.githubusercontent.com/wiki/sviete/AIS-WWW/" + rss_help_topic.replace(" ", "-") + ".md")
import requests
from readability import Document
response = requests.get(_url)
doc = Document(response.text)
GLOBAL_RSS_HELP_TEXT += doc.summary()
from markdown import markdown
GLOBAL_RSS_HELP_TEXT = markdown(GLOBAL_RSS_HELP_TEXT)
import re
GLOBAL_RSS_HELP_TEXT = re.sub(r'<code>(.*?)</code>', ' ', GLOBAL_RSS_HELP_TEXT)
GLOBAL_RSS_HELP_TEXT = re.sub('#', '', GLOBAL_RSS_HELP_TEXT)
from bs4 import BeautifulSoup
GLOBAL_RSS_HELP_TEXT = BeautifulSoup(
GLOBAL_RSS_HELP_TEXT, "lxml").text
text = "Czytam stronę pomocy. " + GLOBAL_RSS_HELP_TEXT
self.hass.services.call(
'ais_ai_service',
'say_it', {
"text": text
})
self.hass.states.async_set(
'sensor.ais_rss_help_text', GLOBAL_RSS_HELP_TEXT[:200], {
'text': "" + response.text,
'friendly_name': "Tekst strony"
})
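    # Hedged sketch of the help-page cleanup above: the GitHub wiki markdown
    # is rendered to HTML, <code> blocks are dropped (they read poorly via
    # TTS), '#' markers are removed, and BeautifulSoup flattens the rest.
    def _example_clean_markdown(self, md_text):
        import re
        from markdown import markdown
        from bs4 import BeautifulSoup
        html = markdown(md_text)
        html = re.sub(r'<code>(.*?)</code>', ' ', html)
        html = re.sub('#', '', html)
        return BeautifulSoup(html, "lxml").text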
| null |
homeassistant/components/ais_cloud/__init__.py
|
__init__.py
|
py
| 50,147 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.head",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.get_my_global_ip",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "homeassistant.const.EVENT_PLATFORM_DISCOVERED",
"line_number": 190,
"usage_type": "argument"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 214,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 224,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 224,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_VOICE",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 226,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_RATE",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_RATE",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 231,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_PITCH",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.GLOBAL_TTS_PITCH",
"line_number": 236,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 236,
"usage_type": "name"
},
{
"api_name": "homeassistant.const.EVENT_STATE_CHANGED",
"line_number": 238,
"usage_type": "argument"
},
{
"api_name": "asyncio.coroutine",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global.get_sercure_android_id_dom",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_OFFLINE_MODE",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "requests.delete",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_NEWS",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 312,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 335,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 380,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 392,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 392,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 398,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 398,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_NEWS",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 410,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_NEWS",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 412,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_NEWS",
"line_number": 415,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 415,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 416,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 416,
"usage_type": "name"
},
{
"api_name": "asyncio.coroutine",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 428,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 428,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 430,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 430,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 445,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 445,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 451,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 451,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 455,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 455,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 458,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 458,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 461,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 461,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_ENTITIE",
"line_number": 482,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_BUTTON_CODE",
"line_number": 483,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.set_curr_entity",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 484,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 509,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 509,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 511,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 537,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 537,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 539,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 539,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 554,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 554,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 560,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 560,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 563,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 563,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 565,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 565,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_ENTITIE",
"line_number": 577,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 577,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_BUTTON_CODE",
"line_number": 578,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 578,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.set_curr_entity",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 579,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 593,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 593,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 599,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 599,
"usage_type": "name"
},
{
"api_name": "feedparser.parse",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 625,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 625,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_ENTITIE",
"line_number": 659,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 659,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_BUTTON_CODE",
"line_number": 661,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 661,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.set_curr_entity",
"line_number": 662,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 662,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 692,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 692,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 709,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 709,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 710,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 744,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 744,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 772,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 772,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 773,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 781,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 781,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_MUSIC",
"line_number": 802,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 802,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_SPOTIFY",
"line_number": 814,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 814,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_SPOTIFY",
"line_number": 819,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 819,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 838,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 838,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_RADIO",
"line_number": 847,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 847,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 852,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 852,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_PODCAST",
"line_number": 865,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 865,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 869,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 869,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_yt_service.G_YT_FOUND",
"line_number": 874,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_yt_service",
"line_number": 874,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_MUSIC",
"line_number": 880,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 880,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 884,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 884,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_gm_service.G_SELECTED_TRACKS",
"line_number": 889,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_gm_service",
"line_number": 889,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_gm_service.G_GM_MOBILE_CLIENT_API.get_stream_url",
"line_number": 891,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.ais_gm_service.G_GM_MOBILE_CLIENT_API",
"line_number": 891,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_gm_service",
"line_number": 891,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_AUDIOBOOK",
"line_number": 893,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 893,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 912,
"usage_type": "call"
},
{
"api_name": "homeassistant.util.slugify",
"line_number": 921,
"usage_type": "call"
},
{
"api_name": "homeassistant.helpers.discovery.async_load_platform",
"line_number": 926,
"usage_type": "call"
},
{
"api_name": "homeassistant.const.CONF_NAME",
"line_number": 929,
"usage_type": "name"
},
{
"api_name": "homeassistant.const.CONF_IP_ADDRESS",
"line_number": 930,
"usage_type": "name"
},
{
"api_name": "homeassistant.const.CONF_MAC",
"line_number": 931,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.get_groups",
"line_number": 994,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 994,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_NEWS",
"line_number": 997,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 997,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_NEWS",
"line_number": 999,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 999,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1000,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1000,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1014,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1014,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1020,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1020,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_AN_NEWS",
"line_number": 1023,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1023,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1025,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1025,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_ENTITIE",
"line_number": 1037,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 1037,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_BUTTON_CODE",
"line_number": 1038,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 1038,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.set_curr_entity",
"line_number": 1039,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 1039,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1053,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1053,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1059,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1059,
"usage_type": "name"
},
{
"api_name": "feedparser.parse",
"line_number": 1084,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1085,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1085,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_ENTITIE",
"line_number": 1124,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 1124,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.CURR_BUTTON_CODE",
"line_number": 1126,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 1126,
"usage_type": "name"
},
{
"api_name": "homeassistant.components.ais_ai_service.set_curr_entity",
"line_number": 1127,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.ais_ai_service",
"line_number": 1127,
"usage_type": "name"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1146,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1146,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 1169,
"usage_type": "call"
},
{
"api_name": "readability.Document",
"line_number": 1171,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 1175,
"usage_type": "call"
},
{
"api_name": "homeassistant.ais_dom.ais_global.G_EMPTY_OPTION",
"line_number": 1197,
"usage_type": "attribute"
},
{
"api_name": "homeassistant.ais_dom.ais_global",
"line_number": 1197,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 1212,
"usage_type": "call"
},
{
"api_name": "readability.Document",
"line_number": 1213,
"usage_type": "call"
},
{
"api_name": "markdown.markdown",
"line_number": 1217,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 1219,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 1220,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 1223,
"usage_type": "call"
}
] |
605923304
|
import os
import sys
import glob
import cv2
def save_face(frame, p1, p2, filename):
# Get center point coordinates
cp = ((p1[0] + p2[0])//2, (p1[1] + p2[1])//2)
# Find the y-midpoint
y_midp = int(((p2[1] - p1[1]) // 2) * 1.25)
# Cropping a square around the face
# - from the cp xy points, subtract y-midpoint to get top left pt
# - add y-midpoint to get bottom right pt
crop = frame[ cp[1]-y_midp:cp[1]+y_midp, cp[0]-y_midp:cp[0]+y_midp ]
# crop = frame[y1:y1+h, x1:x1+w]
crop = cv2.resize(crop, dsize=(128, 128), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(filename, crop)
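# Hedged worked example, not part of the original script: for a detection
# with p1=(100, 60) and p2=(180, 160) the center is cp=(140, 110) and
# y_midp = int(((160-60)//2) * 1.25) = 62, so save_face crops the square
# frame[48:172, 78:202] -- 25% taller than the raw box, centered on the face.
def _example_crop_bounds(p1, p2):
    # same arithmetic as save_face, returning (top, bottom, left, right)
    cp = ((p1[0] + p2[0]) // 2, (p1[1] + p2[1]) // 2)
    y_midp = int(((p2[1] - p1[1]) // 2) * 1.25)
    return (cp[1] - y_midp, cp[1] + y_midp, cp[0] - y_midp, cp[0] + y_midp)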
# Video Capture
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print('Camera open failed!')
sys.exit()
# Network
model = './opencv_face_detector/res10_300x300_ssd_iter_140000_fp16.caffemodel'
config = './opencv_face_detector/deploy.prototxt'
net = cv2.dnn.readNet(model, config)
if net.empty():
print('Net open failed!')
sys.exit()
# Output Directory & File Index
outdir = 'output'
prefix = outdir + '/face_'
file_idx = 1
try:
if not os.path.exists(outdir):
os.makedirs(outdir)
except OSError:
    print('output folder creation failed!')
png_list = glob.glob(prefix + '*.png')
if len(png_list) > 0:
png_list.sort()
last_file = png_list[-1]
file_idx = int(last_file[-8:-4]) + 1
# Read Frames
cnt = 0
while True:
_, frame = cap.read()
if frame is None:
break
# Face Detection
blob = cv2.dnn.blobFromImage(frame, 1, (300, 300), (104, 177, 123))
net.setInput(blob)
detect = net.forward()
detect = detect[0, 0, :, :]
(h, w) = frame.shape[:2]
for i in range(detect.shape[0]):
confidence = detect[i, 2]
if confidence < 0.8:
break
# Face found!
x1 = int(detect[i, 3] * w)
y1 = int(detect[i, 4] * h)
x2 = int(detect[i, 5] * w)
y2 = int(detect[i, 6] * h)
# Save face image as a png file
cnt += 1
if cnt % 5 == 0:
filename = '{0}{1:04d}.png'.format(prefix, file_idx)
save_face(frame, (x1, y1), (x2, y2), filename)
file_idx += 1
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0))
label = 'Face: %4.3f' % confidence
cv2.putText(frame, label, (x1, y1 - 1),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.imshow('frame', frame)
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
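# Hedged note, assuming the stock OpenCV res10 SSD face detector: the
# net.forward() blob has shape (1, 1, N, 7) and each row reads
# [image_id, class_id, confidence, x1, y1, x2, y2] with coordinates as
# fractions of the frame, hence the scaling by (w, h) above; rows come
# sorted by confidence, which is why 'break' on the first weak row is safe.
def _example_decode_detection(row, w, h):
    # row is one detect[i, :] vector from the loop above
    confidence = float(row[2])
    box = (int(row[3] * w), int(row[4] * h), int(row[5] * w), int(row[6] * h))
    return confidence, box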
| null |
face_capture.py
|
face_capture.py
|
py
| 2,526 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "cv2.resize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.readNet",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.dnn.blobFromImage",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "cv2.LINE_AA",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 109,
"usage_type": "call"
}
] |
348184475
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
##@package sceneExport writes horde3d scene files
#
#
import bpy
from xml.dom.minidom import * #xml
from . import export_h3d_b25 #model export
from mathutils import *
import os
import shutil # copy files
import math
import re # remove whitespaces
from subprocess import Popen
## error popup shown when non-ASCII characters are used in an object name
#
class ObjectEncodingError(bpy.types.Operator):
bl_idname = "h3d.object_encoding_error"
bl_label = "ERROR you seem to use non ASCII Characters in an object name"
def execute(self, context):
self.report({'INFO'}, "ERROR you seem to use non ASCII Characters in an object name")
print("ERROR you seem to use non ASCII Characters in an object name")
return {'FINISHED'}
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)
## check if a string can be encoded as ascii
#
def check_encoding(string):
try:
string.encode(encoding='ascii')
except:
return False
return True
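## hedged usage sketch for check_encoding (illustration, not used by the add-on)
#
def _example_check_encoding():
    assert check_encoding("Cube.001") is True
    assert check_encoding("Würfel") is False  # 'ü' is not ASCII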
##saves current selection to list
#@return list of selected blender objects
#
def save_selection():
selectedObj=[]
for obj in bpy.data.objects:
# obj bool
selectedObj.append((obj, obj in bpy.context.selected_objects))
return selectedObj
## sets blender selection from list
#@param list of blender objects to be selected
#
def restore_selection(selectedObj):
bpy.ops.object.select_all(action='DESELECT')
for i in range(0, len(selectedObj)):
if selectedObj[i][1]:
bpy.context.scene.objects[selectedObj[i][0].name].select=True
##parses existing scene xml
#@param path to scene file
#@param list to append nonblender objects in scene to
#
#
def parseSceneXML(path, nonblender):
sceneXML=xml.dom.minidom.parse(path)
    #Reference?
blenderObjs=[]
for group in sceneXML.childNodes:
for child in group.childNodes:
if child.nodeName=="Reference":
obj_name=""
tx=0.0
ty=0.0
tz=0.0
rx=0.0
ry=0.0
rz=0.0
                sx=0.0
                sy=0.0
                sz=0.0
for (name, value) in child.attributes.items():
if(name=="name"):
obj_name=value
elif(name=="tx"):
tx=value
elif(name=="ty"):
ty=value
elif(name=="tz"):
tz=value
elif(name=="rx"):
rx=value
elif(name=="ry"):
ry=value
elif(name=="rz"):
rz=value
elif(name=="sx"):
sx=value
elif(name=="sy"):
sy=value
elif(name=="sz"):
sz=value
for obj in bpy.data.objects:
if(obj.type=='MESH'):
# similar2blenderobjects?
if objectsSimilar(obj, obj_name, tx, ty, tz, rx, ry, rz, sx, sy, sz):
                            # attachment?
for refChild in child.childNodes:
print(refChild.nodeName)
if refChild.nodeName=="Attachment":
# => blendergui
attatchStr=re.sub('\s+',' ',refChild.toxml())
print(attatchStr)
if 'h3d' in bpy.data.objects[obj.name]:
bpy.data.objects[obj.name]['h3d']['h3dGameEngineAttatchment']=attatchStr
else:
bpy.data.objects[obj.name]["h3d"]={}
bpy.data.objects[obj.name]['h3d']['h3dGameEngineAttatchment']=attatchStr
if re.sub('\s+',' ',(child.toxml())) in nonblender:
nonblender.remove(re.sub('\s+',' ',(child.toxml())))
blenderObjs.append(re.sub('\s+',' ',(child.toxml()))) # not have duplicates on reexport
else:
#nonblender object
if not re.sub('\s+',' ',(child.toxml())) in nonblender: # not have duplicates on reexport
if not re.sub('\s+',' ',(child.toxml())) in blenderObjs:
nonblender.append(re.sub('\s+',' ',(child.toxml())))
else:
#nonblender object
if(re.sub('\s+',' ',(child.toxml()))!= " "):
nonblender.append(re.sub('\s+',' ',(child.toxml())))
##determines if a blender object is similar to the given data
#@param obj blenderobject
#@param name extracted from xml
#@param tx, ty, tz, rx, ry, rz, sx, sy, sz transformation from xml
#@return True if similar else False
def objectsSimilar(obj, name , tx, ty, tz, rx, ry, rz, sx, sy, sz):
isSimilar=0
if obj.name == name or obj.data.name == name:
isSimilar=isSimilar+1
if str(tx) == str( round(obj.matrix_world.to_translation().x , 5)):
isSimilar=isSimilar+1
if str(ty) == str(round( obj.matrix_world.to_translation().z, 5)):
isSimilar=isSimilar+1
if str(tz) == str(-round(obj.matrix_world.to_translation().y, 5)):
isSimilar=isSimilar+1
if str(rx) == str(round( obj.matrix_world.to_euler().x*180/ math.pi, 5)):
isSimilar=isSimilar+1
if str(ry) == str(round( obj.matrix_world.to_euler().z*180/ math.pi, 5)):
isSimilar=isSimilar+1
if str(rz) == str(-round(obj.matrix_world.to_euler().y*180/ math.pi, 5)):
isSimilar=isSimilar+1
if str(sx) == str(round(obj.matrix_world.to_scale().x, 5)):
isSimilar=isSimilar+1
if str(sy) == str(round(obj.matrix_world.to_scale().z, 5)):
isSimilar=isSimilar+1
if str(sz) == str(round(obj.matrix_world.to_scale().y, 5)):
isSimilar=isSimilar+1
if obj.parent and obj.parent.type == 'ARMATURE':
if str(tx) == str( round(obj.parent.matrix_world.to_translation().x , 5)):
isSimilar=isSimilar+1
if str(ty) == str(round( obj.parent.matrix_world.to_translation().z, 5)):
isSimilar=isSimilar+1
if str(tz) == str(-round(obj.parent.matrix_world.to_translation().y, 5)):
isSimilar=isSimilar+1
if str(rx) == str(round( obj.parent.matrix_world.to_euler().x*180/ math.pi, 5)):
isSimilar=isSimilar+1
if str(ry) == str(round( obj.parent.matrix_world.to_euler().z*180/ math.pi, 5)):
isSimilar=isSimilar+1
if str(rz) == str(-round(obj.parent.matrix_world.to_euler().y*180/ math.pi, 5)):
isSimilar=isSimilar+1
if str(sx) == str(round(obj.parent.matrix_world.to_scale().x, 5)):
isSimilar=isSimilar+1
if str(sy) == str(round(obj.parent.matrix_world.to_scale().z, 5)):
isSimilar=isSimilar+1
if str(sz) == str(round(obj.parent.matrix_world.to_scale().y, 5)):
isSimilar=isSimilar+1
#name, position
return isSimilar>9
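## hedged note on the threshold above: up to 10 checks can match (name plus
## nine transform components, counted once more via the parent for armature
## children), and the comparisons are string-based, so a Reference only
## matches when the XML stores exactly Blender's 5-decimal rounding:
def _example_xml_float(value):
    # mirrors how transforms are serialised before the string comparison
    return str(round(value, 5))  # _example_xml_float(1.0) == "1.0"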
##save blenderobject to Horde3d geo file
#@param blender object
#
def saveH3DModel(blenderObj):
if 'H3DContent_path'in bpy.data.scenes[0]['h3d']:
basePath=bpy.data.scenes[0]['h3d']['H3DContent_path']
else :
basePath= "/tmp"
#never happens
#mind the duplicates
if blenderObj.data.users > 1:
blenderObj_name = blenderObj.data.name
else:
blenderObj_name = blenderObj.name
if not os.path.exists(basePath+"models"):
os.makedirs(basePath+"models")
if not os.path.exists(basePath+"models"+os.sep+blenderObj_name):
os.makedirs(basePath+"models"+os.sep+blenderObj_name)
conv= export_h3d_b25.Converter( ".",basePath,"models","textures","materials","animations")
conv.convertSingleModel(blenderObj)
conv.saveModel(basePath+"models"+os.sep+blenderObj_name+os.sep+blenderObj_name, True, True)
# animations now
if ('exportAnims' in bpy.data.scenes[0]['h3d'] \
and bpy.data.scenes[0]['h3d']['exportAnims']\
) or not 'exportAnims' in bpy.data.scenes[0]['h3d']:
if not os.path.exists(basePath+"animations"):
os.makedirs(basePath+"animations")
conv.writeAnimation(basePath+"animations"+os.sep+blenderObj_name, blenderObj)
if blenderObj.parent!=None and blenderObj.parent.type=='ARMATURE':
conv.writeAnimation(basePath+"animations"+os.sep+blenderObj_name, blenderObj.parent )
##parse existing materials, extract used shader and flags
#@param path to file
#@return list containing shader, flags (joined with ',' as string)
#
def parseMaterialXML(materialFile):
settings=[]
flags=""
    if os.path.isfile(bpy.data.scenes[0]['h3d']['H3DContent_path']+os.sep+"materials"+os.sep+materialFile):
materialXML=xml.dom.minidom.parse(bpy.data.scenes[0]['h3d']['H3DContent_path']+os.sep+"materials"+os.sep+materialFile)
for material in materialXML.childNodes:
for node in material.childNodes:
if node.nodeName=="Shader":
for (name, value) in node.attributes.items():
if name == "source":
settings.append(value[8:])
elif node.nodeName=="ShaderFlag":
for (name, value) in node.attributes.items():
if name == "name":
if flags == "":
flags=flags+value
else:
flags=flags+","+value
#append non blender xml data?
#just use xml material?
settings.append(flags)
return settings
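## hedged sketch of parseMaterialXML's return shape: for a material file with
## <Shader source="shaders/model.shader"/> and two <ShaderFlag> entries the
## result is ['model.shader', '_F01_Skinning,_F02_NormalMapping'] -- index 0
## is the shader name (value[8:] slices off the "shaders/" prefix), index 1
## the comma-joined flags.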
##generate Material xml from blenderobject
#@param blenderobject
#@return material name (of limited use, since objects can have multiple materials)
#
def genMaterial(blenderobj):
material_name = "BlenderMaterial.material.xml"
shader="default.shader"
shader_flags="_F01_Skinning,_F02_NormalMapping"
copyTextures= True
generateMaterials=True
bpy.ops.file.make_paths_absolute()
if len(blenderobj.data.materials)==0:
#use default
return material_name
for material in blenderobj.data.materials:
if material == None:
#use default
return material_name
if 'h3d' in material:
if 'h3DshaderFlag' in material['h3d']:
shader_flags= material['h3d']['h3DshaderFlag']
print("already there")
else:
material['h3d']={}
material['h3d']['generateMaterialXML']=True
if 'useMaterialTemplate' in material['h3d'] and \
material['h3d']['useMaterialTemplate']:
if 'materialXML' in material['h3d']:
parsedMat=parseMaterialXML(material['h3d']['materialXML'])
shader=parsedMat[0]
shader_flags=parsedMat[1]
#todo uniforms
if 'copyTextures' in bpy.data.scenes[0]['h3d']:
copyTextures=bpy.data.scenes[0]['h3d']['copyTextures']
if 'generateMaterialXML' in material['h3d']:
generateMaterials = material['h3d']['generateMaterialXML']
if generateMaterials:
if 'H3DContent_path'in bpy.data.scenes[0]['h3d']:
basePath=bpy.data.scenes[0]['h3d']['H3DContent_path']
else :
basePath= "/tmp"
material_name = material.name
mat_xml=Document()
mat_node=mat_xml.createElement("Material")
mat_xml.appendChild(mat_node)
shaderNode= mat_xml.createElement("Shader")
mat_node.appendChild(shaderNode)
shaderNode.setAttribute("source", "shaders"+ os.sep + shader)
#add shaderflags from property
for flag in shader_flags.split(","):
shaderFlagNode=mat_xml.createElement("ShaderFlag")
mat_node.appendChild(shaderFlagNode)
shaderFlagNode.setAttribute("name", flag.strip(' '))
# add textures
for texture in material.texture_slots:
if texture != None:
textureNode = mat_xml.createElement("Sampler")
mat_node.appendChild(textureNode)
#todo: different kinds of textures! eg normalmap
textureNode.setAttribute("name", "albedoMap")
if texture.texture.rna_type.name == 'Image Texture':
                        textureNode.setAttribute("map","textures"+os.sep+"models"+os.sep+bpy.path.basename(texture.texture.image.filepath))
if copyTextures:
try:
shutil.copyfile(texture.texture.image.filepath, basePath+"textures"+ os.sep +"models"+os.sep+bpy.path.basename(texture.texture.image.filepath))
except:
pass
else:
textureNode.setAttribute("map","textures" +os.sep+"models"+os.sep +"default.png")
# write material
print(mat_xml.toprettyxml(indent=" "))
with open(basePath+"materials"+os.sep+material_name+".material.xml", "w") as materialfile:
materialfile.write( mat_xml.toprettyxml())
return material_name
##sets xml references data from blender object
#@param blenderobject
#@param xml reference node
#@param indicate if object is link or duplicate
#
def set_reference_data(obj, reference, duplicate):
if obj.parent!=None and obj.parent.type=='ARMATURE': #preserve animated objects scale
reference.setAttribute( "sx", str(round(obj.parent.matrix_world.to_scale().x, 5)))
reference.setAttribute( "sy", str(round(obj.parent.matrix_world.to_scale().z, 5)))
reference.setAttribute( "sz", str(round(obj.parent.matrix_world.to_scale().y, 5)))
reference.setAttribute( "tx", str( round(obj.parent.matrix_world.to_translation().x, 5)))
reference.setAttribute( "ty", str(round( obj.parent.matrix_world.to_translation().z, 5)))
reference.setAttribute( "tz", str(-round(obj.parent.matrix_world.to_translation().y, 5)))
reference.setAttribute( "rx", str(round( obj.parent.matrix_world.to_euler().x*180/ math.pi, 5)))
reference.setAttribute( "ry", str(round( obj.parent.matrix_world.to_euler().z*180/ math.pi, 5)))
reference.setAttribute( "rz", str(-round(obj.parent.matrix_world.to_euler().y*180/ math.pi, 5)))
elif obj.parent!=None and obj.parent.type=='MESH': #preserve offset to parent
reference.setAttribute( "sx", str(round( obj.matrix_local.to_scale().x, 5)))
reference.setAttribute( "sy", str(round( obj.matrix_local.to_scale().z, 5)))
reference.setAttribute( "sz", str(round( obj.matrix_local.to_scale().y, 5)))
reference.setAttribute( "tx", str( round(obj.matrix_local.to_translation().x, 5)))
reference.setAttribute( "ty", str(round( obj.matrix_local.to_translation().z, 5)))
reference.setAttribute( "tz", str(-round(obj.matrix_local.to_translation().y, 5)))
reference.setAttribute( "rx", str(round( obj.matrix_local.to_euler().x*180/ math.pi, 5)))
reference.setAttribute( "ry", str(round( obj.matrix_local.to_euler().z*180/ math.pi, 5)))
reference.setAttribute( "rz", str(-round(obj.matrix_local.to_euler().y*180/ math.pi, 5)))
else:
reference.setAttribute( "tx", str( round(obj.matrix_world.to_translation().x, 5)))
reference.setAttribute( "ty", str(round( obj.matrix_world.to_translation().z, 5)))
reference.setAttribute( "tz", str(-round(obj.matrix_world.to_translation().y, 5)))
reference.setAttribute( "rx", str(round( obj.matrix_world.to_euler().x*180/ math.pi, 5)))
reference.setAttribute( "ry", str(round( obj.matrix_world.to_euler().z*180/ math.pi, 5)))
reference.setAttribute( "rz", str(-round(obj.matrix_world.to_euler().y*180/ math.pi, 5)))
reference.setAttribute( "sx", str(round(obj.matrix_world.to_scale().x, 5)))
reference.setAttribute( "sy", str(round(obj.matrix_world.to_scale().z, 5)))
reference.setAttribute( "sz", str(round(obj.matrix_world.to_scale().y, 5)))
#duplicates use mesh name
if duplicate or obj.data.users > 1:
reference.setAttribute( "sceneGraph", "models"+ os.sep+obj.data.name+ os.sep + obj.data.name + ".scene.xml") # .data.name
else:
reference.setAttribute( "sceneGraph", "models"+ os.sep+obj.name+ os.sep + obj.name + ".scene.xml") # .name
reference.setAttribute( "name", obj.name)
##checks if object is a duplicate
#@param blender object
#@param list of duplicates
#@return bool indicating if object is duplicate
#
def is_duplicate(obj, duplicates):
duplicate=False
if obj.data.users > 1:
if obj.data.name in duplicates:
duplicate= True
else:
duplicates.append(obj.data.name )
return duplicate
##export blender groups
#@param exportMesh boolean export setting
#@param exportScene boolean export setting
#@param exportMaterials boolean export setting
#
def process_groups(exportMesh=False, exportScene=False, exportMaterials=True):
exportExtern=False
if 'H3DContent_path'in bpy.data.scenes[0]['h3d']:
basePath=bpy.data.scenes[0]['h3d']['H3DContent_path']
else :
basePath= os.sep+"tmp"
if 'exportExtern' in bpy.data.scenes[0]['h3d']:
exportExtern =bpy.data.scenes[0]['h3d']['exportExtern']
for Bgroup in bpy.data.groups:
if Bgroup.library==None or exportExtern: # export only local groups for now
process_group(Bgroup, exportMesh, exportScene, exportMaterials)
##export blender group
#@param Bgroup blendergroup to be exported
#@param exportMesh boolean export setting
#@param exportScene boolean export setting
#@param exportMaterials boolean export setting
#
def process_group(Bgroup, exportMesh=False, exportScene=False, exportMaterials=True):
nonblender=[]
duplicates=[]
#defaults
clearScene=False
getAttatchments=True
keepNonBlender= True
exportExtern=False
if 'H3DContent_path'in bpy.data.scenes[0]['h3d']:
basePath=bpy.data.scenes[0]['h3d']['H3DContent_path']
else :
basePath= os.sep+"tmp"
if 'parseSceneXML' in bpy.data.scenes[0]['h3d']:
getAttatchments=bpy.data.scenes[0]['h3d']['parseSceneXML']
if 'overwriteH3Dfiles' in bpy.data.scenes[0]['h3d']:
clearScene= bpy.data.scenes[0]['h3d']['overwriteH3Dfiles']
if 'keepNonBlender' in bpy.data.scenes[0]['h3d']:
keepNonBlender= bpy.data.scenes[0]['h3d']['keepNonBlender']
if 'exportExtern' in bpy.data.scenes[0]['h3d']:
exportExtern =bpy.data.scenes[0]['h3d']['exportExtern']
#parse scenefile:
if getAttatchments:
if os.path.isfile(basePath+"models"+os.sep+Bgroup.name+".group.scene.xml"):
parseSceneXML(basePath+"models"+os.sep+Bgroup.name+".group.scene.xml", nonblender)
#groups for linking
#write out as own group.scene.xml
gr_scene_xml=Document()
group2 = gr_scene_xml.createElement("Group")
group2.setAttribute( "tx", "0")
group2.setAttribute( "ty", "0")
group2.setAttribute( "tz", "0")
group2.setAttribute( "sx", "1")
group2.setAttribute( "sy", "1")
group2.setAttribute( "sz", "1")
group2.setAttribute( "rx", "0")
group2.setAttribute( "ry", "0")
group2.setAttribute( "rz", "0")
group2.setAttribute( "name", Bgroup.name)
gr_scene_xml.appendChild(group2)
for ob in Bgroup.objects:
enc_ascii=check_encoding(ob.name)
if not enc_ascii:
bpy.ops.h3d.object_encoding_error('INVOKE_DEFAULT')
if ob.type == 'MESH' and is_export_set(ob) and enc_ascii:
if exportMaterials:
genMaterial(ob)
# children are added with their parents
if ((ob.parent == None or ob.parent.type=='ARMATURE' or ob.parent.type=='MESH') \
and (ob.library==None or exportExtern)) \
and ob!=None:
#ob.data.library!=None => link
if exportMesh and not is_duplicate(ob, duplicates):
                    if ob.parent and ob.parent.type=='ARMATURE':
                        #this does not work if the object's scale equals the armature's scale
                        #set the model's armature to the identity matrix too
tmp_locA=ob.parent.matrix_local.copy()
tmp_loc=ob.matrix_local.copy()
# clear location
ob.matrix_local=Matrix()
#export
saveH3DModel(ob)
                        #restore location
ob.matrix_local=tmp_loc
ob.parent.matrix_local=tmp_locA
elif ob.parent and ob.parent.type=='MESH':
#pivot error?
tmp_locA=ob.parent.matrix_local.copy()
tmp_loc=ob.matrix_local.copy()
# clear location
ob.parent.matrix_local=Matrix()
ob.matrix_local=Matrix()
#export
saveH3DModel(ob)
                        #restore location
ob.matrix_local=tmp_loc
ob.parent.matrix_local=tmp_locA
else:
tmp_loc=ob.matrix_local.copy()
# clear location
ob.matrix_local=Matrix()
#export
saveH3DModel(ob)
                        #restore location
ob.matrix_local=tmp_loc
#is duplicate? .data.name, data.users
if not ( ob.parent and ob.parent.type == 'MESH' ):
reference2 = gr_scene_xml.createElement("Reference")
set_reference_data(ob, reference2, is_duplicate(ob, duplicates))
group2.appendChild(reference2)
# add children
for childOb in ob.children:
if childOb.type !='ARMATURE':
referenceC2 = gr_scene_xml.createElement("Reference")
set_reference_data(childOb, referenceC2, is_duplicate(childOb, duplicates))
reference2.appendChild(referenceC2)
                    # add attachments
if 'h3d'in ob and not clearScene:
if 'h3dGameEngineAttatchment' in ob['h3d']:
attatchmentstr=xml.dom.minidom.parseString(ob['h3d']['h3dGameEngineAttatchment'])
for child in attatchmentstr.childNodes:
if child.nodeName=="Attachment":
attatchment=child
reference2.appendChild(attatchment)
#nonblender objects?
elif ob.type == 'EMPTY':
# if link:
if ob.dupli_group!=None:
# check group.scene.xml exists?
if os.path.isfile(basePath+"models"+os.sep+ob.dupli_group.name+".group.scene.xml"):
gRef=gr_scene_xml.createElement("Reference")
gRef.setAttribute( "tx", str( round(ob.matrix_world.to_translation().x, 5)))
gRef.setAttribute( "ty", str(round( ob.matrix_world.to_translation().z, 5)))
gRef.setAttribute( "tz", str(-round(ob.matrix_world.to_translation().y, 5)))
gRef.setAttribute( "rx", str(round( ob.matrix_world.to_euler().x*180/ math.pi, 5)))
gRef.setAttribute( "ry", str(round( ob.matrix_world.to_euler().z*180/ math.pi, 5)))
gRef.setAttribute( "rz", str(-round(ob.matrix_world.to_euler().y*180/ math.pi, 5)))
gRef.setAttribute( "sx", str(round(ob.matrix_world.to_scale().x, 5)))
gRef.setAttribute( "sy", str(round(ob.matrix_world.to_scale().z, 5)))
gRef.setAttribute( "sz", str(round(ob.matrix_world.to_scale().y, 5)))
gRef.setAttribute( "sceneGraph", "models"+ os.sep + ob.dupli_group.name + ".group.scene.xml")
gRef.setAttribute( "name", ob.name)
group2.appendChild(gRef)
# else error
else:
print("link not exported yet?")
gRef=gr_scene_xml.createElement("Reference")
gRef.setAttribute( "tx", str( round(ob.matrix_world.to_translation().x, 5)))
gRef.setAttribute( "ty", str(round( ob.matrix_world.to_translation().z, 5)))
gRef.setAttribute( "tz", str(-round(ob.matrix_world.to_translation().y, 5)))
gRef.setAttribute( "rx", str(round( ob.matrix_world.to_euler().x*180/ math.pi, 5)))
gRef.setAttribute( "ry", str(round( ob.matrix_world.to_euler().z*180/ math.pi, 5)))
gRef.setAttribute( "rz", str(-round(ob.matrix_world.to_euler().y*180/ math.pi, 5)))
gRef.setAttribute( "sx", str(round(ob.matrix_world.to_scale().x, 5)))
gRef.setAttribute( "sy", str(round(ob.matrix_world.to_scale().z, 5)))
gRef.setAttribute( "sz", str(round(ob.matrix_world.to_scale().y, 5)))
gRef.setAttribute( "sceneGraph", "models"+ os.sep + ob.dupli_group.name + ".group.scene.xml")
gRef.setAttribute( "name", ob.name)
group2.appendChild(gRef)
# append non blender objects
if not clearScene and keepNonBlender:
for nobj in nonblender:
nonblender_element=xml.dom.minidom.parseString(nobj)
#hopefully avoid empty nodes
group2.appendChild(nonblender_element.childNodes[-1])
#get lamps
#position
#type?
## write scene xml ##
#default: h3d_content/models/blendfileName.scene.xml
print(gr_scene_xml.toprettyxml(indent=" "))
if exportScene:
with open(basePath+"models"+ os.sep+Bgroup.name+".group.scene.xml", "w") as scenefile:
scenefile.write( gr_scene_xml.toprettyxml())
##check if an object should be exported
#@param obj Blender object
#@return boolean, True if the object should be exported
def is_export_set(obj):
exportSet=True
'''
if "h3d" in obj and "h3DdoNotExport" in obj["h3d"]:
exportSet=not obj["h3d"]["h3DdoNotExport"]
'''
if 'onlyExportSelected' in bpy.data.scenes[0]['h3d']:
if bpy.data.scenes[0]['h3d']['onlyExportSelected']:
exportSet = bpy.context.scene.objects[obj.name].select
if obj.hide or obj.hide_render:
exportSet=False
return exportSet
##export the Blender scene
#@param exportMesh boolean, export meshes
#@param exportScene boolean, write the scene XML
#@param exportMaterials boolean, export materials
#
def process_scene(exportMesh=False, exportScene=False, exportMaterials=True):
nonblender=[]
duplicates=[]
#defaults
clearScene=False
getAttatchments=True
keepNonBlender= True
exportBGroups=True
if 'H3DContent_path' in bpy.data.scenes[0]['h3d']:
basePath=bpy.data.scenes[0]['h3d']['H3DContent_path']
else:
basePath= os.sep+"tmp"
if 'parseSceneXML' in bpy.data.scenes[0]['h3d']:
getAttatchments=bpy.data.scenes[0]['h3d']['parseSceneXML']
if 'overwriteH3Dfiles' in bpy.data.scenes[0]['h3d']:
clearScene= bpy.data.scenes[0]['h3d']['overwriteH3Dfiles']
if 'keepNonBlender' in bpy.data.scenes[0]['h3d']:
keepNonBlender= bpy.data.scenes[0]['h3d']['keepNonBlender']
if 'exportGroups' in bpy.data.scenes[0]['h3d']:
exportBGroups=bpy.data.scenes[0]['h3d']['exportGroups']
#parse scenefile:
if getAttatchments:
if os.path.isfile(basePath+"models"+os.sep+bpy.data.scenes[0].name+".scene.xml"):
parseSceneXML(basePath+"models"+os.sep+bpy.data.scenes[0].name+".scene.xml", nonblender)
# create SceneXML:
scene_xml=Document()
#<Group tx="0" sx="3" ty="15" rx="0" sy="3" tz="0" ry="0" sz="3" rz="0" >
group = scene_xml.createElement("Group")
group.setAttribute( "tx", "0")
group.setAttribute( "ty", "0")
group.setAttribute( "tz", "0")
group.setAttribute( "sx", "1")
group.setAttribute( "sy", "1")
group.setAttribute( "sz", "1")
group.setAttribute( "rx", "0")
group.setAttribute( "ry", "0")
group.setAttribute( "rz", "0")
scene_xml.appendChild(group)
print(scene_xml.toprettyxml(indent=" "))
for obj in bpy.data.objects:
enc_ascii=check_encoding(obj.name)
if not enc_ascii:
bpy.ops.h3d.object_encoding_error('INVOKE_DEFAULT')
if obj.type == 'MESH' and is_export_set(obj) and enc_ascii:
if exportMaterials:
genMaterial(obj)
# children's references are added with their parents
# linked libraries are exported with their groups
if ((obj.parent == None or obj.parent.type=='ARMATURE' or obj.parent.type=='MESH') \
and obj.library==None) \
and obj!=None:
#obj.data.library!=None => link
if exportMesh and not is_duplicate(obj, duplicates):
if obj.parent and obj.parent.type=='ARMATURE':
#this does not work if the object's scale equals the armature's scale
#set the model's Armature to the identity matrix too
tmp_locA=obj.parent.matrix_local.copy()
tmp_loc=obj.matrix_local.copy()
# clear locations of the object and its armature
obj.parent.matrix_local=Matrix()
obj.matrix_local=Matrix()
#export
saveH3DModel(obj)
#restore location
obj.matrix_local=tmp_loc
obj.parent.matrix_local=tmp_locA
elif obj.parent and obj.parent.type=='MESH':
#pivot error?
tmp_locA=obj.parent.matrix_local.copy()
tmp_loc=obj.matrix_local.copy()
# clear location
obj.parent.matrix_local=Matrix()
obj.matrix_local=Matrix()
#export
saveH3DModel(obj)
#restore location
obj.matrix_local=tmp_loc
obj.parent.matrix_local=tmp_locA
else:
tmp_loc=obj.matrix_local.copy()
# clear location
obj.matrix_local=Matrix()
#export
saveH3DModel(obj)
#restore location
obj.matrix_local=tmp_loc
#<Reference tx="0" ty="0" sx="0.5" tz="0" sy="0.5" rx="0" sz="0.5"
#sceneGraph="models/sphere/sphere.scene.xml" ry="0" rz="0" name="sphere2" />
#is duplicate? .data.name, data.users
if not ( obj.parent and obj.parent.type == 'MESH' ):
reference = scene_xml.createElement("Reference")
set_reference_data(obj, reference, is_duplicate(obj, duplicates))
group.appendChild(reference)
# add children
for childObj in obj.children:
if childObj.type !='ARMATURE':
referenceC = scene_xml.createElement("Reference")
set_reference_data(childObj, referenceC, is_duplicate(childObj, duplicates))
reference.appendChild(referenceC)
#TODO: child relations can nest recursively; only one level is handled here
# add attachments
if 'h3d' in obj and not clearScene:
if 'h3dGameEngineAttatchment' in obj['h3d']:
attatchmentstr=xml.dom.minidom.parseString(obj['h3d']['h3dGameEngineAttatchment'])
for child in attatchmentstr.childNodes:
if child.nodeName=="Attachment":
attatchment=child
reference.appendChild(attatchment)
elif obj.type == 'EMPTY':
# if link:
if obj.dupli_group!=None:
# check group.scene.xml exists?
if os.path.isfile(basePath+"models"+os.sep+obj.dupli_group.name+".group.scene.xml"):
gRef=scene_xml.createElement("Reference")
gRef.setAttribute( "tx", str( round(obj.matrix_world.to_translation().x, 5)))
gRef.setAttribute( "ty", str(round( obj.matrix_world.to_translation().z, 5)))
gRef.setAttribute( "tz", str(-round(obj.matrix_world.to_translation().y, 5)))
gRef.setAttribute( "rx", str(round( obj.matrix_world.to_euler().x*180/ math.pi, 5)))
gRef.setAttribute( "ry", str(round( obj.matrix_world.to_euler().z*180/ math.pi, 5)))
gRef.setAttribute( "rz", str(-round(obj.matrix_world.to_euler().y*180/ math.pi, 5)))
gRef.setAttribute( "sx", str(round(obj.matrix_world.to_scale().x, 5)))
gRef.setAttribute( "sy", str(round(obj.matrix_world.to_scale().z, 5)))
gRef.setAttribute( "sz", str(round(obj.matrix_world.to_scale().y, 5)))
gRef.setAttribute( "sceneGraph", "models"+ os.sep + obj.dupli_group.name + ".group.scene.xml")
gRef.setAttribute( "name", obj.name)
group.appendChild(gRef)
# else error
else:
print("link not exported yet?")
gRef=scene_xml.createElement("Reference")
gRef.setAttribute( "tx", str( round(obj.matrix_world.to_translation().x, 5)))
gRef.setAttribute( "ty", str(round( obj.matrix_world.to_translation().z, 5)))
gRef.setAttribute( "tz", str(-round(obj.matrix_world.to_translation().y, 5)))
gRef.setAttribute( "rx", str(round( obj.matrix_world.to_euler().x*180/ math.pi, 5)))
gRef.setAttribute( "ry", str(round( obj.matrix_world.to_euler().z*180/ math.pi, 5)))
gRef.setAttribute( "rz", str(-round(obj.matrix_world.to_euler().y*180/ math.pi, 5)))
gRef.setAttribute( "sx", str(round(obj.matrix_world.to_scale().x, 5)))
gRef.setAttribute( "sy", str(round(obj.matrix_world.to_scale().z, 5)))
gRef.setAttribute( "sz", str(round(obj.matrix_world.to_scale().y, 5)))
gRef.setAttribute( "sceneGraph", "models"+ os.sep + obj.dupli_group.name + ".group.scene.xml")
gRef.setAttribute( "name", obj.name)
group.appendChild(gRef)
# append non blender objects
if not clearScene and keepNonBlender:
for nobj in nonblender:
nonblender_element=xml.dom.minidom.parseString(nobj)
#hopefully avoid empty nodes
group.appendChild(nonblender_element.childNodes[-1])
#get lamps
#position
#type?
## write scene xml ##
#default: h3d_content/models/blendfileName.scene.xml
print(scene_xml.toprettyxml(indent=" "))
if exportScene:
with open(basePath+"models"+ os.sep+bpy.data.scenes[0].name+".scene.xml", "w") as scenefile:
scenefile.write( scene_xml.toprettyxml())
if exportBGroups:
process_groups(exportMesh, exportScene, exportMaterials)
def writeGEConfig(program_path):
'''
<!DOCTYPE SceneConfiguration>
<Configuration>
<Pipeline path="pipelines/deferred.pipeline.xml" />
<SceneGraph path="models/demo.scene.xml" />
<LightParameters lightingcontext="LIGHTING" shadowcontext="SHADOWMAP" material="materials/lighting.material.xml" />
<AttachmentPlugIn name="GameEngine" />
<EngineConfig shadowMapSize="2048" texCompression="true" loadTextures="true" maxNumMessages="5000" fastAnimation="true" />
<ActiveCamera name="camera" />
<Extras/>
</Configuration>
'''
# Minimal sketch (assumptions): write the template documented in the docstring
# above next to the given program, repointing SceneGraph at the current scene.
# The output file name "scene.cfg.xml" and its placement are assumptions.
scene_graph = "models/" + bpy.data.scenes[0].name + ".scene.xml"
config = writeGEConfig.__doc__.replace("models/demo.scene.xml", scene_graph)
with open(os.path.join(os.path.dirname(program_path), "scene.cfg.xml"), "w") as configfile:
    configfile.write(config.strip())
##start Horde3D application
#
#
def start_H3Dapplication():
# demoapp scene_file
if 'H3Dprogram' in bpy.data.scenes[0]['h3d']:
if 'H3DContent_path' in bpy.data.scenes[0]['h3d']:
basePath=bpy.data.scenes[0]['h3d']['H3DContent_path']
else:
basePath= "/tmp"
scenepath=basePath+"models"+ os.sep+bpy.data.scenes[0].name+".scene.xml"
scenepath=os.path.relpath(scenepath, bpy.data.scenes[0]['h3d']['H3Dprogram'])
#strip the leading "../": the relative path was computed against the program
#binary itself rather than its directory
scenepath=scenepath[3:]
Popen([bpy.path.abspath(bpy.data.scenes[0]['h3d']['H3Dprogram'], os.sep), scenepath])
| null |
Tools/Exporters/Blender/2.63+/h3d_render/export_h3d_scene.py
|
export_h3d_scene.py
|
py
| 32,229 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bpy.types",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.object.select_all",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom.dom.minidom.parse",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "bpy.data",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "re.sub",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 184,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 203,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 226,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 246,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 253,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 267,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom.dom.minidom.parse",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "bpy.data",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.file.make_paths_absolute",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 338,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 349,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "bpy.path.basename",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "bpy.path",
"line_number": 364,
"usage_type": "attribute"
},
{
"api_name": "shutil.copyfile",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "bpy.path.basename",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "bpy.path",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 398,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 400,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 410,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 411,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 418,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 419,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 420,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 429,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 456,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 457,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 459,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 461,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 484,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 485,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 487,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 489,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 490,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 492,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 495,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 496,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 498,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 499,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 503,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 503,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 503,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 504,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.h3d.object_encoding_error",
"line_number": 527,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 527,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom.dom.minidom.parseString",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 601,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 601,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 613,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 613,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 613,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 618,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 619,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 620,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 624,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 634,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 635,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 636,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 640,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom.dom.minidom.parseString",
"line_number": 647,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 647,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 647,
"usage_type": "name"
},
{
"api_name": "os.sep",
"line_number": 661,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 675,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 676,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 677,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 699,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 700,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 702,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 704,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 705,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 707,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 708,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 710,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 711,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 713,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 714,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 718,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 718,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 718,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 718,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 719,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 719,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 740,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.h3d.object_encoding_error",
"line_number": 743,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 743,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom.dom.minidom.parseString",
"line_number": 830,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 830,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 830,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
"line_number": 841,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 841,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 841,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 846,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 847,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 848,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 852,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 862,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 863,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": 864,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 868,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom.dom.minidom.parseString",
"line_number": 875,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 875,
"usage_type": "attribute"
},
{
"api_name": "xml.dom.minidom",
"line_number": 875,
"usage_type": "name"
},
{
"api_name": "os.sep",
"line_number": 889,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 889,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 918,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 919,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 920,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 924,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 924,
"usage_type": "attribute"
},
{
"api_name": "os.path.relpath",
"line_number": 926,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 926,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 926,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 931,
"usage_type": "call"
},
{
"api_name": "bpy.path.abspath",
"line_number": 931,
"usage_type": "call"
},
{
"api_name": "bpy.path",
"line_number": 931,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 931,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 931,
"usage_type": "attribute"
}
] |
628914625
|
"""Compute depth maps for images in the input folder.
"""
import os
import glob
import torch
import utils
import cv2
from torchvision.transforms import Compose
from models.midas_net import MidasNet
from models.transforms import Resize, NormalizeImage, PrepareForNet
from torch.utils import tensorboard
def run(input_path, output_path, model_path):
"""Run MonoDepthNN to compute depth maps.
Args:
input_path (str): path to input folder
output_path (str): path to output folder
model_path (str): path to saved model
"""
print("initialize")
# select device
device = torch.device("cuda")
print("device: %s" % device)
# load network
model = MidasNet(model_path, non_negative=True)
transform = Compose(
[
Resize(
384,
384,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method="upper_bound",
image_interpolation_method=cv2.INTER_CUBIC,
),
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
PrepareForNet(),
]
)
model.to(device)
model.eval()
# get input
img_names = glob.glob(os.path.join(input_path, "*"))
num_images = len(img_names)
# create output folder
os.makedirs(output_path, exist_ok=True)
for f in os.listdir(output_path):
if os.path.isfile(os.path.join(output_path, f)):
os.remove(os.path.join(output_path, f))
writer = tensorboard.SummaryWriter(output_path)
print("start processing")
for ind, img_name in enumerate(img_names):
print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))
# input
img = utils.read_image(img_name)
img_input = transform({"image": img})["image"]
# compute
with torch.no_grad():
sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
prediction = model.forward(sample)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=img.shape[:2],
mode="bicubic",
align_corners=False,
)
log_depth(writer, 'depth3d', prediction, sample, ind)
prediction = prediction.squeeze().cpu().numpy()
# output
filename = os.path.join(
output_path, os.path.splitext(os.path.basename(img_name))[0]
)
utils.write_depth(filename, prediction, bits=2)
print("finished")
def log_depth(writer, tag, depth, rgb, iteration):
mesh = depth_to_mesh(depth)
colors = rgb.reshape(1, 3, -1).moveaxis(1, 2) * 255
writer.add_mesh(tag, mesh, colors=colors, global_step=iteration)
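# depth_to_mesh turns a (1, 1, H, W) depth tensor into a point mesh: x/y become
# aspect-corrected normalized device coordinates in [-1, 1], the normalized and
# inverted depth becomes z, giving a (1, H*W, 3) tensor for add_mesh.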
def depth_to_mesh(depth):
mind = depth.min()
maxd = depth.max()
depth = (depth - mind) / (maxd - mind)
depth = 1 - depth
depth = depth.moveaxis(1, 3)
h, w = depth.shape[1:3]
x = torch.arange(w).type(depth.type())
y = torch.arange(h).type(depth.type())
x = (2 * (x / (w - 1)) - 1)
y = -(2 * (y / (h - 1)) - 1)
if w > h:
y *= h / w
else:
x *= w / h
yy = y.view(-1, 1).repeat(1, w)
xx = x.view(1, -1).repeat(h, 1)
xy = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
xy.unsqueeze_(0)
mesh = torch.cat([xy, depth], 3)
mesh = mesh.reshape(1, -1, 3)
return mesh
if __name__ == "__main__":
# set paths
INPUT_PATH = "input"
OUTPUT_PATH = "output"
# MODEL_PATH = "model.pt"
MODEL_PATH = "model.pt"
# set torch options
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
# compute depth maps
run(INPUT_PATH, OUTPUT_PATH, MODEL_PATH)
| null |
run.py
|
run.py
|
py
| 3,816 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "torch.device",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.midas_net.MidasNet",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.transforms.Resize",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "models.transforms.NormalizeImage",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models.transforms.PrepareForNet",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torch.utils.tensorboard",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "utils.read_image",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "utils.write_depth",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "torch.backends",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 144,
"usage_type": "attribute"
}
] |
405855000
|
import os
import re
import textwrap
import sys
import argparse
timestamps = []
user = []
info = []
#Danmu position Y axis
y_axis_list = [790,753,716,679,642]
#ASS subtitle file header and styles
Def_info = textwrap.dedent('''
[Script Info]
Title: Default ASS file
ScriptType: v4.00+
WrapStyle: 2
Collisions: Normal
PlayResX: 384
PlayResY: 816
ScaledBorderAndShadow: yes
Video Zoom Percent: 1
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
Style: Default,微软雅黑,19,&H00F0F0F0,&H00FFFFFF,&H4C533B3B,&H910E0807,0,0,0,0,100.0,100.0,0.0,0.0,3,6.1923075,2.5,1,20,275,27,1
Style: D2,微软雅黑,19,&H00FFCF9C,&H00FFFFFF,&H4C533B3B,&H910E0807,0,0,0,0,100.0,100.0,0.0,0.0,3,6.1923075,2.5,1,74,275,27,1
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
''')
def main():
parser = argparse.ArgumentParser()
add = parser.add_argument
add('lrc_file')
args = parser.parse_args()
file_name = args.lrc_file.split('.')[0]
with open(args.lrc_file,encoding='utf-8') as fin:
with open(f'{file_name}.ass','w',encoding='utf-8') as fout:
for line in fin:
if '[' in line:
line = re.split(r']|\t',line)
line_time = line[0].replace('[','')
timestamps.append(line_time[1:-1])
user.append(line[1])
info.append(' ' + line[2].replace('\n',''))
else:
pass
#ASS Header Writing
fout.write(Def_info)
t = '\\r'
b = 1
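# Each chat message is emitted as up to five Dialogue events, one per slot in
# y_axis_list: it occupies a slot for exactly one timestamp interval and climbs
# to the next slot when the following message arrives. \pos pins the line and
# the {\rD2} override switches to style D2 for the username.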
for user_,info_ in zip(user,info):
for n,y_axis in zip(range(1,6),y_axis_list):
time_start_num = n + b - 2
time_end_num = n + b - 1
try:
    time_start = timestamps[time_start_num]
    time_end = timestamps[time_end_num]
except IndexError:
    break
else:
    Dialogue = f'Dialogue: 4,{time_start},{time_end},Default,,0,0,0,,{{\\pos(20,{y_axis})}}{{{t}D2}}{user_}:{{{t}}}{info_}'
    fout.write(Dialogue + '\n')
b += 1
if __name__ == '__main__':
main()
| null |
PtoA.py
|
PtoA.py
|
py
| 2,628 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "textwrap.dedent",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 48,
"usage_type": "call"
}
] |
407831204
|
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image, ImageFilter
from pytesseract import image_to_string
# requires:
# pip install seaborn pytesseract
# brew install tesseract
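# Pipeline sketch: the 2-D array is padded with a border, rendered as a seaborn
# heatmap, Gaussian-blurred so the cells fuse into letter strokes, then passed
# to Tesseract restricted to the uppercase A-Z character whitelist.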
def ocr(image, blur=5):
image = add_white_border(image)
im = render_image(image)
im.show()
im = im.filter(ImageFilter.GaussianBlur(blur))
im.show()
message = image_to_string(im, config="-c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ")
return message
def add_white_border(image):
image = [
[0, *line, 0]
for line in image
]
wide = len(image[0])
image = [[0] * wide] + image + [[0] * wide]
return image
def render_image(image):
sns.heatmap(image, cbar=False, yticklabels=False, xticklabels=False)
plt.axis('equal')
plt.savefig('temp.png', format='png')
return Image.open('temp.png')
| null |
shared/ocr.py
|
ocr.py
|
py
| 874 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "PIL.ImageFilter.GaussianBlur",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFilter",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pytesseract.image_to_string",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "seaborn.heatmap",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 34,
"usage_type": "name"
}
] |
26411966
|
import streamlit as st
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
st.write("""**1. Select Age :**""")
age = st.slider('', 0, 100, 25)
st.write("""**You selected this option **""",age)
st.write("""**2. Select Gender :**""")
sex = st.selectbox("(1=Male, 0=Female)",["1","0"])
st.write("""**You selected this option **""",sex)
# user_input_features() was not defined anywhere in this script; the stub below
# is a minimal assumption-based fix: the two collected inputs fill the first two
# of the 13 feature columns the model is trained on, and the remaining clinical
# features default to 0.
def user_input_features():
    row = [age, int(sex)] + [0] * 11  # age, sex, then 11 zero-filled features
    return pd.DataFrame([row])
df = user_input_features()
heart = pd.read_csv("heart-disease (2).csv")
X = heart.iloc[:,0:13].values
Y = heart.iloc[:,13].values  # 1-D target avoids sklearn's column-vector warning
model = RandomForestClassifier()
model.fit(X, Y)
prediction = model.predict(df)
st.subheader('Prediction :')
df1=pd.DataFrame(prediction,columns=['0'])
df1.loc[df1['0'] == 0, 'Chances of Heart Disease'] = 'No'
df1.loc[df1['0'] == 1, 'Chances of Heart Disease'] = 'Yes'
st.write(df1)
prediction_proba = model.predict_proba(df)
st.subheader('Prediction Probability in % :')
st.write(prediction_proba * 100)
| null |
Model-1/bug-fix.py
|
bug-fix.py
|
py
| 898 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "streamlit.write",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "streamlit.slider",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.selectbox",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.subheader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "streamlit.subheader",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 27,
"usage_type": "call"
}
] |
136910062
|
import re
import requests
import time,random
cookies = {
'_ntes_nnid': 'c0a9486ed4e5d91c72812668fe88891c,1587290454965',
'_ntes_nuid': 'c0a9486ed4e5d91c72812668fe88891c',
'Device-Id': 'TMjvh9JnECyAvbziFjqs',
'_ga': 'GA1.2.1244979980.1587298259',
'_gid': 'GA1.2.1284201187.1587950146',
'P_INFO': '13026205006|1588030063|1|netease_buff|00&99|jix&1587985821&netease_buff#jix&360900#10#0#0|&0|null|13026205006',
'client_id': 'Atefq2lOu5gKTo0rMBgAOA',
'Locale-Supported': 'zh-Hans',
'game': 'csgo',
'_gat_gtag_UA_109989484_1': '1',
'csrf_token': 'ImQ3MmI1YmY5MzMzMGQwNTUzMGVmZTMwM2NkNWQzYzdlNjg4NGU0OWYi.EYkAJA.DtypdwIX6KqnfCWN6tdjv3NxCjc',
}
headers = {
'Connection': 'keep-alive',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://buff.163.com/market/?game=csgo',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
}
min_price=str(1)
max_price=str(5)
num=3
idList = []
urlList= []
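# Two passes over the Buff API: first the listing pages are scanned for goods
# IDs (sorted by price within [min_price, max_price]); then each goods' sell
# orders are fetched and listings whose payload carries a "fraudwarnings" note
# are collected as buy URLs. Random sleeps roughly every 9 requests throttle
# the scraper.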
for i in range(num):
url = "https://buff.163.com/api/market/goods?game=csgo&page_num=" + str(
i + 1) + "&sort_by=price.desc&min_price=" + min + "&max_price=" + max
r = requests.get(url, headers=headers, cookies=cookies)
html = r.text
print(html)
ids = re.findall(r'\"id\"\:\d+', html)
print(ids)
for e in range(len(ids)):
    idnum = int(ids[e].split(':')[1])
    idList.append(idnum)
if i % 9 == 0:
time.sleep(random.random() * 3)
print("1_"+str(round(i/num,1)*100)+'%')
print(idList)
for x in range(len(idList)):
items_url = "https://buff.163.com/api/market/goods/sell_order?game=csgo&goods_id="+str(idList[x])+"&page_num=1&sort_by=default&mode=&allow_tradable_cooldown=1"
buy_url = "https://buff.163.com/market/goods?goods_id=" + str(idList[x]) + "&from=market#tab=selling"
t = requests.get(items_url, headers=headers, cookies=cookies)
good_text = t.text
items_fraudwarnings = re.findall(r'\"fraudwarnings\"\:\"', good_text)
if len(items_fraudwarnings)>0:
print(len(items_fraudwarnings))
print(buy_url)
urlList.append(buy_url)
if x % 9 == 0:
time.sleep(random.random() * 3)
print("2_"+str(round(x/len(idList),2)*100)+'%')
| null |
buff2.py
|
buff2.py
|
py
| 2,482 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 66,
"usage_type": "call"
}
] |
479436492
|
# coding: utf-8
__author__ = 'Aimee'
from interface.public import Config
from interface.public import HttpService
#parameterize the host and endpoint of a request
def get_host(EndPoint):
host = Config.url()
endpoint = EndPoint
url = ''.join([host,endpoint])
return url
#wrap the HTTP methods of the helper class
def get_response(url, Method, **DataALL):
    if Method == 'get':
        resp = HttpService.MyHTTP(url).get(url, **DataALL)
    elif Method == 'post':
        resp = HttpService.MyHTTP(url).post(url, **DataALL)
    else:
        raise ValueError('unsupported request method: ' + Method)
    return resp
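#usage sketch (endpoint and keyword arguments are hypothetical; the kwargs
#accepted depend on HttpService.MyHTTP):
#resp = get_response(get_host('/login'), 'post', data={'user': 'demo'})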
| null |
第一期/广州-Aimee/interface/interface/public/base.py
|
base.py
|
py
| 536 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "interface.public.Config.url",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "interface.public.Config",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "interface.public.HttpService.MyHTTP",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "interface.public.HttpService",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "interface.public.HttpService.MyHTTP",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "interface.public.HttpService",
"line_number": 19,
"usage_type": "name"
}
] |
283794158
|
# Task 2
# Press the 'Run File' menu button to execute
import sqlite3 as db #import the module
con = db.connect('test.db') #connect to a local file test.db
with con: #with commits automatically, avoiding explicit con.commit() calls
    cur = con.cursor() #"with con" opens a level of indentation
#delete a table for sales people if already there
cur.execute("DROP TABLE IF EXISTS Sales")
#create salespeople table
cur.execute("CREATE TABLE Sales(Id INT, Name TEXT)")
cur.execute("INSERT INTO Sales VALUES(?,?)",(1,'John'))
cur.execute("INSERT INTO Sales VALUES(?,?)",(2,'Lola'))
cur.execute("INSERT INTO Sales VALUES(?,?)",(3,'Sean'))
cur.execute("SELECT Sales.Name, COUNT(Pets.ID), GROUP_CONCAT(Pets.Name), "\
+"SUM(Pets.Price) FROM Pets INNER JOIN Sales ON Pets.SalesID=Sales.ID GROUP BY Sales.Name; ")
rs=cur.fetchall()
print("join 2 tables to lookup salesperson\'s name into the Pets table, grouped by salesperson\n")
for row in rs:
print(row[0],"sold",row[1],"Pets, such as:",row[2],"for the total of ",row[3],"pounds")
#as soon as the indentation is over, the changes will be saved in the database.
| null |
03-updates/task-2.py
|
task-2.py
|
py
| 1,169 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
}
] |
148765803
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
num = np.poly1d([1,0])
den = np.poly1d([1,3,2,1])
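# Transfer function H(s) = s / (s^3 + 3*s^2 + 2*s + 1); signal.impulse below
# returns its impulse response h(t).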
sys = signal.TransferFunction(num,den)
t,y = signal.impulse(sys)
plt.plot(t,y)
plt.xlabel('$t$')
plt.ylabel('$y$')
plt.grid() # minor
plt.axis()
plt.show()
| null |
impulse_response_a1.py
|
impulse_response_a1.py
|
py
| 283 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "numpy.poly1d",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.poly1d",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "scipy.signal.TransferFunction",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "scipy.signal.impulse",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 14,
"usage_type": "name"
}
] |
539983796
|
import requests
from bs4 import BeautifulSoup
import json
class Listing:
def __init__(self, url):
print("Parsing " + url)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
data = json.loads(soup.find('script', type='application/json').string)
self.pageProps = data["props"]["pageProps"]
self.cache = dict()
@property
def price(self):
""" return the montly rent in euros """
price_str = self.pageProps["listing"]["price"]
positionOfEuroSign = price_str.find("€")
price_str = price_str[positionOfEuroSign:].replace(",", "")
str_array = price_str.lower().split()
price_num = int(str_array[0][1:])
if "week" == str_array[-1]:
price_num = int(price_num * 30 / 7)
return price_num
@property
def propertyType(self):
return self.pageProps["listing"]["propertyType"]
@property
def point(self):
return self.pageProps["listing"]["point"]
@property
def numBedrooms(self):
try:
return self.pageProps["listing"]["numBedrooms"]
except KeyError:
return "1+ Bed"
@property
def numBathrooms(self):
try:
return self.pageProps["listing"]["numBathrooms"]
except KeyError:
return "1+ Bath"
@property
def canonicalUrl(self):
return self.pageProps["canonicalUrl"]
def __repr__(self):
""" return a json representation of the object"""
as_dict = dict()
as_dict["price"] = self.price
as_dict["numBedrooms"] = self.numBedrooms
as_dict["numBathrooms"] = self.numBathrooms
as_dict["canonicalUrl"] = self.canonicalUrl
point = self.point
as_dict["longitude"] = point["coordinates"][0]
as_dict["latitude"] = point["coordinates"][1]
return str(as_dict)
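# Usage sketch (the URL is a hypothetical placeholder, not a real listing page):
# listing = Listing("https://example.com/rentals/sample-apartment-123")
# print(listing)  # {'price': ..., 'numBedrooms': ..., 'longitude': ..., 'latitude': ...}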
| null |
temporary-map-visualization-fix/listing.py
|
listing.py
|
py
| 1,718 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
}
] |
447754369
|
import layoutparser as lp
import cv2,os
import numpy as np
from collections import namedtuple
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
import sys, random, torch, glob, torchvision
# from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
# from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
# from torchvision.transforms import transforms
# from skimage import io
seed = 1234
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
model_primalaynet = lp.Detectron2LayoutModel('lp://PrimaLayout/mask_rcnn_R_50_FPN_3x/config',label_map = {1:"TextRegion", 2:"ImageRegion", 3:"TableRegion", 4:"MathsRegion", 5:"SeparatorRegion", 6:"OtherRegion"},extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.5])
def overlappingArea(l1, r1, l2, r2,ar):
x = 0
y = 1
area1 = abs(l1[x] - r1[x]) * abs(l1[y] - r1[y])
area2 = abs(l2[x] - r2[x]) * abs(l2[y] - r2[y])
check=False
areaI = ((min(r1[x], r2[x]) -
max(l1[x], l2[x])) *
(min(r1[y], r2[y]) -
max(l1[y], l2[y])))
thresh = 0
if ar==None:
ar=0
if area1<area2:
if abs(int(ar)/area1)>0.20:
thresh = abs(int(ar)/area1)
check=True
if area1>area2:
if abs(int(ar)/area2)>0.20:
thresh = abs(int(ar)/area2)
check=True
if (r2[x] < r1[x] and l2[x] > l1[x] and l2[y] > l1[y] and l2[y] < l1[y]) or (r2[x] > r1[x] and l2[x] < l1[x] and l2[y] < l1[y] and l2[y] > l1[y]):
check =True
return check, thresh
def area(a, b): # returns None if rectangles don't intersect
dx = min(a.xmax, b.xmax) - max(a.xmin, b.xmin)
dy = min(a.ymax, b.ymax) - max(a.ymin, b.ymin)
if (dx>=0) and (dy>=0):
return dx*dy
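# area() returns the intersection area of two Rectangles, or None when they do
# not intersect; overlappingArea() flags a pair when that intersection covers
# more than ~20% of the smaller box, or when one box fully contains the other.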
def filter_overlapping(coord,layout,index,tag):
if index<len(layout):
coord_update = coord
skip=[-1]
tag_update = tag
for idx,ele in enumerate(layout):
coord2 =ele
coord1= coord_update
l1=[coord1[0],coord1[1]]; r1=[coord1[2],coord1[3]]
l2=[coord2[0],coord2[1]]; r2=[coord2[2],coord2[3]]
ra = Rectangle(coord1[0],coord1[1],coord1[2],coord1[3])
rb = Rectangle(coord2[0],coord2[1], coord2[2],coord2[3])
ar = area(ra, rb)
check=False
check,thresh = overlappingArea(l1, r1, l2, r2,ar)
if ((tag[idx]=="ImageRegion" and tag[index]=="ImageRegion") and ar!=None and idx!=index) or ((tag[idx]=="ImageRegion" and tag[index]!="ImageRegion") and thresh>0.40 and idx!=index) or ((tag[idx]!="ImageRegion" and tag[index]=="ImageRegion") and thresh>0.40 and idx!=index ):
coord_update[0]=min(coord1[0],coord2[0])
coord_update[1]=min(coord1[1],coord2[1])
coord_update[2]=max(coord1[2],coord2[2])
coord_update[3]=max(coord1[3],coord2[3])
skip.append(idx)
tag_update="ImageRegion"
elif ar!=None and ar>0.01 and tag[idx]=="TextRegion" and tag[index]=="TextRegion":
coord_update[0]=min(coord1[0],coord2[0])
coord_update[1]=min(coord1[1],coord2[1])
coord_update[2]=max(coord1[2],coord2[2])
coord_update[3]=max(coord1[3],coord2[3])
skip.append(idx)
tag_update="TextRegion"
elif ar!=None and ((abs(coord1[0]-coord2[0])<300 and abs(coord1[2]-coord2[2])<300) and check==True) and idx!=index and (tag[idx]!="ImageRegion" and tag[index]!="ImageRegion"):# and (thresh>0.90)) or ((tag[idx]!="ImageRegion" and tag[index]=="ImageRegion") and (thresh>0.90))):
coord_update[0]=min(coord1[0],coord2[0])
coord_update[1]=min(coord1[1],coord2[1])
coord_update[2]=max(coord1[2],coord2[2])
coord_update[3]=max(coord1[3],coord2[3])
skip.append(idx)
tag_update=tag[idx]
return coord_update, skip,tag_update
def draw_box(layout,df,width_ratio,height_ratio):
skip=[-1]; bbox = []; tag =[]
for idx,ele in enumerate(layout):
bbox.append(list(ele.coordinates))
tag.append(ele.type)
final_box= []
final_tag = []
for idx, ele in enumerate(bbox):
if (idx in skip) or abs(ele[0]-ele[2])<10 or abs(ele[1]-ele[3])<10:
continue
ele,skip ,tag_update= filter_overlapping(ele,bbox,idx,tag)
final_box.append(ele)
final_tag.append(tag_update)
boxes,coords = sort(final_box,final_tag,df,width_ratio,height_ratio)
return boxes, coords
def update_box(boxes):
updated_box =[]; skip=0
boxes = sorted(boxes , key=lambda k: [k[1]])
for idx, box in enumerate(boxes):
coord = box
if idx+1<len(boxes):
box2 = boxes[idx+1]
if skip>0:
skip=skip-1
continue
for idx2, box2 in enumerate(boxes):
coord[0]=min(box2[0],box[0]); coord[1]=min(box2[1],box[1]); coord[2]=max(box2[2],box[2]); coord[3]=max(box2[3],box[3])
idx=idx+1
skip=skip+1
box=coord
box2 = boxes[idx]
updated_box.append(coord)
return updated_box
def remove_overlap(coords):
coord_update = coords
rm_index=[]
for idx1,coord1 in enumerate(coords):
for idx2,coord2 in enumerate(coords):
ra = Rectangle(coord1[0],coord1[1],coord1[2],coord1[3])
rb = Rectangle(coord2[0],coord2[1], coord2[2],coord2[3])
ar = area(ra, rb)
l1=[coord1[0],coord1[1]]; r1=[coord1[2],coord1[3]]
l2=[coord2[0],coord2[1]]; r2=[coord2[2],coord2[3]]
check,thresh = overlappingArea(l1, r1, l2, r2,ar)
if ar!=None and thresh>0 and idx1<len(coord_update):
coord_update[idx1][0]=min(coord1[0],coord2[0]); coord_update[idx1][1]=min(coord1[1],coord2[1])
coord_update[idx1][2]=max(coord1[2],coord2[2]); coord_update[idx1][3]=max(coord1[3],coord2[3])
del coord_update[idx2]
return coord_update
def df_to_wordbox(df,width_ratio,height_ratio):
boxes = []
for index, row in df.iterrows():
temp_box = []
temp_box.append(row['text_left']/width_ratio); temp_box.append(row['text_top']/height_ratio)
temp_box.append((row['text_width']+row['text_left'])/width_ratio); temp_box.append((row['text_height']+row['text_top'])/height_ratio)
#boxes.append(temp_box)
#temp_box.append(row['text_left']); temp_box.append(row['text_top'])
#temp_box.append((row['text_width'])); temp_box.append((row['text_height']))
boxes.append(temp_box)
return boxes
def craft_refinement(coords,df,width_ratio,height_ratio):
coords_update = []
org_coord = coords
org_coord2= coords
boxes = df_to_wordbox(df,width_ratio,height_ratio)
boxes_final = []
drop_lis = []
for idx1, coord1 in enumerate(boxes):
min_area = 0; count=0
index=idx1; check =False
for idx2, coord2 in enumerate(org_coord):
ra = Rectangle(coord1[0],coord1[1],coord1[2],coord1[3])
rb = Rectangle(coord2[0],coord2[1], coord2[2],coord2[3])
ar = area(ra, rb)
if ar!=None and min_area<ar:
min_area =ar
index= idx2
check =True
if ar==None:
count=count+1
if check ==True:
org_coord2[index][0] = int(min(coord1[0],org_coord2[index][0])); org_coord2[index][1] = int(min(coord1[1],org_coord2[index][1]))
org_coord2[index][2] = int(max(coord1[2],org_coord2[index][2])); org_coord2[index][3] = int(max(coord1[3],org_coord2[index][3]))
if count == len(org_coord):
boxes_final.append(coord1)
coords = remove_overlap(org_coord2)
if len(boxes_final)!=0:
for box in boxes_final:
vertical_min_dis = sys.maxsize
horizon_min_dis = sys.maxsize
ver_coord_update = None
hor_coord_update = None
hor_index = None
ver_index = None
for idx,coord in enumerate(coords):
top_dis = abs(coord[1]-box[1]); left_dis = abs(coord[0]-box[2])
bottom_dis = abs(coord[1]-box[3]); right_dis = abs(coord[2]-box[2])
top_dis1 = abs(coord[3]-box[1]); left_dis1 = abs(coord[0]-box[0])
bottom_dis1 = abs(coord[3]-box[3]); right_dis1 = abs(coord[2]-box[0])
vertical_dis = min(top_dis,bottom_dis,top_dis1,bottom_dis1)
horizon_dis = min(left_dis,right_dis,left_dis1,right_dis1)
#if (vertical_min_dis>vertical_dis and (box[0]<=coord[0]-30 and box[2]< and box[2]>coord[0]) or (vertical_min_dis>vertical_dis and box[2]>coord[0]):
if (vertical_min_dis>vertical_dis and abs(box[0]-coord[0])<100):
vertical_min_dis = vertical_dis
ver_coord_update = coord
ver_index= idx
if horizon_min_dis>horizon_dis:
horizon_min_dis = horizon_dis
hor_coord_update = coord
hor_index = idx
if abs(vertical_min_dis)<150 :
coords[ver_index][0] = int(min(ver_coord_update[0],box[0])); coords[ver_index][1] = int(min(ver_coord_update[1],box[1]))
coords[ver_index][2] = int(max(ver_coord_update[2],box[2])); coords[ver_index][3] = int(max(ver_coord_update[3],box[3]))
#elif abs(horizon_min_dis)<10:
#coords[hor_index][0] = int(min(hor_coord_update[0],box[0])); coords[hor_index][1] = int(min(hor_coord_update[1],box[1]))
#coords[hor_index][2] = int(max(hor_coord_update[2],box[2])); coords[hor_index][3] = int(max(hor_coord_update[3],box[3]))
else:
coords.append(box)
coords = remove_overlap(coords)
return boxes,coords
def sort(final_box,final_tag,df,width_ratio,height_ratio):
skip=[-1]; final_coord =[]
for idx,box in enumerate(final_box):
if idx in skip:
continue
coord1=box
coord_update = coord1
for idx2,box2 in enumerate(final_box):
coord2=box2
l1=[coord1[0],coord1[1]]; r1=[coord1[2],coord1[3]]
l2=[coord2[0],coord2[1]]; r2=[coord2[2],coord2[3]]
ra = Rectangle(coord1[0],coord1[1],coord1[2],coord1[3])
rb = Rectangle(coord2[0],coord2[1], coord2[2],coord2[3])
ar = area(ra, rb)
if ar!=None and abs(ar)>0.1 and final_tag[idx]=="TextRegion" and final_tag[idx2]=="TextRegion":
coord_update[0]=min(coord1[0],coord2[0])
coord_update[1]=min(coord1[1],coord2[1])
coord_update[2]=max(coord1[2],coord2[2])
coord_update[3]=max(coord1[3],coord2[3])
skip.append(idx)
final_coord.append(coord_update)
boxes,coords = craft_refinement(final_coord,df,width_ratio,height_ratio)
return boxes, coords
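# predict_primanet runs the PrimaLayout model on the page image, merges and
# sorts the detected regions, then maps boxes between coordinate systems: the
# incoming word boxes are divided by width/height ratios to match the image,
# and the merged regions are multiplied back to document coordinates on return.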
def predict_primanet(image_path,in_df,width_ratio,height_ratio):
image1 = cv2.imread(image_path)
image = image1[..., ::-1]
layout = model_primalaynet.detect(image)
boxes,coords = draw_box(layout,in_df,width_ratio,height_ratio)
final_coord= []
for coord in coords:
temp_dict={}
temp_dict['text_left'] =coord[0]*width_ratio; temp_dict['text_top']=coord[1]*height_ratio
temp_dict['text_width']=abs((coord[2]-coord[0])*width_ratio); temp_dict['text_height']=abs((coord[3]-coord[1])*height_ratio)
#temp_dict['text_left'] =coord[0]; temp_dict['text_top']=coord[1]
#temp_dict['text_width']=abs(coord[2]-coord[0]); temp_dict['text_height']=abs(coord[3]-coord[1])
final_coord.append(temp_dict)
return final_coord
| null |
anuvaad-etl/anuvaad-extractor/block-merger/src/utilities/primalaynet/infer.py
|
infer.py
|
py
| 10,630 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "collections.namedtuple",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "layoutparser.Detectron2LayoutModel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "sys.maxsize",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 260,
"usage_type": "call"
}
] |
176537285
|
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import matplotlib.pyplot as plt
import numpy as np
import pyvista as pv
from matplotlib.colors import Normalize
from matplotlib.cm import magma
from cloud_colocations.plots import grid_to_edges
try:
plt.style.use("/home/simonpf/src/joint_flight/misc/matplotlib_style.rc")
except:
pass
from netCDF4 import Dataset
#
# Modis
#
modis_data = Dataset("modis.nc", "r")
x_modis = modis_data["x"][:][::10, ::10]
y_modis = modis_data["y"][:][::10, ::10]
xx = grid_to_edges(x_modis[:, :])
yy = grid_to_edges(y_modis[:, :])
zz = grid_to_edges(np.zeros(x_modis[:, :].shape))
vertices = np.zeros((xx.size, 3))
vertices[:, 0] = xx.ravel()
vertices[:, 1] = yy.ravel()
vertices[:, 2] = zz.ravel()
modis_surface = pv.StructuredGrid(xx, yy, zz)
modis_texture = pv.read_texture("gmi_texture_[5, 6].png")
modis_surface.texture_map_to_plane(inplace = True)
m = xx.shape[0]
n = xx.shape[1]
tc_x = np.linspace(0, 1, m).reshape(-1, 1)
tc_y = np.linspace(0, 1, n).reshape(1, -1)
tc_x, tc_y = np.meshgrid(tc_x, tc_y, indexing = "xy")
tcs = np.zeros((m * n, 2))
tcs[:, 0] = tc_x.T.ravel(order = "F")
tcs[:, 1] = tc_y.T.ravel(order = "F")
modis_surface.point_arrays["Texture Coordinates"][:, 0] = tcs[:, 0]
modis_surface.point_arrays["Texture Coordinates"][:, 1] = tcs[:, 1]
#
# Dardar
#
dardar_data = Dataset("dardar.nc", "r")
x_dardar = dardar_data["x"][:]
y_dardar = dardar_data["y"][:]
z_dardar = dardar_data["z"][:]
i_start = np.where(y_dardar > -600)[0]([0])
i_end = np.where(y_dardar > 600)[0][0]
dbz = np.minimum(10 * np.log10(np.maximum(dardar_data["rr"][:], 10 ** -2.6)), 20)[i_start : i_end]
xx = grid_to_edges(x_dardar[i_start : i_end, :])
yy = grid_to_edges(y_dardar[i_start : i_end, :])
zz = grid_to_edges(z_dardar[i_start : i_end, :]) / 5e1
for i in range(dbz.shape[0]):
z = zz[i, :]
j = np.where(z < 15)[0][0]
dbz[i, j:] = dbz[i, j]
vertices = np.zeros((xx.size, 3))
vertices[:, 0] = xx.ravel()
vertices[:, 1] = yy.ravel()
vertices[:, 2] = zz.ravel()
dardar_curtain = pv.StructuredGrid(xx, yy, zz)
#dbz = np.copy(yy[1::, 1:].T, order= "F")
dardar_curtain.cell_arrays["radar_reflectivity"] = dbz.T.ravel()
dardar_curtain.save("dardar.vts")
dardar_curtain.set_active_scalar("radar_reflectivity")
#m = xx.shape[0]
#n = xx.shape[1]
#tc_x = np.linspace(1, 0, m).reshape(-1, 1)
#tc_y = np.linspace(0, 1, n).reshape(1, -1)
#tc_x, tc_y = np.meshgrid(tc_x, tc_y)
#tcs = np.zeros((m * n, 2))
#tcs[:, 1] = tc_x.ravel()
#tcs[:, 0] = tc_y.ravel()
#
#origin = [x_modis[0, 0], y_modis[0, 0], zz[0, 0]]
#u = [x_modis[0, -1], y_modis[0, -1], zz[0, -1]]
#v = [x_modis[-1, 0], y_modis[-1, 0], zz[-1, 0]]
##modis_surface.cell_arrays["Texture Coordinates"] = tcs
#modis_texture = pv.numpy_to_texture(np.array(256 * modis_rgb[:, j_start : j_end, :], order = "F", dtype = np.uint8))
#modis_surface.point_arrays["Texture Coordinates"][:, 0] = tcs[:, 0]
#modis_surface.point_arrays["Texture Coordinates"][:, 1] = tcs[:, 1]
#
#return modis_surface, modis_texture
#bounds = [700, 1500, -800, 800, -100, 100]
#modis_clipped = modis_clipped.clip_box(bounds)
#bounds = [-1500, 1500, -800, -600, -100, 100]
#modis_clipped = modis_clipped.clip_box(bounds)
#bounds = [-1500, 1500, 600, 1000, -100, 100]
#modis_clipped = modis_clipped.clip_box(bounds)
bounds = [-2000, -900, -800, 800, -100, 100]
modis_clipped = modis_surface.clip_box(bounds)
plotter = pv.BackgroundPlotter()
#dardar_clipped = dardar_curtain.clip_surface(modis_surface)
plotter.add_mesh(modis_clipped, texture = modis_texture, lighting = False)
plotter.add_mesh(dardar_curtain, lighting = False, opacity = "sigmoid", cmap = "magma", show_scalar_bar = False)
#plotter.add_bounding_box(color = "black")
#plotter.show_grid(color = "black")
plotter.background_color = "white"
plotter.show(screenshot = "cloudsat_gmi.png")
| null |
notebooks/plot_3d.py
|
plot_3d.py
|
py
| 3,876 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cloud_colocations.plots.grid_to_edges",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cloud_colocations.plots.grid_to_edges",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "cloud_colocations.plots.grid_to_edges",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyvista.StructuredGrid",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pyvista.read_texture",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.log10",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cloud_colocations.plots.grid_to_edges",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cloud_colocations.plots.grid_to_edges",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cloud_colocations.plots.grid_to_edges",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pyvista.StructuredGrid",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pyvista.BackgroundPlotter",
"line_number": 105,
"usage_type": "call"
}
] |
503122610
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachine(Resource):
"""
Describes a Virtual Machine.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param plan: Gets or sets the purchase plan when deploying virtual
machine from VM Marketplace images.
:type plan: :class:`Plan <azure.mgmt.compute.models.Plan>`
:param hardware_profile: Gets or sets the hardware profile.
:type hardware_profile: :class:`HardwareProfile
<azure.mgmt.compute.models.HardwareProfile>`
:param storage_profile: Gets or sets the storage profile.
:type storage_profile: :class:`StorageProfile
<azure.mgmt.compute.models.StorageProfile>`
:param os_profile: Gets or sets the OS profile.
:type os_profile: :class:`OSProfile <azure.mgmt.compute.models.OSProfile>`
:param network_profile: Gets or sets the network profile.
:type network_profile: :class:`NetworkProfile
<azure.mgmt.compute.models.NetworkProfile>`
:param diagnostics_profile: Gets or sets the diagnostics profile.
:type diagnostics_profile: :class:`DiagnosticsProfile
<azure.mgmt.compute.models.DiagnosticsProfile>`
:param availability_set: Gets or sets the reference Id of the
availability set to which this virtual machine belongs.
:type availability_set: :class:`SubResource
<azure.mgmt.compute.models.SubResource>`
:param provisioning_state: Gets or sets the provisioning state, which
only appears in the response.
:type provisioning_state: str
:ivar instance_view: Gets the virtual machine instance view.
:vartype instance_view: :class:`VirtualMachineInstanceView
<azure.mgmt.compute.models.VirtualMachineInstanceView>`
:param license_type: Gets or sets the license type, which is for bring
your own license scenario.
:type license_type: str
:param vm_id: Gets the virtual machine unique id.
:type vm_id: str
:ivar resources: Gets the virtual machine child extension resources.
:vartype resources: list of :class:`VirtualMachineExtension
<azure.mgmt.compute.models.VirtualMachineExtension>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'instance_view': {'readonly': True},
'resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
}
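    # Note: dotted keys such as 'properties.hardwareProfile' tell the msrest
    # serializer to nest the attribute under the top-level 'properties' object
    # on the wire, e.g. {'properties': {'hardwareProfile': {...}}} (a reading
    # of the generated attribute map, per AutoRest/msrest conventions).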
def __init__(self, location, tags=None, plan=None, hardware_profile=None, storage_profile=None, os_profile=None, network_profile=None, diagnostics_profile=None, availability_set=None, provisioning_state=None, license_type=None, vm_id=None):
super(VirtualMachine, self).__init__(location=location, tags=tags)
self.plan = plan
self.hardware_profile = hardware_profile
self.storage_profile = storage_profile
self.os_profile = os_profile
self.network_profile = network_profile
self.diagnostics_profile = diagnostics_profile
self.availability_set = availability_set
self.provisioning_state = provisioning_state
self.instance_view = None
self.license_type = license_type
self.vm_id = vm_id
self.resources = None
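        # Minimal usage sketch (illustrative values; the profile classes live
        # in azure.mgmt.compute.models):
        #   vm = VirtualMachine(location='westus', tags={'env': 'test'},
        #                       license_type='Windows_Server')
        # Read-only fields (id, name, type, instance_view, resources) are set
        # by the service and ignored when sending a request.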
| null |
azure-mgmt-compute/azure/mgmt/compute/models/virtual_machine.py
|
virtual_machine.py
|
py
| 5,682 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "resource.Resource",
"line_number": 25,
"usage_type": "name"
}
] |
374096073
|
"""
Copyright ©2019. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation
for educational, research, and not-for-profit purposes, without fee and without a
signed licensing agreement, is hereby granted, provided that the above copyright
notice, this paragraph and the following two paragraphs appear in all copies,
modifications, and distributions.
Contact The Office of Technology Licensing, UC Berkeley, 2150 Shattuck Avenue,
Suite 510, Berkeley, CA 94720-1620, (510) 643-7201, [email protected],
http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED
"AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
ENHANCEMENTS, OR MODIFICATIONS.
"""
from boac.models.cohort_filter import CohortFilter
import pytest
import simplejson as json
from tests.test_api.api_test_utils import all_cohorts_owned_by
admin_uid = '2040'
asc_advisor_uid = '1081940'
coe_advisor_uid = '1133399'
@pytest.fixture()
def admin_login(fake_auth):
fake_auth.login(admin_uid)
@pytest.fixture()
def asc_advisor_login(fake_auth):
fake_auth.login(asc_advisor_uid)
@pytest.fixture()
def coe_advisor_login(fake_auth):
fake_auth.login(coe_advisor_uid)
@pytest.fixture()
def asc_owned_cohort():
cohorts = all_cohorts_owned_by(asc_advisor_uid)
return next((c for c in cohorts if c['name'] == 'All sports'), None)
@pytest.fixture()
def coe_owned_cohort():
cohorts = all_cohorts_owned_by(coe_advisor_uid)
return next((c for c in cohorts if c['name'] == 'Radioactive Women and Men'), None)
class TestCohortDetail:
"""Cohort API."""
def test_my_cohorts_not_authenticated(self, client):
"""Rejects anonymous user."""
response = client.get('/api/cohorts/my')
assert response.status_code == 401
def test_my_cohorts(self, coe_advisor_login, client):
"""Returns user's cohorts."""
response = client.get('/api/cohorts/my')
assert response.status_code == 200
cohorts = response.json
assert len(cohorts) == 2
for key in 'name', 'alertCount', 'criteria', 'totalStudentCount', 'isOwnedByCurrentUser':
assert key in cohorts[0], f'Missing cohort element: {key}'
def test_students_with_alert_counts(self, asc_advisor_login, client, create_alerts, db_session):
"""Pre-load students into cache for consistent alert data."""
from boac.models.alert import Alert
assert client.get('/api/student/by_uid/61889').status_code == 200
assert client.get('/api/student/by_uid/98765').status_code == 200
Alert.update_all_for_term(2178)
cohorts = all_cohorts_owned_by(asc_advisor_uid)
assert len(cohorts)
cohort_id = cohorts[0]['id']
response = client.get(f'/api/cohort/{cohort_id}/students_with_alerts')
assert response.status_code == 200
students_with_alerts = response.json
assert len(students_with_alerts) == 3
deborah = students_with_alerts[0]
assert deborah['sid'] == '11667051'
assert deborah['alertCount'] == 3
# Summary student data is included with alert counts, but full term feeds are not.
assert deborah['cumulativeGPA'] == 3.8
assert deborah['cumulativeUnits'] == 101.3
assert deborah['expectedGraduationTerm']['name'] == 'Fall 2019'
assert deborah['level'] == 'Junior'
assert len(deborah['majors']) == 2
assert deborah['term']['enrolledUnits'] == 12.5
assert deborah['termGpa'][0]['gpa'] == 2.9
assert 'enrollments' not in deborah['term']
dave_doolittle = students_with_alerts[1]
assert dave_doolittle['sid'] == '2345678901'
assert dave_doolittle['uid']
assert dave_doolittle['firstName']
assert dave_doolittle['lastName']
assert dave_doolittle['alertCount'] == 1
def _get_alerts(uid):
_response = client.get(f'/api/student/by_uid/{uid}')
assert _response.status_code == 200
return _response.json['notifications']['alert']
alert_to_dismiss = _get_alerts(61889)[0]['id']
client.get('/api/alerts/' + str(alert_to_dismiss) + '/dismiss')
alert_to_dismiss = _get_alerts(98765)[0]['id']
client.get('/api/alerts/' + str(alert_to_dismiss) + '/dismiss')
students_with_alerts = client.get(f'/api/cohort/{cohort_id}/students_with_alerts').json
assert len(students_with_alerts) == 2
assert students_with_alerts[0]['sid'] == '11667051'
assert students_with_alerts[0]['alertCount'] == 2
def test_cohorts_all(self, asc_advisor_login, client):
"""Returns all cohorts per owner."""
response = client.get('/api/cohorts/all')
assert response.status_code == 200
api_json = response.json
count = len(api_json)
assert count == 3
for index, entry in enumerate(api_json):
user = entry['user']
if 0 < index < count:
# Verify order
assert user['name'] > api_json[index - 1]['user']['name']
assert 'uid' in user
cohorts = entry['cohorts']
cohort_count = len(cohorts)
for c_index, cohort in enumerate(cohorts):
if 0 < c_index < cohort_count:
# Verify order
assert cohort['name'] > cohorts[c_index - 1]['name']
assert 'id' in cohort
def test_get_cohort(self, coe_advisor_login, client, coe_owned_cohort, create_alerts):
"""Returns a well-formed response with filtered cohort and alert count per student."""
cohort_id = coe_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == 200
cohort = json.loads(response.data)
assert cohort['id'] == cohort_id
assert cohort['name'] == coe_owned_cohort['name']
assert 'students' in cohort
assert cohort['students'][0].get('alertCount') == 3
def test_get_cohort_without_students(self, coe_advisor_login, client, coe_owned_cohort):
"""Returns a well-formed response with cohort and no students."""
cohort_id = coe_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}?includeStudents=false')
assert response.status_code == 200
cohort = json.loads(response.data)
assert 'students' not in cohort
def test_unauthorized_get_cohort(self, asc_advisor_login, client, coe_owned_cohort):
"""Returns a well-formed response with custom cohort."""
cohort_id = coe_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == 404
assert 'No cohort found' in json.loads(response.data)['message']
def test_undeclared_major(self, asc_advisor_login, client):
"""Returns a well-formed response with custom cohort."""
cohort = all_cohorts_owned_by(asc_advisor_uid)[-1]
cohort_id = cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == 200
cohort = json.loads(response.data)
assert cohort['name'] == 'Undeclared students'
students = cohort['students']
assert cohort['totalStudentCount'] == len(students) == 1
# We expect the student with 'Letters & Sci Undeclared UG' major
assert students[0]['sid'] == '5678901234'
def test_includes_cohort_member_sis_data(self, asc_advisor_login, asc_owned_cohort, client):
"""Includes SIS data for custom cohort students."""
cohort_id = asc_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == 200
athlete = next(m for m in response.json['students'] if m['firstName'] == 'Deborah')
assert athlete['cumulativeGPA'] == 3.8
assert athlete['cumulativeUnits'] == 101.3
assert athlete['level'] == 'Junior'
assert athlete['majors'] == ['English BA', 'Nuclear Engineering BS']
def test_includes_cohort_member_current_enrollments(self, asc_advisor_login, asc_owned_cohort, client):
"""Includes current-term active enrollments for custom cohort students."""
cohort_id = asc_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}?orderBy=firstName')
assert response.status_code == 200
athlete = next(m for m in response.json['students'] if m['firstName'] == 'Deborah')
term = athlete['term']
assert term['termName'] == 'Fall 2017'
assert term['enrolledUnits'] == 12.5
assert len(term['enrollments']) == 5
assert term['enrollments'][0]['displayName'] == 'BURMESE 1A'
assert len(term['enrollments'][0]['canvasSites']) == 1
def test_includes_cohort_member_term_gpa(self, asc_advisor_login, asc_owned_cohort, client):
cohort_id = asc_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}?orderBy=firstName')
assert response.status_code == 200
deborah = next(m for m in response.json['students'] if m['firstName'] == 'Deborah')
assert len(deborah['termGpa']) == 4
assert deborah['termGpa'][0] == {'termName': 'Spring 2018', 'gpa': 2.9}
assert deborah['termGpa'][3] == {'termName': 'Spring 2016', 'gpa': 3.8}
def test_includes_cohort_member_athletics_asc(self, asc_advisor_login, asc_owned_cohort, client):
"""Includes athletic data custom cohort members for ASC advisors."""
cohort_id = asc_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == 200
athlete = next(m for m in response.json['students'] if m['firstName'] == 'Deborah')
assert len(athlete['athleticsProfile']['athletics']) == 2
assert athlete['athleticsProfile']['inIntensiveCohort'] is not None
assert athlete['athleticsProfile']['isActiveAsc'] is not None
assert athlete['athleticsProfile']['statusAsc'] is not None
tennis = next(membership for membership in athlete['athleticsProfile']['athletics'] if membership['groupCode'] == 'WTE')
field_hockey = next(membership for membership in athlete['athleticsProfile']['athletics'] if membership['groupCode'] == 'WFH')
assert tennis['groupName'] == 'Women\'s Tennis'
assert tennis['teamCode'] == 'TNW'
assert tennis['teamName'] == 'Women\'s Tennis'
assert field_hockey['groupName'] == 'Women\'s Field Hockey'
assert field_hockey['teamCode'] == 'FHW'
assert field_hockey['teamName'] == 'Women\'s Field Hockey'
def test_omits_cohort_member_athletics_non_asc(self, coe_advisor_login, client, coe_owned_cohort):
"""Omits athletic data for non-ASC advisors."""
cohort_id = coe_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == 200
secretly_an_athlete = next(m for m in response.json['students'] if m['firstName'] == 'Deborah')
assert 'athletics' not in secretly_an_athlete
assert 'inIntensiveCohort' not in secretly_an_athlete
assert 'isActiveAsc' not in secretly_an_athlete
assert 'statusAsc' not in secretly_an_athlete
def test_includes_cohort_member_athletics_advisors(self, admin_login, client, coe_owned_cohort):
"""Includes athletic data for admins."""
cohort_id = coe_owned_cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == 200
athlete = next(m for m in response.json['students'] if m['firstName'] == 'Deborah')
assert len(athlete['athleticsProfile']['athletics']) == 2
assert athlete['athleticsProfile']['inIntensiveCohort'] is not None
assert athlete['athleticsProfile']['isActiveAsc'] is not None
assert athlete['athleticsProfile']['statusAsc'] is not None
def test_get_cohort_404(self, client, coe_advisor_login):
"""Returns a well-formed response when no cohort found."""
response = client.get('/api/cohort/99999999')
assert response.status_code == 404
assert 'No cohort found' in str(response.data)
def test_offset_and_limit(self, asc_advisor_login, asc_owned_cohort, client):
"""Returns a well-formed response with custom cohort."""
cohort_id = asc_owned_cohort['id']
api_path = f'/api/cohort/{cohort_id}'
# First, offset is zero
response = client.get(f'{api_path}?offset={0}&limit={1}')
assert response.status_code == 200
data_0 = json.loads(response.data)
assert data_0['totalStudentCount'] == 4
assert len(data_0['students']) == 1
# Now, offset is one
response = client.get(f'{api_path}?offset={1}&limit={1}')
data_1 = json.loads(response.data)
assert len(data_1['students']) == 1
# Verify that a different offset results in a different member
assert data_0['students'][0]['uid'] != data_1['students'][0]['uid']
def test_unauthorized_request_for_athletic_study_center_data(self, client, fake_auth):
"""In order to access intensive_cohort, inactive status, etc. the user must be either ASC or Admin."""
fake_auth.login('1022796')
data = {
'name': 'My filtered cohort just hacked the system!',
'filters': [
{'key': 'isInactiveAsc', 'type': 'boolean', 'value': True},
],
}
response = client.post(
'/api/cohort/create',
data=json.dumps(data),
content_type='application/json',
)
assert response.status_code == 403
def test_my_students_filter_me(self, client, asc_advisor_login):
cohort = CohortFilter.create(
uid=asc_advisor_uid,
name='All my students',
filter_criteria={
'cohortOwnerAcademicPlans': ['*'],
},
)
response = client.get(f"/api/cohort/{cohort['id']}").json
sids = sorted([s['sid'] for s in response['students']])
assert sids == ['11667051', '2345678901', '3456789012', '5678901234', '7890123456', '9100000000']
def test_my_students_filter_not_me(self, client, admin_login):
cohort = CohortFilter.create(
uid=asc_advisor_uid,
name='All my students',
filter_criteria={
'cohortOwnerAcademicPlans': ['*'],
},
)
response = client.get(f"/api/cohort/{cohort['id']}").json
sids = sorted([s['sid'] for s in response['students']])
assert sids == ['11667051', '2345678901', '3456789012', '5678901234', '7890123456', '9100000000']
class TestCohortCreate:
"""Cohort Create API."""
@classmethod
def _post_cohort_create(cls, client, json_data=(), expected_status_code=200):
response = client.post(
'/api/cohort/create',
data=json.dumps(json_data),
content_type='application/json',
)
assert response.status_code == expected_status_code
return json.loads(response.data)
@staticmethod
def _api_cohort(client, cohort_id, expected_status_code=200):
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == expected_status_code
return response.json
def test_create_cohort(self, client, asc_advisor_login):
"""Creates custom cohort, owned by current user."""
data = {
'name': 'Tennis',
'filters': [
{'key': 'majors', 'type': 'array', 'value': 'Letters & Sci Undeclared UG'},
{'key': 'groupCodes', 'type': 'array', 'value': 'MTE'},
{'key': 'majors', 'type': 'array', 'value': 'English BA'},
{'key': 'genders', 'type': 'array', 'value': 'Male'},
],
}
def _verify(api_json):
assert api_json.get('name') == 'Tennis'
assert api_json['alertCount'] is not None
assert len(api_json.get('criteria', {}).get('majors')) == 2
# ASC specific
team_groups = api_json.get('teamGroups')
assert len(team_groups) == 1
assert team_groups[0].get('groupCode') == 'MTE'
# Students
students = api_json.get('students')
assert len(students) == 1
assert students[0]['gender'] == 'Male'
assert students[0]['underrepresented'] is False
data = self._post_cohort_create(client, data)
_verify(data)
cohort_id = data.get('id')
assert cohort_id
_verify(self._api_cohort(client, cohort_id))
def test_asc_advisor_is_forbidden(self, asc_advisor_login, client, fake_auth):
"""Denies ASC advisor access to COE data."""
data = {
'name': 'ASC advisor wants to see students of COE advisor',
'filters': [
{
'key': 'coeEthnicities',
'type': 'array',
'value': 'Vietnamese',
},
],
}
assert self._post_cohort_create(client, data, expected_status_code=403)
def test_admin_create_of_coe_uid_cohort(self, admin_login, client, fake_auth):
"""Allows Admin to access COE data."""
data = {
'name': 'Admin wants to see students of COE advisor',
'filters': [
{
'key': 'coeGenders',
'type': 'array',
'value': 'M',
},
{
'key': 'genders',
'type': 'array',
'value': 'Different Identity',
},
],
}
api_json = self._post_cohort_create(client, data)
assert len(api_json['students']) == 2
for student in api_json['students']:
assert student['gender'] == 'Different Identity'
assert student['coeProfile']['gender'] == 'M'
def test_create_complex_cohort(self, client, coe_advisor_login):
"""Creates custom cohort, with many non-empty filter_criteria."""
data = {
'name': 'Complex',
'filters': [
                {'key': 'majors', 'type': 'array', 'value': 'Gender and Women\'s Studies'},
{'key': 'gpaRanges', 'type': 'array', 'value': 'numrange(2, 2.5, \'[)\')'},
{'key': 'levels', 'type': 'array', 'value': 'Junior'},
{'key': 'coeGenders', 'type': 'array', 'value': 'M'},
{'key': 'genders', 'type': 'array', 'value': 'Genderqueer/Gender Non-Conform'},
{'key': 'gpaRanges', 'type': 'array', 'value': 'numrange(0, 2, \'[)\')'},
{'key': 'majors', 'type': 'array', 'value': 'Environmental Economics & Policy'},
],
}
api_json = self._post_cohort_create(client, data)
cohort_id = api_json['id']
api_json = self._api_cohort(client, cohort_id)
assert api_json['alertCount'] is not None
criteria = api_json.get('criteria')
# Genders
assert criteria.get('genders') == ['Genderqueer/Gender Non-Conform']
# COE genders
assert criteria.get('coeGenders') == ['M']
# GPA
gpa_ranges = criteria.get('gpaRanges')
assert len(gpa_ranges) == 2
assert 'numrange(0, 2, \'[)\')' in gpa_ranges
# Levels
assert criteria.get('levels') == ['Junior']
# Majors
majors = criteria.get('majors')
assert len(majors) == 2
        assert 'Gender and Women\'s Studies' in majors
def test_admin_creation_of_asc_cohort(self, client, admin_login):
"""COE advisor cannot use ASC criteria."""
self._post_cohort_create(
client,
{
'name': 'Admin superpowers',
'filters': [
{'key': 'groupCodes', 'type': 'array', 'value': 'MTE'},
{'key': 'groupCodes', 'type': 'array', 'value': 'WWP'},
],
},
)
def test_forbidden_cohort_creation(self, client, coe_advisor_login):
"""COE advisor cannot use ASC criteria."""
data = {
'name': 'Sorry Charlie',
'filters': [
{'key': 'groupCodes', 'type': 'array', 'value': 'MTE'},
],
}
self._post_cohort_create(client, data, expected_status_code=403)
class TestCohortUpdate:
"""Cohort Update API."""
@classmethod
def _post_cohort_update(cls, client, json_data=()):
return client.post(
'/api/cohort/update',
data=json.dumps(json_data),
content_type='application/json',
)
def test_unauthorized_cohort_update(self, client, coe_advisor_login):
cohort = CohortFilter.create(
uid=asc_advisor_uid,
name='Swimming, Men\'s',
filter_criteria={
'groupCodes': ['MSW', 'MSW-DV', 'MSW-SW'],
},
)
data = {
'id': cohort['id'],
'name': 'Hack the name!',
}
response = self._post_cohort_update(client, data)
assert 403 == response.status_code
def test_update_filters(self, client, asc_advisor_login):
cohort = CohortFilter.create(
uid=asc_advisor_uid,
name='Swimming, Men\'s',
filter_criteria={
'groupCodes': ['MSW', 'MSW-DV', 'MSW-SW'],
},
)
# First, we POST an empty name
cohort_id = cohort['id']
response = self._post_cohort_update(client, {'id': cohort_id})
assert 400 == response.status_code
# Now, we POST a valid name
data = {
'id': cohort_id,
'filters': [
                {'key': 'majors', 'type': 'array', 'value': 'Gender and Women\'s Studies'},
{'key': 'gpaRanges', 'type': 'array', 'value': 'numrange(2, 2.5, \'[)\')'},
],
}
response = self._post_cohort_update(client, data)
assert 200 == response.status_code
updated_cohort = response.json
assert updated_cohort['alertCount'] is not None
        assert updated_cohort['criteria']['majors'] == ['Gender and Women\'s Studies']
assert updated_cohort['criteria']['gpaRanges'] == ['numrange(2, 2.5, \'[)\')']
assert updated_cohort['criteria']['groupCodes'] is None
def remove_empties(criteria):
return {k: v for k, v in criteria.items() if v is not None}
cohort = CohortFilter.find_by_id(cohort_id)
expected = remove_empties(cohort['criteria'])
actual = remove_empties(updated_cohort['criteria'])
assert expected == actual
def test_cohort_update_filter_criteria(self, client, asc_advisor_login):
name = 'Swimming, Men\'s'
cohort = CohortFilter.create(
uid=asc_advisor_uid,
name=name,
filter_criteria={
'groupCodes': ['MBB'],
},
)
cohort_id = cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
cohort = json.loads(response.data)
assert cohort['totalStudentCount'] == 1
# Update the db
updates = {
'id': cohort_id,
'criteria': {
'groupCodes': ['MBB', 'MBB-AA'],
},
}
response = self._post_cohort_update(client, updates)
assert response.status_code == 200
# Verify the value of 'student_count' in db
updated_cohort = CohortFilter.find_by_id(cohort_id)
assert updated_cohort['totalStudentCount'] == 2
assert 'sids' not in updated_cohort
assert updated_cohort['criteria']['groupCodes'] == updates['criteria']['groupCodes']
class TestCohortDelete:
"""Cohort Delete API."""
def test_delete_cohort_not_authenticated(self, client):
"""Custom cohort deletion requires authentication."""
response = client.delete('/api/cohort/delete/123')
assert response.status_code == 401
def test_delete_cohort_wrong_user(self, client, fake_auth):
"""Custom cohort deletion is only available to owners."""
cohort = CohortFilter.create(
uid=coe_advisor_uid,
name='Badminton teams',
filter_criteria={
'groupCodes': ['WWP', 'MWP'],
},
)
assert cohort
# This user does not own the custom cohort above
fake_auth.login('2040')
cohort_id = cohort['id']
response = client.get(f'/api/cohort/{cohort_id}')
assert response.status_code == 200
_cohort = json.loads(response.data)
assert _cohort['isOwnedByCurrentUser'] is False
response = client.delete(f'/api/cohort/delete/{cohort_id}')
assert response.status_code == 400
assert '2040 does not own' in str(response.data)
def test_delete_cohort(self, client, coe_advisor_login):
"""Deletes existing custom cohort while enforcing rules of ownership."""
name = 'Water polo teams'
cohort = CohortFilter.create(
uid=coe_advisor_uid,
name=name,
filter_criteria={
'groupCodes': ['WWP', 'MWP'],
},
)
# Verify deletion
cohort_id = cohort['id']
response = client.delete(f'/api/cohort/delete/{cohort_id}')
assert response.status_code == 200
cohorts = all_cohorts_owned_by(asc_advisor_uid)
assert not next((c for c in cohorts if c['id'] == cohort_id), None)
class TestCohortPerFilters:
"""Cohort API."""
@classmethod
def _api_get_students_per_filters(cls, client, json_data=(), expected_status_code=200):
response = client.post(
'/api/cohort/get_students_per_filters',
data=json.dumps(json_data),
content_type='application/json',
)
assert response.status_code == expected_status_code
return response.json
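    # Each filter row has the shape {'key': <criterion>, 'type': 'array' |
    # 'boolean' | 'range', 'value': ...}; the endpoint translates these rows
    # into the cohort's filter_criteria (shape inferred from the tests below).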
def test_students_per_filters_not_authenticated(self, client):
"""API requires authentication."""
self._api_get_students_per_filters(client, expected_status_code=401)
def test_students_per_filters_with_empty(self, client, coe_advisor_login):
"""API requires non-empty input."""
self._api_get_students_per_filters(client, {'filters': []}, expected_status_code=400)
def test_students_per_filters_unauthorized(self, client, asc_advisor_login):
"""ASC advisor is not allowed to query with COE attributes."""
self._api_get_students_per_filters(
client,
{
'filters':
[
{
'key': 'coeProbation',
'type': 'boolean',
'value': 'true',
},
],
},
expected_status_code=403,
)
def test_students_per_filters_coe_advisor(self, client, coe_advisor_login):
"""API translates 'coeProbation' filter to proper filter_criteria query."""
gpa_range_1 = 'numrange(0, 2, \'[)\')'
gpa_range_2 = 'numrange(2, 2.5, \'[)\')'
api_json = self._api_get_students_per_filters(
client,
{
'filters':
[
{
'key': 'coeProbation',
'type': 'boolean',
'value': 'true',
},
{
'key': 'gpaRanges',
'type': 'array',
'value': gpa_range_1,
},
{
'key': 'gpaRanges',
'type': 'array',
'value': gpa_range_2,
},
{
'key': 'lastNameRange',
'type': 'range',
'value': ['A', 'Z'],
},
],
},
)
assert 'totalStudentCount' in api_json
assert 'students' in api_json
criteria = api_json['criteria']
assert criteria['coeProbation'] is not None
assert criteria['lastNameRange'] is not None
gpa_ranges = criteria['gpaRanges']
assert len(gpa_ranges) == 2
assert gpa_range_1 in gpa_ranges
assert gpa_range_2 in gpa_ranges
for key in [
'coeAdvisorLdapUids',
'coeEthnicities',
'ethnicities',
'expectedGradTerms',
'genders',
'groupCodes',
'inIntensiveCohort',
'isInactiveAsc',
'levels',
'majors',
'transfer',
'underrepresented',
'unitRanges',
]:
assert criteria[key] is None
def test_my_students_filter_all_plans(self, client, coe_advisor_login):
"""Returns students mapped to advisor, across all academic plans."""
api_json = self._api_get_students_per_filters(
client,
{
'filters': [
{
'key': 'cohortOwnerAcademicPlans',
'type': 'array',
'value': '*',
},
],
},
)
sids = sorted([s['sid'] for s in api_json['students']])
assert sids == ['11667051', '7890123456', '9000000000', '9100000000']
def test_my_students_filter_selected_plans(self, client, coe_advisor_login):
"""Returns students mapped to advisor, per specified academic plans."""
api_json = self._api_get_students_per_filters(
client,
{
'filters': [
{
'key': 'cohortOwnerAcademicPlans',
'type': 'array',
'value': '162B0U',
},
{
'key': 'cohortOwnerAcademicPlans',
'type': 'array',
'value': '162B3U',
},
],
},
)
sids = sorted([s['sid'] for s in api_json['students']])
assert sids == ['7890123456', '9000000000']
def _get_defensive_line(self, client, inactive_asc, order_by):
api_json = self._api_get_students_per_filters(
client,
{
'filters':
[
{
'key': 'groupCodes',
'type': 'array',
'value': 'MFB-DL',
},
{
'key': 'isInactiveAsc',
'type': 'boolean',
'value': inactive_asc,
},
],
'orderBy': order_by,
},
)
return api_json['students']
def test_students_per_filters_order_by(self, client, asc_advisor_login):
"""Returns properly ordered list of students."""
def _get_first_student(order_by):
students = self._get_defensive_line(client, False, order_by)
assert len(students) == 3
return students[0]
assert _get_first_student('first_name')['firstName'] == 'Dave'
assert _get_first_student('last_name')['lastName'] == 'Doolittle'
assert _get_first_student('gpa')['cumulativeGPA'] == 3.005
assert _get_first_student('level')['level'] == 'Junior'
assert _get_first_student('major')['majors'][0] == 'Chemistry BS'
assert _get_first_student('units')['cumulativeUnits'] == 34
student = _get_first_student('group_name')
assert student['athleticsProfile']['athletics'][0]['groupName'] == 'Football, Defensive Backs'
def test_student_athletes_inactive_asc(self, client, asc_advisor_login):
"""An ASC advisor query defaults to active athletes only."""
students = self._get_defensive_line(client, False, 'gpa')
assert len(students) == 3
for student in students:
assert student['athleticsProfile']['isActiveAsc'] is True
def test_student_athletes_inactive_admin(self, client, admin_login):
"""An admin query defaults to active and inactive athletes."""
students = self._get_defensive_line(client, None, 'gpa')
assert len(students) == 4
def is_active_asc(student):
return student['athleticsProfile']['isActiveAsc']
assert is_active_asc(students[0]) is False
assert is_active_asc(students[1]) is True
assert is_active_asc(students[2]) is True
assert is_active_asc(students[3]) is True
def test_filter_expected_grad_term(self, client, coe_advisor_login):
"""Returns students per expected graduation."""
api_json = self._api_get_students_per_filters(
client,
{
'filters':
[
{
'key': 'expectedGradTerms',
'type': 'array',
'value': '2202',
},
],
},
)
students = api_json['students']
assert len(students) == 2
for student in students:
assert student['expectedGraduationTerm']['name'] == 'Spring 2020'
def test_filter_transfer(self, client, coe_advisor_login):
"""Returns list of transfer students."""
api_json = self._api_get_students_per_filters(
client,
{
'filters':
[
{
'key': 'transfer',
'type': 'boolean',
'value': True,
},
],
},
)
students = api_json['students']
assert len(students) == 2
for student in students:
assert student['transfer'] is True
def test_ethnicities_filter(self, client, coe_advisor_login):
"""Returns students of specified ethnicity."""
api_json = self._api_get_students_per_filters(
client,
{
'filters': [
{
'key': 'ethnicities',
'type': 'array',
'value': 'African-American / Black',
},
],
},
)
sids = sorted([s['sid'] for s in api_json['students']])
assert sids == ['2345678901', '3456789012', '890127492']
class TestDownloadCsvPerFilters:
"""Download Cohort CSV API."""
@classmethod
def _api_download_csv_per_filters(cls, client, json_data=(), expected_status_code=200):
response = client.post(
'/api/cohort/download_csv_per_filters',
data=json.dumps(json_data),
content_type='application/json',
)
assert response.status_code == expected_status_code
return response.json
def test_download_csv_not_authenticated(self, client):
"""API requires authentication."""
self._api_download_csv_per_filters(client, expected_status_code=401)
def test_download_csv_with_empty(self, client, coe_advisor_login):
"""API requires non-empty input."""
self._api_download_csv_per_filters(client, {'filters': ()}, expected_status_code=400)
def test_download_csv_unauthorized(self, client, asc_advisor_login):
"""ASC advisor is not allowed to query with COE attributes."""
self._api_download_csv_per_filters(
client,
{
'filters':
[
{
'key': 'coeProbation',
'type': 'boolean',
'value': 'true',
},
],
},
expected_status_code=403,
)
def test_download_csv(self, client, coe_advisor_login):
"""Advisor can download CSV with ALL students of cohort."""
data = {
'filters':
[
{
'key': 'coeEthnicities',
'type': 'array',
'value': ['H', 'B'],
},
],
}
response = client.post(
'/api/cohort/download_csv_per_filters',
data=json.dumps(data),
content_type='application/json',
)
assert response.status_code == 200
assert 'csv' in response.content_type
csv = str(response.data)
for snippet in [
'first_name,last_name,sid,email,phone',
'Deborah,Davies,11667051,[email protected],415/123-4567',
'Paul,Farestveit,7890123456,,415/123-4567',
'Wolfgang,Pauli-O\'Rourke,9000000000,,415/123-4567',
]:
assert str(snippet) in csv
class TestAllCohortFilterOptions:
"""Cohort Filter Options API."""
@classmethod
def _api_cohort_filter_options(cls, client, json_data=(), owner='me', expected_status_code=200):
response = client.post(
f'/api/cohort/filter_options/{owner}',
data=json.dumps(json_data),
content_type='application/json',
)
assert response.status_code == expected_status_code
return response.json
@classmethod
def _level_option(cls, student_class_level):
return {
'key': 'levels',
'type': 'array',
'value': student_class_level,
}
def test_filter_options_api_not_authenticated(self, client):
"""Menu API cohort-filter-options requires authentication."""
self._api_cohort_filter_options(client, expected_status_code=401)
def test_filter_options_with_nothing_disabled(self, client, coe_advisor_login):
"""Menu API with all menu options available."""
api_json = self._api_cohort_filter_options(
client,
{
'existingFilters': [],
},
)
for category in api_json:
for menu in category:
assert 'disabled' not in menu
if menu['type'] == 'array':
for option in menu['options']:
assert 'disabled' not in option
def test_filter_options_my_students_for_me(self, client, coe_advisor_login):
"""Returns user's own academic plans under 'My Students'."""
api_json = self._api_cohort_filter_options(
client,
{
'existingFilters': [],
},
)
my_students = next(option for group in api_json for option in group if option['name'] == 'My Students')
assert len(my_students['options']) == 5
assert {'name': 'All plans', 'value': '*'} in my_students['options']
assert {'name': 'Bioengineering BS', 'value': '16288U'} in my_students['options']
assert {'name': 'Engineering Undeclared UG', 'value': '162B0U'} in my_students['options']
assert {'name': 'BioE/MSE Joint Major BS', 'value': '162B3U'} in my_students['options']
assert {'name': 'Bioengineering UG', 'value': '16I010U'} in my_students['options']
def test_filter_options_my_students_for_not_me(self, client, coe_advisor_login):
"""Returns another user's academic plans under 'My Students'."""
api_json = self._api_cohort_filter_options(
client,
{
'existingFilters': [],
},
asc_advisor_uid,
)
my_students = next(option for group in api_json for option in group if option['name'] == 'My Students')
assert len(my_students['options']) == 4
assert {'name': 'All plans', 'value': '*'} in my_students['options']
assert {'name': 'English BA', 'value': '25345U'} in my_students['options']
assert {'name': 'English UG', 'value': '25I039U'} in my_students['options']
assert {'name': 'Medieval Studies UG', 'value': '25I054U'} in my_students['options']
def test_filter_options_with_category_disabled(self, client, coe_advisor_login):
"""The coe_probation option is disabled if it is in existing-filters."""
api_json = self._api_cohort_filter_options(
client,
{
'existingFilters':
[
{
'key': 'coeProbation',
'type': 'boolean',
},
],
},
)
assert len(api_json) == 3
for category in api_json:
for menu in category:
if menu['key'] == 'coeProbation':
assert menu['disabled'] is True
else:
assert 'disabled' not in menu
def test_filter_options_with_one_disabled(self, client, coe_advisor_login):
"""The 'Freshman' sub-menu option is disabled if it is already in cohort filter set."""
api_json = self._api_cohort_filter_options(
client,
{
'existingFilters':
[
self._level_option('Freshman'),
self._level_option('Sophomore'),
self._level_option('Junior'),
{
'key': 'coeAdvisorLdapUids',
'type': 'array',
'value': '1022796',
},
],
},
)
assert len(api_json) == 3
assertion_count = 0
for category in api_json:
for menu in category:
# All top-level category menus are enabled
assert 'disabled' not in menu
if menu['key'] == 'levels':
for option in menu['options']:
disabled = option.get('disabled')
if option['value'] in ['Freshman', 'Sophomore', 'Junior']:
assert disabled is True
assertion_count += 1
else:
assert disabled is None
else:
assert 'disabled' not in menu
assert assertion_count == 3
def test_all_options_in_category_disabled(self, client, coe_advisor_login):
"""Disable the category if all its options are in existing-filters."""
api_json = self._api_cohort_filter_options(
client,
{
'existingFilters':
[
self._level_option('Senior'),
self._level_option('Junior'),
self._level_option('Sophomore'),
self._level_option('Freshman'),
],
},
)
for category in api_json:
for menu in category:
if menu['key'] == 'levels':
assert menu.get('disabled') is True
for option in menu['options']:
assert option.get('disabled') is True
else:
assert 'disabled' not in menu
def test_disable_last_name_range(self, client, coe_advisor_login):
"""Disable the category if all its options are in existing-filters."""
api_json = self._api_cohort_filter_options(
client,
{
'existingFilters':
[
{
'key': 'lastNameRange',
'type': 'range',
'value': ['A', 'B'],
},
],
},
)
for category in api_json:
for menu in category:
is_disabled = menu.get('disabled')
if menu['key'] == 'lastNameRange':
assert is_disabled is True
else:
assert is_disabled is None
class TestTranslateToFilterOptions:
"""Cohort Filter Options API."""
@classmethod
def _api_translate_to_filter_options(cls, client, json_data=(), owner='me', expected_status_code=200):
response = client.post(
f'/api/cohort/translate_to_filter_options/{owner}',
data=json.dumps(json_data),
content_type='application/json',
)
assert response.status_code == expected_status_code
return response.json
def test_translate_criteria_when_empty(self, client, coe_advisor_login):
"""Empty criteria translates to zero rows."""
assert [] == self._api_translate_to_filter_options(
client,
{
'criteria': {},
},
)
def test_translate_criteria_with_boolean(self, client, coe_advisor_login):
"""Filter-criteria with boolean is properly translated."""
json_data = {
'criteria': {
'isInactiveCoe': False,
},
}
api_json = self._api_translate_to_filter_options(client, json_data)
assert len(api_json) == 1
assert api_json[0]['name'] == 'Inactive'
assert api_json[0]['key'] == 'isInactiveCoe'
assert api_json[0]['value'] is False
def test_translate_criteria_with_array(self, client, coe_advisor_login):
"""Filter-criteria with array is properly translated."""
api_json = self._api_translate_to_filter_options(
client,
{
'criteria': {
'genders': ['Female', 'Decline to State'],
'levels': ['Freshman', 'Sophomore'],
},
},
)
assert len(api_json) == 4
# Levels
assert api_json[0]['name'] == api_json[1]['name'] == 'Level'
assert api_json[0]['key'] == api_json[1]['key'] == 'levels'
assert api_json[0]['value'] == 'Freshman'
assert api_json[1]['value'] == 'Sophomore'
# Genders
assert api_json[2]['name'] == api_json[3]['name'] == 'Gender'
assert api_json[2]['key'] == api_json[3]['key'] == 'genders'
assert api_json[2]['value'] == 'Female'
assert api_json[3]['value'] == 'Decline to State'
def test_translate_criteria_with_range(self, client, coe_advisor_login):
"""Filter-criteria with range is properly translated."""
api_json = self._api_translate_to_filter_options(
client,
{
'criteria': {
'lastNameRange': ['M', 'Z'],
},
},
)
assert len(api_json) == 1
assert api_json[0]['name'] == 'Last Name'
assert api_json[0]['key'] == 'lastNameRange'
assert api_json[0]['value'] == ['M', 'Z']
def test_translate_criteria_my_students_for_me(self, client, coe_advisor_login):
"""User's own 'My Students' criteria are properly translated."""
api_json = self._api_translate_to_filter_options(
client,
{
'criteria': {
'cohortOwnerAcademicPlans': ['*'],
},
},
)
assert len(api_json) == 1
assert api_json[0]['name'] == 'My Students'
assert api_json[0]['subcategoryHeader'] == 'Choose academic plan...'
assert api_json[0]['key'] == 'cohortOwnerAcademicPlans'
assert api_json[0]['value'] == '*'
def test_translate_criteria_my_students_for_not_me(self, client, coe_advisor_login):
"""Another user's 'My Students' criteria are properly translated."""
api_json = self._api_translate_to_filter_options(
client,
{
'criteria': {
'cohortOwnerAcademicPlans': ['25I039U', '25I054U'],
},
},
asc_advisor_uid,
)
assert len(api_json) == 2
assert api_json[0]['name'] == 'My Students'
assert api_json[0]['key'] == 'cohortOwnerAcademicPlans'
assert api_json[0]['value'] == '25I039U'
assert api_json[1]['name'] == 'My Students'
assert api_json[1]['key'] == 'cohortOwnerAcademicPlans'
assert api_json[1]['value'] == '25I054U'
| null |
tests/test_api/test_cohort_controller.py
|
test_cohort_controller.py
|
py
| 49,600 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pytest.fixture",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tests.test_api.api_test_utils.all_cohorts_owned_by",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tests.test_api.api_test_utils.all_cohorts_owned_by",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "boac.models.alert.Alert.update_all_for_term",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "boac.models.alert.Alert",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "tests.test_api.api_test_utils.all_cohorts_owned_by",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "tests.test_api.api_test_utils.all_cohorts_owned_by",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "simplejson.dumps",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.create",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.create",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 317,
"usage_type": "name"
},
{
"api_name": "simplejson.dumps",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "simplejson.loads",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "simplejson.dumps",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.create",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 487,
"usage_type": "name"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.create",
"line_number": 502,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 502,
"usage_type": "name"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.find_by_id",
"line_number": 531,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 531,
"usage_type": "name"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.create",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 538,
"usage_type": "name"
},
{
"api_name": "simplejson.loads",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.find_by_id",
"line_number": 559,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 559,
"usage_type": "name"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.create",
"line_number": 575,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 575,
"usage_type": "name"
},
{
"api_name": "simplejson.loads",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter.create",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "boac.models.cohort_filter.CohortFilter",
"line_number": 599,
"usage_type": "name"
},
{
"api_name": "tests.test_api.api_test_utils.all_cohorts_owned_by",
"line_number": 610,
"usage_type": "call"
},
{
"api_name": "simplejson.dumps",
"line_number": 621,
"usage_type": "call"
},
{
"api_name": "simplejson.dumps",
"line_number": 870,
"usage_type": "call"
},
{
"api_name": "simplejson.dumps",
"line_number": 915,
"usage_type": "call"
},
{
"api_name": "simplejson.dumps",
"line_number": 937,
"usage_type": "call"
},
{
"api_name": "simplejson.dumps",
"line_number": 1114,
"usage_type": "call"
}
] |
244793223
|
import time
import argparse
from dask.distributed import Client
import dask.array as da
import dask.dataframe as dd
class Experiment(object):
def __init__(self, ip, nodesize, outfile):
        self.scheduler_ip = ip
        self.node_count = nodesize
        self.outfile = outfile
        self.client = Client(self.scheduler_ip)
self.x = da.random.normal(0, 1, size=(
10000, 10000), chunks=(10000, 10000))
self.y = da.random.normal(0, 1, size=(
10000, 10000), chunks=(10000, 10000))
def array_test(self):
dx = da.dot(self.x, self.x)
dot = da.dot(self.x, self.x.T)
dt = da.dot(self.x.T, self.x)
start_time = time.time()
dot.compute()
dt.compute()
dx.compute()
total_time = time.time() - start_time
print(total_time)
def df_test(self):
print(self.x.shape)
names = ['Row' + str(i) for i in range(self.x.shape[0])]
df = dd.from_array(self.x, columns=names)
print(df["Row100"])
s = df["Row100"].sum().compute()
print(s)
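    # Note: dd.from_array chunks the array along the first axis; selecting a
    # column and calling .sum() only builds a task graph, and the work runs on
    # the distributed scheduler when .compute() is invoked.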
def append_to_file(nodes, run_time):
    # append one CSV row per run; the with-statement closes the file even on error
    with open('append_results.txt', 'a') as fp:
        fp.write(nodes + ',' + str(run_time) + '\n')
if __name__ == '__main__':
ap = argparse.ArgumentParser(description='input')
ap.add_argument('-ip', "--ip", required=True, help="Dask scheduler IP")
ap.add_argument('-n', "--n", required=True, help="Node size")
ap.add_argument('-o', "--o", required=True, help="path to outfile")
args = vars(ap.parse_args())
e = Experiment(args["ip"], args["n"], args["o"])
#e.array_test()
e.df_test()
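# Example invocation (hypothetical scheduler address and values):
#   python experiment.py --ip tcp://10.0.0.1:8786 --n 4 --o results.txt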
| null |
code/experiment.py
|
experiment.py
|
py
| 1,682 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "dask.distributed.Client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dask.array.random.normal",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dask.array.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "dask.array",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "dask.array.random.normal",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dask.array.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "dask.array",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "dask.array.dot",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "dask.array",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "dask.array.dot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "dask.array",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "dask.array.dot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dask.array",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "dask.dataframe.from_array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "dask.dataframe",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 49,
"usage_type": "call"
}
] |
252889865
|
from django.urls import path
from . import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path(
'mascotas/<int:id>',
views.get_delete_update_mascota,
name='get_delete_update_mascota'
),
path(
'mascotas/',
views.get_post_mascotas,
name='get_post_mascotas'
),
path(
'mascota_perdida/<int:id>/',
views.GPD_mascota_perdida,
name='GPD_mascota_perdida'
),
path(
'mascota_perdida/',
views.post_mascota_perdida,
name='post_mascota_perdida'
),
path(
'mascota_encontrada/<int:id>/',
views.GPD_mascota_encontrada,
name='GPD_mascota_encontrada'
),
path(
'mascota_encontrada/',
views.post_mascota_encontrada,
name='post_mascota_encontrada'
),
path(
'usuario/<int:id>/',
views.GPD_usuario,
name='GPD_usuario'
),
# path(
# 'registro/',
# views.crear_usuario,
# name='crear_usuario'
# ),
# path(
# 'login/',
# obtain_auth_token,
# name='login'
# ),
path(
'usuario/',
views.post_usuario,
name='post_usuario'
),
path(
'reporte/<int:id>/',
views.GPD_reporte,
name='GPD_reporte'
),
path(
'reporte/',
views.post_reporte,
name='post_reporte'
),
path(
'reporte/<int:id>/',
views.GPD_reporte,
name='GPD_reporte'
),
path(
'reporte_avistado/',
views.post_reporte_avistado,
name='GPD_reporte_avistado'
),
path(
'reporte_encontrado/',
views.post_reporte_encontrado,
name='GPD_reporte_encontrado'
),
path(
'reporte_perdido/',
views.post_reporte_perdido,
name='GPD_reporte_perdido'
),
]
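# Usage sketch (assuming these patterns are included at the project root with
# no namespace): reverse('get_post_mascotas') resolves to '/mascotas/', and
# reverse('GPD_mascota_perdida', args=[7]) resolves to '/mascota_perdida/7/'.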
| null |
api/urls.py
|
urls.py
|
py
| 1,908 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 83,
"usage_type": "call"
}
] |
485660903
|
#!/usr/bin/python
import pickle
import sys
import matplotlib.pyplot
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from operator import itemgetter

### read in data dictionary, convert to numpy array
# pickle files must be opened in binary mode under Python 3
data_dict = pickle.load(open("../final_project/final_project_dataset.pkl", "rb"))
features = ["poi", "salary", "bonus"]
data = featureFormat(data_dict, features)
data = sorted(data, key=itemgetter(2), reverse=True)

''' add this code to ../tools/feature_format.py to get the name of the dictionary key instead of poi
if feature == "poi":
    value = key
####
and uncomment this line:
print("Largest data point:", data[0][0])
'''

data.pop(0)  # drop the largest point (the spreadsheet 'TOTAL' row, an outlier)
for point in data:
    salary = point[1]
    bonus = point[2]
    name = point[0]
    matplotlib.pyplot.scatter(salary, bonus)

matplotlib.pyplot.ylabel("bonus")
matplotlib.pyplot.xlabel("salary")
matplotlib.pyplot.show()
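# Illustrative toy version of the sort-then-pop outlier removal above (not part
# of the original script): sort records by the bonus column descending, then
# drop the single largest one.
toy = [("a", 1, 10), ("b", 2, 999), ("c", 3, 20)]
toy = sorted(toy, key=itemgetter(2), reverse=True)
toy.pop(0)  # removes ("b", 2, 999)
print(toy)  # [('c', 3, 20), ('a', 1, 10)]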
| null |
outliers/enron_outliers.py
|
enron_outliers.py
|
py
| 906 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "feature_format.featureFormat",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot.scatter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pyplot.ylabel",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pyplot.xlabel",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pyplot.show",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.pyplot",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
}
] |
205111357
|
import pygame
from random import randint  # used to give the ball a random initial velocity

BLACK = (0, 0, 0)

class Ball(pygame.sprite.Sprite):  # the ball class
    # creates a ball sprite
    def __init__(self, color, width, height):
        super().__init__()  # call the initializer of pygame's built-in Sprite class
        # create the ball surface; filling it with BLACK and then declaring BLACK
        # the colorkey makes the background transparent
        self.image = pygame.Surface([width, height])
        self.image.fill(BLACK)
        self.image.set_colorkey(BLACK)
        # most of this is the same as for the paddle
        pygame.draw.rect(self.image, color, [0, 0, width, height])  # draw the ball
        # give the ball a random initial speed and direction, drawn from the given ranges
        self.velocity = [randint(4, 8), randint(-8, 8)]
        self.rect = self.image.get_rect()  # bounding rect used to position the ball

    # moves the ball according to its velocity
    def update(self):
        self.rect.x += self.velocity[0]
        self.rect.y += self.velocity[1]

    # bounces the ball off a paddle
    def bounce(self):
        self.velocity[0] = -self.velocity[0]  # reverse horizontal direction at the same speed...
        self.velocity[1] = randint(-8, 8)  # ...with a new random vertical component

    def after_bounce(self):
        nball = Ball((172, 192, 203), 10, 10)
        nball.rect.x = 345
        nball.rect.y = 195
        return nball
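# Minimal usage sketch (illustrative, not part of the original file): create a
# ball, advance it a few frames, and bounce it once; no window is opened here.
if __name__ == "__main__":
    ball = Ball((172, 192, 203), 10, 10)
    for _ in range(3):
        ball.update()  # move by the current velocity
    ball.bounce()      # reverse horizontal direction
    print(ball.rect.topleft, ball.velocity)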
| null |
ball.py
|
ball.py
|
py
| 1,658 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pygame.sprite",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.draw.rect",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 32,
"usage_type": "call"
}
] |
468149744
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 20 20:08:58 2017

@author: anurag
"""
# import all libraries
import quandl
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt

# import the dataset
df = pd.DataFrame(quandl.get("NSE/ITDC", start_date="2016-02-20"))

# transform the dataset down to a single principal component
classifier = PCA(n_components=1)
fit = classifier.fit_transform(df)

def normalize(v):
    norm = np.linalg.norm(v)
    if norm == 0:
        return v
    return v / norm

listy = normalize(fit)
listx = np.arange(len(listy))
meanx = np.mean(listx)
meany = np.mean(listy)
xn = listx - meanx
yn = listy - meany

# apply linear regression to the dataset: bxy is the least-squares slope,
# c the intercept
bxy = np.dot(xn, yn) / np.sum(xn ** 2)
c = meany - bxy * meanx
y = []
for x in range(len(listy)):
    y.append(bxy * x + c)

# plot the fitted line and the data points
plt.plot(listy, '^')
plt.plot(y)
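# Sanity check (illustrative, not in the original script): the centered
# dot-product formula above is the closed-form ordinary-least-squares slope,
# so it should agree with numpy's own degree-1 polynomial fit.
check_slope, check_intercept = np.polyfit(listx, np.ravel(listy), 1)
assert np.allclose(check_slope, np.ravel(bxy)[0])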
| null |
regression.py
|
regression.py
|
py
| 1,035 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "quandl.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
}
] |
88393927
|
from flask import request, Flask, jsonify, abort
from fastai import *
from fastai.vision import *
import os
import sys
import json

path = Path('data/cloth_categories')
dBFilePath = '/home/azure/data.json'
app = Flask(__name__)

def predict(imagePath):
    classes = ['Blouse', 'Blazer', 'Button-Down', 'Bomber', 'Anorak', 'Tee', 'Tank', 'Top', 'Sweater', 'Flannel', 'Hoodie', 'Cardigan', 'Jacket', 'Henley', 'Poncho', 'Jersey', 'Turtleneck', 'Parka', 'Peacoat', 'Halter', 'Skirt', 'Shorts', 'Jeans', 'Joggers', 'Sweatpants', 'Jeggings', 'Cutoffs', 'Sweatshorts', 'Leggings', 'Culottes', 'Chinos', 'Trunks', 'Sarong', 'Gauchos', 'Jodhpurs', 'Capris', 'Dress', 'Romper', 'Coat', 'Kimono', 'Jumpsuit', 'Robe', 'Caftan', 'Kaftan', 'Coverup', 'Onesie']
    single_img_data = ImageDataBunch.single_from_classes(path, classes, tfms=get_transforms(), size=150).normalize(imagenet_stats)
    learn = create_cnn(single_img_data, models.resnet34)
    learn.load('stage-1_sz-150')
    _, _, losses = learn.predict(open_image(imagePath))
    predictions = sorted(zip(classes, map(float, losses)), key=lambda p: p[1], reverse=True)
    # record the prediction in the JSON "database" file
    with open(dBFilePath) as f:
        data = json.load(f)
    newObj = {'imgsrc': 'assets/' + os.path.basename(imagePath), 'category': predictions[0][0]}
    data.append(newObj)
    with open(dBFilePath, 'w') as f:
        json.dump(data, f)

@app.route('/retailGyan/api/v1.0/predict', methods=['POST'])
def predict_task():
    if not request.json or 'imgPath' not in request.json:
        abort(400)
    imgPath = request.json['imgPath']
    predict(imgPath)
    return jsonify("OK"), 201

if __name__ == '__main__':
    app.run(debug=True)
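# Illustrative client call (assumption: the server above is running locally on
# Flask's default port):
#
#     import requests
#     requests.post('http://127.0.0.1:5000/retailGyan/api/v1.0/predict',
#                   json={'imgPath': '/path/to/image.jpg'})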
| null |
ClothingClassPrediction.py
|
ClothingClassPrediction.py
|
py
| 1,696 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 36,
"usage_type": "call"
}
] |
611877133
|
import os
import re
import logging
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import image
from bootstrapvz.common.tools import log_check_call

class FixVHD(Task):
    description = 'Preparing VHD for Azure'
    phase = phases.image_registration
    predecessors = [image.MoveImage]

    @classmethod
    def run(cls, info):
        log = logging.getLogger(__name__)
        # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/create-upload-generic
        log.info('Converting to RAW format for alignment')
        image_name = info.manifest.name.format(**info.manifest_vars)
        destination_dir = info.manifest.bootstrapper['workspace']
        destination = os.path.join(destination_dir, image_name + '.raw')
        src = os.path.join(destination_dir, image_name + '.' + info.volume.extension)
        # 1. Convert to RAW
        log_check_call(['qemu-img', 'convert', '-f', 'vpc',
                        '-O', 'raw', src,
                        destination
                        ])
        # 2. Extract the image size in bytes and calculate the next MiB boundary
        log.info('Extracting image size')
        img_info = log_check_call(['qemu-img', 'info', '-f', 'raw', destination])
        regexp = re.compile('virtual size:.*')
        size_bytes = 0
        for line in img_info:
            match = regexp.match(line)
            if match is not None:
                size_bytes = int(line.split(' ')[4][1:])
        if size_bytes == 0:
            raise Exception('Could not determine image size')
        mb = 1024 * 1024
        sizemb = (size_bytes // mb + 1) * mb  # integer division keeps the result MiB-aligned
        # 3. Resize the RAW image. Additional options are added for qemu 2.6+
        log.info('Resizing RAW format image for alignment (' + str(sizemb) + ' bytes)')
        log_check_call(['qemu-img', 'resize', '-f', 'raw', destination, str(sizemb)])
        format_opts = 'subformat=fixed'
        regexp = re.compile('.* (0\\.|1\\.|2\\.0|2\\.1|2\\.2|2\\.3|2\\.4|2\\.5).*')
        if not regexp.match(log_check_call(['qemu-img', '--version'])[0]):
            log.info('Using qemu-img 2.6+, adding force_size option')
            format_opts = 'subformat=fixed,force_size'
        # 4. Convert the RAW back to VHD
        log.info('Converting RAW format image back to VHD')
        log_check_call(['qemu-img', 'convert', '-f', 'raw', '-o', format_opts, '-O', 'vpc', destination, src])
        # 5. Clean up
        log.info('The volume image has been prepared for boot on Azure, removing RAW image.')
        os.remove(destination)
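# Worked example of the alignment arithmetic above (illustrative): a raw image
# of 1,073,741,000 bytes is not 1 MiB-aligned, so it is grown to the next
# boundary, which Azure requires for fixed-format VHDs.
mb = 1024 * 1024
raw_size = 1073741000
aligned = (raw_size // mb + 1) * mb  # -> 1073741824 bytes == exactly 1024 MiB
assert aligned % mb == 0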
| null |
bootstrapvz/providers/azure/tasks/image.py
|
image.py
|
py
| 2,587 |
python
|
en
|
code
| null |
code-starcoder2
|
83
|
[
{
"api_name": "bootstrapvz.base.Task",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "bootstrapvz.common.phases.image_registration",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "bootstrapvz.common.phases",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "bootstrapvz.common.tasks.image.MoveImage",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "bootstrapvz.common.tasks.image",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "bootstrapvz.common.tools.log_check_call",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "bootstrapvz.common.tools.log_check_call",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "bootstrapvz.common.tools.log_check_call",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "bootstrapvz.common.tools.log_check_call",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "bootstrapvz.common.tools.log_check_call",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 64,
"usage_type": "call"
}
] |