id (string, 3-8 chars) | content (string, 100-981k chars)
---|---|
33785
|
from django.conf.urls import url
from django.conf.urls import patterns
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = patterns('',
url(r'^nl/foo/', view, name='not-translated'),
)
|
33786
|
from rest_framework import serializers
from .models import Subscriber
class SubscriberSerializer(serializers.ModelSerializer):
class Meta:
model = Subscriber
fields = (
'email',
)
|
33788
|
import collections
try:
stringtype = basestring # python 2
except NameError:
stringtype = str # python 3
def coerce_to_list(x):
if isinstance(x, stringtype):
return x.replace(',', ' ').split()
return x or []
def namedtuple(name, args=None, optional=None):
args = coerce_to_list(args)
optional = coerce_to_list(optional)
x = collections.namedtuple(name, args + optional)
if hasattr(x.__new__, 'func_defaults'): # python 2
x.__new__.func_defaults = tuple([None] * len(optional))
elif hasattr(x.__new__, '__defaults__'): # python 3
x.__new__.__defaults__ = tuple([None] * len(optional))
else:
        raise Exception('Unable to set namedtuple defaults for this Python implementation')
return x
def optional(fn):
def opt(x):
if x is not None:
return fn(x)
return opt
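# Illustrative usage sketch (an addition, not part of the original snippet):
# `namedtuple` above builds a namedtuple whose `optional` fields default to None,
# and `optional` wraps a converter so it passes None through untouched.
# Point = namedtuple('Point', 'x y', optional='z')
# Point(1, 2)                # -> Point(x=1, y=2, z=None)
# to_int = optional(int)
# to_int('3'), to_int(None)  # -> (3, None)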
|
33793
|
from twitchchatbot.lib.commands.parsing import commands
import json
def addcom(user, args):
    # Register a new chat command: args[0] is the command name, the remaining args are joined into the response text.
queueEvent = {}
if len(args) < 2:
queueEvent['msg'] = "Proper usage: !addcom <cmd> <Text to send>"
else:
commandHead = "!" + args[0]
commands[commandHead] = {
'limit' : 10,
'userbadge' : 'moderator',
'last_used' : 0
}
del args[0]
commands[commandHead]['return'] = " ".join(args)
with open("commands.json", "w") as f:
json.dump(commands, f, indent=1)
queueEvent['msg'] = "%s has added the %s command!" %( \
user, commandHead)
return queueEvent
|
33795
|
import os
import shutil
import argparse
import enum
import torch
from torch import nn
from torchvision.utils import save_image, make_grid
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
import utils.utils as utils
from utils.constants import *
class GenerationMode(enum.Enum):
    SINGLE_IMAGE = 0
    INTERPOLATION = 1
VECTOR_ARITHMETIC = 2
def postprocess_generated_img(generated_img_tensor):
assert isinstance(generated_img_tensor, torch.Tensor), f'Expected PyTorch tensor but got {type(generated_img_tensor)}.'
# Move the tensor from GPU to CPU, convert to numpy array, extract 0th batch, move the image channel
# from 0th to 2nd position (CHW -> HWC)
generated_img = np.moveaxis(generated_img_tensor.to('cpu').numpy()[0], 0, 2)
# If grayscale image repeat 3 times to get RGB image (for generators trained on MNIST)
if generated_img.shape[2] == 1:
generated_img = np.repeat(generated_img, 3, axis=2)
# Imagery is in the range [-1, 1] (generator has tanh as the output activation) move it into [0, 1] range
generated_img -= np.min(generated_img)
generated_img /= np.max(generated_img)
return generated_img
def generate_from_random_latent_vector(generator, cgan_digit=None):
with torch.no_grad():
latent_vector = utils.get_gaussian_latent_batch(1, next(generator.parameters()).device)
if cgan_digit is None:
generated_img = postprocess_generated_img(generator(latent_vector))
else: # condition and generate the digit specified by cgan_digit
ref_label = torch.tensor([cgan_digit], dtype=torch.int64)
ref_label_one_hot_encoding = torch.nn.functional.one_hot(ref_label, MNIST_NUM_CLASSES).type(torch.FloatTensor).to(next(generator.parameters()).device)
generated_img = postprocess_generated_img(generator(latent_vector, ref_label_one_hot_encoding))
return generated_img, latent_vector.to('cpu').numpy()[0]
def generate_from_specified_numpy_latent_vector(generator, latent_vector):
assert isinstance(latent_vector, np.ndarray), f'Expected latent vector to be numpy array but got {type(latent_vector)}.'
with torch.no_grad():
latent_vector_tensor = torch.unsqueeze(torch.tensor(latent_vector, device=next(generator.parameters()).device), dim=0)
return postprocess_generated_img(generator(latent_vector_tensor))
def linear_interpolation(t, p0, p1):
return p0 + t * (p1 - p0)
def spherical_interpolation(t, p0, p1):
""" Spherical interpolation (slerp) formula: https://en.wikipedia.org/wiki/Slerp
Found inspiration here: https://github.com/soumith/ganhacks
but I didn't get any improvement using it compared to linear interpolation.
Args:
t (float): has [0, 1] range
p0 (numpy array): First n-dimensional vector
p1 (numpy array): Second n-dimensional vector
Result:
Returns spherically interpolated vector.
"""
if t <= 0:
return p0
elif t >= 1:
return p1
elif np.allclose(p0, p1):
return p0
# Convert p0 and p1 to unit vectors and find the angle between them (omega)
omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
sin_omega = np.sin(omega) # syntactic sugar
return np.sin((1.0 - t) * omega) / sin_omega * p0 + np.sin(t * omega) / sin_omega * p1
def display_vector_arithmetic_results(imgs_to_display):
fig = plt.figure(figsize=(6, 6))
title_fontsize = 'x-small'
num_display_imgs = 7
titles = ['happy women', 'happy woman (avg)', 'neutral women', 'neutral woman (avg)', 'neutral men', 'neutral man (avg)', 'result - happy man']
ax = np.zeros(num_display_imgs, dtype=object)
assert len(imgs_to_display) == num_display_imgs, f'Expected {num_display_imgs} got {len(imgs_to_display)} images.'
gs = fig.add_gridspec(5, 4, left=0.02, right=0.98, wspace=0.05, hspace=0.3)
ax[0] = fig.add_subplot(gs[0, :3])
ax[1] = fig.add_subplot(gs[0, 3])
ax[2] = fig.add_subplot(gs[1, :3])
ax[3] = fig.add_subplot(gs[1, 3])
ax[4] = fig.add_subplot(gs[2, :3])
ax[5] = fig.add_subplot(gs[2, 3])
ax[6] = fig.add_subplot(gs[3:, 1:3])
for i in range(num_display_imgs):
ax[i].imshow(cv.resize(imgs_to_display[i], (0, 0), fx=3, fy=3, interpolation=cv.INTER_NEAREST))
ax[i].set_title(titles[i], fontsize=title_fontsize)
ax[i].tick_params(which='both', bottom=False, left=False, labelleft=False, labelbottom=False)
plt.show()
def generate_new_images(model_name, cgan_digit=None, generation_mode=True, slerp=True, a=None, b=None, should_display=True):
""" Generate imagery using pre-trained generator (using vanilla_generator_000000.pth by default)
Args:
model_name (str): model name you want to use (default lookup location is BINARIES_PATH).
cgan_digit (int): if specified generate that exact digit.
generation_mode (enum): generate a single image from a random vector, interpolate between the 2 chosen latent
vectors, or perform arithmetic over latent vectors (note: not every mode is supported for every model type)
slerp (bool): if True use spherical interpolation otherwise use linear interpolation.
a, b (numpy arrays): latent vectors, if set to None you'll be prompted to choose images you like,
and use corresponding latent vectors instead.
should_display (bool): Display the generated images before saving them.
"""
model_path = os.path.join(BINARIES_PATH, model_name)
assert os.path.exists(model_path), f'Could not find the model {model_path}. You first need to train your generator.'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Prepare the correct (vanilla, cGAN, DCGAN, ...) model, load the weights and put the model into evaluation mode
model_state = torch.load(model_path)
gan_type = model_state["gan_type"]
print(f'Found {gan_type} GAN!')
_, generator = utils.get_gan(device, gan_type)
generator.load_state_dict(model_state["state_dict"], strict=True)
generator.eval()
# Generate a single image, save it and potentially display it
if generation_mode == GenerationMode.SINGLE_IMAGE:
generated_imgs_path = os.path.join(DATA_DIR_PATH, 'generated_imagery')
os.makedirs(generated_imgs_path, exist_ok=True)
generated_img, _ = generate_from_random_latent_vector(generator, cgan_digit if gan_type == GANType.CGAN.name else None)
utils.save_and_maybe_display_image(generated_imgs_path, generated_img, should_display=should_display)
# Pick 2 images you like between which you'd like to interpolate (by typing 'y' into console)
elif generation_mode == GenerationMode.INTERPOLATION:
        assert gan_type == GANType.VANILLA.name or gan_type == GANType.DCGAN.name, f'Got {gan_type} but only VANILLA/DCGAN are supported for the interpolation mode.'
interpolation_name = "spherical" if slerp else "linear"
interpolation_fn = spherical_interpolation if slerp else linear_interpolation
grid_interpolated_imgs_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery') # combined results dir
decomposed_interpolated_imgs_path = os.path.join(grid_interpolated_imgs_path, f'tmp_{gan_type}_{interpolation_name}_dump') # dump separate results
if os.path.exists(decomposed_interpolated_imgs_path):
shutil.rmtree(decomposed_interpolated_imgs_path)
os.makedirs(grid_interpolated_imgs_path, exist_ok=True)
os.makedirs(decomposed_interpolated_imgs_path, exist_ok=True)
latent_vector_a, latent_vector_b = [None, None]
        # If a and b were not specified, loop until the user picks the 2 images they like.
found_good_vectors_flag = False
if a is None or b is None:
while not found_good_vectors_flag:
generated_img, latent_vector = generate_from_random_latent_vector(generator)
plt.imshow(generated_img); plt.title('Do you like this image?'); plt.show()
user_input = input("Do you like this generated image? [y for yes]:")
if user_input == 'y':
if latent_vector_a is None:
latent_vector_a = latent_vector
print('Saved the first latent vector.')
elif latent_vector_b is None:
latent_vector_b = latent_vector
print('Saved the second latent vector.')
found_good_vectors_flag = True
else:
print('Well lets generate a new one!')
continue
else:
print('Skipping latent vectors selection section and using cached ones.')
latent_vector_a, latent_vector_b = [a, b]
# Cache latent vectors
if a is None or b is None:
np.save(os.path.join(grid_interpolated_imgs_path, 'a.npy'), latent_vector_a)
np.save(os.path.join(grid_interpolated_imgs_path, 'b.npy'), latent_vector_b)
print(f'Lets do some {interpolation_name} interpolation!')
interpolation_resolution = 47 # number of images between the vectors a and b
num_interpolated_imgs = interpolation_resolution + 2 # + 2 so that we include a and b
generated_imgs = []
for i in range(num_interpolated_imgs):
t = i / (num_interpolated_imgs - 1) # goes from 0. to 1.
current_latent_vector = interpolation_fn(t, latent_vector_a, latent_vector_b)
generated_img = generate_from_specified_numpy_latent_vector(generator, current_latent_vector)
print(f'Generated image [{i+1}/{num_interpolated_imgs}].')
utils.save_and_maybe_display_image(decomposed_interpolated_imgs_path, generated_img, should_display=should_display)
            # Move from channel last to channel first (HWC -> CHW); PyTorch's save_image function expects BCHW format
generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0)))
interpolated_block_img = torch.stack(generated_imgs)
interpolated_block_img = nn.Upsample(scale_factor=2.5, mode='nearest')(interpolated_block_img)
save_image(interpolated_block_img, os.path.join(grid_interpolated_imgs_path, utils.get_available_file_name(grid_interpolated_imgs_path)), nrow=int(np.sqrt(num_interpolated_imgs)))
elif generation_mode == GenerationMode.VECTOR_ARITHMETIC:
assert gan_type == GANType.DCGAN.name, f'Got {gan_type} but only DCGAN is supported for arithmetic mode.'
# Generate num_options face images and create a grid image from them
num_options = 100
generated_imgs = []
latent_vectors = []
padding = 2
for i in range(num_options):
generated_img, latent_vector = generate_from_random_latent_vector(generator)
generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0))) # make_grid expects CHW format
latent_vectors.append(latent_vector)
stacked_tensor_imgs = torch.stack(generated_imgs)
final_tensor_img = make_grid(stacked_tensor_imgs, nrow=int(np.sqrt(num_options)), padding=padding)
display_img = np.moveaxis(final_tensor_img.numpy(), 0, 2)
# For storing latent vectors
num_of_vectors_per_category = 3
happy_woman_latent_vectors = []
neutral_woman_latent_vectors = []
neutral_man_latent_vectors = []
# Make it easy - by clicking on the plot you pick the image.
def onclick(event):
if event.dblclick:
pass
else: # single click
if event.button == 1: # left click
x_coord = event.xdata
y_coord = event.ydata
column = int(x_coord / (64 + padding))
row = int(y_coord / (64 + padding))
# Store latent vector corresponding to the image that the user clicked on.
if len(happy_woman_latent_vectors) < num_of_vectors_per_category:
happy_woman_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(happy_woman_latent_vectors)}. happy woman.')
elif len(neutral_woman_latent_vectors) < num_of_vectors_per_category:
neutral_woman_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(neutral_woman_latent_vectors)}. neutral woman.')
elif len(neutral_man_latent_vectors) < num_of_vectors_per_category:
neutral_man_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(neutral_man_latent_vectors)}. neutral man.')
else:
plt.close()
plt.figure(figsize=(10, 10))
plt.imshow(display_img)
# This is just an example you could also pick 3 neutral woman images with sunglasses, etc.
plt.title('Click on 3 happy women, 3 neutral women and \n 3 neutral men images (order matters!)')
cid = plt.gcf().canvas.mpl_connect('button_press_event', onclick)
plt.show()
plt.gcf().canvas.mpl_disconnect(cid)
print('Done choosing images.')
# Calculate the average latent vector for every category (happy woman, neutral woman, neutral man)
happy_woman_avg_latent_vector = np.mean(np.array(happy_woman_latent_vectors), axis=0)
neutral_woman_avg_latent_vector = np.mean(np.array(neutral_woman_latent_vectors), axis=0)
neutral_man_avg_latent_vector = np.mean(np.array(neutral_man_latent_vectors), axis=0)
# By subtracting neutral woman from the happy woman we capture the "vector of smiling". Adding that vector
# to a neutral man we get a happy man's latent vector! Our latent space has amazingly beautiful structure!
happy_man_latent_vector = neutral_man_avg_latent_vector + (happy_woman_avg_latent_vector - neutral_woman_avg_latent_vector)
# Generate images from these latent vectors
happy_women_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in happy_woman_latent_vectors])
neutral_women_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in neutral_woman_latent_vectors])
neutral_men_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in neutral_man_latent_vectors])
happy_woman_avg_img = generate_from_specified_numpy_latent_vector(generator, happy_woman_avg_latent_vector)
neutral_woman_avg_img = generate_from_specified_numpy_latent_vector(generator, neutral_woman_avg_latent_vector)
neutral_man_avg_img = generate_from_specified_numpy_latent_vector(generator, neutral_man_avg_latent_vector)
happy_man_img = generate_from_specified_numpy_latent_vector(generator, happy_man_latent_vector)
display_vector_arithmetic_results([happy_women_imgs, happy_woman_avg_img, neutral_women_imgs, neutral_woman_avg_img, neutral_men_imgs, neutral_man_avg_img, happy_man_img])
else:
raise Exception(f'Generation mode not yet supported.')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, help="Pre-trained generator model name", default=r'VANILLA_000000.pth')
parser.add_argument("--cgan_digit", type=int, help="Used only for cGAN - generate specified digit", default=3)
parser.add_argument("--generation_mode", type=bool, help="Pick between 3 generation modes", default=GenerationMode.SINGLE_IMAGE)
parser.add_argument("--slerp", type=bool, help="Should use spherical interpolation (default No)", default=False)
parser.add_argument("--should_display", type=bool, help="Display intermediate results", default=True)
args = parser.parse_args()
    # The first time you run generation in the interpolation mode it will cache the a and b
    # latent vectors you choose, and reuse them on subsequent runs.
a_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery', 'a.npy')
b_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery', 'b.npy')
latent_vector_a = np.load(a_path) if os.path.exists(a_path) else None
latent_vector_b = np.load(b_path) if os.path.exists(b_path) else None
generate_new_images(
args.model_name,
args.cgan_digit,
generation_mode=args.generation_mode,
slerp=args.slerp,
a=latent_vector_a,
b=latent_vector_b,
should_display=args.should_display)
|
33810
|
from .._common import *
from yo_fluq import *
Queryable = lambda *args, **kwargs: FlupFactory.QueryableFactory(*args, **kwargs)
T = TypeVar('T')
TOut = TypeVar('TOut')
TKey = TypeVar('TKey')
TValue = TypeVar('TValue')
TFactory = TypeVar('TFactory')
|
33814
|
import numpy as np
def CalcEnergy(m_amu,Px_au,Py_au,Pz_au):
amu2au = 1836.15
return 27.2*(Px_au**2 + Py_au**2 + Pz_au**2)/(2*amu2au*m_amu)
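# Usage sketch (an addition, not in the original): assuming the momenta are given
# in atomic units and the mass in amu, CalcEnergy appears to return the kinetic
# energy in eV (27.2 eV per Hartree, 1836.15 electron masses per amu here).
# CalcEnergy(1.0, 1.0, 0.0, 0.0)   # ~0.0074 eV for a proton-mass particle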
|
33877
|
import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class MetamodelRegression(PostHocUQ):
""" Extracts confidence scores from black-box regression models using a meta-model [2]_ .
References:
.. [2] Chen, Tongfei, et al. Confidence scoring using whitebox meta-models with linear classifier probes.
The 22nd International Conference on Artificial Intelligence and Statistics. PMLR, 2019.
"""
def _create_named_model(self, mdltype, config):
"""
Instantiates a model by name passed in 'mdltype'
        :param mdltype: string with name (must be supported)
:param config: dict with args passed in the instantiation call
:return: mdl instance
"""
assert (isinstance(mdltype, str))
if mdltype == 'gbr':
mdl = GradientBoostingRegressor(**config)
else:
raise NotImplementedError("ERROR: Requested model type unknown: \"%s\"" % mdltype)
return mdl
def _get_model_instance(self, model, config):
"""
Returns an instance of a model based on (a) a desired name or (b) passed in class, or
(c) passed in instance
:param model: string, class, or instance. Class and instance must have certain methods callable.
:param config: dict with args passed in during the instantiation
:return: model instance
"""
assert (model is not None and config is not None)
if isinstance(model, str): # 'model' is a name, create it
mdl = self._create_named_model(model, config)
elif inspect.isclass(model): # 'model' is a class, instantiate it
mdl = model(**config)
else: # 'model' is an instance, register it
mdl = model
if not all([hasattr(mdl, key) and callable(getattr(mdl, key)) for key in self.callable_keys]):
raise ValueError("ERROR: Passed model/method failed the interface test. Methods required: %s" %
','.join(self.callable_keys))
return mdl
def __init__(self, base_model=None, meta_model=None, base_config=None, meta_config=None, random_seed=42):
"""
:param base_model: Base model. Can be:
(1) None (default mdl will be set up),
(2) Named model (e.g., 'gbr'),
(3) Base model class declaration (e.g., sklearn.linear_model.LinearRegressor). Will instantiate.
(4) Model instance (instantiated outside). Will be re-used. Must have required callable methods.
Note: user-supplied classes and models must have certain callable methods ('predict', 'fit')
and be capable of raising NotFittedError.
:param meta_model: Meta model. Same values possible as with 'base_model'
:param base_config: None or a params dict to be passed to 'base_model' at instantiation
:param meta_config: None or a params dict to be passed to 'meta_model' at instantiation
:param random_seed: seed used in the various pipeline steps
"""
        super(MetamodelRegression, self).__init__()
self.random_seed = random_seed
self.callable_keys = ['predict', 'fit'] # required methods - must be present in models passed in
self.base_model_default = 'gbr'
self.meta_model_default = 'gbr'
self.base_config_default = {'loss': 'ls', 'n_estimators': 300, 'max_depth': 10, 'learning_rate': 0.001,
'min_samples_leaf': 10, 'min_samples_split': 10, 'random_state': self.random_seed}
self.meta_config_default = {'loss': 'quantile', 'alpha': 0.95, 'n_estimators': 300, 'max_depth': 10,
'learning_rate': 0.001, 'min_samples_leaf': 10, 'min_samples_split': 10,
'random_state': self.random_seed}
self.base_config = base_config if base_config is not None else self.base_config_default
self.meta_config = meta_config if meta_config is not None else self.meta_config_default
self.base_model = None
self.meta_model = None
self.base_model = self._get_model_instance(base_model if base_model is not None else self.base_model_default,
self.base_config)
self.meta_model = self._get_model_instance(meta_model if meta_model is not None else self.meta_model_default,
self.meta_config)
def get_params(self, deep=True):
return {"base_model": self.base_model, "meta_model": self.meta_model, "base_config": self.base_config,
"meta_config": self.meta_config, "random_seed": self.random_seed}
def fit(self, X, y, meta_fraction=0.2, randomize_samples=True, base_is_prefitted=False,
meta_train_data=(None, None)):
"""
Fit base and meta models.
:param X: input to the base model
:param y: ground truth for the base model
:param meta_fraction: float in [0,1] - a fractional size of the partition carved out to train the meta model
(complement will be used to train the base model)
:param randomize_samples: use shuffling when creating partitions
:param base_is_prefitted: Setting True will skip fitting the base model (useful for base models that have been
            instantiated outside/by the user and are already fitted).
:param meta_train_data: User supplied data to train the meta model. Note that this option should only be used
with 'base_is_prefitted'==True. Pass a tuple meta_train_data=(X_meta, y_meta) to activate.
Note that (X,y,meta_fraction, randomize_samples) will be ignored in this mode.
:return: self
"""
X = np.asarray(X)
y = np.asarray(y)
assert(len(meta_train_data)==2)
if meta_train_data[0] is None:
X_base, X_meta, y_base, y_meta = train_test_split(X, y, shuffle=randomize_samples, test_size=meta_fraction,
random_state=self.random_seed)
else:
if not base_is_prefitted:
raise ValueError("ERROR: fit(): base model must be pre-fitted to use the 'meta_train_data' option")
X_base = y_base = None
X_meta = meta_train_data[0]
y_meta = meta_train_data[1]
# fit the base model
if not base_is_prefitted:
self.base_model.fit(X_base, y_base)
# get input for the meta model from the base
try:
y_hat_meta = self.base_model.predict(X_meta)
except NotFittedError as e:
raise RuntimeError("ERROR: fit(): The base model appears not pre-fitted (%s)" % repr(e))
        # use the base input and output as the meta-model input
X_meta_in = self._process_pretrained_model(X_meta, y_hat_meta)
# train meta model to predict abs diff
self.meta_model.fit(X_meta_in, np.abs(y_hat_meta - y_meta))
return self
def _process_pretrained_model(self, X, y_hat):
"""
Given the original input features and the base output probabilities, generate input features
to train a meta model. Current implementation copies all input features and appends.
:param X: numpy [nsamples, dim]
:param y_hat: [nsamples,]
:return: array with new features [nsamples, newdim]
"""
y_hat_meta_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_meta_prime])
return X_meta_in
def predict(self, X):
"""
Generate prediction and uncertainty bounds for data X.
:param X: input features
:return: namedtuple: A namedtuple that holds
y_mean: ndarray of shape (n_samples, [n_output_dims])
Mean of predictive distribution of the test points.
y_lower: ndarray of shape (n_samples, [n_output_dims])
Lower quantile of predictive distribution of the test points.
y_upper: ndarray of shape (n_samples, [n_output_dims])
Upper quantile of predictive distribution of the test points.
"""
y_hat = self.base_model.predict(X)
y_hat_prime = np.expand_dims(y_hat, -1) if len(y_hat.shape) < 2 else y_hat
X_meta_in = np.hstack([X, y_hat_prime])
z_hat = self.meta_model.predict(X_meta_in)
Result = namedtuple('res', ['y_mean', 'y_lower', 'y_upper'])
res = Result(y_hat, y_hat - z_hat, y_hat + z_hat)
return res
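# Hypothetical usage sketch (an addition, not part of the original module),
# assuming numpy arrays X_train, y_train and X_test are already available:
# uq = MetamodelRegression()
# uq.fit(X_train, y_train)
# y_mean, y_lower, y_upper = uq.predict(X_test)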
|
33900
|
from typing import Tuple
class BaseTimer:
"""
    A timer controls the time passed into the render function.
This can be used in creative ways to control the current time
such as basing it on current location in an audio file.
All methods must be implemented.
"""
@property
def is_paused(self) -> bool:
"""bool: The pause state of the timer"""
raise NotImplementedError()
@property
def is_running(self) -> bool:
"""bool: Is the timer currently running?"""
raise NotImplementedError()
@property
def time(self) -> float:
"""Get or set the current time.
This can be used to jump around in the timeline.
Returns:
float: The current time in seconds
"""
raise NotImplementedError()
@time.setter
def time(self, value: float):
raise NotImplementedError()
def next_frame(self) -> Tuple[float, float]:
"""Get timer information for the next frame.
Returns:
Tuple[float, float]: The frametime and current time
"""
raise NotImplementedError()
def start(self):
"""Start the timer initially or resume after pause"""
raise NotImplementedError()
def pause(self):
"""Pause the timer"""
raise NotImplementedError()
def toggle_pause(self):
"""Toggle pause state"""
raise NotImplementedError()
def stop(self) -> Tuple[float, float]:
"""
Stop the timer. Should only be called once when stopping the timer.
Returns:
            Tuple[float, float]: Current position in the timer, actual running duration
"""
raise NotImplementedError()
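# A minimal concrete sketch (an addition, not part of the original module),
# assuming a plain wall-clock timer built on time.perf_counter() is an
# acceptable illustration of the BaseTimer interface above.
import time


class SketchClockTimer(BaseTimer):
    def __init__(self):
        self._start = None       # perf_counter value at the last (re)start
        self._offset = 0.0       # time accumulated before the last (re)start
        self._paused = True
        self._last_frame = 0.0   # timer time at the previous next_frame() call

    @property
    def is_paused(self) -> bool:
        return self._paused

    @property
    def is_running(self) -> bool:
        return not self._paused

    @property
    def time(self) -> float:
        if self._paused:
            return self._offset
        return self._offset + (time.perf_counter() - self._start)

    @time.setter
    def time(self, value: float):
        self._offset = value
        if not self._paused:
            self._start = time.perf_counter()

    def next_frame(self) -> Tuple[float, float]:
        current = self.time
        frametime = current - self._last_frame
        self._last_frame = current
        return frametime, current

    def start(self):
        if self._paused:
            self._start = time.perf_counter()
            self._paused = False

    def pause(self):
        if not self._paused:
            self._offset = self.time
            self._paused = True

    def toggle_pause(self):
        if self._paused:
            self.start()
        else:
            self.pause()

    def stop(self) -> Tuple[float, float]:
        # In this simple sketch the final position and the running duration
        # coincide, because _offset only ever accumulates running time.
        duration = self.time
        self.pause()
        return duration, duration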
|
33920
|
def coding_problem_41(flights_db, starting_airport):
"""
Given an unordered list of flights taken by someone, each represented as (origin, destination) pairs, and a
starting airport, compute the person's itinerary. If no such itinerary exists, return null. If there are multiple
possible itineraries, return the lexicographically smallest one. All flights must be used in the itinerary.
Examples:
>>> coding_problem_41([('SFO', 'HKO'), ('YYZ', 'SFO'), ('YUL', 'YYZ'), ('HKO', 'ORD')], 'YUL')
['YUL', 'YYZ', 'SFO', 'HKO', 'ORD']
>>> coding_problem_41([('SFO', 'COM'), ('COM', 'YYZ')], 'COM') # returns None
>>> coding_problem_41([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'A')], 'A')
['A', 'B', 'C', 'A', 'C']
    The itinerary ['A', 'C', 'A', 'B', 'C'] is also valid; however, the first one is lexicographically smaller.
"""
pass
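# A possible solution sketch (an addition, not part of the original stub):
# depth-first backtracking that tries the remaining flights in sorted order,
# so the first complete itinerary found is the lexicographically smallest one.
def coding_problem_41_sketch(flights_db, starting_airport):
    def backtrack(current, remaining, route):
        if not remaining:
            return route
        for index, (origin, destination) in enumerate(remaining):
            if origin == current:
                found = backtrack(destination,
                                  remaining[:index] + remaining[index + 1:],
                                  route + [destination])
                if found:
                    return found
        return None
    return backtrack(starting_airport, sorted(flights_db), [starting_airport])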
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
|
33931
|
import numpy as np
class NumpyDynamic:
def __init__(self, dtype, array_size=(100,)):
self.data = np.zeros(array_size, dtype)
self.array_size = list(array_size)
self.size = 0
def add(self, x):
if self.size == self.array_size[0]:
self.array_size[0] *= 2
newdata = np.zeros(self.array_size, self.data.dtype)
newdata[:self.size] = self.data
self.data = newdata
self.data[self.size] = x
self.size += 1
def finalize(self):
return self.data[:self.size]
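# Usage sketch (an addition, not in the original): append values one at a time
# and let the buffer double its first dimension whenever it fills up.
# arr = NumpyDynamic(np.float64)
# for value in range(250):
#     arr.add(value)
# data = arr.finalize()   # shape (250,)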
|
33937
|
import os
import random
from sklearn.metrics import mean_squared_error as mse
from core.composer.chain import Chain
from core.composer.composer import ComposerRequirements, DummyChainTypeEnum, DummyComposer
from core.models.data import OutputData
from core.models.model import *
from core.repository.dataset_types import NumericalDataTypesEnum, CategoricalDataTypesEnum
from core.repository.model_types_repository import (
ModelMetaInfoTemplate,
ModelTypesRepository
)
from core.repository.quality_metrics_repository import MetricsRepository, RegressionMetricsEnum
from core.repository.task_types import MachineLearningTasksEnum
from core.utils import project_root
random.seed(1)
np.random.seed(1)
import matplotlib.pyplot as plt
def compare_plot(predicted: OutputData, dataset_to_validate: InputData):
fig, ax = plt.subplots()
plt.plot(dataset_to_validate.target, linewidth=1, label="Observed")
plt.plot(predicted.predict, linewidth=1, label="Predicted")
ax.legend()
plt.show()
def calculate_validation_metric(chain: Chain, dataset_to_validate: InputData) -> float:
# the execution of the obtained composite models
predicted = chain.predict(dataset_to_validate)
# plot results
compare_plot(predicted, dataset_to_validate)
# the quality assessment for the simulation results
    rmse_value = mse(y_true=dataset_to_validate.target,
                     y_pred=predicted.predict,
                     squared=False)
    return rmse_value
# the dataset was obtained from NEMO model simulation
# specify problem type
problem_class = MachineLearningTasksEnum.auto_regression
# a dataset that will be used as a train and test set during composition
file_path_train = 'cases/data/ts/metocean_data_train.csv'
full_path_train = os.path.join(str(project_root()), file_path_train)
dataset_to_compose = InputData.from_csv(full_path_train, task_type=problem_class)
# a dataset for a final validation of the composed model
file_path_test = 'cases/data/ts/metocean_data_test.csv'
full_path_test = os.path.join(str(project_root()), file_path_test)
dataset_to_validate = InputData.from_csv(full_path_test, task_type=problem_class)
# the search of the models provided by the framework that can be used as nodes in a chain for the selected task
models_repo = ModelTypesRepository()
available_model_types, _ = models_repo.search_models(
desired_metainfo=ModelMetaInfoTemplate(input_type=NumericalDataTypesEnum.table,
output_type=CategoricalDataTypesEnum.vector,
task_type=problem_class,
can_be_initial=True,
can_be_secondary=True))
# the choice of the metric for the chain quality assessment during composition
metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)
# the choice and initialisation
single_composer_requirements = ComposerRequirements(primary=[ModelTypesIdsEnum.ar],
secondary=[])
chain_single = DummyComposer(
DummyChainTypeEnum.flat).compose_chain(data=dataset_to_compose,
initial_chain=None,
composer_requirements=single_composer_requirements,
metrics=metric_function)
train_prediction = chain_single.fit(input_data=dataset_to_compose, verbose=True)
print("Composition finished")
compare_plot(train_prediction, dataset_to_compose)
# the quality assessment for the obtained composite models
rmse_on_valid_single = calculate_validation_metric(chain_single, dataset_to_validate)
print(f'Static RMSE is {round(rmse_on_valid_single, 3)}')
|
33953
|
import socket
import struct
import json
import time
import os
import platform
from optparse import OptionParser
import sys
import xml.etree.ElementTree as ET
import config
from device_config import BASE_CONST
MCAST_GRP = '192.168.3.11'
MCAST_PORT = 8427
DEFAULT_DCID_XML = '/Applications/Shure Update Utility.app/Contents/Resources/DCIDMap.xml'
deviceList = {}
discovered = []
# https://stackoverflow.com/questions/603852/multicast-in-python
def discover():
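    # Join the discovery multicast group and process receiver announcement
    # packets as they arrive (runs until interrupted).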
dcid_restore_from_file(config.app_dir('dcid.json'))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) #mac fix
sock.bind((MCAST_GRP, MCAST_PORT)) # use MCAST_GRP instead of '' to listen only
# to MCAST_GRP, not all groups on MCAST_PORT
mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
while True:
data, (ip, _) = sock.recvfrom(1024)
data = data.decode('UTF-8', errors="ignore")
try:
process_discovery_packet(ip, data)
except:
pass
def process_discovery_packet(ip, data):
dcid = dcid_find(data)
device = dcid_get(dcid)
rx_type, channels = dcid_model_lookup(device['model'])
if __name__ == '__main__':
print('RX: {} at: {} DCID: {} BAND: {} CHANNELS: {}'.format(rx_type, ip, dcid, device['band'], channels))
add_rx_to_dlist(ip, rx_type, channels)
def dcid_find(data):
dcid = ''
data = data.split(',')
for i in data:
i = i.strip('()')
if 'cd:' in i:
i = i.split('cd:')[-1]
dcid = i
return dcid
def dcid_get(dcid):
return deviceList[dcid]
def dcid_model_lookup(name):
for (type_k, type_v) in BASE_CONST.items():
for (model_k, model_v) in type_v['DCID_MODEL'].items():
if name == model_k:
# print('Type: {} DCID_MODEL: {} Channels: {}'.format(type_k, model_k, model_v))
return (type_k, model_v)
return None
def add_rx_to_dlist(ip, rx_type, channels):
rx = next((x for x in discovered if x['ip'] == ip), None)
if rx:
rx['timestamp'] = time.time()
else:
discovered.append({
'ip' : ip,
'type': rx_type,
'channels': channels,
'timestamp': time.time()
})
discovered.sort(key=lambda x: x['ip'])
def time_filterd_discovered_list():
out = []
for i in discovered:
if (time.time() - i['timestamp']) < 30:
out.append(i)
return out
def DCID_Parse(file):
tree = ET.parse(file)
root = tree.getroot()
devices = root.findall('./MapEntry')
for device in devices:
model = device.find('Key').text
model_name = device.find('ModelName').text
dcid = []
for dccid in device.find('DCIDList').iter('DCID'):
try:
band = dccid.attrib['band']
except:
band = ''
dev = {'model': model,'model_name':model_name, 'band':band }
deviceList[dccid.text] = dev
def dcid_save_to_file(file):
with open(file, 'w') as f:
json.dump(deviceList, f, indent=2, separators=(',', ': '), sort_keys=True)
f.write('\n')
def dcid_restore_from_file(file):
global deviceList
with open(file,'r') as f:
deviceList = json.load(f)
def updateDCIDmap(inputFile, outputFile):
DCID_Parse(inputFile)
dcid_save_to_file(outputFile)
def DCIDMapCheck():
if platform.system() == 'Darwin' and os.path.isfile(DEFAULT_DCID_XML):
return DEFAULT_DCID_XML
return None
def main():
usage = "usage: %prog [options] arg"
parser = OptionParser(usage)
parser.add_option("-i", "--input", dest="input_file",
help="DCID input file")
parser.add_option("-o", "--output", dest="output_file",
help="output file")
parser.add_option("-c", "--convert", default=False,
action="store_true", dest="convert",
help="Generate dcid.json from input DCIDMap.xml file")
parser.add_option("-d", "--discover", default=True,
action="store_true", dest="discover",
help="Discover Shure devices on the network")
(options, args) = parser.parse_args()
if options.convert:
if not options.output_file:
print("use -o to specify a DCID output file destination")
sys.exit()
if options.input_file:
p = options.input_file
elif DCIDMapCheck():
p = DCIDMapCheck()
else:
print("Specify an input DCIDMap.xml file with -i or install Wireless Workbench")
sys.exit()
if p:
updateDCIDmap(p, options.output_file)
print("Converting {} to {}".format(p, options.output_file))
sys.exit()
if options.discover:
print("lets discover some stuff")
discover()
if __name__ == '__main__':
main()
|
33954
|
import fastai
from neptune.new.integrations.fastai import NeptuneCallback
from fastai.vision.all import *
import neptune.new as neptune
run = neptune.init(
project="common/fastai-integration", api_token="<PASSWORD>", tags="basic"
)
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_csv(path)
# Log all training phases of the learner
learn = cnn_learner(dls, resnet18, cbs=[NeptuneCallback(run=run, base_namespace="experiment")])
learn.fit_one_cycle(2)
learn.fit_one_cycle(1)
run.stop()
|
33979
|
import time
def f():
[
# Must be split over multiple lines to see the error.
# https://github.com/benfred/py-spy/pull/208
time.sleep(1)
for _ in range(1000)
]
f()
|
34007
|
import asyncio
from aiohttp import web
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Queue, Process
import os
from time import sleep
async def handle(request):
index = open("index.html", 'rb')
content = index.read()
return web.Response(body=content, content_type='text/html')
tick = asyncio.Condition()
async def wshandler(request):
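    # Each client connection concurrently waits on (a) its own incoming websocket
    # messages and (b) the shared game-loop tick condition, and handles whichever
    # completes first.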
ws = web.WebSocketResponse()
await ws.prepare(request)
recv_task = None
tick_task = None
while 1:
if not recv_task:
recv_task = asyncio.ensure_future(ws.receive())
if not tick_task:
await tick.acquire()
tick_task = asyncio.ensure_future(tick.wait())
done, pending = await asyncio.wait(
[recv_task,
tick_task],
return_when=asyncio.FIRST_COMPLETED)
if recv_task in done:
msg = recv_task.result()
if msg.tp == web.MsgType.text:
print("Got message %s" % msg.data)
ws.send_str("Pressed key code: {}".format(msg.data))
elif msg.tp == web.MsgType.close or\
msg.tp == web.MsgType.error:
break
recv_task = None
if tick_task in done:
ws.send_str("game loop ticks")
tick.release()
tick_task = None
return ws
def game_loop(asyncio_loop):
# coroutine to run in main thread
async def notify():
await tick.acquire()
tick.notify_all()
tick.release()
queue = Queue()
# function to run in a different process
def worker():
while 1:
print("doing heavy calculation in process {}".format(os.getpid()))
sleep(1)
queue.put("calculation result")
Process(target=worker).start()
while 1:
# blocks this thread but not main thread with event loop
result = queue.get()
print("getting {} in process {}".format(result, os.getpid()))
task = asyncio.run_coroutine_threadsafe(notify(), asyncio_loop)
task.result()
asyncio_loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=1)
asyncio_loop.run_in_executor(executor, game_loop, asyncio_loop)
app = web.Application()
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
|
34012
|
api_output_for_empty_months = """"Usage Data Extract",
"",
"AccountOwnerId","Account Name","ServiceAdministratorId","SubscriptionId","SubscriptionGuid","Subscription Name","Date","Month","Day","Year","Product","Meter ID","Meter Category","Meter Sub-Category","Meter Region","Meter Name","Consumed Quantity","ResourceRate","ExtendedCost","Resource Location","Consumed Service","Instance ID","ServiceInfo1","ServiceInfo2","AdditionalInfo","Tags","Store Service Identifier","Department Name","Cost Center","Unit Of Measure","Resource Group",'
"""
sample_data = [{u'AccountName': u'Platform',
u'AccountOwnerId': u'donald.duck',
u'AdditionalInfo': u'',
u'ConsumedQuantity': 23.0,
u'ConsumedService': u'Virtual Network',
u'CostCenter': u'1234',
u'Date': u'03/01/2017',
u'Day': 1,
u'DepartmentName': u'Engineering',
u'ExtendedCost': 0.499222332425423563466,
u'InstanceId': u'platform-vnet',
u'MeterCategory': u'Virtual Network',
u'MeterId': u'c90286c8-adf0-438e-a257-4468387df385',
u'MeterName': u'Hours',
u'MeterRegion': u'All',
u'MeterSubCategory': u'Gateway Hour',
u'Month': 3,
u'Product': u'Windows Azure Compute 100 Hrs Virtual Network',
u'ResourceGroup': u'',
u'ResourceLocation': u'All',
u'ResourceRate': 0.0304347826086957,
u'ServiceAdministratorId': u'',
u'ServiceInfo1': u'',
u'ServiceInfo2': u'',
u'StoreServiceIdentifier': u'',
u'SubscriptionGuid': u'abc3455ac-3feg-2b3c5-abe4-ec1111111e6',
u'SubscriptionId': 23467313421,
u'SubscriptionName': u'Production',
u'Tags': u'',
u'UnitOfMeasure': u'Hours',
u'Year': 2017},
{u'AccountName': u'Platform',
u'AccountOwnerId': u'donald.duck',
u'AdditionalInfo': u'',
u'ConsumedQuantity': 0.064076,
u'ConsumedService': u'Microsoft.Storage',
u'CostCenter': u'1234',
u'Date': u'03/01/2017',
u'Day': 1,
u'DepartmentName': u'Engineering',
u'ExtendedCost': 0.50000011123124314235234522345,
u'InstanceId': u'/subscriptions/abc3455ac-3feg-2b3c5-abe4-ec1111111e6/resourceGroups/my-group/providers/Microsoft.Storage/storageAccounts/ss7q3264domxo',
u'MeterCategory': u'Windows Azure Storage',
u'MeterId': u'd23a5753-ff85-4ddf-af28-8cc5cf2d3882',
u'MeterName': u'Standard IO - Page Blob/Disk (GB)',
u'MeterRegion': u'All Regions',
u'MeterSubCategory': u'Locally Redundant',
u'Month': 3,
u'Product': u'Locally Redundant Storage Standard IO - Page Blob/Disk',
u'ResourceGroup': u'my-group',
u'ResourceLocation': u'euwest',
u'ResourceRate': 0.0377320156152495,
u'ServiceAdministratorId': u'',
u'ServiceInfo1': u'',
u'ServiceInfo2': u'',
u'StoreServiceIdentifier': u'',
u'SubscriptionGuid': u'abc3455ac-3feg-2b3c5-abe4-ec1111111e6',
u'SubscriptionId': 23467313421,
u'SubscriptionName': u'Production',
u'Tags': None,
u'UnitOfMeasure': u'GB',
u'Year': 2017}]
|
34015
|
import numpy as np
import numpy.linalg as LA
from .solve_R1 import problem_R1, Classo_R1, pathlasso_R1
from .solve_R2 import problem_R2, Classo_R2, pathlasso_R2
from .solve_R3 import problem_R3, Classo_R3, pathlasso_R3
from .solve_R4 import problem_R4, Classo_R4, pathlasso_R4
from .path_alg import solve_path, pathalgo_general, h_lambdamax
"""
Classo and pathlasso are the main functions;
they can call every algorithm according
to the method and formulation required.
"""
# can be 'Path-Alg', 'P-PDS' , 'PF-PDS' or 'DR'
def Classo(
matrix,
lam,
typ="R1",
meth="DR",
rho=1.345,
get_lambdamax=False,
true_lam=False,
e=None,
rho_classification=-1.0,
w=None,
intercept=False,
return_sigm=True,
):
if w is not None:
matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
else:
matrices = matrix
X, C, y = matrices
if typ == "R3":
if intercept:
# here we use the fact that for R1 and R3,
            # the intercept is simply beta0 = ybar - Xbar.vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if meth not in ["Path-Alg", "DR"]:
meth = "DR"
if e is None or e == len(matrices[0]) / 2:
r = 1.0
pb = problem_R3(matrices, meth)
e = len(matrices[0]) / 2
else:
r = np.sqrt(2 * e / len(matrices[0]))
pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
lambdamax = pb.lambdamax
if true_lam:
beta, s = Classo_R3(pb, lam / lambdamax)
else:
beta, s = Classo_R3(pb, lam)
if intercept:
betaO = ybar - np.vdot(Xbar, beta)
beta = np.array([betaO] + list(beta))
elif typ == "R4":
if meth not in ["Path-Alg", "DR"]:
meth = "DR"
if e is None or e == len(matrices[0]):
r = 1.0
pb = problem_R4(matrices, meth, rho, intercept=intercept)
e = len(matrices[0])
else:
r = np.sqrt(e / len(matrices[0]))
pb = problem_R4(
(matrices[0] * r, matrices[1], matrices[2] * r),
meth,
rho / r,
intercept=intercept,
)
lambdamax = pb.lambdamax
if true_lam:
beta, s = Classo_R4(pb, lam / lambdamax)
else:
beta, s = Classo_R4(pb, lam)
elif typ == "R2":
if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
meth = "ODE"
pb = problem_R2(matrices, meth, rho, intercept=intercept)
lambdamax = pb.lambdamax
if true_lam:
beta = Classo_R2(pb, lam / lambdamax)
else:
beta = Classo_R2(pb, lam)
elif typ == "C2":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(
matrices, rho_classification, typ="C2", intercept=intercept
)
if true_lam:
out = solve_path(
matrices,
lam / lambdamax,
False,
rho_classification,
"C2",
intercept=intercept,
)
else:
out = solve_path(
matrices, lam, False, rho_classification, "C2", intercept=intercept
)
if intercept:
beta0, beta = out[0][-1], out[1][-1]
beta = np.array([beta0] + list(beta))
else:
beta = out[0][-1]
elif typ == "C1":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
if true_lam:
out = solve_path(
matrices, lam / lambdamax, False, 0, "C1", intercept=intercept
)
else:
out = solve_path(matrices, lam, False, 0, "C1", intercept=intercept)
if intercept:
beta0, beta = out[0][-1], out[1][-1]
beta = np.array([beta0] + list(beta))
else:
beta = out[0][-1]
else: # LS
if intercept:
# here we use the fact that for R1 and R3,
            # the intercept is simply beta0 = ybar - Xbar.vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]:
meth = "DR"
pb = problem_R1(matrices, meth)
lambdamax = pb.lambdamax
if true_lam:
beta = Classo_R1(pb, lam / lambdamax)
else:
beta = Classo_R1(pb, lam)
if intercept:
betaO = ybar - np.vdot(Xbar, beta)
beta = np.array([betaO] + list(beta))
if w is not None:
if intercept:
beta[1:] = beta[1:] / w
else:
beta = beta / w
if typ in ["R3", "R4"] and return_sigm:
if get_lambdamax:
return (lambdamax, beta, s)
else:
return (beta, s)
if get_lambdamax:
return (lambdamax, beta)
else:
return beta
def pathlasso(
matrix,
lambdas=False,
n_active=0,
lamin=1e-2,
typ="R1",
meth="Path-Alg",
rho=1.345,
true_lam=False,
e=None,
return_sigm=False,
rho_classification=-1.0,
w=None,
intercept=False,
):
Nactive = n_active
if Nactive == 0:
Nactive = False
if type(lambdas) is bool:
lambdas = lamin ** (np.linspace(0.0, 1, 100))
if lambdas[0] < lambdas[-1]:
lambdass = [
lambdas[i] for i in range(len(lambdas) - 1, -1, -1)
] # reverse the list if needed
else:
lambdass = [lambdas[i] for i in range(len(lambdas))]
if w is not None:
matrices = (matrix[0] / w, matrix[1] / w, matrix[2])
else:
matrices = matrix
X, C, y = matrices
if typ == "R2":
pb = problem_R2(matrices, meth, rho, intercept=intercept)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathlasso_R2(pb, lambdass, n_active=Nactive)
elif typ == "R3":
if intercept:
            # here we use the fact that for R1 and R3,
            # the intercept is simply beta0 = ybar - Xbar.vdot(beta),
            # so by changing X to X - Xbar and y to y - ybar
            # we can solve the standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
if e is None or e == len(matrices[0]) / 2:
r = 1.0
pb = problem_R3(matrices, meth)
else:
r = np.sqrt(2 * e / len(matrices[0]))
pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA, S = pathlasso_R3(pb, lambdass, n_active=Nactive)
S = np.array(S) / r ** 2
BETA = np.array(BETA)
if intercept:
BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA])
elif typ == "R4":
if e is None or e == len(matrices[0]):
r = 1.0
pb = problem_R4(matrices, meth, rho, intercept=intercept)
else:
r = np.sqrt(e / len(matrices[0]))
pb = problem_R4(
(matrices[0] * r, matrices[1], matrices[2] * r),
meth,
rho / r,
intercept=intercept,
)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA, S = pathlasso_R4(pb, lambdass, n_active=Nactive)
S = np.array(S) / r ** 2
BETA = np.array(BETA)
elif typ == "C2":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(
matrices, rho_classification, typ="C2", intercept=intercept
)
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathalgo_general(
matrices,
lambdass,
"C2",
n_active=Nactive,
rho=rho_classification,
intercept=intercept,
)
elif typ == "C1":
assert set(matrices[2]).issubset({1, -1})
lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept)
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathalgo_general(
matrices, lambdass, "C1", n_active=Nactive, intercept=intercept
)
else: # R1
if intercept:
# here we use the fact that for R1 and R3,
            # the intercept is simply beta0 = ybar - Xbar.vdot(beta)
# so by changing the X to X-Xbar and y to y-ybar
# we can solve standard problem
Xbar, ybar = np.mean(X, axis=0), np.mean(y)
matrices = (X - Xbar, C, y - ybar)
pb = problem_R1(matrices, meth)
lambdamax = pb.lambdamax
if true_lam:
lambdass = [lamb / lambdamax for lamb in lambdass]
BETA = pathlasso_R1(pb, lambdass, n_active=n_active)
if intercept:
BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA])
real_path = [lam * lambdamax for lam in lambdass]
if w is not None:
if intercept:
ww = np.array([1] + list(w))
else:
ww = w
BETA = np.array([beta / ww for beta in BETA])
if typ in ["R3", "R4"] and return_sigm:
return (np.array(BETA), real_path, S)
return (np.array(BETA), real_path)
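# Hypothetical usage sketch (an addition, not in the original module), assuming
# numpy arrays X (n x d), C (k x d) and y (n,) are already in memory:
# beta = Classo((X, C, y), lam=0.05, typ="R1", meth="DR")
# BETAS, lams = pathlasso((X, C, y), lamin=1e-2, typ="R1")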
|
34022
|
from tensorpy import image_base
classifications = image_base.classify_folder_images('./images')
print("*** Displaying Image Classification Results as a list: ***")
for classification in classifications:
print(classification)
|
34023
|
import argparse
import torch
from torch.utils.data import DataLoader
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset
from deep_audio_features.models.cnn import load_cnn
from deep_audio_features.lib.training import test
from deep_audio_features.utils.model_editing import drop_layers
import deep_audio_features.bin.config
import numpy
def test_model(modelpath, ifile, layers_dropped,
test_segmentation=False, verbose=True):
"""Loads a model and predicts each classes probability
Arguments:
modelpath {str} : A path where the model was stored.
ifile {str} : A path of a given wav file,
which will be tested.
test_segmentation {bool}: If True extracts segment level
predictions of a sequence
verbose {bool}: If True prints the predictions
Returns:
y_pred {np.array} : An array with the probability of each class
that the model predicts.
        posteriors {np.array}: An array containing the unnormalized
posteriors of each class.
"""
device = "cuda" if torch.cuda.is_available() else "cpu"
# Restore model
model, hop_length, window_length = load_cnn(modelpath)
model = model.to(device)
class_names = model.classes_mapping
max_seq_length = model.max_sequence_length
zero_pad = model.zero_pad
spec_size = model.spec_size
fuse = model.fuse
# Apply layer drop
model = drop_layers(model, layers_dropped)
model.max_sequence_length = max_seq_length
# print('Model:\n{}'.format(model))
# Move to device
model.to(device)
# Create test set
test_set = FeatureExtractorDataset(X=[ifile],
# Random class -- does not matter at all
y=[0],
fe_method="MEL_SPECTROGRAM",
oversampling=False,
max_sequence_length=max_seq_length,
zero_pad=zero_pad,
forced_size=spec_size,
fuse=fuse, show_hist=False,
test_segmentation=test_segmentation,
hop_length=hop_length, window_length=window_length)
# Create test dataloader
test_loader = DataLoader(dataset=test_set, batch_size=1,
num_workers=4, drop_last=False,
shuffle=False)
# Forward a sample
posteriors, y_pred, _ = test(model=model, dataloader=test_loader,
cnn=True,
classifier=True if layers_dropped == 0
else False)
if verbose:
print("--> Unormalized posteriors:\n {}\n".format(posteriors))
print("--> Predictions:\n {}".format([class_names[yy] for yy in y_pred]))
return y_pred, numpy.array(posteriors)
if __name__ == '__main__':
# Read arguments -- model
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', required=True,
type=str, help='Model')
parser.add_argument('-i', '--input', required=True,
type=str, help='Input file for testing')
parser.add_argument('-s', '--segmentation', required=False,
action='store_true',
help='Return segment predictions')
parser.add_argument('-L', '--layers', required=False, default=0,
help='Number of final layers to cut. Default is 0.')
args = parser.parse_args()
# Get arguments
model = args.model
ifile = args.input
layers_dropped = int(args.layers)
segmentation = args.segmentation
# Test the model
d, p = test_model(modelpath=model, ifile=ifile,
layers_dropped=layers_dropped,
test_segmentation=segmentation)
|
34070
|
from jmap.account.imap.imap_utf7 import imap_utf7_decode, imap_utf7_encode
KNOWN_SPECIALS = set('\\HasChildren \\HasNoChildren \\NoSelect \\NoInferiors \\UnMarked \\Subscribed'.lower().split())
# special use or name magic
ROLE_MAP = {
'inbox': 'inbox',
'drafts': 'drafts',
'draft': 'drafts',
'draft messages': 'drafts',
'bulk': 'junk',
'bulk mail': 'junk',
'junk': 'junk',
'junk mail': 'junk',
'spam mail': 'junk',
'spam messages': 'junk',
'archive': 'archive',
'sent': 'sent',
'sent items': 'sent',
'sent messages': 'sent',
'deleted messages': 'trash',
'trash': 'trash',
'\\inbox': 'inbox',
'\\trash': 'trash',
'\\sent': 'sent',
'\\junk': 'junk',
'\\spam': 'junk',
'\\archive': 'archive',
'\\drafts': 'drafts',
'\\all': 'all',
}
class ImapMailbox(dict):
__slots__ = ('db',)
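    # Missing keys are computed lazily: __missing__ calls the method with the
    # same name, and most of those methods cache their result by assigning it
    # back into the dict before returning.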
def __missing__(self, key):
return getattr(self, key)()
def name(self):
try:
parentname, name = self['imapname'].rsplit(self['sep'], maxsplit=1)
except ValueError:
name = self['imapname']
self['name'] = imap_utf7_decode(name.encode())
return self['name']
def parentId(self):
try:
parentname, name = self['imapname'].rsplit(self['sep'], maxsplit=1)
self['parentId'] = self.db.byimapname[parentname]['id']
except ValueError:
self['parentId'] = None
return self['parentId']
def role(self):
for f in self['flags']:
if f not in KNOWN_SPECIALS:
self['role'] = ROLE_MAP.get(f, None)
break
else:
self['role'] = ROLE_MAP.get(self['imapname'].lower(), None)
return self['role']
def sortOrder(self):
        return 1 if self['role'] == 'inbox' else (2 if self['role'] else 3)
def isSubscribed(self):
return '\\subscribed' in self['flags']
def totalEmails(self):
return 0
def unreadEmails(self):
return 0
def totalThreads(self):
return self['totalEmails']
def unreadThreads(self):
return self['unreadEmails']
def myRights(self):
can_select = '\\noselect' not in self['flags']
self['myRights'] = {
'mayReadItems': can_select,
'mayAddItems': can_select,
'mayRemoveItems': can_select,
'maySetSeen': can_select,
'maySetKeywords': can_select,
'mayCreateChild': True,
'mayRename': False if self['role'] else True,
'mayDelete': False if self['role'] else True,
'maySubmit': can_select,
}
return self['myRights']
def imapname(self):
encname = imap_utf7_encode(self['name']).decode()
if self['parentId']:
parent = self.db.mailboxes[self['parentId']]
self['imapname'] = parent['imapname'] + parent['sep'] + encname
else:
self['imapname'] = encname
return self['imapname']
def created(self):
return self['uidvalidity']
def updated(self):
return self['uidvalidity'] * self['uidnext']
def deleted(self):
return None
|
34091
|
import jittor as jt
from jittor import nn
from jittor import Module
from jittor import init
from jittor.contrib import concat
from model.backbone import resnet50, resnet101
from model.backbone import res2net101
Backbone_List = ['resnet50', 'resnet101', 'res2net101']
class DeepLab(Module):
def __init__(self, output_stride=16, num_classes=2, backbone = 'resnet101'):
super(DeepLab, self).__init__()
if not backbone in Backbone_List:
print('Invalid Backbone! Initialized to resnet101')
backbone = 'resnet101'
if backbone == 'resnet50':
self.backbone = resnet50(output_stride=output_stride)
elif backbone == 'res2net101':
self.backbone = res2net101(output_stride=output_stride)
else:
self.backbone = resnet101(output_stride=output_stride)
self.backbone_name = backbone
self.aspp = ASPP(output_stride)
self.decoder = Decoder(num_classes)
def execute(self, input):
low_level_feat, _, _, x = self.backbone(input)
x = self.aspp(x)
x = self.decoder(x, low_level_feat)
x = nn.resize(x, size=(input.shape[2], input.shape[3]), mode='bilinear')
return x
def get_backbone(self):
return self.backbone
def get_head(self):
return [self.aspp, self.decoder]
def get_loss(self, target, pred, ignore_index=None):
loss_pred = nn.cross_entropy_loss(pred, target, ignore_index=ignore_index)
return loss_pred
def update_params(self, loss, optimizer):
optimizer.zero_grad()
loss.backward()
optimizer.step()
class Decoder(nn.Module):
def __init__(self, num_classes):
super(Decoder, self).__init__()
low_level_inplanes = 256 # mobilenet = 24 resnet / res2net = 256 xception = 128
self.conv1 = nn.Conv(low_level_inplanes, 48, 1, bias=False)
self.bn1 = nn.BatchNorm(48)
self.relu = nn.ReLU()
self.last_conv = nn.Sequential(nn.Conv(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm(256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm(256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv(256, num_classes, kernel_size=1, stride=1))
def execute(self, x, low_level_feat):
low_level_feat = self.conv1(low_level_feat)
low_level_feat = self.bn1(low_level_feat)
low_level_feat = self.relu(low_level_feat)
#print (low_level_feat.shape)
x = nn.resize(x, size=(low_level_feat.shape[2], low_level_feat.shape[3]) , mode='bilinear')
x = concat((x, low_level_feat), dim=1)
x = self.last_conv(x)
return x
class Single_ASPPModule(Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation):
super(Single_ASPPModule, self).__init__()
self.atrous_conv = nn.Conv(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = nn.BatchNorm(planes)
self.relu = nn.ReLU()
def execute(self, x):
x = self.atrous_conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class ASPP(Module):
def __init__(self, output_stride):
super(ASPP, self).__init__()
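        # Atrous Spatial Pyramid Pooling: parallel atrous convolutions at several
        # dilation rates plus image-level (global average) pooling, fused below by a 1x1 conv.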
inplanes = 2048 # mobilnet = 320 resnet = 2048
if output_stride == 16:
dilations = [1, 6, 12, 18]
elif output_stride == 8:
dilations = [1, 12, 24, 36]
else:
raise NotImplementedError
self.aspp1 = Single_ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0])
self.aspp2 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1])
self.aspp3 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2])
self.aspp4 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3])
self.global_avg_pool = nn.Sequential(GlobalPooling(),
nn.Conv(inplanes, 256, 1, stride=1, bias=False),
nn.BatchNorm(256),
nn.ReLU())
self.conv1 = nn.Conv(1280, 256, 1, bias=False)
self.bn1 = nn.BatchNorm(256)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0.5)
def execute(self, x):
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = x5.broadcast((1,1,x4.shape[2],x4.shape[3]))
x = concat((x1, x2, x3, x4, x5), dim=1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.dropout(x)
return x
class GlobalPooling(Module):
    def __init__(self):
        super(GlobalPooling, self).__init__()
    def execute(self, x):
        return jt.mean(x, dims=[2,3], keepdims=1)
def main():
model = DeepLab(backbone = 'resnet101')
x = jt.ones([2, 3, 512, 512])
y = model(x)
print (y.shape)
_ = y.data
# Find total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
'''
DeepLab
59,572,610 total parameters.
59,462,946 training parameters.
'''
if __name__ == '__main__':
main()
|
34159
|
import pytest
from teos.extended_appointment import ExtendedAppointment
@pytest.fixture
def ext_appointment_data(generate_dummy_appointment):
return generate_dummy_appointment().to_dict()
# Parent methods are not tested.
def test_init_ext_appointment(ext_appointment_data):
    # The appointment has no checks whatsoever, since the inspector is the one taking care of that, and the only one
    # creating appointments.
ext_appointment = ExtendedAppointment(
ext_appointment_data["locator"],
ext_appointment_data["encrypted_blob"],
ext_appointment_data["to_self_delay"],
ext_appointment_data["user_id"],
ext_appointment_data["user_signature"],
ext_appointment_data["start_block"],
)
assert (
ext_appointment_data["locator"] == ext_appointment.locator
and ext_appointment_data["to_self_delay"] == ext_appointment.to_self_delay
and ext_appointment_data["encrypted_blob"] == ext_appointment.encrypted_blob
and ext_appointment_data["user_id"] == ext_appointment.user_id
and ext_appointment_data["user_signature"] == ext_appointment.user_signature
and ext_appointment_data["start_block"] == ext_appointment.start_block
)
def test_get_summary(ext_appointment_data):
assert ExtendedAppointment.from_dict(ext_appointment_data).get_summary() == {
"locator": ext_appointment_data["locator"],
"user_id": ext_appointment_data["user_id"],
}
def test_from_dict(ext_appointment_data):
    # The appointment should be built if we don't miss any field
ext_appointment = ExtendedAppointment.from_dict(ext_appointment_data)
assert isinstance(ext_appointment, ExtendedAppointment)
# Otherwise it should fail
for key in ext_appointment_data.keys():
prev_val = ext_appointment_data[key]
ext_appointment_data[key] = None
with pytest.raises(ValueError, match="Wrong appointment data"):
ExtendedAppointment.from_dict(ext_appointment_data)
ext_appointment_data[key] = prev_val
|
34201
|
import copy
import datetime
import os
import random
import traceback
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from inference.inference_utils import get_trange, get_tqdm
def init_random_seed(value=0):
random.seed(value)
np.random.seed(value)
torch.manual_seed(value)
torch.cuda.manual_seed(value)
torch.backends.cudnn.deterministic = True
def copy_data_to_device(data, device):
if torch.is_tensor(data):
return data.to(device)
elif isinstance(data, (list, tuple)):
return [copy_data_to_device(elem, device) for elem in data]
elif isinstance(data, dict):
return {name: copy_data_to_device(value, device) for name, value in data.items()}
raise ValueError('Unexpected data type {}'.format(type(data)))
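# Illustrative only (not in the original module): nested containers of tensors are moved
# to the target device recursively, e.g. a dict holding a list of tensors.
def _example_copy_to_cpu():
    batch = {'images': [torch.zeros(2, 3), torch.ones(2, 3)], 'ids': torch.arange(2)}
    return copy_data_to_device(batch, torch.device('cpu'))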
def sum_dicts(current, new):
if current is None:
return new
result = dict(current)
for name, new_value in new.items():
result[name] = result.get(name, 0) + new_value
return result
def norm_dict(current, n):
if n == 0:
return current
return {name: value / (n + 1e-6) for name, value in current.items()}
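# Hypothetical illustration of the two helpers above: accumulate per-batch metric dicts
# with sum_dicts, then average them with norm_dict (the metric values are made up).
def _example_average_metrics():
    totals = None
    for batch_metrics in ({'l1': 2.0, 'iou': 0.5}, {'l1': 4.0, 'iou': 0.7}):
        totals = sum_dicts(totals, batch_metrics)
    return norm_dict(totals, 2)  # approximately {'l1': 3.0, 'iou': 0.6}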
def train_eval_loop(model, train_dataset, val_dataset, criterion,
lr=1e-4, epoch_n=10, batch_size=32,
device='cuda', early_stopping_patience=10, l2_reg_alpha=0,
max_batches_per_epoch_train=10000,
max_batches_per_epoch_val=1000,
data_loader_ctor=DataLoader,
optimizer_ctor=None,
lr_scheduler_ctor=None,
shuffle_train=True,
dataloader_workers_n=0,
clip_grad=10,
save_vis_images_path=None,
save_vis_images_freq=100,
save_models_path=None,
save_models_freq=10):
device = torch.device(device)
model.to(device)
if optimizer_ctor is None:
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_reg_alpha)
else:
optimizer = optimizer_ctor(model.parameters(), lr=lr)
if lr_scheduler_ctor is not None:
lr_scheduler = lr_scheduler_ctor(optimizer)
else:
lr_scheduler = None
train_dataloader = data_loader_ctor(train_dataset, batch_size=batch_size, shuffle=shuffle_train,
num_workers=dataloader_workers_n)
val_dataloader = data_loader_ctor(val_dataset, batch_size=batch_size, shuffle=False,
num_workers=dataloader_workers_n)
best_val_loss = float('inf')
best_val_metrics = None
best_epoch_i = 0
best_model = copy.deepcopy(model)
for epoch_i in get_trange(epoch_n, desc='Epochs'):
try:
epoch_start = datetime.datetime.now()
print('Epoch {}'.format(epoch_i))
model.train()
mean_train_loss = 0
mean_train_metrics = None
train_batches_n = 0
for batch_i, (batch_x, batch_y) in get_tqdm(enumerate(train_dataloader), desc=f'Epoch {epoch_i}',
total=max_batches_per_epoch_train, leave=True):
if batch_i > max_batches_per_epoch_train:
break
batch_x = copy_data_to_device(batch_x, device)
batch_y = copy_data_to_device(batch_y, device)
pred = model(batch_x)
loss, metrics, vis_img = criterion(pred, batch_y)
model.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
optimizer.step()
mean_train_loss += float(loss)
mean_train_metrics = sum_dicts(mean_train_metrics, metrics)
if vis_img is not None and save_vis_images_path is not None and batch_i % save_vis_images_freq == 0:
save_image(vis_img,
os.path.join(save_vis_images_path,
'epoch{:04d}_iter{:06d}_train.jpg'.format(epoch_i, batch_i)),
nrow=batch_y['images'].shape[0],
normalize=True,
range=(-1, 1))
train_batches_n += 1
mean_train_loss /= train_batches_n
mean_train_metrics = norm_dict(mean_train_metrics, train_batches_n)
print('Epoch: {} iterations, {:0.2f} sec'.format(train_batches_n,
(datetime.datetime.now() - epoch_start).total_seconds()))
print('Mean train loss', mean_train_loss, mean_train_metrics)
if save_models_path is not None and epoch_i % save_models_freq == 0:
torch.save(model, os.path.join(save_models_path, 'model_epoch_{:04d}.pth'.format(epoch_i)))
model.eval()
mean_val_loss = 0
mean_val_metrics = None
val_batches_n = 0
with torch.no_grad():
for batch_i, (batch_x, batch_y) in enumerate(val_dataloader):
if batch_i > max_batches_per_epoch_val:
break
batch_x = copy_data_to_device(batch_x, device)
batch_y = copy_data_to_device(batch_y, device)
pred = model(batch_x)
loss, metrics, vis_img = criterion(pred, batch_y)
mean_val_loss += float(loss)
mean_val_metrics = sum_dicts(mean_val_metrics, metrics)
if vis_img is not None and save_vis_images_path is not None and batch_i % save_vis_images_freq == 0:
save_image(vis_img,
os.path.join(save_vis_images_path,
'epoch{:04d}_iter{:06d}_val.jpg'.format(epoch_i, batch_i)),
nrow=batch_y['images'].shape[0],
normalize=True,
range=(-1, 1))
val_batches_n += 1
mean_val_loss /= val_batches_n + 1e-6
mean_val_metrics = norm_dict(mean_val_metrics, val_batches_n)
print('Mean validation loss', mean_val_loss, mean_val_metrics)
if mean_val_loss < best_val_loss:
best_epoch_i = epoch_i
best_val_loss = mean_val_loss
best_val_metrics = mean_val_metrics
best_model = copy.deepcopy(model)
print('New best model!')
if save_models_path is not None:
torch.save(best_model, os.path.join(save_models_path, 'best_model.pth'))
elif epoch_i - best_epoch_i > early_stopping_patience:
print('Model has not improved during the last {} epochs, stopping training early'.format(
early_stopping_patience))
break
if lr_scheduler is not None:
lr_scheduler.step(mean_val_loss)
print()
except KeyboardInterrupt:
print('Interrupted by user')
break
except Exception as ex:
print('Fatal error during training: {}\n{}'.format(ex, traceback.format_exc()))
break
return best_val_loss, best_val_metrics, best_model
def predict_with_model(model, dataset, device='cuda', batch_size=32, num_workers=0, return_labels=False):
"""
:param model: torch.nn.Module - trained model
:param dataset: torch.utils.data.Dataset - data to apply model
:param device: cuda/cpu
:param batch_size:
:return: numpy.array dimensionality len(dataset) x *
"""
results_by_batch = []
device = torch.device(device)
model.to(device)
model.eval()
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
labels = []
with torch.no_grad():
import tqdm
for batch_x, batch_y in tqdm.tqdm_notebook(dataloader, total=len(dataset)/batch_size):
batch_x = copy_data_to_device(batch_x, device)
if return_labels:
labels.append(batch_y.numpy())
batch_pred = model(batch_x)
results_by_batch.append(batch_pred.detach().cpu().numpy())
if return_labels:
return np.concatenate(results_by_batch, 0), np.concatenate(labels, 0)
else:
return np.concatenate(results_by_batch, 0)
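# Hypothetical usage sketch: `model` and `dataset` are placeholders for a trained network
# and a torch Dataset; predict_with_model gathers predictions (and optionally the labels).
def _example_inference(model, dataset):
    preds, labels = predict_with_model(model, dataset, device='cpu', batch_size=16,
                                       return_labels=True)
    return preds.shape, labels.shape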
|
34221
|
import relstorage.storage
import ZODB.Connection
from ZODB.POSException import ConnectionStateError
# Monkey patches, ook
def _ex_cursor(self, name=None):
if self._stale_error is not None:
raise self._stale_error
with self._lock:
self._before_load()
return self._load_conn.cursor(name)
relstorage.storage.RelStorage.ex_cursor = _ex_cursor
def _ex_connect(self):
return self._adapter.connmanager.open()
relstorage.storage.RelStorage.ex_connect = _ex_connect
def _ex_get(self, oid, ghost_pickle):
"""Return the persistent object with oid 'oid'."""
if self.opened is None:
raise ConnectionStateError("The database connection is closed")
obj = self._cache.get(oid, None)
if obj is not None:
return obj
obj = self._added.get(oid, None)
if obj is not None:
return obj
obj = self._pre_cache.get(oid, None)
if obj is not None:
return obj
obj = self._reader.getGhost(ghost_pickle) # New code
    # Avoid infinite loop if obj tries to load its state before
    # it is added to the cache and its state refers to it.
# (This will typically be the case for non-ghostifyable objects,
# like persistent caches.)
self._pre_cache[oid] = obj
self._cache.new_ghost(oid, obj)
self._pre_cache.pop(oid)
return obj
ZODB.Connection.Connection.ex_get = _ex_get
|
34225
|
import copy
import sys
from . import compat
from .compat import urlencode, parse_qs
class Request(compat.Request):
def __init__(self, url, parameters=None, headers=None):
self.parameters = parameters
if parameters is None:
data = None
else:
if sys.version_info >= (3, 0):
data = urlencode(parameters).encode('utf-8')
else:
byte_parameters = dict(
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in parameters.items())
data = urlencode(byte_parameters)
assert isinstance(data, bytes)
if headers is None:
headers = {}
compat.Request.__init__(self, url, data, headers)
def copy(self):
return copy.copy(self)
@property
def url(self):
return self.get_full_url()
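# Hypothetical usage sketch (not part of the original module): build a request with form
# parameters, clone it, and read the resolved URL.
def _example_request():
    req = Request('https://example.com/api', parameters={'q': 'hello'},
                  headers={'User-Agent': 'demo'})
    clone = req.copy()
    return req.url, clone.parameters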
|
34246
|
from ._unselected import Unselected
from plotly.graph_objs.scattermapbox import unselected
from ._textfont import Textfont
from ._stream import Stream
from ._selected import Selected
from plotly.graph_objs.scattermapbox import selected
from ._marker import Marker
from plotly.graph_objs.scattermapbox import marker
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.scattermapbox import hoverlabel
|
34256
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extras', '0060_customlink_button_class'),
]
operations = [
migrations.AddField(
model_name='customfield',
name='created',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='customfield',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='customlink',
name='created',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='customlink',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='exporttemplate',
name='created',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='exporttemplate',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='webhook',
name='created',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='webhook',
name='last_updated',
field=models.DateTimeField(auto_now=True, null=True),
),
]
|
34302
|
def handler(event, context):
return {
"statusCode": 302,
"headers": {
"Location": "https://www.nhsx.nhs.uk/covid-19-response/data-and-covid-19/national-covid-19-chest-imaging-database-nccid/"
},
}
|
34308
|
import sys
import cv2
from keras.models import load_model
from matplotlib import pyplot as plt
import time
model = load_model("models/model.h5")
def find_faces(image):
face_cascade = cv2.CascadeClassifier('data/haarcascade_frontalface_default.xml')
face_rects = face_cascade.detectMultiScale(
image,
scaleFactor = 1.1,
minNeighbors = 22
)
return face_rects
def load_image(filepath):
image = cv2.imread(filepath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return image, gray_image
def predict(gray_image):
face_rects = find_faces(gray_image)
for face_rect in face_rects:
x, y, w, h = face_rect
face = gray_image[y:y+h, x:x+w]
face = cv2.resize(face, (48, 48)).reshape((1, 48, 48, 1))
predicted_emotions = model.predict(face)[0]
        # Keep the argmax consistent with the prediction dict below: index 0 is happiness, index 1 is neutral.
        best_emotion = 'happiness' if predicted_emotions[0] > predicted_emotions[1] else 'neutral'
# Create a json serializable result
yield dict(
border = dict(
x = float(x),
y = float(y),
width = float(w),
height = float(h),
),
prediction = {'happiness': float(predicted_emotions[0]), 'neutral': float(predicted_emotions[1])},
emotion = best_emotion
)
def put_text(image, rect, text):
x, y, w, h = rect
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = h / 30.0
font_thickness = int(round(font_scale * 1.5))
text_size, _ = cv2.getTextSize(text, font, font_scale, font_thickness)
center_text_x = x + (w // 2)
center_text_y = y + (h // 2)
text_w, text_h = text_size
lower_left_text_x = center_text_x - (text_w // 2)
lower_left_text_y = center_text_y + (text_h // 2)
cv2.putText(
image, text,
(lower_left_text_x, lower_left_text_y),
font, font_scale, (0, 255, 0), font_thickness
)
def draw_face_info(image, face_info):
x = int(face_info['border']['x'])
y = int(face_info['border']['y'])
w = int(face_info['border']['width'])
h = int(face_info['border']['height'])
emotion = face_info['emotion']
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
put_text(image, (x, y, w, h // 5), emotion)
def show_image(image, title='Result'):
plt.subplot(111), plt.imshow(image), plt.title(title)
plt.show()
if __name__ == '__main__':
# start time
start_time = time.time()
image, gray_image = load_image(sys.argv[1])
for face_info in predict(gray_image):
print(face_info)
draw_face_info(image, face_info)
# end time
end_time = time.time()
show_image(image)
response_time = end_time - start_time
print(response_time)
|
34330
|
import statistics
import hpbandster.core.result as hpres
# smallest value is best -> reverse_loss = True
# largest value is best -> reverse_loss = False
REVERSE_LOSS = True
EXP_LOSS = 1
OUTLIER_PERC_WORST = 0.1
OUTLIER_PERC_BEST = 0.0
def analyze_bohb(log_dir):
# load the example run from the log files
result = hpres.logged_results_to_HBS_result(log_dir)
# get all executed runs
all_runs = result.get_all_runs()
if __name__ == '__main__':
# load the example run from the log files
result = hpres.logged_results_to_HBS_result('../results/GTNC_evaluate_cmc_subopt_2021-01-21-09_5')
# get all executed runs
all_runs = result.get_all_runs()
t_arr = []
for dat in result.data.values():
for time_stamp in dat.time_stamps.values():
ts = time_stamp['started']
te = time_stamp['finished']
if te-ts > 60:
t_arr.append(te-ts)
print(statistics.mean(t_arr))
|
34380
|
from wireless_msgs.msg import Connection
from .translator import Translator, TableTranslatorMixin
class ConnectionTranslator(Translator, TableTranslatorMixin):
messageType = Connection
geomType = Translator.GeomTypes.NoGeometry
@staticmethod
def translate(msg):
# Some forks of wireless_msgs/Connection have a header.
try:
seq = msg.header.seq
stamp = msg.header.stamp.to_sec()
except AttributeError:
seq = None
stamp = None
return [{
'type': 'Feature',
'properties': {
'bitrate': msg.bitrate,
'txpower': msg.txpower,
'link_quality_raw': msg.link_quality_raw,
'link_quality': msg.link_quality,
'signal_level': msg.signal_level,
'noise_level': msg.noise_level,
'essid': msg.essid,
'bssid': msg.bssid,
'frequency': msg.frequency,
'seq': seq,
'stamp': stamp
}
}]
|
34439
|
from .request_context import RequestContext
from .tracker import ContextTracker
from .request import DjangoRequest, FlaskRequest
|
34475
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.Bench.YahooStocks as ys
import warnings
symbol_lists = tsds.get_yahoo_symbol_lists();
y_keys = sorted(symbol_lists.keys())
print(y_keys)
k = "nysecomp"
tester = ys.cYahoo_Tester(tsds.load_yahoo_stock_prices(k) , "YAHOO_STOCKS_" + k);
with warnings.catch_warnings():
warnings.simplefilter("error")
tester.testSignals('VRS')
|
34492
|
from ray.rllib.contrib.bandits.envs.discrete import LinearDiscreteEnv, \
WheelBanditEnv
from ray.rllib.contrib.bandits.envs.parametric import ParametricItemRecoEnv
__all__ = ["LinearDiscreteEnv", "WheelBanditEnv", "ParametricItemRecoEnv"]
|
34497
|
import string
import warnings
import re
from . import util
import spacy
class FileParser(object):
def __init__(self,
file_parser='txt',
xml_node_path=None, fparser=None):
if file_parser not in ['txt', 'xml', 'defined']:
msg = 'file_parser should be txt, xml or defined, not "{file_parser}"'
raise ValueError(msg.format(file_parser=file_parser))
if file_parser == 'defined' and fparser is None:
            msg = 'Please define your own file_parser.'
raise ValueError(msg)
self.file_parser = file_parser
self.xml_node_path = xml_node_path
self.fparser = fparser
def xml_parser(self, file_path, xml_node_path):
for paragraph in util.search_all_specific_nodes_in_xml_known_node_path(file_path, xml_node_path):
for sent in util.tokenize_informal_paragraph_into_sentences(paragraph):
yield str.strip(sent)
def txt_parser(self, file_path):
with open(file_path, 'r', encoding='utf-8') as file:
for line in file:
yield str.strip(line)
def __call__(self, file_path):
if self.file_parser == 'txt':
for sent in self.txt_parser(file_path):
yield sent
if self.file_parser == 'xml':
for sent in self.xml_parser(file_path, self.xml_node_path):
yield sent
if self.file_parser == 'defined':
for sent in self.fparser(file_path):
yield sent
class WordPreprocessor(object):
# default: config file.
def __init__(self, remove_stop_words, remove_numbers, replace_digits_to_zeros, remove_punctuations,
stem_word, lowercase, wpreprocessor):
self.remove_stop_words = remove_stop_words
self.remove_numbers = remove_numbers
self.replace_digits_to_zeros = replace_digits_to_zeros
self.remove_punctuations = remove_punctuations
self.stem_word = stem_word
self.lowercase = lowercase
self.wpreprocessor = wpreprocessor
punctuations = set(string.punctuation)
punctuations.update({'“', '”', '—'}) # English
punctuations.update({'...', '«', '»'}) # French
self.puncs = punctuations
def apply(self, word, spacy_loader=None):
# Removing
if self.remove_numbers and word.isnumeric():
return ''
if self.replace_digits_to_zeros:
            word = re.sub(r'\d', '0', word)
if self.remove_punctuations:
if all(c in self.puncs for c in word):
return ''
# Remove combinations of punctuations and digits
if self.remove_numbers and self.remove_punctuations:
if all(j.isdigit() or j in self.puncs for j in word):
return ''
# remove stop words
if self.remove_stop_words and spacy_loader.vocab[word].is_stop:
# print(word, 'is stop words')
return ''
# Stem word
if self.stem_word:
word = util.stem_word(word)
# Make all words in lowercase
if self.lowercase:
word = word.lower()
# customized word preprocessor
if self.wpreprocessor is not None:
if not callable(self.wpreprocessor):
msg = 'wpreprocessor should be callable'
warnings.warn(msg)
else:
word = self.wpreprocessor(word)
if not isinstance(word, str):
msg = 'The output of wpreprocessor should be string'
raise ValueError(msg)
return word
def __call__(self, word):
return self.apply(word)
class Tokenizer(object):
@staticmethod
def mytok(s):
"""
An example of user customized tokenizer.
:return: list of tokens
"""
        # TODO: calling spacy.load here is very inefficient, because the model is reloaded every time this tokenizer is applied. TOO SLOW!!!
tk = spacy.load('en')
return [token.text for token in tk(s)]
def __init__(self, word_tokenizer='Treebank', wtokenizer=None):
self.word_tokenizer = None
if word_tokenizer not in ['Treebank', 'PunktWord', 'WordPunct', 'spacy', '']:
msg = 'word_tokenizer "{word_tokenizer}" should be Treebank, PunktWord, WordPunct or empty'
raise ValueError(msg.format(word_tokenizer=word_tokenizer))
if word_tokenizer == 'spacy':
self.tokenizer = None
self.word_tokenizer = 'spacy'
elif word_tokenizer == 'Treebank':
from nltk.tokenize import TreebankWordTokenizer
self.tokenizer = TreebankWordTokenizer().tokenize
elif word_tokenizer == 'PunktWord':
# PunktTokenizer splits on punctuation, but keeps it with the word. => [‘this’, “‘s”, ‘a’, ‘test’]
from nltk.tokenize import PunktWordTokenizer
self.tokenizer = PunktWordTokenizer().tokenize
elif word_tokenizer == 'WordPunct':
# WordPunctTokenizer splits all punctuations into separate tokens. => [‘This’, “‘”, ‘s’, ‘a’, ‘test’]
from nltk.tokenize import WordPunctTokenizer
self.tokenizer = WordPunctTokenizer().tokenize
else:
if wtokenizer is None:
self.tokenizer = None
else:
if not callable(wtokenizer):
msg = 'wtokenizer should be callable'
warnings.warn(msg)
self.tokenizer = None
else:
self.tokenizer = wtokenizer
def apply(self, text, spacy_loader=None):
if self.word_tokenizer == 'spacy':
return [token.text for token in spacy_loader(text)]
if self.tokenizer is not None:
return self.tokenizer(text)
else:
return [text]
def __call__(self, text):
return self.apply(text)
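# Hypothetical usage sketch of the classes above: tokenize a sentence with the default
# Treebank tokenizer and clean each token with a permissive WordPreprocessor (stop-word
# removal is disabled so no spacy loader is required).
def _example_pipeline():
    tokenizer = Tokenizer(word_tokenizer='Treebank')
    preprocessor = WordPreprocessor(remove_stop_words=False, remove_numbers=True,
                                    replace_digits_to_zeros=False, remove_punctuations=True,
                                    stem_word=False, lowercase=True, wpreprocessor=None)
    tokens = tokenizer('There are 3 apples, right?')
    return [w for w in (preprocessor(t) for t in tokens) if w]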
|
34500
|
import logging
import time
from tests.common.helpers.assertions import pytest_assert
logger = logging.getLogger(__name__)
def join_master(duthost, master_vip):
"""
Joins DUT to Kubernetes master
Args:
duthost: DUT host object
master_vip: VIP of high availability Kubernetes master
If join fails, test will fail at the assertion to check_connected
"""
logger.info("Joining DUT to Kubernetes master")
dut_join_cmds = ['sudo config kube server disable on',
'sudo config kube server ip {}'.format(master_vip),
'sudo config kube server disable off']
duthost.shell_cmds(cmds=dut_join_cmds)
    pytest_assert(poll_for_status_change(duthost, True), "DUT failed to successfully join Kubernetes master")
def make_vip_unreachable(duthost, master_vip):
"""
Makes Kubernetes master VIP unreachable from SONiC DUT by configuring iptables rules. Cleans preexisting iptables rules for VIP.
Args:
duthost: DUT host object
master_vip: VIP of high availability Kubernetes master
"""
logger.info("Making Kubernetes master VIP unreachable from DUT")
clean_vip_iptables_rules(duthost, master_vip)
duthost.shell('sudo iptables -A INPUT -s {} -j DROP'.format(master_vip))
duthost.shell('sudo iptables -A OUTPUT -d {} -j DROP'.format(master_vip))
def make_vip_reachable(duthost, master_vip):
"""
Makes Kubernetes master VIP reachable from SONiC DUT by removing any iptables rules associated with the VIP.
Args:
duthost: DUT host object
master_vip: VIP of high availability Kubernetes master
"""
logger.info("Making Kubernetes master VIP reachable from DUT")
clean_vip_iptables_rules(duthost, master_vip)
def clean_vip_iptables_rules(duthost, master_vip):
"""
Removes all iptables rules associated with the VIP.
Args:
duthost: DUT host object
master_vip: VIP of high availability Kubernetes master
"""
iptables_rules = duthost.shell('sudo iptables -S | grep {} || true'.format(master_vip))["stdout_lines"]
logger.info('iptables rules: {}'.format(iptables_rules))
for line in iptables_rules:
if line:
duthost.shell('sudo iptables -D {}'.format(line[2:]))
def check_connected(duthost):
"""
Checks if the DUT already shows status 'connected' to Kubernetes master
Args:
duthost: DUT host object
Returns:
True if connected, False if not connected
"""
kube_server_status = duthost.shell('show kube server')["stdout_lines"]
logger.info("Kube server status: {}".format(kube_server_status))
for line in kube_server_status:
if line.startswith("KUBERNETES_MASTER SERVER connected"):
return line.endswith("true")
logger.info("Kubernetes server check_connected failed to check server status")
def poll_for_status_change(duthost, exp_status, poll_wait_secs=5, min_wait_time=20, max_wait_time=120):
"""
Polls to see if kube server connected status updates as expected
Args:
duthost: DUT host object
exp_status: expected server connected status once processes are synced
poll_wait_secs: seconds between each server connected status poll. Default: 5 seconds
min_wait_time: seconds before starting poll of server connected status. Default: 20 seconds
max_wait_time: maximum amount of time to spend polling for status change. Default: 120 seconds
Returns:
True if server connected status updates as expected by max_wait_time
False if server connected status fails to update as expected by max_wait_time
"""
time.sleep(min_wait_time)
timeout_wait_secs = max_wait_time - min_wait_time
while (timeout_wait_secs > 0):
if (check_connected(duthost) == exp_status):
logging.info("Time taken to update Kube server status: {} seconds".format(timeout_wait_secs))
return True
time.sleep(poll_wait_secs)
timeout_wait_secs -= poll_wait_secs
return False
|
34548
|
import os
import os.path as osp
import pickle
import time
import numpy as np
from multiprocessing import Pool
from ..utils import get_bbox_dim
from .misc import read_img_info, change_cls_order, get_classes
def load_imgs(img_dir, ann_dir=None, classes=None, nproc=10,
def_bbox_type='poly'):
assert def_bbox_type in ['hbb', 'obb', 'poly', None]
assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
if ann_dir is not None:
        print('ann_dir is not used in the load_imgs function')
print('Starting loading images information')
start_time = time.time()
imgpaths = [osp.join(img_dir, imgfile)
for imgfile in os.listdir(img_dir)]
if nproc > 1:
pool = Pool(nproc)
infos = pool.map(read_img_info, imgpaths)
pool.close()
else:
infos = list(map(read_img_info, imgpaths))
if def_bbox_type is not None:
for info in infos:
if info is None:
continue
bbox_dim = get_bbox_dim(def_bbox_type)
bboxes = np.zeros((0, bbox_dim), dtype=np.float32)
labels = np.zeros((0, ), dtype=np.int64)
info['ann'] = dict(bboxes=bboxes, labels=labels)
classes = () if classes is None else classes
end_time = time.time()
    print(f'Finished loading images, got {len(infos)} images,',
          f'using {end_time-start_time:.3f}s.')
return infos, classes
def load_pkl(ann_dir, img_dir=None, classes=None, nproc=10):
assert osp.isfile(ann_dir), f'The {ann_dir} is not an existing pkl file!'
assert img_dir is None or osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
print('Starting loading pkl information')
start_time = time.time()
data = pickle.load(open(ann_dir, 'rb'))
old_classes, contents = data['cls'], data['content']
if img_dir is not None:
imgpaths = [osp.join(img_dir, content['filename'])
for content in contents]
if nproc > 1:
pool = Pool(nproc)
infos = pool.map(read_img_info, imgpaths)
pool.close()
else:
infos = list(map(read_img_info, imgpaths))
for info, content in zip(infos, contents):
content.update(info)
if classes is None:
classes = old_classes
else:
classes = get_classes(classes)
change_cls_order(contents, old_classes, classes)
end_time = time.time()
    print(f'Finished loading pkl, got {len(contents)} images,',
          f'using {end_time-start_time:.3f}s.')
return contents, classes
def save_pkl(save_dir, contents, classes):
assert save_dir.endswith('.pkl')
filepath = osp.split(save_dir)[0]
if not osp.exists(filepath):
os.makedirs(filepath)
data = dict(cls=classes, content=contents)
pickle.dump(data, open(save_dir, 'wb'))
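# Hypothetical round-trip sketch (paths and class names are placeholders): collect image
# infos, dump them as a pkl annotation file, then reload the contents and classes.
def _example_round_trip(img_dir, save_path='./out/anns.pkl'):
    contents, classes = load_imgs(img_dir, classes=('object', ), nproc=1)
    save_pkl(save_path, contents, classes)
    return load_pkl(save_path)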
|
34558
|
from unittest import TestCase
from pylibsrtp import Error, Policy, Session
RTP = (
b"\x80\x08\x00\x00" # version, packet type, sequence number
b"\x00\x00\x00\x00" # timestamp
b"\x00\x00\x30\x39" # ssrc: 12345
) + (b"\xd4" * 160)
RTCP = (
b"\x80\xc8\x00\x06\xf3\xcb\x20\x01\x83\xab\x03\xa1\xeb\x02\x0b\x3a"
b"\x00\x00\x94\x20\x00\x00\x00\x9e\x00\x00\x9b\x88"
)
# Set key to predetermined value
KEY = (
b"\<KEY>"
b"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
b"\x10\x11\x12\x13\x14\x15\x16\x17"
b"\x18\x19\x1a\x1b\x1c\x1d"
)
class PolicyTest(TestCase):
def test_allow_repeat_tx(self):
policy = Policy()
self.assertEqual(policy.allow_repeat_tx, False)
policy.allow_repeat_tx = True
self.assertEqual(policy.allow_repeat_tx, True)
policy.allow_repeat_tx = False
self.assertEqual(policy.allow_repeat_tx, False)
policy.allow_repeat_tx = 1
self.assertEqual(policy.allow_repeat_tx, True)
policy.allow_repeat_tx = 0
self.assertEqual(policy.allow_repeat_tx, False)
def test_key(self):
policy = Policy()
self.assertEqual(policy.key, None)
policy.key = KEY
self.assertEqual(policy.key, KEY)
policy.key = None
self.assertEqual(policy.key, None)
with self.assertRaises(TypeError) as cm:
policy.key = 1234
self.assertEqual(policy.key, None)
self.assertEqual(str(cm.exception), "key must be bytes")
def test_ssrc_type(self):
policy = Policy()
self.assertEqual(policy.ssrc_type, Policy.SSRC_UNDEFINED)
policy.ssrc_type = Policy.SSRC_ANY_INBOUND
self.assertEqual(policy.ssrc_type, Policy.SSRC_ANY_INBOUND)
def test_ssrc_value(self):
policy = Policy()
self.assertEqual(policy.ssrc_value, 0)
policy.ssrc_value = 12345
self.assertEqual(policy.ssrc_value, 12345)
def test_window_size(self):
policy = Policy()
self.assertEqual(policy.window_size, 0)
policy.window_size = 1024
self.assertEqual(policy.window_size, 1024)
class SessionTest(TestCase):
def test_no_key(self):
policy = Policy(ssrc_type=Policy.SSRC_ANY_OUTBOUND)
with self.assertRaises(Error) as cm:
Session(policy=policy)
self.assertEqual(str(cm.exception), "unsupported parameter")
def test_add_remove_stream(self):
# protect RTP
tx_session = Session(
policy=Policy(key=KEY, ssrc_type=Policy.SSRC_SPECIFIC, ssrc_value=12345)
)
protected = tx_session.protect(RTP)
self.assertEqual(len(protected), 182)
# add stream and unprotect RTP
rx_session = Session()
rx_session.add_stream(
Policy(key=KEY, ssrc_type=Policy.SSRC_SPECIFIC, ssrc_value=12345)
)
unprotected = rx_session.unprotect(protected)
self.assertEqual(len(unprotected), 172)
self.assertEqual(unprotected, RTP)
# remove stream
rx_session.remove_stream(12345)
# try removing stream again
with self.assertRaises(Error) as cm:
rx_session.remove_stream(12345)
self.assertEqual(str(cm.exception), "no appropriate context found")
def test_rtp_any_ssrc(self):
# protect RTP
tx_session = Session(policy=Policy(key=KEY, ssrc_type=Policy.SSRC_ANY_OUTBOUND))
protected = tx_session.protect(RTP)
self.assertEqual(len(protected), 182)
# bad type
with self.assertRaises(TypeError) as cm:
tx_session.protect(4567)
self.assertEqual(str(cm.exception), "packet must be bytes")
# bad length
with self.assertRaises(ValueError) as cm:
tx_session.protect(b"0" * 1500)
self.assertEqual(str(cm.exception), "packet is too long")
# unprotect RTP
rx_session = Session(policy=Policy(key=KEY, ssrc_type=Policy.SSRC_ANY_INBOUND))
unprotected = rx_session.unprotect(protected)
self.assertEqual(len(unprotected), 172)
self.assertEqual(unprotected, RTP)
def test_rtcp_any_ssrc(self):
        # protect RTCP
tx_session = Session(policy=Policy(key=KEY, ssrc_type=Policy.SSRC_ANY_OUTBOUND))
protected = tx_session.protect_rtcp(RTCP)
self.assertEqual(len(protected), 42)
# bad type
with self.assertRaises(TypeError) as cm:
tx_session.protect_rtcp(4567)
self.assertEqual(str(cm.exception), "packet must be bytes")
# bad length
with self.assertRaises(ValueError) as cm:
tx_session.protect_rtcp(b"0" * 1500)
self.assertEqual(str(cm.exception), "packet is too long")
# unprotect RTCP
rx_session = Session(policy=Policy(key=KEY, ssrc_type=Policy.SSRC_ANY_INBOUND))
unprotected = rx_session.unprotect_rtcp(protected)
self.assertEqual(len(unprotected), 28)
self.assertEqual(unprotected, RTCP)
def test_rtp_specific_ssrc(self):
# protect RTP
tx_session = Session(
policy=Policy(key=KEY, ssrc_type=Policy.SSRC_SPECIFIC, ssrc_value=12345)
)
protected = tx_session.protect(RTP)
self.assertEqual(len(protected), 182)
# unprotect RTP
rx_session = Session(
policy=Policy(key=KEY, ssrc_type=Policy.SSRC_SPECIFIC, ssrc_value=12345)
)
unprotected = rx_session.unprotect(protected)
self.assertEqual(len(unprotected), 172)
self.assertEqual(unprotected, RTP)
|
34575
|
import json
import logging
from typing import Tuple
import requests
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.views.generic import FormView, TemplateView
from .forms import CodeSchoolForm
logger = logging.getLogger(__name__)
class IndexView(TemplateView):
template_name = 'frontend/index.html'
class CodeschoolFormView(FormView):
form_class = CodeSchoolForm
template_name = 'frontend/codeschool-form.html'
success_url = f'https://github.com/{settings.GITHUB_REPO}/issues'
def form_valid(self, form):
form.save()
handle_submission(form.cleaned_data)
return super().form_valid(form)
def form_invalid(self, form):
return super().form_invalid(form)
class BotMessagesView(TemplateView):
template_name = 'frontend/messages.html'
def get_logo_and_users(logo: InMemoryUploadedFile) -> Tuple[str, str]:
school_logo = logo.name.replace(' ', '_')
if settings.DEBUG or settings.PRE_PROD:
users = '@wimo7083 @AllenAnthes,'
else:
users = '@wimo7083 @jhampton @kylemh'
logo_url = f'{settings.MEDIA_URL}logos/{school_logo}'
return logo_url, users
def handle_submission(form: dict):
repo_path = settings.GITHUB_REPO
url = f"https://api.github.com/repos/{repo_path}/issues"
headers = {"Authorization": f"Bearer {settings.GITHUB_JWT}"}
params = make_params(**form)
res = requests.post(url, headers=headers, data=json.dumps(params))
logger.info(f'response from github API call {res}')
def make_params(logo, name, url, address1, city, state, zipcode, country, rep_name, rep_email, recaptcha='',
address2=None, fulltime=False, hardware=False, has_online=False, only_online=False, accredited=False,
housing=False, mooc=False):
logo_url, notify_users = get_logo_and_users(logo)
return ({
'title': f'New Code School Request: {name}',
'body': (
f"Name: {name}\n"
f"Website: {url}\n"
f"Full Time: {fulltime}\n"
f"Hardware Included: {hardware}\n"
f"Has Online: {has_online}\n"
f"Only Online: {only_online}\n"
f"VA Accredited: {accredited}\n"
f"Housing Included: {housing}\n"
f"MOOC Only: {mooc}\n"
f"Address: {address1} {address2}\n"
f"City: {city}\n"
f"State: {state}\n"
f"Country: {country}\n"
f"Zip: {zipcode}\n\n"
f"Representative Name: {rep_name}\n"
f"Representative Email: {rep_email}\n"
f"logo: \n"
'This code school is ready to be added/updated:\n'
f"{notify_users}\n"
"Please close this issue once you've added/updated the code school."
)
})
|
34599
|
import calendar
import datetime
import re
import sys
from dateutil.relativedelta import relativedelta
import gam
from gam.var import *
from gam import controlflow
from gam import display
from gam import gapi
from gam import utils
from gam.gapi.directory import orgunits as gapi_directory_orgunits
def build():
return gam.buildGAPIObject('reports')
REPORT_CHOICE_MAP = {
'access': 'access_transparency',
'accesstransparency': 'access_transparency',
'calendars': 'calendar',
'customers': 'customer',
'doc': 'drive',
'docs': 'drive',
'domain': 'customer',
'enterprisegroups': 'groups_enterprise',
'google+': 'gplus',
'group': 'groups',
'groupsenterprise': 'groups_enterprise',
'hangoutsmeet': 'meet',
'logins': 'login',
'oauthtoken': 'token',
'tokens': 'token',
'usage': 'usage',
'usageparameters': 'usageparameters',
'users': 'user',
'useraccounts': 'user_accounts',
}
def showUsageParameters():
rep = build()
throw_reasons = [
gapi.errors.ErrorReason.INVALID, gapi.errors.ErrorReason.BAD_REQUEST
]
todrive = False
if len(sys.argv) == 3:
controlflow.missing_argument_exit('user or customer',
'report usageparameters')
report = sys.argv[3].lower()
titles = ['parameter']
if report == 'customer':
endpoint = rep.customerUsageReports()
kwargs = {}
elif report == 'user':
endpoint = rep.userUsageReport()
kwargs = {'userKey': gam._get_admin_email()}
else:
controlflow.expected_argument_exit('usageparameters',
['user', 'customer'], report)
customerId = GC_Values[GC_CUSTOMER_ID]
if customerId == MY_CUSTOMER:
customerId = None
tryDate = datetime.date.today().strftime(YYYYMMDD_FORMAT)
all_parameters = set()
i = 4
while i < len(sys.argv):
myarg = sys.argv[i].lower().replace('_', '')
if myarg == 'todrive':
todrive = True
i += 1
else:
controlflow.invalid_argument_exit(sys.argv[i],
'gam report usageparameters')
fullDataRequired = ['all']
while True:
try:
result = gapi.call(endpoint,
'get',
throw_reasons=throw_reasons,
date=tryDate,
customerId=customerId,
fields='warnings,usageReports(parameters(name))',
**kwargs)
warnings = result.get('warnings', [])
usage = result.get('usageReports')
has_reports = bool(usage)
fullData, tryDate = _check_full_data_available(
warnings, tryDate, fullDataRequired, has_reports)
if fullData < 0:
print('No usage parameters available.')
sys.exit(1)
if has_reports:
for parameter in usage[0]['parameters']:
name = parameter.get('name')
if name:
all_parameters.add(name)
if fullData == 1:
break
except gapi.errors.GapiInvalidError as e:
tryDate = _adjust_date(str(e))
csvRows = []
for parameter in sorted(all_parameters):
csvRows.append({'parameter': parameter})
display.write_csv_file(csvRows, titles,
f'{report.capitalize()} Report Usage Parameters',
todrive)
REPORTS_PARAMETERS_SIMPLE_TYPES = [
'intValue', 'boolValue', 'datetimeValue', 'stringValue'
]
def showUsage():
rep = build()
throw_reasons = [
gapi.errors.ErrorReason.INVALID, gapi.errors.ErrorReason.BAD_REQUEST
]
todrive = False
if len(sys.argv) == 3:
controlflow.missing_argument_exit('user or customer', 'report usage')
report = sys.argv[3].lower()
titles = ['date']
if report == 'customer':
endpoint = rep.customerUsageReports()
kwargs = [{}]
elif report == 'user':
endpoint = rep.userUsageReport()
kwargs = [{'userKey': 'all'}]
titles.append('user')
else:
controlflow.expected_argument_exit('usage', ['user', 'customer'],
report)
customerId = GC_Values[GC_CUSTOMER_ID]
if customerId == MY_CUSTOMER:
customerId = None
parameters = []
start_date = end_date = orgUnitId = None
skip_day_numbers = []
skip_dates = set()
one_day = datetime.timedelta(days=1)
i = 4
while i < len(sys.argv):
myarg = sys.argv[i].lower().replace('_', '')
if myarg == 'startdate':
start_date = utils.get_yyyymmdd(sys.argv[i + 1],
returnDateTime=True)
i += 2
elif myarg == 'enddate':
end_date = utils.get_yyyymmdd(sys.argv[i + 1], returnDateTime=True)
i += 2
elif myarg == 'todrive':
todrive = True
i += 1
elif myarg in ['fields', 'parameters']:
parameters = sys.argv[i + 1].split(',')
i += 2
elif myarg == 'skipdates':
for skip in sys.argv[i + 1].split(','):
if skip.find(':') == -1:
skip_dates.add(utils.get_yyyymmdd(skip,
returnDateTime=True))
else:
skip_start, skip_end = skip.split(':', 1)
skip_start = utils.get_yyyymmdd(skip_start,
returnDateTime=True)
skip_end = utils.get_yyyymmdd(skip_end, returnDateTime=True)
while skip_start <= skip_end:
skip_dates.add(skip_start)
skip_start += one_day
i += 2
elif myarg == 'skipdaysofweek':
skipdaynames = sys.argv[i + 1].split(',')
dow = [d.lower() for d in calendar.day_abbr]
skip_day_numbers = [dow.index(d) for d in skipdaynames if d in dow]
i += 2
elif report == 'user' and myarg in ['orgunit', 'org', 'ou']:
_, orgUnitId = gapi_directory_orgunits.getOrgUnitId(sys.argv[i + 1])
i += 2
elif report == 'user' and myarg in usergroup_types:
users = gam.getUsersToModify(myarg, sys.argv[i + 1])
kwargs = [{'userKey': user} for user in users]
i += 2
else:
controlflow.invalid_argument_exit(sys.argv[i],
f'gam report usage {report}')
if parameters:
titles.extend(parameters)
parameters = ','.join(parameters)
else:
parameters = None
if not end_date:
end_date = datetime.datetime.now()
if not start_date:
start_date = end_date + relativedelta(months=-1)
if orgUnitId:
for kw in kwargs:
kw['orgUnitID'] = orgUnitId
usage_on_date = start_date
start_date = usage_on_date.strftime(YYYYMMDD_FORMAT)
usage_end_date = end_date
end_date = end_date.strftime(YYYYMMDD_FORMAT)
start_use_date = end_use_date = None
csvRows = []
while usage_on_date <= usage_end_date:
if usage_on_date.weekday() in skip_day_numbers or \
usage_on_date in skip_dates:
usage_on_date += one_day
continue
use_date = usage_on_date.strftime(YYYYMMDD_FORMAT)
usage_on_date += one_day
try:
for kwarg in kwargs:
try:
usage = gapi.get_all_pages(endpoint,
'get',
'usageReports',
throw_reasons=throw_reasons,
customerId=customerId,
date=use_date,
parameters=parameters,
**kwarg)
except gapi.errors.GapiBadRequestError:
continue
for entity in usage:
row = {'date': use_date}
if 'userEmail' in entity['entity']:
row['user'] = entity['entity']['userEmail']
for item in entity.get('parameters', []):
if 'name' not in item:
continue
name = item['name']
if name == 'cros:device_version_distribution':
for cros_ver in item['msgValue']:
v = cros_ver['version_number']
column_name = f'cros:num_devices_chrome_{v}'
if column_name not in titles:
titles.append(column_name)
row[column_name] = cros_ver['num_devices']
else:
if not name in titles:
titles.append(name)
for ptype in REPORTS_PARAMETERS_SIMPLE_TYPES:
if ptype in item:
row[name] = item[ptype]
break
else:
row[name] = ''
if not start_use_date:
start_use_date = use_date
end_use_date = use_date
csvRows.append(row)
except gapi.errors.GapiInvalidError as e:
display.print_warning(str(e))
break
if start_use_date:
report_name = f'{report.capitalize()} Usage Report - {start_use_date}:{end_use_date}'
else:
report_name = f'{report.capitalize()} Usage Report - {start_date}:{end_date} - No Data'
display.write_csv_file(csvRows, titles, report_name, todrive)
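# Hypothetical command-line sketches for the usage report handled above; the keywords match
# the arguments parsed in showUsage, but the parameter names are illustrative assumptions:
#   gam report usage customer parameters accounts:num_users todrive
#   gam report usage user start_date 2021-01-01 end_date 2021-01-31 skipdaysofweek sat,sun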
def showReport():
rep = build()
throw_reasons = [gapi.errors.ErrorReason.INVALID]
report = sys.argv[2].lower()
report = REPORT_CHOICE_MAP.get(report.replace('_', ''), report)
if report == 'usage':
showUsage()
return
if report == 'usageparameters':
showUsageParameters()
return
valid_apps = gapi.get_enum_values_minus_unspecified(
rep._rootDesc['resources']['activities']['methods']['list']
['parameters']['applicationName']['enum']) + ['customer', 'user']
if report not in valid_apps:
controlflow.expected_argument_exit('report',
', '.join(sorted(valid_apps)),
report)
customerId = GC_Values[GC_CUSTOMER_ID]
if customerId == MY_CUSTOMER:
customerId = None
filters = parameters = actorIpAddress = groupIdFilter = startTime = endTime = eventName = orgUnitId = None
tryDate = datetime.date.today().strftime(YYYYMMDD_FORMAT)
to_drive = False
userKey = 'all'
fullDataRequired = None
i = 3
while i < len(sys.argv):
myarg = sys.argv[i].lower()
if myarg == 'date':
tryDate = utils.get_yyyymmdd(sys.argv[i + 1])
i += 2
elif myarg in ['orgunit', 'org', 'ou']:
_, orgUnitId = gapi_directory_orgunits.getOrgUnitId(sys.argv[i + 1])
i += 2
elif myarg == 'fulldatarequired':
fullDataRequired = []
fdr = sys.argv[i + 1].lower()
if fdr and fdr == 'all':
fullDataRequired = 'all'
else:
fullDataRequired = fdr.replace(',', ' ').split()
i += 2
elif myarg == 'start':
startTime = utils.get_time_or_delta_from_now(sys.argv[i + 1])
i += 2
elif myarg == 'end':
endTime = utils.get_time_or_delta_from_now(sys.argv[i + 1])
i += 2
elif myarg == 'event':
eventName = sys.argv[i + 1]
i += 2
elif myarg == 'user':
userKey = sys.argv[i + 1].lower()
if userKey != 'all':
userKey = gam.normalizeEmailAddressOrUID(sys.argv[i + 1])
i += 2
elif myarg in ['filter', 'filters']:
filters = sys.argv[i + 1]
i += 2
elif myarg in ['fields', 'parameters']:
parameters = sys.argv[i + 1]
i += 2
elif myarg == 'ip':
actorIpAddress = sys.argv[i + 1]
i += 2
elif myarg == 'groupidfilter':
groupIdFilter = sys.argv[i + 1]
i += 2
elif myarg == 'todrive':
to_drive = True
i += 1
else:
controlflow.invalid_argument_exit(sys.argv[i], 'gam report')
if report == 'user':
while True:
try:
one_page = gapi.call(rep.userUsageReport(),
'get',
throw_reasons=throw_reasons,
date=tryDate,
userKey=userKey,
customerId=customerId,
orgUnitID=orgUnitId,
fields='warnings,usageReports',
maxResults=1)
warnings = one_page.get('warnings', [])
has_reports = bool(one_page.get('usageReports'))
fullData, tryDate = _check_full_data_available(
warnings, tryDate, fullDataRequired, has_reports)
if fullData < 0:
print('No user report available.')
sys.exit(1)
if fullData == 0:
continue
page_message = gapi.got_total_items_msg('Users', '...\n')
usage = gapi.get_all_pages(rep.userUsageReport(),
'get',
'usageReports',
page_message=page_message,
throw_reasons=throw_reasons,
date=tryDate,
userKey=userKey,
customerId=customerId,
orgUnitID=orgUnitId,
filters=filters,
parameters=parameters)
break
except gapi.errors.GapiInvalidError as e:
tryDate = _adjust_date(str(e))
if not usage:
print('No user report available.')
sys.exit(1)
titles = ['email', 'date']
csvRows = []
for user_report in usage:
if 'entity' not in user_report:
continue
row = {'email': user_report['entity']['userEmail'], 'date': tryDate}
for item in user_report.get('parameters', []):
if 'name' not in item:
continue
name = item['name']
if not name in titles:
titles.append(name)
for ptype in REPORTS_PARAMETERS_SIMPLE_TYPES:
if ptype in item:
row[name] = item[ptype]
break
else:
row[name] = ''
csvRows.append(row)
display.write_csv_file(csvRows, titles, f'User Reports - {tryDate}',
to_drive)
elif report == 'customer':
while True:
try:
first_page = gapi.call(rep.customerUsageReports(),
'get',
throw_reasons=throw_reasons,
customerId=customerId,
date=tryDate,
fields='warnings,usageReports')
warnings = first_page.get('warnings', [])
has_reports = bool(first_page.get('usageReports'))
fullData, tryDate = _check_full_data_available(
warnings, tryDate, fullDataRequired, has_reports)
if fullData < 0:
print('No customer report available.')
sys.exit(1)
if fullData == 0:
continue
usage = gapi.get_all_pages(rep.customerUsageReports(),
'get',
'usageReports',
throw_reasons=throw_reasons,
customerId=customerId,
date=tryDate,
parameters=parameters)
break
except gapi.errors.GapiInvalidError as e:
tryDate = _adjust_date(str(e))
if not usage:
print('No customer report available.')
sys.exit(1)
titles = ['name', 'value', 'client_id']
csvRows = []
auth_apps = list()
for item in usage[0]['parameters']:
if 'name' not in item:
continue
name = item['name']
if 'intValue' in item:
value = item['intValue']
elif 'msgValue' in item:
if name == 'accounts:authorized_apps':
for subitem in item['msgValue']:
app = {}
for an_item in subitem:
if an_item == 'client_name':
app['name'] = 'App: ' + \
subitem[an_item].replace('\n', '\\n')
elif an_item == 'num_users':
app['value'] = f'{subitem[an_item]} users'
elif an_item == 'client_id':
app['client_id'] = subitem[an_item]
auth_apps.append(app)
continue
values = []
for subitem in item['msgValue']:
if 'count' in subitem:
mycount = myvalue = None
for key, value in list(subitem.items()):
if key == 'count':
mycount = value
else:
myvalue = value
if mycount and myvalue:
values.append(f'{myvalue}:{mycount}')
value = ' '.join(values)
elif 'version_number' in subitem \
and 'num_devices' in subitem:
values.append(f'{subitem["version_number"]}:'
f'{subitem["num_devices"]}')
else:
continue
value = ' '.join(sorted(values, reverse=True))
csvRows.append({'name': name, 'value': value})
for app in auth_apps: # put apps at bottom
csvRows.append(app)
display.write_csv_file(csvRows,
titles,
f'Customer Report - {tryDate}',
todrive=to_drive)
else:
page_message = gapi.got_total_items_msg('Activities', '...\n')
activities = gapi.get_all_pages(rep.activities(),
'list',
'items',
page_message=page_message,
applicationName=report,
userKey=userKey,
customerId=customerId,
actorIpAddress=actorIpAddress,
startTime=startTime,
endTime=endTime,
eventName=eventName,
filters=filters,
orgUnitID=orgUnitId,
groupIdFilter=groupIdFilter)
if activities:
titles = ['name']
csvRows = []
for activity in activities:
events = activity['events']
del activity['events']
activity_row = utils.flatten_json(activity)
purge_parameters = True
for event in events:
for item in event.get('parameters', []):
if set(item) == {'value', 'name'}:
event[item['name']] = item['value']
elif set(item) == {'intValue', 'name'}:
if item['name'] in ['start_time', 'end_time']:
val = item.get('intValue')
if val is not None:
val = int(val)
if val >= 62135683200:
event[item['name']] = \
datetime.datetime.fromtimestamp(
val-62135683200).isoformat()
else:
event[item['name']] = item['intValue']
elif set(item) == {'boolValue', 'name'}:
event[item['name']] = item['boolValue']
elif set(item) == {'multiValue', 'name'}:
event[item['name']] = ' '.join(item['multiValue'])
elif item['name'] == 'scope_data':
parts = {}
for message in item['multiMessageValue']:
for mess in message['parameter']:
value = mess.get(
'value',
' '.join(mess.get('multiValue', [])))
parts[mess['name']] = parts.get(
mess['name'], []) + [value]
for part, v in parts.items():
if part == 'scope_name':
part = 'scope'
event[part] = ' '.join(v)
else:
purge_parameters = False
if purge_parameters:
event.pop('parameters', None)
row = utils.flatten_json(event)
row.update(activity_row)
for item in row:
if item not in titles:
titles.append(item)
csvRows.append(row)
display.sort_csv_titles([
'name',
], titles)
display.write_csv_file(csvRows, titles,
f'{report.capitalize()} Activity Report',
to_drive)
def _adjust_date(errMsg):
match_date = re.match(
'Data for dates later than (.*) is not yet '
'available. Please check back later', errMsg)
if not match_date:
match_date = re.match('Start date can not be later than (.*)', errMsg)
if not match_date:
controlflow.system_error_exit(4, errMsg)
return str(match_date.group(1))
def _check_full_data_available(warnings, tryDate, fullDataRequired,
has_reports):
one_day = datetime.timedelta(days=1)
tryDateTime = datetime.datetime.strptime(tryDate, YYYYMMDD_FORMAT)
# move to day before if we don't have at least one usageReport
if not has_reports:
tryDateTime -= one_day
return (0, tryDateTime.strftime(YYYYMMDD_FORMAT))
for warning in warnings:
if warning['code'] == 'PARTIAL_DATA_AVAILABLE':
for app in warning['data']:
if app['key'] == 'application' and \
app['value'] != 'docs' and \
fullDataRequired is not None and \
(fullDataRequired == 'all' or app['value'] in fullDataRequired):
tryDateTime -= one_day
return (0, tryDateTime.strftime(YYYYMMDD_FORMAT))
elif warning['code'] == 'DATA_NOT_AVAILABLE':
for app in warning['data']:
if app['key'] == 'application' and \
app['value'] != 'docs' and \
(not fullDataRequired or app['value'] in fullDataRequired):
return (-1, tryDate)
return (1, tryDate)
|
34606
|
import pandas as pd
X_train = pd.read_csv("X_train.csv")
df_y = pd.read_csv("y_train.csv")
y_train = df_y["y"]
X_test = pd.read_csv("X_test.csv")
|
34642
|
import uuid
from datetime import datetime, timedelta
from controllers import zfsController
import jwt
import pam
import render
JWT_SECRET = "<KEY>"
JWT_ALGORITHM = "HS256"
JWT_EXP_DELTA_SECONDS = 4300
async def index(request):
    return await render.json({'error': 'nothing to see here...'}, 200)
async def auth(request):
try:
data = await request.json()
user = data['username']
password = data['password']
if pam.authenticate(user, password):
payload = {
'user': user,
'session_id': str(uuid.uuid4()),
'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)
}
jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)
return await render.json({'token': jwt_token.decode('utf-8')}, 200)
else:
return None
except Exception as e:
return await render.json({'error': str(e)}, 200)
async def check_token(request):
try:
jwt_token = request.headers.get('Authorization', None)
payload = jwt.decode(jwt_token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
return payload['session_id']
except (jwt.DecodeError, jwt.ExpiredSignatureError):
return False
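# Hypothetical illustration (not part of the original handlers): the token expected in the
# 'Authorization' header is a JWT signed with the same secret and carrying the claims
# produced by auth() above.
def _example_token():
    payload = {
        'user': 'demo',
        'session_id': str(uuid.uuid4()),
        'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS),
    }
    return jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)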
async def create_pool(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.create_pool(data['name'], data['raid'], data['devices'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def delete_pool(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.delete_pool(data['name'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def check_status(request):
check = await check_token(request)
if check:
try:
res = await zfsController.get_status()
return await render.json({'msg': res}, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def get_storage_info(request):
check = await check_token(request)
if check:
try:
res = await zfsController.get_disk_info()
return await render.json(res, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 500)
async def get_io_status(request):
check = await check_token(request)
if check:
try:
res = await zfsController.get_IO_stats()
return await render.json({'msg': res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 500)
async def add_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.add_new_disk(data['pool'], data['device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 500)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def add_spare_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.add_spare_disk(data['pool'], data['device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def replace_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.replace_disk(data['pool'], data['old_device'], data['new_device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def set_mountpoint(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.set_mountpoint(data['mountpoint'], data['pool'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
|
34724
|
import abc
# Component interface: declares the buscador method, which searches for a PaginaWeb inside the SitioWeb
class ISitioWebComponent(metaclass=abc.ABCMeta):
@abc.abstractmethod
def buscador(self):
pass
# Concrete Component
class SitioWebConcreteComponent(ISitioWebComponent):
def __init__(self, dominio: str,categoria: str, paginas: list):
self._dominio = dominio
self._categoria = categoria
self._paginas = paginas
def __str__(self):
return f"""
El dominio del sitio es: {self._dominio}
La categoria del sitio es: {self._categoria}
Las paginas del sitio son: {self._paginas}
"""
def buscador(self):
return f"Pagina no buscada"
# Base Decorator
class SitioWebDecorator(ISitioWebComponent, metaclass=abc.ABCMeta):
def __init__(self,sitio_web: ISitioWebComponent):
self._sitio_web = sitio_web
@abc.abstractmethod
def buscador(self):
pass
# Concrete Decorator: A
class BuscadorConcreteDecorator(SitioWebDecorator):
    # The buscador logic: it receives a PaginaWeb object and uses its url (unique per page)
    # to compare against the urls of the pages stored inside the SitioWeb. If a matching url
    # is found, it returns a string with the page data and a message saying it exists;
    # otherwise it returns an error message.
    def buscador(self, pagina: object):
        for pag in self._sitio_web._paginas:
            if pagina._url == pag._url:
                return f"La pagina: {pag}\nsi Existe"
        return f"ERROR-HTTP 404 page Not found"
# PaginaWeb class, the same one from Ejercicio_1
class PaginaWeb(object):
def __init__(self,url: str, ruta: str, formato: str,contenido: str,titulo: str,slug: str,metatags: list):
self._url = url
self._ruta = ruta
self._formato = formato
self._contenido = contenido
self._titulo = titulo
self._slug = slug
self._metatags = metatags
def __str__(self):
return f"""
El url de la pagina es: {self._url}
La ruta del archivo es:{self._ruta}
El formato del archivo es: {self._formato}
El contenido de la pagina es: {self._contenido}
El titulo de la pagina es: {self._titulo}
El slug de la pagina es: {self._slug}
Los meta-tags de la pagina son: {self._metatags}
"""
def main():
    # Build the PaginaWeb and SitioWeb objects
pagina1 = PaginaWeb("https://www.youtube.com/watch?v=dQw4w9WgXcQ",
"C://User/youtube/user",
"HTML",
"<body> <p> hola soy una pagina de youtube 1 </p></body>",
"<h1>Youtube 1</h1>",
"youtube-1",
['<meta name = "description" content = "this is the description">',
'<meta http-equiv = "refresh" content = "100"'])
pagina2 = PaginaWeb("https://www.youtube.com/watch?v=r1lEc1w92RE",
"C://User/youtube/user",
"HTML",
"<body> <p> hola soy una pagina de youtube 2 </p></body>",
"<h1>Youtube 2</h1>",
"youtube-2",
['<meta name = "description" content = "this is the description">',
'<meta http-equiv = "refresh" content = "100"'])
pagina3 = PaginaWeb("https://www.youtube.com/watch?v=8OJf0-r7sZ0",
"C://User/youtube/user",
"HTML",
"<body> <p> hola soy una pagina de youtube 3 </p></body>",
"<h1>Youtube 3</h1>",
"youtube-3",
['<meta name = "description" content = "this is the description">',
'<meta http-equiv = "refresh" content = "100"'])
sitio = SitioWebConcreteComponent("www.youtube.com","Entretenimiento",[pagina1,pagina2])
    # Create a decorator object and hand it our SitioWeb
buscar = BuscadorConcreteDecorator(sitio)
    # Then call buscador with a page, store its return value in a variable and print it
resultado = buscar.buscador(pagina2)
print(resultado)
if __name__ == '__main__':
main()
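# Optional extra demo (not part of the original exercise): searching for a page that was
# never added to the site exercises the decorator's not-found branch. Call
# demo_pagina_no_encontrada() to see the ERROR-HTTP 404 message.
def demo_pagina_no_encontrada():
    pagina = PaginaWeb("https://www.youtube.com/watch?v=unknown",
                       "C://User/youtube/user",
                       "HTML",
                       "<body></body>",
                       "<h1>Unknown</h1>",
                       "unknown",
                       [])
    sitio = SitioWebConcreteComponent("www.youtube.com", "Entretenimiento", [])
    buscar = BuscadorConcreteDecorator(sitio)
    print(buscar.buscador(pagina))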
|
34745
|
from leapp.actors import Actor
from leapp.models import InstalledRedHatSignedRPM
from leapp.libraries.common.rpms import has_package
from leapp.reporting import Report, create_report
from leapp import reporting
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckAcpid(Actor):
"""
Check if acpid is installed. If yes, write information about non-compatible changes.
"""
name = 'checkacpid'
consumes = (InstalledRedHatSignedRPM,)
produces = (Report,)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
if has_package(InstalledRedHatSignedRPM, 'acpid'):
create_report([
reporting.Title('Acpid incompatible changes in the next major version'),
reporting.Summary('The option -d (debug) no longer implies -f (foreground).'),
reporting.Severity(reporting.Severity.LOW),
reporting.Remediation(
hint='You must now use both options (\'-df\') for the same behavior. Please update '
'your scripts to be compatible with the changes.'),
reporting.Tags([reporting.Tags.KERNEL, reporting.Tags.SERVICES]),
reporting.RelatedResource('package', 'acpid')
])
|
34747
|
import os.path
import scipy.io as sio
import numpy as np # for algebraic operations, matrices
import keras.models
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout # , Layer, Flatten
# from keras.layers import containers
from keras.models import model_from_json, Model, load_model
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from hyperas.distributions import choice, uniform, conditional
from hyperopt import Trials, STATUS_OK
from sklearn.metrics import confusion_matrix
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D as pool2
from keras.callbacks import EarlyStopping,ModelCheckpoint
# from keras.layers.convolutional import ZeroPadding2D as zero2d
from keras.regularizers import l2 # , activity_l2
# from theano import function
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.optimizers import SGD
from keras.layers.merge import concatenate
from keras.layers import Input,add
from keras.layers.advanced_activations import PReLU,ELU
from keras.layers.pooling import GlobalAveragePooling2D
#temp/Inception-ResNet for 180180
def create180180Model(patchSize):
seed=5
np.random.seed(seed)
input=Input(shape=(1,patchSize[0, 0], patchSize[0, 1]))
out1=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='valid',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(input)
out2=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='valid',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out1)
out2=pool2(pool_size=(2,2),data_format='channels_first')(out2)
out3=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out2)
out4=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out3)
out4=add([out2,out4])
out4=pool2(pool_size=(2,2),data_format='channels_first')(out4)
out_3=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out4)
out_4=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_3)
out5_1=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
out5_2=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
out5_2=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_2)
out5_3=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
out5_3=Conv2D(filters=128,kernel_size=(5,5),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_3)
out5_4=pool2(pool_size=(3,3),strides=(1,1),padding='same',data_format='channels_first')(out_4)
out5_4=Conv2D(filters=128,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_4)
out5=concatenate(inputs=[out5_1,out5_2,out5_3],axis=1)
out7=Conv2D(filters=288,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
activation='relu')(out5)
out7=add([out5, out7])
out7=pool2(pool_size=(2,2),data_format='channels_first')(out7)
sout7=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
activation='relu')(out7)
out8=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
activation='relu')(out7)
out9=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
activation='relu')(out8)
out9=add([sout7, out9])
out9=pool2(pool_size=(2,2),data_format='channels_first')(out9)
out10=Flatten()(out9)
out11=Dense(units=11,
kernel_initializer='normal',
kernel_regularizer='l2',
activation='softmax')(out10)
cnn = Model(inputs=input,outputs=out11)
return cnn
def fTrain(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSizes=None, learningRates=None, iEpochs=None):
# grid search on batch_sizes and learning rates
# parse inputs
    batchSizes = [64] if batchSizes is None else batchSizes
    learningRates = [0.01] if learningRates is None else learningRates
iEpochs = 300 if iEpochs is None else iEpochs
for iBatch in batchSizes:
for iLearn in learningRates:
fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, iBatch, iLearn, iEpochs)
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None, iEpochs=None):
# parse inputs
batchSize = 64 if batchSize is None else batchSize
learningRate = 0.01 if learningRate is None else learningRate
iEpochs = 300 if iEpochs is None else iEpochs
print('Training CNN InceptionNet')
print('with lr = ' + str(learningRate) + ' , batchSize = ' + str(batchSize))
# save names
_, sPath = os.path.splitdrive(sOutPath)
sPath, sFilename = os.path.split(sPath)
sFilename, sExt = os.path.splitext(sFilename)
model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_lr_' + str(
learningRate) + '_bs_' + str(batchSize)
weight_name = model_name + '_weights.h5'
model_json = model_name + '_json'
model_all = model_name + '_model.h5'
model_mat = model_name + '.mat'
if (os.path.isfile(model_mat)): # no training if output file exists
return
# create model
    if patchSize[0, 0] != 180 or patchSize[0, 1] != 180:
        print('No model for patch size ' + str(patchSize[0, 0]) + 'x' + str(patchSize[0, 1]))
        return
    cnn = create180180Model(patchSize)
# opti = SGD(lr=learningRate, momentum=1e-8, decay=0.1, nesterov=True);#Adag(lr=0.01, epsilon=1e-06)
opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
callbacks = [EarlyStopping(monitor='val_loss', patience=20, verbose=1), ModelCheckpoint(filepath=model_name+'bestweights.hdf5',monitor='val_acc',verbose=0,save_best_only=True,save_weights_only=False)]
#callbacks = [ModelCheckpoint(filepath=model_name+'bestweights.hdf5',monitor='val_acc',verbose=0,save_best_only=True,save_weights_only=False)]
cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
cnn.summary()
result = cnn.fit(X_train,
y_train,
validation_data=[X_test, y_test],
epochs=iEpochs,
batch_size=batchSize,
callbacks=callbacks,
verbose=1)
score_test, acc_test = cnn.evaluate(X_test, y_test, batch_size=batchSize )
prob_test = cnn.predict(X_test, batchSize, 0)
y_pred=np.argmax(prob_test,axis=1)
y_test=np.argmax(y_test,axis=1)
confusion_mat=confusion_matrix(y_test,y_pred)
# save model
json_string = cnn.to_json()
open(model_json, 'w').write(json_string)
# wei = cnn.get_weights()
cnn.save_weights(weight_name, overwrite=True)
# cnn.save(model_all) # keras > v0.7
# matlab
acc = result.history['acc']
loss = result.history['loss']
val_acc = result.history['val_acc']
val_loss = result.history['val_loss']
print('Saving results: ' + model_name)
sio.savemat(model_name, {'model_settings': model_json,
'model': model_all,
'weights': weight_name,
'acc': acc,
'loss': loss,
'val_acc': val_acc,
'val_loss': val_loss,
'score_test': score_test,
'acc_test': acc_test,
'prob_test': prob_test,
'confusion_mat':confusion_mat})
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
weight_name = model_name[0]
#model_json = model_name[1] + '_json'
#model_all = model_name[0] + '.hdf5'
_, sPath = os.path.splitdrive(sOutPath)
sPath, sFilename = os.path.split(sOutPath)
#sFilename, sExt = os.path.splitext(sFilename)
#f = h5py.File(weight_name, 'r+')
#del f['optimizer_weights']
#f.close()
model=load_model(weight_name)
opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]
#model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
#model.load_weights(weight_name)
model.summary();
score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
prob_pre = model.predict(X_test, batchSize, 0)
y_pred=np.argmax(prob_pre,axis=1)
y_test=np.argmax(y_test,axis=1)
confusion_mat=confusion_matrix(y_test,y_pred)
# modelSave = model_name[:-5] + '_pred.mat'
modelSave = sOutPath + '/' + sFilename + '_result.mat'
sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test, 'confusion_mat':confusion_mat})
|
34749
|
from twarc import Twarc2, expansions
import json
# Replace your bearer token below
client = Twarc2(bearer_token="<PASSWORD>")
def main():
# The followers function gets followers for specified user
followers = client.followers(user="twitterdev")
for page in followers:
result = expansions.flatten(page)
for user in result:
# Here we are printing the full Tweet object JSON to the console
print(json.dumps(user))
if __name__ == "__main__":
main()
|
34760
|
import re
class AlphabetPosition:
alphabet = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5,
'f': 6,
'g': 7,
'h': 8,
'i': 9,
'j': 10,
'k': 11,
'l': 12,
'm': 13,
'n': 14,
'o': 15,
'p': 16,
'q': 17,
'r': 18,
's': 19,
't': 20,
'u': 21,
'v': 22,
'w': 23,
'x': 24,
'y': 25,
'z': 26,
}
def find_position(self, sentence: str):
# Convert all letters to lowercase
sentence = sentence.lower()
        # Remove all spaces
sentence = sentence.replace(" ", "")
# Extract only letters
characters = ''.join(re.findall("[a-zA-Z]+", sentence))
# Make string into list of characters
characters = list(characters)
# Initiate an empty list to save all positions of the characters in
positions = []
        # Iterate through each character, look up its position in the alphabet
        # and append that position to the list
for character in characters:
positions.append(self.alphabet.get(character))
# Convert list of integers to single string
return ' '.join(map(str, positions))
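# Small usage example (not part of the original class); the output follows directly from
# the mapping above, and non-letters are ignored:
if __name__ == '__main__':
    finder = AlphabetPosition()
    print(finder.find_position("The sunset sets at twelve o' clock."))
    # -> 20 8 5 19 21 14 19 5 20 19 5 20 19 1 20 20 23 5 12 22 5 15 3 12 15 3 11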
|
34762
|
from utils import (load_data, data_to_series_features,
apply_weight, is_minimum)
from algorithm import (initialize_weights, individual_to_key,
pop_to_weights, select, reconstruct_population)
from sklearn.metrics import mean_squared_error, mean_absolute_error
from tensorflow.keras import optimizers
from tensorflow.keras.models import clone_model
import argparse
import math
import numpy as np
from model import make_model
from copy import copy
from sklearn.model_selection import train_test_split
def parse_arguments():
# argument parsing
parser = argparse.ArgumentParser(description="Specify Params for Experimental Setting")
parser.add_argument('--iterations', type=int, default=20,
help="Specify the number of evolution iterations")
parser.add_argument('--batch_size', type=int, default=256,
help="Specify batch size")
parser.add_argument('--initial_epochs', type=int, default=100,
help="Specify the number of epochs for initial training")
parser.add_argument('--num_epochs', type=int, default=20,
help="Specify the number of epochs for competitive search")
parser.add_argument('--log_step', type=int, default=100,
help="Specify log step size for training")
parser.add_argument('--learning_rate', type=float, default=1e-3,
help="Learning rate")
parser.add_argument('--data', type=str, default='pollution.csv',
help="Path to the dataset")
parser.add_argument('--pop_size', type=int, default=36)
parser.add_argument('--code_length', type=int, default=6)
parser.add_argument('--n_select', type=int, default=6)
parser.add_argument('--time_steps', type=int, default=18)
parser.add_argument('--n_hidden', type=int, default=128)
parser.add_argument('--n_output', type=int, default=1)
parser.add_argument('--max_grad_norm', type=float, default=1.0)
return parser.parse_args()
def main():
args = parse_arguments()
data, y_scaler = load_data(args.data)
args.n_features = np.size(data, axis=-1)
X, y = data_to_series_features(data, args.time_steps)
train_X, X, train_y, y = train_test_split(X, y, test_size=0.3)
valid_X, test_X, valid_y, test_y = train_test_split(X, y, test_size=0.5)
optimizer = optimizers.Adam(learning_rate=args.learning_rate, clipnorm=args.max_grad_norm)
best_model = make_model(args)
best_weight = [1.0] * args.time_steps
best_model.compile(loss='mse', optimizer=optimizer)
print("Initial training before competitive random search")
best_model.fit(apply_weight(train_X, best_weight), train_y, epochs=args.initial_epochs,
validation_data=(apply_weight(valid_X, best_weight), valid_y), shuffle=True)
print("\nInitial training is done. Start competitive random search.\n")
pop, weights = initialize_weights(args.pop_size, args.time_steps, args.code_length)
key_to_rmse = {}
for iteration in range(args.iterations):
for enum, (indiv, weight) in enumerate(zip(pop, weights)):
print('iteration: [%d/%d] indiv_no: [%d/%d]' % (iteration + 1, args.iterations, enum + 1, args.pop_size))
key = individual_to_key(indiv)
if key not in key_to_rmse.keys():
model = make_model(args)
model.compile(loss='mse', optimizer=optimizer)
model.set_weights(best_model.get_weights())
model.fit(apply_weight(train_X, weight), train_y, epochs=args.num_epochs,
validation_data=(apply_weight(valid_X, weight), valid_y), shuffle=True)
pred_y = model.predict(apply_weight(valid_X, weight))
inv_pred_y = y_scaler.inverse_transform(pred_y)
inv_valid_y = y_scaler.inverse_transform(np.expand_dims(valid_y, axis=1))
rmse = math.sqrt(mean_squared_error(inv_valid_y, inv_pred_y))
mae = mean_absolute_error(inv_valid_y, inv_pred_y)
print("RMSE: %.4f, MAE: %.4f" % (rmse, mae))
if is_minimum(rmse, key_to_rmse):
best_model.set_weights(model.get_weights())
best_weight = copy(weight)
key_to_rmse[key] = rmse
pop_selected, fitness_selected = select(pop, args.n_select, key_to_rmse)
pop = reconstruct_population(pop_selected, args.pop_size)
weights = pop_to_weights(pop, args.time_steps, args.code_length)
print('test evaluation:')
pred_y = best_model.predict(apply_weight(test_X, best_weight))
inv_pred_y = y_scaler.inverse_transform(pred_y)
inv_test_y = y_scaler.inverse_transform(np.expand_dims(test_y, axis=1))
rmse = math.sqrt(mean_squared_error(inv_test_y, inv_pred_y))
mae = mean_absolute_error(inv_test_y, inv_pred_y)
print("RMSE: %.4f, MAE: %.4f" % (rmse, mae))
if __name__ == '__main__':
main()
|
34789
|
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
# create ear, arduino and servo
ear = Runtime.createAndStart("ear","Sphinx")
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM4")
servo = Runtime.createAndStart("servo","Servo")
servo.attach(arduino, 10)
# start listening for the words we are interested in
ear.startListening("go forward|go backwards|stop")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass());
# this method is invoked when something is
# recognized by the ear - in this case we
# move the servo according to the recognized command
def heard(phrase):
print("I heard ", phrase)
if phrase == "go forward":
servo.moveTo(170)
elif phrase == "go backwards":
servo.moveTo(10)
elif phrase == "stop":
servo.moveTo(90)
|
34793
|
import numpy as np
import dace as dc
M, N = (dc.symbol(s, dtype=dc.int64) for s in ('M', 'N'))
@dc.program
def flip(A: dc.float64[M]):
B = np.ndarray((M, ), dtype=np.float64)
for i in dc.map[0:M]:
B[i] = A[M - 1 - i]
return B
@dc.program
def kernel(r: dc.float64[N]):
y = np.empty_like(r)
alpha = -r[0]
beta = 1.0
y[0] = -r[0]
for k in range(1, N):
beta *= 1.0 - alpha * alpha
alpha = -(r[k] + np.dot(flip(r[:k]), y[:k])) / beta
y[:k] += alpha * flip(y[:k])
y[k] = alpha
return y
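# For reference (an assumption: this mirrors the Polybench/NPBench 'durbin' kernel, i.e. the
# Levinson-Durbin recursion for a Toeplitz system), a plain NumPy version that can be used
# to sanity-check the DaCe program above:
def kernel_numpy(r):
    y = np.empty_like(r)
    alpha = -r[0]
    beta = 1.0
    y[0] = -r[0]
    for k in range(1, r.shape[0]):
        beta *= 1.0 - alpha * alpha
        # r[:k][::-1] plays the role of flip(r[:k]) in the DaCe version
        alpha = -(r[k] + np.dot(r[:k][::-1], y[:k])) / beta
        y[:k] += alpha * y[:k][::-1]
        y[k] = alpha
    return y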
|
34794
|
import starry
import numpy as np
import matplotlib.pyplot as plt
import pytest
@pytest.mark.parametrize("ydeg,nw", [[0, None], [0, 10], [1, None], [1, 10]])
def test_system(ydeg, nw):
# Oblate map
map = starry.Map(udeg=2, ydeg=ydeg, oblate=True, nw=nw)
map[1] = 0.5
map[2] = 0.25
map.omega = 0.5
map.beta = 1.23
map.tpole = 8000
map.f = 1 - 2 / (map.omega ** 2 + 2)
map.obl = 30
# Compute system flux
star = starry.Primary(map, r=1.5)
planet = starry.Secondary(starry.Map(amp=0, nw=nw), porb=1.0, r=0.1, m=0)
sys = starry.System(star, planet)
t = np.linspace(-0.1, 0.1, 1000)
flux_sys = sys.flux(t, integrated=True)
# Compute map flux manually
x, y, z = sys.position(t)
xo = x[1] / star._r
yo = y[1] / star._r
flux_map = map.flux(xo=xo, yo=yo, ro=planet._r / star._r, integrated=True)
# Check that they agree
assert np.allclose(flux_map, flux_sys)
|
34807
|
from electrum_gui.common.provider.chains.bch.provider import BCHProvider
from electrum_gui.common.provider.chains.btc.clients.blockbook import BlockBook
|
34821
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ImageConfig(AppConfig):
name = 'image'
|
34858
|
from rest_framework import viewsets, permissions, serializers
from rest_framework.response import Response
from iaso.models import MatchingAlgorithm
from .common import HasPermission
class AlgorithmsSerializer(serializers.ModelSerializer):
class Meta:
model = MatchingAlgorithm
fields = ["id", "name", "description", "created_at"]
read_only_fields = ["created_at"]
class AlgorithmsViewSet(viewsets.ModelViewSet):
"""Algorithms API
This API is restricted to authenticated users having the "menupermissions.iaso_links" permission
GET /api/algorithms/
"""
    permission_classes = [permissions.IsAuthenticated, HasPermission("menupermissions.iaso_links")]
serializer_class = AlgorithmsSerializer
http_method_names = ["get", "post", "put", "head", "options", "trace", "delete"]
def get_queryset(self):
algos = MatchingAlgorithm.objects.all()
return algos.order_by("id")
|
34896
|
import unittest
import numpy as np
import prml.nn as nn
class TestGaussian(unittest.TestCase):
def test_gaussian_draw_forward(self):
mu = nn.array(0)
sigma = nn.softplus(nn.array(-1))
gaussian = nn.Gaussian(mu, sigma)
sample = []
for _ in range(1000):
sample.append(gaussian.draw().value)
self.assertTrue(np.allclose(np.mean(sample), 0, rtol=0.1, atol=0.1), np.mean(sample))
self.assertTrue(np.allclose(np.std(sample), gaussian.std.value, 0.1, 0.1))
def test_gaussian_draw_backward(self):
mu = nn.array(0)
s = nn.array(2)
optimizer = nn.optimizer.Gradient({0: mu, 1: s}, 0.01)
prior = nn.Gaussian(1, 1)
for _ in range(1000):
mu.cleargrad()
s.cleargrad()
gaussian = nn.Gaussian(mu, nn.softplus(s))
gaussian.draw()
loss = nn.loss.kl_divergence(gaussian, prior).sum()
optimizer.minimize(loss)
self.assertTrue(np.allclose(gaussian.mean.value, 1, 0.1, 0.1))
self.assertTrue(np.allclose(gaussian.std.value, 1, 0.1, 0.1))
if __name__ == "__main__":
unittest.main()
|
34952
|
import gevent
from gevent.queue import Queue, Empty
from gevent_subprocess import Popen, PIPE
from gsh.plugin import BaseExecutor, BaseInnerExecutor
class SshExecutor(BaseExecutor):
def __init__(self, args, kwargs):
self.ssh_opts = kwargs.get("ssh_opts", [])
super(SshExecutor, self).__init__(args, kwargs)
class Executor(BaseInnerExecutor):
def __init__(self, *args, **kwargs):
self.names = {}
self._output_queue = Queue()
super(SshExecutor.Executor, self).__init__(*args, **kwargs)
@staticmethod
def _stream_fd(fd, queue):
for line in iter(fd.readline, b""):
queue.put_nowait((fd, line))
def _consume(self, queue):
while True:
try:
output = queue.get()
except Empty:
continue
# None is explicitly sent to shutdown the consumer
if output is None:
return
fd, line = output
self.update(self.hostname, self.names[fd], line)
def run(self):
_proc = Popen(
["ssh", "-no", "PasswordAuthentication=no"] + self.parent.ssh_opts + [self.hostname] + self.command,
stdout=PIPE, stderr=PIPE
)
self.names = {
_proc.stdout: "stdout",
_proc.stderr: "stderr",
}
out_worker = gevent.spawn(self._stream_fd, _proc.stdout, self._output_queue)
err_worker = gevent.spawn(self._stream_fd, _proc.stderr, self._output_queue)
waiter = gevent.spawn(_proc.wait)
consumer = gevent.spawn(self._consume, self._output_queue)
gevent.joinall([out_worker, err_worker, waiter], timeout=self.timeout)
# If we've made it here and the process hasn't completed we've timed out.
if _proc.poll() is None:
self._output_queue.put_nowait(
(_proc.stderr, "GSH: command timed out after %s second(s).\n" % self.timeout))
_proc.kill()
rc = _proc.wait()
self._output_queue.put_nowait(None)
consumer.join()
return rc
|
35028
|
import torch
import torch.nn as nn
from ..utils.torch import pack_forward
from .pooling import GatherLastLayer
class CharEncoder(nn.Module):
FORWARD_BACKWARD_AGGREGATION_METHODS = ["cat", "linear_sum"]
def __init__(
self,
char_embedding_dim,
hidden_size,
char_fw_bw_agg_method="cat",
bidirectional=True,
train_char_embeddings=True,
use_cuda=True,
):
if char_fw_bw_agg_method not in self.FORWARD_BACKWARD_AGGREGATION_METHODS:
raise ValueError(
f"{char_fw_bw_agg_method} not recognized, try with one of "
f"{self.FORWARD_BACKWARD_AGGREGATION_METHODS}"
)
super(CharEncoder, self).__init__()
self.char_embedding_dim = char_embedding_dim
self.n_layers = 1
self.char_hidden_dim = hidden_size
self.bidirectional = bidirectional
self.num_dirs = 2 if bidirectional else 1
self.hidden_x_dirs = self.num_dirs * self.char_hidden_dim
self.use_cuda = use_cuda
self.char_lstm = nn.LSTM(
self.char_embedding_dim,
self.char_hidden_dim,
self.n_layers,
bidirectional=self.bidirectional,
dropout=0.0,
)
self.gather_last = GatherLastLayer(
self.char_hidden_dim, bidirectional=self.bidirectional
)
self.char_fw_bw_agg_method = char_fw_bw_agg_method
if self.char_fw_bw_agg_method == "cat":
self.out_dim = self.hidden_x_dirs
elif self.char_fw_bw_agg_method == "linear_sum":
self.out_dim = self.char_hidden_dim
self.linear_layer = nn.Linear(
self.hidden_x_dirs, self.char_hidden_dim
)
def forward(self, char_batch, word_lengths):
"""char_batch: (batch_size, seq_len, word_len, char_emb_dim)
word_lengths: (batch_size, seq_len)"""
(batch_size, seq_len, word_len, char_emb_dim) = char_batch.size()
# (batch_size * seq_len, word_len, char_emb_dim)
char_batch = char_batch.view(batch_size * seq_len, word_len, char_emb_dim)
# (batch_size, seq_len) -> (batch_size * seq_len)
word_lengths = word_lengths.view(batch_size * seq_len)
# (batch_size * seq_len, word_len, hidden_x_dirs)
word_lvl_repr = pack_forward(self.char_lstm, char_batch, word_lengths)
# (batch_size * seq_len, hidden_x_dirs)
word_lvl_repr = self.gather_last(word_lvl_repr, lengths=word_lengths)
# last dimension of gather_last will always correspond to concatenated
# last hidden states of lstm if bidirectional
# (batch_size, seq_len, hidden_x_dirs)
word_lvl_repr = word_lvl_repr.view(
batch_size, seq_len, self.hidden_x_dirs
)
# We store this tensor for future introspection
self.concat_fw_bw_reprs = word_lvl_repr.clone()
if self.char_fw_bw_agg_method == "linear_sum":
# Based on the paper: http://www.anthology.aclweb.org/D/D16/D16-1209.pdf
# Line below is W*word_lvl_repr + b which is equivalent to
# [W_f; W_b] * [h_f;h_b] + b which in turn is equivalent to
# W_f * h_f + W_b * h_b + b
word_lvl_repr = self.linear_layer(word_lvl_repr)
return word_lvl_repr
class LinearAggregationLayer(nn.Module):
def __init__(self, in_dim):
"""
Simply concatenate the provided tensors on their last dimension
which needs to have the same size, along with their
element-wise multiplication and difference
Taken from the paper:
"Learning Natural Language Inference using Bidirectional
LSTM model and Inner-Attention"
https://arxiv.org/abs/1605.09090
"""
super(LinearAggregationLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = 4 * in_dim
def forward(self, input_1, input_2):
"""
:param : input_1
Size is (*, hidden_size)
:param input_2:
Size is (*, hidden_size)
:return:
Merged vectors, size is (*, 4*hidden size)
"""
assert input_1.size(-1) == input_2.size(-1)
mult_combined_vec = torch.mul(input_1, input_2)
diff_combined_vec = torch.abs(input_1 - input_2)
# cosine_sim = simple_columnwise_cosine_similarity(input_1, input_2)
# cosine_sim = cosine_sim.unsqueeze(1)
# euclidean_dist = distance(input_1, input_2, 2)
combined_vec = torch.cat(
(input_1, input_2, mult_combined_vec, diff_combined_vec),
input_1.dim() - 1,
)
return combined_vec
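# A minimal shape-check sketch for LinearAggregationLayer (illustrative only; the sizes are
# arbitrary assumptions, not values used elsewhere in this package):
def _linear_aggregation_example():
    layer = LinearAggregationLayer(in_dim=8)
    input_1 = torch.randn(4, 8)
    input_2 = torch.randn(4, 8)
    combined = layer(input_1, input_2)
    # (input_1, input_2, element-wise product, absolute difference) concatenated on the last dim
    assert combined.shape == (4, 4 * 8)
    return combined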
|
35050
|
import sys
if len(sys.argv) < 2:
print('Need the dataset name!')
exit(0)
for split in ['train', 'valid', 'test']:
with open('data/'+sys.argv[1]+'/'+split+'.txt', 'r') as f1, open(
'data/'+sys.argv[1]+'_pos_only/'+split+'.txt', 'r') as f2, open(
'data/'+sys.argv[1]+'_pos/'+split+'.txt', 'w') as fout:
for i, (line, pline) in enumerate(zip(f1,f2)):
if line.strip().split(' ')[0] == '': # empty lines in wiki
fout.write(line)
continue
line = line.strip().split(' ')
pline = pline.strip().split(' ')
line = [w+'_'+p for w, p in zip(line, pline)]
fout.write(' '.join(line)+' \n')
|
35060
|
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from datetime import datetime
import torch.cuda
from ..viz.plot import plot_timeline
def time(name=None, sync=False):
return Task(name=name, sync=sync, log=True)
class Task:
__slots__ = ('name', 'start_time', 'end_time', 'meta', 'sync', 'log')
def __init__(self, name=None, start=None, end=None, meta=None, sync=False, log=False):
self.name = name
self.start_time = start
self.end_time = end
self.meta = meta or {}
self.sync = sync
self.log = log
def start(self, time=None, meta=None, sync=None):
if meta:
self.meta.update(meta)
sync = sync if sync is not None else self.sync
if sync and torch.cuda.is_available():
torch.cuda.synchronize()
self.start_time = time or datetime.now()
if self.log:
print(f'starting {self.name or id(self)}')
def end(self, time=None, meta=None, sync=None):
sync = sync if sync is not None else self.sync
if sync and torch.cuda.is_available():
torch.cuda.synchronize()
self.end_time = time or datetime.now()
if self.log:
print(f'completed {self.name or id(self)} in {self.seconds:.9g} seconds')
if meta:
self.meta.update(meta)
@classmethod
def begin(cls, name=None, meta=None, sync=None):
t = cls(name=name, meta=meta, sync=sync)
t.start()
return t
@property
def seconds(self):
if self.start_time is None or self.end_time is None:
return None
return (self.end_time - self.start_time).total_seconds()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.end()
def __repr__(self):
return f"Task({self.name or id(self)}, seconds={self.seconds:.9g}, sync={self.sync})"
class Timer:
def __init__(self, name=None, log=False):
self.tasks = []
self.name = name
self.log = log
self.active_tasks = {}
    def start(self, name, sync=True, **meta):
        if name in self.active_tasks:
            raise ValueError(f'Nesting tasks is not allowed, "{name}" was already started and not finished')
        task = self.task(name, sync=sync, **meta)
        if self.log: print('Started', name)
        self.active_tasks[name] = task
def end(self, name, sync=True, **meta):
        task = self.active_tasks.pop(name, None)
if not task:
raise ValueError(f"{name} is not an active task so can't be ended")
task.end(sync=sync, meta=meta)
if self.log:
print('Ended', task.name, ', took', task.seconds, 'seconds')
def task(self, name, sync=True, **meta):
task = Task.begin(name=name, meta=meta, sync=sync)
self.tasks.append(task)
return task
def __enter__(self):
self.start(self.name)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.end(self.name)
def plot(self):
plot_timeline(self.tasks)
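# Usage sketch (not part of the original module; the task names are illustrative):
def _timer_example():
    timer = Timer(name='train_step', log=True)
    timer.start('forward', sync=False)
    # ... run the forward pass ...
    timer.end('forward', sync=False)
    timer.start('backward', sync=False)
    # ... run the backward pass ...
    timer.end('backward', sync=False)
    # timer.plot() would render the collected tasks on a timeline
    return timer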
|
35084
|
class Colors:
END = '\033[0m'
ERROR = '\033[91m[ERROR] '
INFO = '\033[94m[INFO] '
WARN = '\033[93m[WARN] '
def get_color(msg_type):
if msg_type == 'ERROR':
return Colors.ERROR
elif msg_type == 'INFO':
return Colors.INFO
elif msg_type == 'WARN':
return Colors.WARN
else:
return Colors.END
def get_msg(msg, msg_type=None):
color = get_color(msg_type)
msg = ''.join([color, msg, Colors.END])
return msg
def print_msg(msg, msg_type=None):
msg = get_msg(msg, msg_type)
print(msg)
|
35094
|
from __future__ import division
"""
critical properties of diffBragg objects which should be logged for reproducibility
"""
# TODO : implement a savestate and getstate for these objects
# attrs of diffBragg() instances
DIFFBRAGG_ATTRS = [
'Amatrix',
'Bmatrix',
'Ncells_abc',
'Ncells_abc_aniso',
'Ncells_def',
'Npix_to_allocate',
'Omatrix',
'Umatrix',
'beamsize_mm',
'compute_curvatures',
'default_F',
'detector_thick_mm',
'detector_thickstep_mm',
'detector_thicksteps',
'detector_twotheta_deg',
'device_Id',
'diffuse_gamma',
'diffuse_sigma',
'exposure_s',
'fluence',
'flux',
'has_anisotropic_mosaic_spread',
'interpolate',
'isotropic_ncells',
'lambda_coefficients',
'mosaic_domains',
'mosaic_spread_deg',
'no_Nabc_scale',
'nopolar',
'only_diffuse',
'only_save_omega_kahn',
'oversample',
'oversample_omega',
'phi_deg',
'phistep_deg',
'phisteps',
'point_pixel',
'polar_vector',
'polarization',
'spindle_axis',
'spot_scale',
'twotheta_axis',
'unit_cell_Adeg',
'unit_cell_tuple',
'use_diffuse',
'use_lambda_coefficients']
# properties of nanoBragg_crystal.NBcryst instances
NB_CRYST_ATTRS = [
'anisotropic_mos_spread_deg',
'isotropic_ncells',
'miller_is_complex',
'mos_spread_deg',
'n_mos_domains',
'symbol',
'xtal_shape']
# properties of nanoBragg_beam.NBbeam instances
NB_BEAM_ATTRS = [
'divergence',
'polarization_fraction',
'size_mm',
'number_of_sources',
'unit_s0']
|
35095
|
from allennlp.data.fields import Field
def test_eq_with_inheritance():
class SubField(Field):
__slots__ = ["a"]
def __init__(self, a):
self.a = a
class SubSubField(SubField):
__slots__ = ["b"]
def __init__(self, a, b):
super().__init__(a)
self.b = b
class SubSubSubField(SubSubField):
__slots__ = ["c"]
def __init__(self, a, b, c):
super().__init__(a, b)
self.c = c
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
assert SubSubField(1, 2) == SubSubField(1, 2)
assert SubSubField(1, 2) != SubSubField(1, 1)
assert SubSubField(1, 2) != SubSubField(2, 2)
assert SubSubSubField(1, 2, 3) == SubSubSubField(1, 2, 3)
assert SubSubSubField(1, 2, 3) != SubSubSubField(0, 2, 3)
def test_eq_with_inheritance_for_non_slots_field():
class SubField(Field):
def __init__(self, a):
self.a = a
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
def test_eq_with_inheritance_for_mixed_field():
class SubField(Field):
__slots__ = ["a"]
def __init__(self, a):
self.a = a
class SubSubField(SubField):
def __init__(self, a, b):
super().__init__(a)
self.b = b
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
assert SubSubField(1, 2) == SubSubField(1, 2)
assert SubSubField(1, 2) != SubSubField(1, 1)
assert SubSubField(1, 2) != SubSubField(2, 2)
|
35124
|
from ..helpers import IFPTestCase
from intficpy.things import Thing, Container, Liquid
class TestDropVerb(IFPTestCase):
def test_verb_func_drops_item(self):
item = Thing(self.game, self._get_unique_noun())
item.invItem = True
self.me.addThing(item)
self.assertIn(item.ix, self.me.contains)
self.assertEqual(len(self.me.contains[item.ix]), 1)
self.assertIn(item, self.me.contains[item.ix])
self.game.turnMain(f"drop {item.verbose_name}")
self.assertItemNotIn(
item, self.me.contains, "Dropped item, but item still in inventory"
)
def test_drop_item_not_in_inv(self):
item = Thing(self.game, "shoe")
item.invItem = True
self.start_room.addThing(item)
self.assertFalse(self.me.containsItem(item))
self.game.turnMain(f"drop {item.verbose_name}")
self.assertIn("You are not holding", self.app.print_stack.pop())
def test_drop_liquid_in_container(self):
cup = Container(self.game, "cup")
water = Liquid(self.game, "water", "water")
water.moveTo(cup)
cup.moveTo(self.me)
self.game.turnMain("drop water")
self.assertIn("You drop the cup", self.app.print_stack.pop())
self.assertFalse(self.game.me.containsItem(cup))
self.assertTrue(cup.containsItem(water))
def test_drop_composite_child(self):
machine = Thing(self.game, "machine")
wheel = Thing(self.game, "wheel")
machine.addComposite(wheel)
machine.moveTo(self.me)
self.game.turnMain("drop wheel")
self.assertIn("wheel is attached to the machine", self.app.print_stack.pop())
self.assertTrue(self.me.containsItem(wheel))
|
35160
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from enum import Enum
class Type(Enum):
STRING = 'string'
NUMBER = 'number'
BOOLEAN = 'boolean'
DATE = 'date'
DATETIME = 'datetime'
TIMEOFDAY = 'timeofday'
|
35205
|
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import time
from datetime import timedelta
import os
# Importing a helper module for the functions of the Inception model.
import inception
import cifar10
from cifar10 import num_classes
from inception import transfer_values_cache
#Importing the color map for plotting each class with different color.
import matplotlib.cm as color_map
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
cifar10.data_path = "data/CIFAR-10/"
cifar10.maybe_download_and_extract()
class_names = cifar10.load_class_names()
print(class_names)
print('Loading the training set...')
training_images, training_cls_integers, trainig_one_hot_labels = cifar10.load_training_data()
print('Loading the test set...')
testing_images, testing_cls_integers, testing_one_hot_labels = cifar10.load_test_data()
print("-Number of images in the training set:\t\t{}".format(len(training_images)))
print("-Number of images in the testing set:\t\t{}".format(len(testing_images)))
def plot_imgs(imgs, true_class, predicted_class=None):
assert len(imgs) == len(true_class)
    # Creating placeholders for 9 subplots
fig, axes = plt.subplots(3, 3)
    # Adjusting spacing.
if predicted_class is None:
hspace = 0.3
else:
hspace = 0.6
fig.subplots_adjust(hspace=hspace, wspace=0.3)
for i, ax in enumerate(axes.flat):
# There may be less than 9 images, ensure it doesn't crash.
if i < len(imgs):
# Plot image.
ax.imshow(imgs[i],
interpolation='nearest')
# Get the actual name of the true class from the class_names array
true_class_name = class_names[true_class[i]]
# Showing labels for the predicted and true classes
if predicted_class is None:
xlabel = "True: {0}".format(true_class_name)
else:
# Name of the predicted class.
predicted_class_name = class_names[predicted_class[i]]
xlabel = "True: {0}\nPred: {1}".format(true_class_name, predicted_class_name)
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# get the first 9 images in the test set
imgs = testing_images[0:9]
# Get the integer representation of the true class.
true_class = testing_cls_integers[0:9]
# Plotting the images
plot_imgs(imgs=imgs, true_class=true_class)
print('Downloading the pretrained inception v3 model')
inception.maybe_download()
# Loading the Inception model so that we can initialize it with the pretrained weights and customize it for our model
inception_model = inception.Inception()
file_path_train = os.path.join(cifar10.data_path, 'inception_cifar10_train.pkl')
file_path_test = os.path.join(cifar10.data_path, 'inception_cifar10_test.pkl')
print("Processing Inception transfer-values for the training images of Cifar-10 ...")
# First we need to scale the imgs to fit the Inception model requirements as it requires all pixels to be from 0 to 255,
# while our training examples of the CIFAR-10 pixels are between 0.0 and 1.0
imgs_scaled = training_images * 255.0
# Checking if the transfer-values for our training images are already calculated and loading them; if not, calculate and save them.
transfer_values_training = transfer_values_cache(cache_path=file_path_train,
images=imgs_scaled,
model=inception_model)
print("Processing Inception transfer-values for the testing images of Cifar-10 ...")
# First we need to scale the imgs to fit the Inception model requirements as it requires all pixels to be from 0 to 255,
# while our training examples of the CIFAR-10 pixels are between 0.0 and 1.0
imgs_scaled = testing_images * 255.0
# Checking if the transfer-values for our testing images are already calculated and loading them; if not, calculate and save them.
transfer_values_testing = transfer_values_cache(cache_path=file_path_test,
images=imgs_scaled,
model=inception_model)
print('Shape of the training set transfer values...')
print(transfer_values_training.shape)
print('Shape of the testing set transfer values...')
print(transfer_values_testing.shape)
def plot_transferValues(ind):
print("Original input image:")
# Plot the image at index ind of the test set.
plt.imshow(testing_images[ind], interpolation='nearest')
plt.show()
print("Transfer values using Inception model:")
# Visualize the transfer values as an image.
transferValues_img = transfer_values_testing[ind]
transferValues_img = transferValues_img.reshape((32, 64))
# Plotting the transfer values image.
plt.imshow(transferValues_img, interpolation='nearest', cmap='Reds')
plt.show()
plot_transferValues(ind=15)
pca_obj = PCA(n_components=2)
subset_transferValues = transfer_values_training[0:3000]
cls_integers = training_cls_integers[0:3000]
print('Shape of a subset form the transfer values...')
print(subset_transferValues.shape)
reduced_transferValues = pca_obj.fit_transform(subset_transferValues)
print('Shape of the reduced version of the transfer values...')
print(reduced_transferValues.shape)
def plot_reduced_transferValues(transferValues, cls_integers):
# Create a color-map with a different color for each class.
c_map = color_map.rainbow(np.linspace(0.0, 1.0, num_classes))
# Getting the color for each sample.
colors = c_map[cls_integers]
# Getting the x and y values.
x_val = transferValues[:, 0]
y_val = transferValues[:, 1]
# Plot the transfer values in a scatter plot
plt.scatter(x_val, y_val, color=colors)
plt.show()
plot_reduced_transferValues(reduced_transferValues, cls_integers)
pca_obj = PCA(n_components=50)
transferValues_50d = pca_obj.fit_transform(subset_transferValues)
tsne_obj = TSNE(n_components=2)
reduced_transferValues = tsne_obj.fit_transform(transferValues_50d)
print('Shape of the reduced version of the transfer values using t-SNE method...')
print(reduced_transferValues.shape)
plot_reduced_transferValues(reduced_transferValues, cls_integers)
transferValues_arrLength = inception_model.transfer_len
input_values = tf.placeholder(tf.float32, shape=[None, transferValues_arrLength], name='input_values')
y_actual = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_actual')
y_actual_cls = tf.argmax(y_actual, axis=1)
def new_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def new_biases(length):
return tf.Variable(tf.constant(0.05, shape=[length]))
def new_fc_layer(input, # The previous layer.
num_inputs, # Num. inputs from prev. layer.
num_outputs, # Num. outputs.
use_relu=True): # Use Rectified Linear Unit (ReLU)?
# Create new weights and biases.
weights = new_weights(shape=[num_inputs, num_outputs])
biases = new_biases(length=num_outputs)
# Calculate the layer as the matrix multiplication of
# the input and weights, and then add the bias-values.
layer = tf.matmul(input, weights) + biases
# Use ReLU?
if use_relu:
layer = tf.nn.relu(layer)
return layer
# First fully-connected layer.
layer_fc1 = new_fc_layer(input=input_values,
num_inputs=2048,
num_outputs=1024,
use_relu=True)
# Second fully-connected layer.
layer_fc2 = new_fc_layer(input=layer_fc1,
num_inputs=1024,
num_outputs=num_classes,
use_relu=False)
# Predicted class-label.
y_predicted = tf.nn.softmax(layer_fc2)
# Cross-entropy for the classification of each image.
cross_entropy = \
tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
labels=y_actual)
# Loss aka. cost-measure.
# This is the scalar value that must be minimized.
loss = tf.reduce_mean(cross_entropy)
step = tf.Variable(initial_value=0,
name='step', trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss, step)
y_predicted_cls = tf.argmax(y_predicted, axis=1)
correct_prediction = tf.equal(y_predicted_cls, y_actual_cls)
model_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.global_variables_initializer())
training_batch_size = 32
def select_random_batch():
# Number of images (transfer-values) in the training-set.
num_imgs = len(transfer_values_training)
# Create a random index.
ind = np.random.choice(num_imgs,
size=training_batch_size,
replace=False)
# Use the random index to select random x and y-values.
# We use the transfer-values instead of images as x-values.
x_batch = transfer_values_training[ind]
y_batch = trainig_one_hot_labels[ind]
return x_batch, y_batch
def optimize(num_iterations):
for i in range(num_iterations):
        # Selecting a random batch of images for training
# where the transfer values of the images will be stored in input_batch
# and the actual labels of those batch of images will be stored in y_actual_batch
input_batch, y_actual_batch = select_random_batch()
# storing the batch in a dict with the proper names
# such as the input placeholder variables that we define above.
feed_dict = {input_values: input_batch,
y_actual: y_actual_batch}
# Now we call the optimizer of this batch of images
# TensorFlow will automatically feed the values of the dict we created above
# to the model input placeholder variables that we defined above.
i_global, _ = session.run([step, optimizer],
feed_dict=feed_dict)
# print the accuracy every 100 steps.
if (i_global % 100 == 0) or (i == num_iterations - 1):
# Calculate the accuracy on the training-batch.
batch_accuracy = session.run(model_accuracy,
feed_dict=feed_dict)
msg = "Step: {0:>6}, Training Accuracy: {1:>6.1%}"
print(msg.format(i_global, batch_accuracy))
def plot_errors(cls_predicted, cls_correct):
# cls_predicted is an array of the predicted class-number for
# all images in the test-set.
# cls_correct is an array with boolean values to indicate
# whether is the model predicted the correct class or not.
# Negate the boolean array.
incorrect = (cls_correct == False)
# Get the images from the test-set that have been
# incorrectly classified.
incorrectly_classified_images = testing_images[incorrect]
# Get the predicted classes for those images.
cls_predicted = cls_predicted[incorrect]
# Get the true classes for those images.
true_class = testing_cls_integers[incorrect]
n = min(9, len(incorrectly_classified_images))
# Plot the first n images.
plot_imgs(imgs=incorrectly_classified_images[0:n],
true_class=true_class[0:n],
predicted_class=cls_predicted[0:n])
def plot_confusionMatrix(cls_predicted):
# cls_predicted array of all the predicted
# classes numbers in the test.
    # Call the confusion matrix function from sklearn
cm = confusion_matrix(y_true=testing_cls_integers,
y_pred=cls_predicted)
# Printing the confusion matrix
for i in range(num_classes):
# Append the class-name to each line.
class_name = "({}) {}".format(i, class_names[i])
print(cm[i, :], class_name)
# labeling each column of the confusion matrix with the class number
cls_numbers = [" ({0})".format(i) for i in range(num_classes)]
print("".join(cls_numbers))
# Split the data-set in batches of this size to limit RAM usage.
batch_size = 128
def predict_class(transferValues, labels, cls_true):
# Number of images.
num_imgs = len(transferValues)
# Allocate an array for the predicted classes which
# will be calculated in batches and filled into this array.
cls_predicted = np.zeros(shape=num_imgs, dtype=np.int)
# Now calculate the predicted classes for the batches.
# We will just iterate through all the batches.
# There might be a more clever and Pythonic way of doing this.
# The starting index for the next batch is denoted i.
i = 0
while i < num_imgs:
# The ending index for the next batch is denoted j.
j = min(i + batch_size, num_imgs)
# Create a feed-dict with the images and labels
# between index i and j.
feed_dict = {input_values: transferValues[i:j],
y_actual: labels[i:j]}
# Calculate the predicted class using TensorFlow.
cls_predicted[i:j] = session.run(y_predicted_cls, feed_dict=feed_dict)
# Set the start-index for the next batch to the
# end-index of the current batch.
i = j
# Create a boolean array whether each image is correctly classified.
correct = [a == p for a, p in zip(cls_true, cls_predicted)]
print(type(correct))
return correct, cls_predicted
def predict_class_test():
    return predict_class(transferValues = transfer_values_testing,
                         labels = testing_one_hot_labels,
                         cls_true = testing_cls_integers)
def classification_accuracy(correct):
# When averaging a boolean array, False means 0 and True means 1.
# So we are calculating: number of True / len(correct) which is
# the same as the classification accuracy.
# Return the classification accuracy
# and the number of correct classifications.
return np.mean(correct), np.sum(correct)
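# For example, np.mean([True, False, True, True]) is 0.75 and np.sum([True, False, True, True])
# is 3, i.e. the accuracy and the number of correct classifications.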
def test_accuracy(show_example_errors=False,
show_confusion_matrix=False):
# For all the images in the test-set,
# calculate the predicted classes and whether they are correct.
correct, cls_pred = predict_class_test()
print(type(correct))
    # Classification accuracy and the number of correct classifications.
accuracy, num_correct = classification_accuracy(correct)
# Number of images being classified.
num_images = len(correct)
# Print the accuracy.
msg = "Test set accuracy: {0:.1%} ({1} / {2})"
print(msg.format(accuracy, num_correct, num_images))
# Plot some examples of mis-classifications, if desired.
if show_example_errors:
print("Example errors:")
plot_errors(cls_predicted=cls_pred, cls_correct=correct)
# Plot the confusion matrix, if desired.
if show_confusion_matrix:
print("Confusion Matrix:")
plot_confusionMatrix(cls_predicted=cls_pred)
test_accuracy(show_example_errors=True,
show_confusion_matrix=True)
optimize(num_iterations=1000)
test_accuracy(show_example_errors=True,
show_confusion_matrix=True)
|
35230
|
from demo.components.server import server
from chips.api.api import *
def application(chip):
eth = Component("application.c")
eth(
chip,
inputs = {
"eth_in" : chip.inputs["input_eth_rx"],
"am_in" : chip.inputs["input_radio_am"],
"fm_in" : chip.inputs["input_radio_fm"],
"rs232_rx":chip.inputs["input_rs232_rx"],
},
outputs = {
"eth_out" : chip.outputs["output_eth_tx"],
"audio_out" : chip.outputs["output_audio"],
"frequency_out" : chip.outputs["output_radio_frequency"],
"samples_out" : chip.outputs["output_radio_average_samples"],
"rs232_tx":chip.outputs["output_rs232_tx"],
},
)
|
35287
|
import keras
import pandas as pd
import urllib2
from bs4 import BeautifulSoup
from pprint import pprint
from matplotlib import pyplot as plt
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 120)
np.set_printoptions(linewidth=100)
# --
# Config + Init
num_features = 75 # Character
# max_len = 100 # Character
max_len = 350
formatter = KerasFormatter(num_features, max_len)
# --
# Load and format data
in_store = pd.HDFStore(
'/Users/BenJohnson/projects/what-is-this/qpr/gun_leaves_20151118_v2.h5',
complevel = 9,
complib = 'bzip2'
)
source = in_store.keys()[3]
df = in_store[source]
in_store.close()
# Subset to frequent paths
chash = df.groupby('hash').apply(lambda x: len(x.obj.unique()))
keep = list(chash[chash > 100].index)
df = df[df.hash.apply(lambda x: x in keep)]
df['content'] = df.obj.apply(lambda x: BeautifulSoup(x).text.encode('utf8'))
# --
# Make all pairs
train = make_triplet_train(df, N = 600)
pd.crosstab(train.doc, train.hash)
trn, _ = formatter.format(train, ['content'], 'hash')
# Test set of all unique points
unq = df.copy()
del unq['id']
unq = unq.drop_duplicates()
awl, _ = formatter.format(unq, ['content'], 'hash')
# --
# Defining model
recurrent_size = 32
dense_size = 5
model = Sequential()
model.add(Embedding(num_features, recurrent_size))
model.add(LSTM(recurrent_size))
model.add(Dense(dense_size))
model.add(Activation('unit_norm'))
model.compile(loss = 'triplet_euclidean', optimizer = 'adam')
# --
# Training model
# Shuffles while maintaining groups
ms = modsel(train.shape[0], N = 3)
_ = model.fit(
trn['x'][0][ms], trn['x'][0][ms],
nb_epoch = 1,
batch_size = 3 * 250,
shuffle = False
)
preds = model.predict(awl['x'][0], verbose = True)
colors = awl['y'].argmax(1)
plt.scatter(preds[:,0], preds[:,1], c = colors)
plt.show()
# --
# Clustering results
#
# Could do better -- actually may want some kind of metric for "projection overlap"
from sklearn.cluster import DBSCAN
db = DBSCAN(eps = .1, min_samples = 50).fit(preds)
res = unq.hash.groupby(db.labels_).apply(lambda x: x.value_counts()).reset_index()
res.columns = ('cluster', 'hash', 'cnt')
res = res.sort('hash')
good_res = res[(res.cnt > 50) & (res.cluster > -1)]
good_res
sorted(res.hash.unique())
sorted(good_res.hash.unique())
eqv = list(good_res.groupby('cluster').hash.apply(lambda x: list(x)))
eqv = map(eval, np.unique(map(str, eqv)))
print_eqv(eqv, df)
|
35307
|
import config as cfg
import cv2
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from keras import backend as K
import tensorflow as tf
import keras
'''
This is needed to avoid errors when exposing the service with Flask.
info --> https://github.com/tensorflow/tensorflow/issues/28287#issuecomment-495005162
'''
from keras.backend import set_session
sess = tf.Session()
graph = tf.get_default_graph()
set_session(sess)
model_emotions = load_model(cfg.path_model)
class predict_emotions():
'''
def __init__(self):
        # load the emotion detection model
global graph
self.graph = tf.get_default_graph()
self.model_emotions = load_model(cfg.path_model)
'''
def preprocess_img(self,face_image,rgb=True,w=48,h=48):
face_image = cv2.resize(face_image, (w,h))
if rgb == False:
face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
face_image = face_image.astype("float") / 255.0
face_image= img_to_array(face_image)
face_image = np.expand_dims(face_image, axis=0)
return face_image
def get_emotion(self,img,boxes_face):
emotions = []
if len(boxes_face)!=0:
for box in boxes_face:
y0,x0,y1,x1 = box
face_image = img[x0:x1,y0:y1]
                # preprocess the face image
face_image = self.preprocess_img(face_image ,cfg.rgb, cfg.w, cfg.h)
                # predict the emotion for the image
global sess
global graph
with graph.as_default():
set_session(sess)
prediction = model_emotions.predict(face_image)
emotion = cfg.labels[prediction.argmax()]
emotions.append(emotion)
else:
emotions = []
boxes_face = []
return boxes_face,emotions
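# Usage sketch (not part of the original module): face detection happens elsewhere in the
# pipeline, so the boxes are passed in already; each box is (y0, x0, y1, x1), as expected
# by get_emotion above.
def _example_get_emotion(frame, boxes_face):
    detector = predict_emotions()
    boxes_face, emotions = detector.get_emotion(frame, boxes_face)
    return emotions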
|
35314
|
from dps.hyper import run_experiment
from dps.utils import copy_update
from dps.tf.updater import DummyUpdater
from silot.run import basic_config, alg_configs, env_configs
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--max-digits', type=int, choices=[6, 12], required=True)
args, _ = parser.parse_known_args()
readme = "Running SILOT experiment on moving_mnist."
run_kwargs = dict(
max_hosts=1, ppn=6, cpp=2, gpu_set="0,1", pmem=10000, project="rpp-bengioy",
wall_time="96hours", cleanup_time="5mins", slack_time="5mins", n_repeats=6,
copy_locally=True, config=dict(render_step=1000000)
)
durations = dict(
long=copy_update(run_kwargs),
short=dict(
wall_time="180mins", gpu_set="0", ppn=4, n_repeats=4, distributions=None,
config=dict(max_steps=3000, render_step=500, eval_step=100, display_step=100, stage_steps=600, curriculum=[dict()]),
),
build=dict(
ppn=1, cpp=1, gpu_set="0", wall_time="180mins", n_repeats=1, distributions=None,
config=dict(
do_train=False, get_updater=DummyUpdater, render_hook=None,
curriculum=[dict()] + [dict(max_digits=i, n_train=100, n_val=1000) for i in range(1, 13)]
)
),
)
config = basic_config.copy()
config.update(env_configs['moving_mnist'])
config.update(alg_configs['silot'], max_digits=args.max_digits)
config.update(final_count_prior_log_odds=0.0125, stage_steps=40000)
run_experiment(
"moving_mnist_silot",
config, "silot on moving_mnist.",
name_variables="max_digits",
durations=durations
)
|
35358
|
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver
import requests
def login():
driver = webdriver.Chrome()
driver.implicitly_wait(20)
driver.get("https://tixcraft.com/login")
WebDriverWait(driver, 600).until(
EC.visibility_of_element_located((By.XPATH, "//*[@class='user-name']"))
)
cookies = driver.get_cookies()
driver.quit()
return cookies
def user_verify(driver, url):
driver.get(url)
url = driver.current_url
while "ticket/verify" in url:
try:
url = driver.current_url
WebDriverWait(driver, 2).until(EC.alert_is_present())
            alert = driver.switch_to.alert
alert.accept()
except:
pass
return url
def session_to_driver(session):
cookies = session.cookies.get_dict()
driver = webdriver.Chrome()
driver.get("https://tixcraft.com")
for name, value in cookies.items():
cookie = {"name": name, "value": value}
driver.add_cookie(cookie)
return driver
def driver_to_session(driver):
cookies = driver.get_cookies()
session = requests.Session()
for cookie in cookies:
session.cookies.set(cookie["name"], cookie["value"])
return session
|
35377
|
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*block{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
                if old_key.find(param) == -1:
continue
if 'unit01' in old_key:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
print("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
# Calculate SD conv weight
w = state_dict[old_key]
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
state_dict[new_key] = w
del state_dict[old_key]
return state_dict
def load_big_format(cfg, f):
model = OrderedDict()
weights = np.load(f)
cmap = {'a':1, 'b':2, 'c':3}
for key, val in weights.items():
old_key = key.replace('resnet/', '')
if 'root_block' in old_key:
new_key = 'root.conv.weight'
elif '/proj/standardized_conv2d/kernel' in old_key:
key_pattern = old_key.replace('/proj/standardized_conv2d/kernel', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.downsample.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
elif '/standardized_conv2d/kernel' in old_key:
key_pattern = old_key.replace('/standardized_conv2d/kernel', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
elif '/group_norm/gamma' in old_key:
key_pattern = old_key.replace('/group_norm/gamma', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.gn{}.weight'.format(bname,uname,cmap[cidx])
elif '/group_norm/beta' in old_key:
key_pattern = old_key.replace('/group_norm/beta', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.gn{}.bias'.format(bname,uname,cmap[cidx])
else:
print('Unknown key {}'.format(old_key))
continue
print('Map {} -> {}'.format(key, new_key))
model[new_key] = tf2th(val)
model = _rename_conv_weights_for_deformable_conv_layers(model, cfg)
return dict(model=model)
|
35398
|
from .watch_time import time_str
import fitlog
class Logger:
def __init__(self , fil_path = None):
self.log_fil = open(fil_path , "w" , encoding = "utf-8")
def nolog(self , cont = ""):
pass
def log_print(self , cont = ""):
self.log_fil.write(cont + "\n")
self.log_fil.flush()
print (cont)
fitlog.add_to_line(cont)
def log_print_w_time(self , cont = ""):
self.log_print(str(cont) + " | " + time_str())
|
35421
|
from generator.actions import Actions
import random
import string
import struct
import numpy as np
import math
import datetime as dt
import ctypes
def kaprica_mixin(self):
if hasattr(self, 'xlat_seed'):
return
def xlat_seed(seed):
def hash_string(seed):
H = 0x314abc86
for c in seed:
H = (H * 37) & 0xffffffff
H ^= ord(c)
H = ((H << 13) ^ (H >> 19)) & 0xffffffff
return H
def hash_iterate(H):
H = (H * 3) & 0xffffffff
H = ((H << 13) ^ (H >> 19) ^ (H >> 21)) & 0xffffffff
return H
xmap = list(xrange(256))
xmap_inv = list(xrange(256))
state = hash_string(seed)
for i in xrange(255, 0, -1):
j = state % i
state = hash_iterate(state)
xmap[i], xmap[j] = xmap[j], xmap[i]
for i in xrange(256):
xmap_inv[xmap[i]] = i
self.xlat_map = xmap
self.xlat_map_inv = xmap_inv
self.xlat_seed = xlat_seed
self.xlat_map = None
self.xlat_map_inv = None
def xlat_string(s, inverse=False):
if inverse:
return ''.join([chr(self.xlat_map_inv[ord(c)]) for c in s])
return ''.join([chr(self.xlat_map[ord(c)]) for c in s])
self.xlat_string = xlat_string
def read(delim=None, length=None, expect=None):
if self.xlat_map:
if delim:
delim = self.xlat_string(delim)
if expect:
expect = self.xlat_string(expect)
return self._original_read(delim=delim, length=length, expect=expect)
self._original_read = self.read
self.read = read
def write(s):
if self.xlat_map:
if s:
s = self.xlat_string(s)
return self._original_write(s)
self._original_write = self.write
self.write = write
def random_word(max_size=10, min_size=2):
if random.randint(0,4):
characters = string.letters + string.digits
else:
characters = string.letters
max_size = max_size if max_size >= min_size else min_size
max_size = random.randint(min_size, max_size)
return ("".join(random.choice(characters) for c in range(max_size))).lower()
def random_text(max_words=10, min_words=3):
max_words = max_words if max_words >= min_words else min_words
text = ''
for x in xrange(random.randint(min_words, max_words)):
text += random_word() + ' '
return text
class Cs10Fun(Actions):
def _read(self, data):
self.read(length=len(data), expect=data)
def _read_array(self, array=None):
if not array:
array = self._cur_array
if not len(array):
self._read("Empty Array\n")
else:
self._read("Array data\n")
temp_list = []
for val in array:
temp_list.append("%d, " % val)
temp_list[-1] = temp_list[-1][:-2]
self._read("[" + ''.join(temp_list) + ']\n')
def start(self):
#self.delay(100)
kaprica_mixin(self)
self.xlat_seed('2e0715f1709c')
self.write(self.magic_page[:4])
self.read(length=0x1000, expect=self.magic_page)
self._cur_array = []
self._read("Welcome to CS10Fun!\n")
self._read("Today's lesson is on sorting! We're going to cover 4 types of sorts.\n")
self._read("Before we begin, create an array of ints\n")
self.menu()
self.enter_array()
def menu(self):
self._read("Main Menu\n")
self._read("1. Enter Array\n")
self._read("2. Multiply Array\n")
self._read("3. Try Insertion Sort\n")
self._read("4. Try Selection Sort\n")
self._read("5. Try Heap Sort\n")
self._read("6. Try Merge Sort\n")
self._read("7. Reprint Array\n")
self._read("0. Exit\n")
self._read(" -----\n")
self._read("Current Number of items in array = %d\n" % len(self._cur_array))
#self._read_array()
self._read(" -----\n")
self._read("Make a selection\n")
def enter_array(self):
self.write('1\n')
self._read("Enter a list of numbers to sort. End the list with ';;'\n")
self._cur_array = []
for x in xrange(random.randint(1,100)):
self._cur_array.append(random.randint(-1000000000, 1000000000))
self.write(','.join([str(x) for x in self._cur_array]) + ',;;\n')
self._read("New Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def multiply_array(self):
self.write('2\n')
if len(self._cur_array) > 10000:
self._read("Array is too long. Can't multiply any more\n")
elif len(self._cur_array):
self._read("Quick Grow! Enter a list multiplier. End number with ';'\n")
multiplier = random.randint(1,3)
while multiplier * len(self._cur_array) > 1024 and multiplier * len(self._cur_array) <= 1048:
multiplier = random.randint(1,3)
self.write("%d;\n" % multiplier)
self._cur_array *= multiplier
self._read("Multiplied Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def insert_sort(self):
self.write('3\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Insertion sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def selection_sort(self):
self.write('4\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Selection sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def heap_sort(self):
self.write('5\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Heap sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def merge_sort(self):
self.write('6\n')
self._read_array(sorted(self._cur_array))
#self.read(expect='Merge sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
self.read(delim='\n')
def reprint_array(self):
self.write('7\n')
self._read("Current Array\n")
self._read("Number of items in array = %d\n" % len(self._cur_array))
self._read_array()
def exit(self):
self.write('0\n')
self._read("Thanks for joining us\n")
self._read("See you next time\n")
|
35442
|
from gamegym.game import Game, Situation
from gamegym.utils import get_rng
from gamegym.distribution import Explicit
from gamegym.value_learning.valuestore import LinearValueStore
import numpy as np
import pytest
from scipy.sparse import csr_matrix
def test_init():
LinearValueStore(shape=(3, 3))
LinearValueStore(np.zeros((4, 3)))
LinearValueStore(np.zeros((4, 3)), shape=(4, 3))
with pytest.raises(Exception):
LinearValueStore((3, 3))
with pytest.raises(Exception):
LinearValueStore(np.zeros((4, 3)), shape=(4, 4))
def test_value_update():
a = np.ones((4, ))
vs = LinearValueStore(a)
f = [0, 2, -1, 3]
assert vs.get(f) == pytest.approx(4.0)
assert vs.get(np.array(f)) == pytest.approx(4.0)
#assert vs.get(csr_matrix(f)) == pytest.approx(4.0)
vs.update(f, -0.5)
assert vs.values == pytest.approx([1, 0, 1.5, -0.5])
assert vs.get(f) == pytest.approx(-3.0)
def test_norm():
vs = LinearValueStore(shape=(2, 3), fix_mean=1.0)
|
35501
|
import os
import reader
import json
# todo: get this logger from elsewhere
from celery.utils.log import get_task_logger
log = get_task_logger(__name__)
defaultFieldMappings = [
### SET
(['info','protocol'], 'getReplayProtocolVersion'),
(['info','bytes'], 'getReplayFileByteSize'),
(['info','gameloops'], 'getMatchLengthGameloops'),
(['info','seconds'], 'getMatchLengthSeconds'),
(['info','start_timestamp'], 'getMatchUTCTimestamp'),
(['info','speed'], 'getMatchSpeed'),
(['info','match_type'], 'getMatchType'),
    (['info','hero_selection_mode'], 'getHeroSelectionMode'),
(['map','name'], 'getMapName'),
(['map',{'m_mapSizeX':'width', 'm_mapSizeY':'height'}], 'getGameDescription'),
(['team', [], 'levels'], 'getTeamLevels'),
#(['players', [], 'talents'], 'getTalents'),
#(['players', [], 'talents', [], {'name':'name'}], 'getTalents'),
#(['players', [], {'m_teamId': 'team', 'm_name': 'name', 'm_toonId': 'toon_id'}], 'getPlayers'),
(['raw','players'], 'getPlayers'),
(['raw','details'], 'getReplayDetails'),
(['raw','init_data'], 'getReplayInitData'),
#(['raw','translated_attributes_events'], 'getTranslatedReplayAttributesEvents'),
#(['players', [], 'hero'], 'getPlayersHeroChoiceArray'),
]
named_field_mappings = {
'RawReplayDetails': [(['raw','details'], 'getReplayDetails')],
'RawReplayInitData': [(['raw','init_data'], 'getReplayInitData')],
'RawReplayTrackerEvents': [(['raw','tracker_events'], 'getReplayTrackerEvents')],
'RawReplayAttributesEvents': [(['raw','attributes_events'], 'getReplayAttributesEvents')],
'RawReplayGameEvents': [(['raw','game_events'], 'getReplayGameEvents')],
'RawReplayMessageEvents': [(['raw','message_events'], 'getReplayMessageEvents')],
'RawTalentSelectionGameEvents': [(['raw','selections'], 'getTalentSelectionGameEvents')],
}
class StormReplayAnalyzer:
@staticmethod
def getAllFieldMappingNames():
return named_field_mappings.keys()
@staticmethod
def getFieldMappingForNames(names):
fieldMapping = []
for name in names:
fieldMapping = fieldMapping + named_field_mappings.get(name, [])
return fieldMapping
def __init__(self, reader):
self.reader = reader
def analyze(self, fieldMappings=None):
if fieldMappings is None:
fieldMappings = defaultFieldMappings
retval = {}
for field in fieldMappings:
value = getattr(self, field[1])()
worklist = [(retval, field[0], value)]
while len(worklist) > 0:
workItem = worklist.pop()
obj = workItem[0]
keyPath = workItem[1]
value = workItem[2]
key = keyPath[0]
isArray = isinstance(key, (int, long))
if isArray and key >= len(obj):
obj.extend([None]*(key + 1 - len(obj)))
if len(keyPath) == 1:
obj[key] = value
elif isinstance(keyPath[1], basestring):
if isArray:
if obj[key] is None:
obj[key] = {}
obj = obj[key]
else:
obj = obj.setdefault(key, {})
worklist.append( (obj, keyPath[1:], value) )
elif isinstance(keyPath[1], list):
if isArray:
if obj[key] is None:
obj[key] = []
obj = obj[key]
else:
obj = obj.setdefault(key, [])
for index, element in enumerate(value):
worklist.append( (obj, [index] + keyPath[2:], element) )
elif isinstance(keyPath[1], dict):
if isArray:
if obj[key] is None:
obj[key] = {}
obj = obj[key]
else:
obj = obj.setdefault(key, {})
for dictKey in value:
if 0 == len(keyPath[1]):
keyToWrite = dictKey
elif keyPath[1].has_key(dictKey):
keyToWrite = keyPath[1][dictKey]
else:
continue
worklist.append( (obj, [keyToWrite] + keyPath[2:], value[dictKey]) )
else:
raise Exception('Key of invalid type: %s' % str(key))
return retval
def getReplayFileByteSize(self):
return self.reader.getReplayFileByteSize()
def getTalentSelectionGameEvents(self):
events = []
for event in self.reader.getReplayGameEvents():
if (event['_event'] != 'NNet.Game.SHeroTalentTreeSelectedEvent'):
continue
events.append(event)
return events
def getReplayProtocolVersion(self):
return self.reader.getReplayProtocolVersion()
def getReplayInitData(self):
return self.reader.getReplayInitData()
def getReplayAttributesEvents(self):
return self.reader.getReplayAttributesEvents()
def getReplayDetails(self):
return self.reader.getReplayDetails()
def getReplayTrackerEvents(self):
return self.reader.getReplayTrackerEvents()
def getReplayGameEvents(self):
return self.reader.getReplayGameEvents()
def getReplayMessageEvents(self):
return self.reader.getReplayMessageEvents()
def getTranslatedReplayAttributesEvents(self):
talentsReader = self.getTalentsReader()
return talentsReader.translate_replay_attributes_events(self.getReplayAttributesEvents())
def getGameDescription(self):
initData = self.getReplayInitData()
return initData['m_syncLobbyState']['m_gameDescription']
def getGameSpeed(self):
try:
return self.gameSpeed
except AttributeError:
self.gameSpeed = 0
return self.gameSpeed
def getTalentsReader(self):
try:
return self.talentsReader
except AttributeError:
replayVersion = self.reader.getReplayProtocolVersion()
try:
self.talentsReader = __import__('stormreplay.talents%s' % replayVersion, fromlist=['talents'])
except ImportError:
raise Exception('Unsupported StormReplay build number for talents: %i' % replayVersion)
return self.talentsReader
def getTalents(self):
try:
return self.talents
except AttributeError:
self.talents = [[] for _ in xrange(10)]
talentsReader = self.getTalentsReader()
generator = talentsReader.decode_game_events_talent_choices(self.reader.getReplayGameEvents(), self.getPlayersHeroChoiceArray())
for choice in generator:
self.talents[choice['_userid']].append({
'seconds': self.gameloopToSeconds(choice['_gameloop']),
'level': choice['m_level'],
'name': choice['m_talentName'],
'description': choice['m_talentDescription'],
'index': choice['m_talentIndex'],
})
return self.talents
def getTeamTalentTierTimes(self):
try:
return self.teamTalentTierTimes
except AttributeError:
teamTalentTierLevel = [[], []]
teamTalentTiersFirstPick = [[], []]
teamTalentTiersLastPick = [[], []]
players = self.getPlayers()
for playerIndex, playerTalentPicks in enumerate(self.getTalents()):
player = players[playerIndex]
for talentTierIndex, talentPick in enumerate(playerTalentPicks):
talentPickTime = talentPick['seconds']
teamIndex = player['m_teamId']
tiersFirstPick = teamTalentTiersFirstPick[teamIndex]
if (talentTierIndex >= len(tiersFirstPick)):
tiersFirstPick.append(talentPickTime)
elif (talentPickTime < tiersFirstPick[talentTierIndex]):
tiersFirstPick[talentTierIndex] = talentPickTime
tiersLastPick = teamTalentTiersLastPick[teamIndex]
if (talentTierIndex >= len(tiersLastPick)):
tiersLastPick.append(talentPickTime)
elif (talentPickTime > tiersLastPick[talentTierIndex]):
tiersLastPick[talentTierIndex] = talentPickTime
if (talentTierIndex >= len(teamTalentTierLevel[teamIndex])):
teamTalentTierLevel[teamIndex].append(talentPick['level'])
else:
teamTalentTierLevel[teamIndex][talentTierIndex] = talentPick['level']
self.teamTalentTierTimes = [[], []]
for teamIndex in xrange(2):
for talentTierIndex, level in enumerate(teamTalentTierLevel[teamIndex]):
self.teamTalentTierTimes[teamIndex].append({
'earliest': teamTalentTiersFirstPick[teamIndex][talentTierIndex],
'latest': teamTalentTiersLastPick[teamIndex][talentTierIndex],
'level': level,
})
return self.teamTalentTierTimes
def getTeamLevels(self):
try:
return self.teamLevels
except AttributeError:
teamTalentTierTimes = self.getTeamTalentTierTimes()
self.teamLevels = [[], []]
for teamIndex in xrange(2):
talentTierTimes = teamTalentTierTimes[teamIndex]
levelTimes = [0] * talentTierTimes[-1]['level']
for firstTier, nextTier in zip(talentTierTimes, talentTierTimes[1:]):
levelRange = nextTier['level'] - firstTier['level']
for level in xrange(firstTier['level'], nextTier['level']+1):
levelIndex = level-1
lerp = float(level - firstTier['level']) / levelRange
time = lerp * (nextTier['earliest'] - firstTier['earliest']) + firstTier['earliest']
levelTimes[levelIndex] = time
levelToTalentTierInfo = {}
for tierInfo in talentTierTimes:
levelToTalentTierInfo[str(tierInfo['level'])] = tierInfo
for levelIndex, time in enumerate(levelTimes):
level = levelIndex + 1
levelInfo = {
'level': levelIndex + 1,
'seconds': time,
'is_talent_tier': False,
}
if levelToTalentTierInfo.has_key(str(level)):
tierInfo = levelToTalentTierInfo[str(level)]
levelInfo['is_talent_tier'] = True
levelInfo['earliest_talent_picked_time'] = tierInfo['earliest']
levelInfo['latest_talent_picked_time'] = tierInfo['latest']
self.teamLevels[teamIndex].append(levelInfo)
return self.teamLevels
def getMapName(self):
try:
return self.mapName
except AttributeError:
self.mapName = self.reader.getReplayDetails()['m_title']['utf8']
return self.mapName
def getPlayersHeroChoiceArray(self):
try:
return self.playersHeroArray
except AttributeError:
self.playersHeroArray = [None] * 10
for i, player in enumerate(self.getPlayerSpawnInfo()):
self.playersHeroArray[i] = player['hero']
return self.playersHeroArray
# returns array indexed by user ID
def getPlayers(self):
try:
return self.players
except AttributeError:
self.players = [None] * 10
for i, player in enumerate(self.getReplayDetails()['m_playerList']):
#TODO: confirm that m_workingSetSlotId == i always
toon = player['m_toon']
player['m_toonId'] = "%i-%s-%i-%i" % (toon['m_region'], toon['m_programId'], toon['m_realm'], toon['m_id'])
player['m_name'] = player['m_name']['utf8']
player['m_controlPlayerId'] = i+1
self.players[i] = player
return self.players
# returns array indexed by user ID
def getPlayerSpawnInfo(self):
try:
return self.playerSpawnInfo
except AttributeError:
self.playerSpawnInfo = [None] * 10
playerIdToUserId = {}
for event in self.getReplayTrackerEvents():
if event['_event'] == 'NNet.Replay.Tracker.SPlayerSetupEvent':
playerIdToUserId[event['m_playerId']] = event['m_userId']
elif event['_event'] == 'NNet.Replay.Tracker.SUnitBornEvent' and (int(event['_gameloop']) > 0):
playerId = event['m_controlPlayerId']
if (playerIdToUserId.has_key(playerId)):
playerIndex = playerIdToUserId[playerId] # always playerId-1 so far, but this is safer
self.playerSpawnInfo[playerIndex] = {
'hero': event['m_unitTypeName']['utf8'],
'unit_tag': event['m_unitTag']
}
del playerIdToUserId[playerId]
if len(playerIdToUserId) == 0:
break
return self.playerSpawnInfo
def getMatchSpeed(self):
attributes = self.getTranslatedReplayAttributesEvents()
return attributes[16]['m_gameSpeed']
def getMatchType(self):
attributes = self.getTranslatedReplayAttributesEvents()
return attributes[16]['m_gameType']
def getHeroSelectionMode(self):
attributes = self.getTranslatedReplayAttributesEvents()
return attributes[16]['m_heroSelectionMode']
def getMatchUTCTimestamp(self):
try:
return self.utcTimestamp
except AttributeError:
self.utcTimestamp = (self.getReplayDetails()['m_timeUTC'] / 10000000) - 11644473600
return self.utcTimestamp
def getMatchLengthGameloops(self):
lastEvent = self.getReplayTrackerEvents()[-1]
return lastEvent['_gameloop']
def getMatchLengthSeconds(self):
return self.gameloopToSeconds(self.getMatchLengthGameloops())
def gameloopToSeconds(self, gameloop):
return gameloop / 16.0
def gameloopToTimestamp(self, gameloop):
        return self.getMatchUTCTimestamp() + gameloop / 16.0
def getChat(self):
try:
return self.chat
except AttributeError:
self.chat = []
for messageEvent in self.getReplayMessageEvents():
if (messageEvent['_event'] != 'NNet.Game.SChatMessage'):
continue
userId = messageEvent['_userid']['m_userId']
chatData = {
't': self.gameloopToTimestamp(messageEvent['_gameloop']),
'user': userId,
'msg': messageEvent['m_string']['utf8'],
}
self.chat.append(chatData)
return self.chat
|
35520
|
from typing import Any
from starlette.datastructures import State
class DefaultState:
state = State()
def get(self,key:str, value: Any = None) -> Any:
if hasattr(self.state, key):
return getattr(self.state, key)
else:
if not value:
                raise Exception('state has no %s attribute' % key)
else:
return value
def set(self, key:str, value: Any) -> None:
if hasattr(self.state, key):
            raise Exception('state already has %s attribute' % key)
else:
setattr(self.state, key, value)
def update(self, key:str, value: Any) -> None:
if hasattr(self.state, key):
setattr(self.state, key, value)
default_state = DefaultState()
|
35566
|
import os, sys, inspect, logging, time
lib_folder = os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0], '..')
lib_load = os.path.realpath(os.path.abspath(lib_folder))
if lib_load not in sys.path:
sys.path.insert(0, lib_load)
import capablerobot_usbhub
hub = capablerobot_usbhub.USBHub()
## Input enabled here on the output so that reading the output's current state works
hub.gpio.configure(ios=[0], output=True, input=True)
hub.gpio.configure(ios=[1], input=True, pull_down=True)
while True:
hub.gpio.io0 = True
print("IO {} {}".format(*hub.gpio.io))
time.sleep(1)
hub.gpio.io0 = False
print("IO {} {}".format(*hub.gpio.io))
time.sleep(1)
|
35585
|
from django import template
register = template.Library()
@register.filter
def is_bbb_mod(room, user):
return room.is_moderator(user)
|
35600
|
import os
import sys
import neuron
import json
from pprint import pprint
from neuron import h
import matplotlib.pyplot as plt
import numpy as np
import h5py
## Runs the 5 cell iclamp simulation but in NEURON for each individual cell
# $ python pure_nrn.py <gid>
neuron.load_mechanisms('../components/mechanisms')
h.load_file('stdgui.hoc')
h.load_file('import3d.hoc')
cells_table = {
# gid = [model id, cre line, morph file]
0: [472363762, 'Scnn1a', 'Scnn1a_473845048_m.swc'],
1: [473863510, 'Rorb', 'Rorb_325404214_m.swc'],
2: [473863035, 'Nr5a1', 'Nr5a1_471087815_m.swc'],
3: [472912177, 'PV1', 'Pvalb_470522102_m.swc'],
4: [473862421, 'PV2', 'Pvalb_469628681_m.swc']
}
def run_simulation(gid, morphologies_dir='../components/morphologies', plot_results=True):
swc_file = os.path.join(morphologies_dir, cells_table[gid][2])
model_file = 'model_gid{}_{}_{}.json'.format(gid, cells_table[gid][0], cells_table[gid][1])
params_dict = json.load(open(model_file, 'r'))
# pprint(params_dict)
# load the cell
nrn_swc = h.Import3d_SWC_read()
nrn_swc.input(str(swc_file))
imprt = h.Import3d_GUI(nrn_swc, 0)
h("objref this")
imprt.instantiate(h.this)
# Cut the axon
h("soma[0] area(0.5)")
for sec in h.allsec():
sec.nseg = 1 + 2 * int(sec.L / 40.0)
if sec.name()[:4] == "axon":
h.delete_section(sec=sec)
h('create axon[2]')
for sec in h.axon:
sec.L = 30
sec.diam = 1
sec.nseg = 1 + 2 * int(sec.L / 40.0)
h.axon[0].connect(h.soma[0], 0.5, 0.0)
h.axon[1].connect(h.axon[0], 1.0, 0.0)
h.define_shape()
# set model params
h("access soma")
for sec in h.allsec():
sec_name = sec.name().split('[')[0]
# special case for passive channels rev. potential
sec.insert('pas')
for seg in sec:
if sec_name not in params_dict['e_pas']:
continue
seg.pas.e = params_dict['e_pas'][sec_name]
# insert mechanisms (if req.) and set density
for prop in params_dict[sec_name]:
if 'mechanism' in prop:
sec.insert(prop['mechanism'])
setattr(sec, prop['name'], prop['value'])
# simulation properties
h.stdinit()
h.tstop = 4000.0
h.dt = 0.1
h.steps_per_ms = 1/h.dt
h.celsius = 34.0
h.v_init = -80.0
# stimuli is an increasing series of 3 step currents
cclamp1 = h.IClamp(h.soma[0](0.5))
cclamp1.delay = 500.0
cclamp1.dur = 500.0
cclamp1.amp = 0.1500
cclamp2 = h.IClamp(h.soma[0](0.5))
cclamp2.delay = 1500.0
cclamp2.dur = 500.0
cclamp2.amp = 0.1750
cclamp3 = h.IClamp(h.soma[0](0.5))
cclamp3.delay = 2500.0
cclamp3.dur = 500.0
cclamp3.amp = 0.2000
# run simulation
v_vec = h.Vector()
v_vec.record(h.soma[0](0.5)._ref_v)
h.startsw()
h.run(h.tstop)
voltages = [v for v in v_vec]
cell_var_name = 'cellvar_gid{}_{}_{}.h5'.format(gid, cells_table[gid][0], cells_table[gid][1])
with h5py.File(cell_var_name, 'w') as h5:
# fake a mapping table just for convience
h5.create_dataset('/mapping/gids', data=[gid], dtype=np.uint16)
        h5.create_dataset('/mapping/element_pos', data=[0.5], dtype=np.float64)
h5.create_dataset('/mapping/element_id', data=[0], dtype=np.uint16)
h5.create_dataset('/mapping/index_pointer', data=[0], dtype=np.uint16)
h5.create_dataset('/v/data', data=voltages, dtype=np.float64)
if plot_results:
times = np.linspace(0.0, h.tstop, len(voltages))
plt.plot(times, voltages)
plt.show()
if __name__ == '__main__':
if __file__ != sys.argv[-1]:
        run_simulation(int(sys.argv[-1]))
else:
for gid in range(5):
run_simulation(gid, plot_results=False)
|
35609
|
import init_file as variables
import cj_function_lib as cj
from datetime import datetime
fert_table = cj.extract_table_from_mdb(variables.QSWAT_MDB, "fert", variables.path + "\\fert.tmp~")
fert = ""
for fert_line in fert_table:
    fields = fert_line.split(",")
    fert += (
        cj.trailing_spaces(4, fields[1], 0)
        + cj.string_trailing_spaces(9, fields[2])
        + cj.trailing_spaces(8, fields[3], 3)
        + cj.trailing_spaces(8, fields[4], 3)
        + cj.trailing_spaces(8, fields[5], 3)
        + cj.trailing_spaces(8, fields[6], 3)
        + cj.trailing_spaces(8, fields[7], 3)
        + cj.trailing_spaces(4, fields[8], 2) + "E+00"
        + cj.trailing_spaces(4, fields[9], 2) + "E+00"
        + cj.trailing_spaces(8, fields[10], 3)
        + "\n"
    )
fileName = "fert.dat"
cj.write_to(variables.DefaultSimDir + "TxtInOut\\" + fileName, fert)
#print fileName
|
35631
|
import codecs
import collections
import io
import os
import re
import struct
from .instruction import Instruction
from .opcode import Opcodes
from .registers import Registers
from .section import Section
from .symbol import Symbol
def p32(v):
return struct.pack('<I', v)
def unescape_str_to_bytes(x):
return codecs.escape_decode(x.encode('utf8'))[0]
class QueueReader(object):
def __init__(self, *files):
self.fq = list(files)
def add_file(self, f):
self.fq.append(f)
def insert_file(self, f, idx=0):
self.fq.insert(idx, f)
def readline(self):
while len(self.fq) > 0:
r = self.fq[0].readline()
if not r:
self.fq.pop(0)
continue
return r
return ''
class Parser(object):
def __init__(self, fin):
self.sections = None
self.section_bodies = {}
self.entry = None
if type(fin) is str:
fin = io.StringIO(fin)
self.reader = QueueReader(fin)
self.parse()
def parse(self):
sections = collections.OrderedDict()
current_section = None
lineno = 0
while True:
lineno += 1
raw = self.reader.readline()
if not raw:
break
line = raw.split(';')[0].strip()
if not line:
continue
elif line.startswith('.sect'):
args = line.split(maxsplit=1)[1].split(' ')
name = args[0].upper()
if len(args) > 1:
addr = int(args[1], 16)
else:
if name == 'TEXT':
addr = 0x4000
else:
addr = 0x6000
new_sect = Section(addr)
sections[name] = new_sect
current_section = new_sect
elif line.startswith('.include'):
filename = line.split(maxsplit=1)[1].strip()
if filename.startswith('zstdlib/'):
filename = os.path.join(os.path.dirname(__file__), '../../..', filename)
self.reader.insert_file(open(filename))
elif line.startswith('.entry'):
entry = line.split()[1]
                self.entry = self.try_parse_imm(entry)
elif line.startswith('.align'):
current_section.align(self._parse_int(line.split()[1]))
elif line.startswith('.db'):
data = line[3:].split(',')
bytes_data = bytes(int(i.strip(), 16) for i in data)
current_section.write(bytes_data)
elif line.startswith('.zero'):
data = line[5:].strip()
if data.startswith('0x'):
n = int(data, 16)
else:
n = int(data)
current_section.write(b'\0' * n)
elif line.startswith('.str'):
data = line[4:].strip()
bytes_data = unescape_str_to_bytes(data[1:-1])
current_section.write(bytes_data + b'\0\0')
elif line[-1] == ':':
label_name = line[:-1]
current_section.label(label_name)
else:
for ins in self.parse_instruction(line):
current_section.write(ins)
self.sections = sections
def resolve_label(self, name):
for section in self.sections.values():
addr = section.labels.get(name, None)
if addr:
return addr
def get_entry(self):
if type(self.entry) is Symbol:
return self.entry.resolve(self.resolve_label)
elif self.entry is not None:
return self.entry
elif self.resolve_label('start'):
return self.resolve_label('start')
else:
return 0x4000
def build(self):
sections = []
bodies = []
for name, section in self.sections.items():
buff = io.BytesIO()
ip = section.addr
for data in section.container:
if type(data) is Instruction:
ins = data
if type(ins.imm) is Symbol:
sym = ins.imm
buff.write(ins.compose(sym.resolve(self.resolve_label, ip)))
else:
buff.write(ins.compose())
ip += 4
elif type(data) is Symbol:
val = data.resolve(self.resolve_label, ip)
buff.write(p32(val))
ip += 4
elif type(data) is bytes:
buff.write(data)
ip += len(data)
body = buff.getvalue()
self.section_bodies[name] = body
bodies.append(body)
sections.append(struct.pack('<HH',
section.addr, # section_addr
len(body), # section_size
))
header = struct.pack('<ccHHH',
b'Z', b'z', # magic
0, # file_ver
self.get_entry(), # entry
len(bodies), # section_count
)
return header + b''.join(sections) + b''.join(bodies)
def parse_instruction(self, line):
try:
ins_name, args = line.split(maxsplit=1)
args = [ i.strip() for i in args.split(',') ]
except:
ins_name = line
args = []
if ins_name.upper() == 'JMP':
is_jmp = True
ins_name = 'ADDI'
args = ['IP', 'IP', args[0]]
else:
is_jmp = False
if len(args) > 0:
if ins_name[0].upper() == 'J' or ins_name.upper() == 'CALL' or is_jmp:
rel = True
else:
rel = False
imm = self.try_parse_imm(args[-1], rel=rel)
if imm is None:
if rel:
raise ValueError('jump instruction must have target\nline: %r' % line)
regs = args
else:
regs = args[:-1]
yield Instruction(ins_name, *regs, imm=imm)
else:
yield Instruction(ins_name, *args)
def try_parse_imm(self, val, rel=False):
if val[0] == '$':
if '+' in val:
name, offset = val[1:].split('+')
offset = self._parse_int(offset)
return Symbol(name, offset, is_relative=rel)
else:
return Symbol(val[1:], is_relative=rel)
try:
return self._parse_int(val)
except:
pass
def _parse_int(self, s):
s = s.strip()
if s[:2] == '0x':
return int(s, 16)
elif s[0] == '#':
return int(s[1:], 10)
else:
            return int(s)
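# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the directive syntax that Parser.parse() above
# recognizes (.sect / .entry / .align / .db / .zero / .str / labels, with ';'
# comments). The source text, section names and addresses are made up for
# illustration; no instruction mnemonics are used because the set of valid
# mnemonics depends on the Instruction/Opcodes modules imported above.
#
# example_source = """
# .sect TEXT 0x4000        ; code section at an explicit base address
# start:                   ; label, resolvable later as $start
# .sect DATA 0x6000
# msg:
# .str "hello"             ; bytes of the string plus a two-byte terminator
# .align 4
# .db 01, 02, 03           ; raw bytes, parsed as hex
# .zero 0x10               ; 16 zero bytes
# """
# p = Parser(example_source)   # parse() runs inside the constructor
# image = p.build()            # header + section table + section bodies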
|
35647
|
from decimal import Decimal
import simplejson as json
import requests
from .converter import RatesNotAvailableError, DecimalFloatMismatchError
class BtcConverter(object):
"""
    Get bitcoin rates and conversion
"""
def __init__(self, force_decimal=False):
self._force_decimal = force_decimal
def _decode_rates(self, response, use_decimal=False):
if self._force_decimal or use_decimal:
decoded_data = json.loads(response.text, use_decimal=True)
else:
decoded_data = response.json()
return decoded_data
def get_latest_price(self, currency):
"""
        Get the latest price of one bitcoin in the given currency (1 BTC => X USD)
"""
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if self._force_decimal:
return Decimal(price)
return price
return None
def get_previous_price(self, currency, date_obj):
"""
        Get the price of one bitcoin on the given date
"""
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
            '?start={}&end={}&currency={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if self._force_decimal:
return Decimal(price)
return price
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def get_previous_price_list(self, currency, start_date, end_date):
"""
Get List of prices between two dates
"""
start = start_date.strftime('%Y-%m-%d')
end = end_date.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
            '?start={}&end={}&currency={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = self._decode_rates(response)
price_dict = data.get('bpi', {})
return price_dict
return {}
def convert_to_btc(self, amount, currency):
"""
        Convert an amount in the given currency to bitcoin
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def convert_btc_to_cur(self, coins, currency):
"""
        Convert a bitcoin amount to the given currency
"""
if isinstance(coins, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_amount = coins * price
return converted_amount
except TypeError:
raise DecimalFloatMismatchError("convert_btc_to_cur requires coins parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def convert_to_btc_on(self, amount, currency, date_obj):
"""
Convert X amount to BTC based on given date rate
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
            '?start={}&end={}&currency={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc_on requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date")
def convert_btc_to_cur_on(self, coins, currency, date_obj):
"""
Convert X BTC to valid currency amount based on given date
"""
if isinstance(coins, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
            '?start={}&end={}&currency={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = coins*price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_btc_to_cur_on requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date")
def get_symbol(self):
"""
        Return the Unicode symbol used for bitcoin
"""
return "\u0E3F"
_Btc_Converter = BtcConverter()
get_btc_symbol = _Btc_Converter.get_symbol
convert_btc_to_cur_on = _Btc_Converter.convert_btc_to_cur_on
convert_to_btc_on = _Btc_Converter.convert_to_btc_on
convert_btc_to_cur = _Btc_Converter.convert_btc_to_cur
convert_to_btc = _Btc_Converter.convert_to_btc
get_latest_price = _Btc_Converter.get_latest_price
get_previous_price = _Btc_Converter.get_previous_price
get_previous_price_list = _Btc_Converter.get_previous_price_list
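# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the module-level convenience aliases defined just above; the
# currency codes and dates below are arbitrary examples, and each call makes
# a live request to the coindesk API.
#
# import datetime
# print(get_latest_price('USD'))          # price of 1 BTC in USD
# print(convert_to_btc(100, 'USD'))       # 100 USD expressed in BTC
# print(convert_btc_to_cur(0.5, 'EUR'))   # 0.5 BTC expressed in EUR
# when = datetime.date(2020, 1, 1)
# print(get_previous_price('USD', when))  # closing price on that date
# print(get_previous_price_list('USD', when, when + datetime.timedelta(days=7)))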
|
35670
|
from django.utils.translation import ugettext_lazy as _
from mayan.apps.authentication.link_conditions import condition_user_is_authenticated
from mayan.apps.navigation.classes import Link, Separator, Text
from mayan.apps.navigation.utils import factory_condition_queryset_access
from .icons import (
icon_current_user_details, icon_group_create, icon_group_delete_single,
icon_group_delete_multiple, icon_group_edit, icon_group_list,
icon_group_setup, icon_group_user_list, icon_user_create,
icon_user_edit, icon_user_group_list, icon_user_list,
icon_user_delete_single, icon_user_delete_multiple,
icon_user_set_options, icon_user_setup
)
from .link_conditions import condition_user_is_not_superuser
from .permissions import (
permission_group_create, permission_group_delete, permission_group_edit,
permission_group_view, permission_user_create, permission_user_delete,
permission_user_edit, permission_user_view
)
from .utils import get_user_label_text
# Current user
link_current_user_details = Link(
args='request.user.id',
condition=condition_user_is_authenticated,
icon=icon_current_user_details, text=_('User details'),
view='user_management:user_details'
)
# Group
link_group_create = Link(
icon=icon_group_create, permissions=(permission_group_create,),
text=_('Create new group'), view='user_management:group_create'
)
link_group_delete_single = Link(
args='object.id', icon=icon_group_delete_single,
permissions=(permission_group_delete,), tags='dangerous',
text=_('Delete'), view='user_management:group_delete_single'
)
link_group_delete_multiple = Link(
icon=icon_group_delete_multiple, tags='dangerous', text=_('Delete'),
view='user_management:group_delete_multiple'
)
link_group_edit = Link(
args='object.id', icon=icon_group_edit,
permissions=(permission_group_edit,), text=_('Edit'),
view='user_management:group_edit'
)
link_group_list = Link(
condition=factory_condition_queryset_access(
app_label='auth', model_name='Group',
object_permission=permission_group_view,
), icon=icon_group_list, text=_('Groups'),
view='user_management:group_list'
)
link_group_user_list = Link(
args='object.id', icon=icon_group_user_list,
permissions=(permission_group_edit,), text=_('Users'),
view='user_management:group_members'
)
link_group_setup = Link(
condition=factory_condition_queryset_access(
app_label='auth', model_name='Group',
callback=condition_user_is_not_superuser,
object_permission=permission_group_view,
view_permission=permission_group_create
), icon=icon_group_setup, text=_('Groups'),
view='user_management:group_list'
)
# User
link_user_create = Link(
condition=condition_user_is_authenticated, icon=icon_user_create,
permissions=(permission_user_create,), text=_('Create new user'),
view='user_management:user_create'
)
link_user_delete_single = Link(
args='object.id', condition=condition_user_is_authenticated,
icon=icon_user_delete_single, permissions=(permission_user_delete,),
tags='dangerous', text=_('Delete'),
view='user_management:user_delete_single'
)
link_user_delete_multiple = Link(
icon=icon_user_delete_multiple, tags='dangerous', text=_('Delete'),
view='user_management:user_delete_multiple'
)
link_user_edit = Link(
args='object.id', condition=condition_user_is_authenticated,
icon=icon_user_edit, permissions=(permission_user_edit,), text=_('Edit'),
view='user_management:user_edit'
)
link_user_group_list = Link(
args='object.id', condition=condition_user_is_authenticated,
icon=icon_user_group_list, permissions=(permission_user_edit,),
text=_('Groups'), view='user_management:user_groups'
)
link_user_list = Link(
icon=icon_user_list, text=_('Users'),
condition=factory_condition_queryset_access(
app_label='auth', model_name='User',
callback=condition_user_is_authenticated,
object_permission=permission_user_view,
view_permission=permission_user_create
), view='user_management:user_list'
)
link_user_set_options = Link(
args='object.id', condition=condition_user_is_authenticated,
icon=icon_user_set_options, permissions=(permission_user_edit,),
text=_('User options'), view='user_management:user_options'
)
link_user_setup = Link(
condition=factory_condition_queryset_access(
app_label='auth', model_name='User',
object_permission=permission_user_view,
view_permission=permission_user_create,
), icon=icon_user_setup, text=_('Users'),
view='user_management:user_list'
)
separator_user_label = Separator()
text_user_label = Text(
html_extra_classes='menu-user-name', text=get_user_label_text
)
|
35683
|
import inspect
try:
from unittest import mock
except ImportError:
import mock
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver, WebElement
from selenium.common.exceptions import NoSuchElementException
from page_objects import PageObject, PageElement, MultiPageElement
@pytest.fixture()
def webdriver():
return mock.Mock(spec=WebDriver)
class TestConstructor:
def test_page_element(self):
elem = PageElement(css='foo')
assert elem.locator == (By.CSS_SELECTOR, 'foo')
def test_multi_page_element(self):
elem = MultiPageElement(id_='bar')
assert elem.locator == (By.ID, 'bar')
def test_page_element_bad_args(self):
with pytest.raises(ValueError):
PageElement()
with pytest.raises(ValueError):
PageElement(id_='foo', xpath='bar')
class TestGet:
def test_get_descriptors(self, webdriver):
class TestPage(PageObject):
test_elem1 = PageElement(css='foo')
test_elem2 = PageElement(id_='bar')
webdriver.find_element.side_effect = ["XXX", "YYY"]
page = TestPage(webdriver=webdriver)
assert page.test_elem1 == "XXX"
assert page.test_elem2 == "YYY"
assert webdriver.find_element.mock_calls == [
mock.call(By.CSS_SELECTOR, 'foo'),
mock.call(By.ID, 'bar'),
]
def test_get_element_with_context(self, webdriver):
class TestPage(PageObject):
test_elem = PageElement(css='bar', context=True)
page = TestPage(webdriver=webdriver)
elem = mock.Mock(spec=WebElement, name="My Elem")
res = page.test_elem(elem)
assert elem.find_element.called_once_with(By.CSS_SELECTOR, 'bar')
assert res == elem.find_element.return_value
def test_get_not_found(self, webdriver):
class TestPage(PageObject):
test_elem = PageElement(css='bar')
page = TestPage(webdriver=webdriver)
webdriver.find_element.side_effect = NoSuchElementException
assert page.test_elem is None
def test_get_unattached(self):
assert PageElement(css='bar').__get__(None, None) is None
def test_get_multi(self, webdriver):
class TestPage(PageObject):
test_elems = MultiPageElement(css='foo')
webdriver.find_elements.return_value = ["XXX", "YYY"]
page = TestPage(webdriver=webdriver)
assert page.test_elems == ["XXX", "YYY"]
assert webdriver.find_elements.called_once_with(By.CSS_SELECTOR, 'foo')
def test_get_multi_not_found(self, webdriver):
class TestPage(PageObject):
test_elems = MultiPageElement(css='foo')
webdriver.find_elements.side_effect = NoSuchElementException
page = TestPage(webdriver=webdriver)
assert page.test_elems == []
class TestSet:
def test_set_descriptors(self, webdriver):
class TestPage(PageObject):
test_elem1 = PageElement(css='foo')
page = TestPage(webdriver=webdriver)
elem = mock.Mock(spec=WebElement, name="My Elem")
webdriver.find_element.return_value = elem
page.test_elem1 = "XXX"
assert webdriver.find_elements.called_once_with(By.CSS_SELECTOR, 'foo')
elem.send_keys.assert_called_once_with('XXX')
def test_cannot_set_with_context(self, webdriver):
class TestPage(PageObject):
test_elem = PageElement(css='foo', context=True)
page = TestPage(webdriver=webdriver)
with pytest.raises(ValueError) as e:
page.test_elem = 'xxx'
assert "doesn't support elements with context" in e.value.args[0]
def test_cannot_set_not_found(self, webdriver):
class TestPage(PageObject):
test_elem = PageElement(css='foo')
page = TestPage(webdriver=webdriver)
webdriver.find_element.side_effect = NoSuchElementException
with pytest.raises(ValueError) as e:
page.test_elem = 'xxx'
assert "element not found" in e.value.args[0]
def test_set_multi(self, webdriver):
class TestPage(PageObject):
test_elems = MultiPageElement(css='foo')
page = TestPage(webdriver=webdriver)
elem1 = mock.Mock(spec=WebElement)
elem2 = mock.Mock(spec=WebElement)
webdriver.find_elements.return_value = [elem1, elem2]
page.test_elems = "XXX"
assert webdriver.find_elements.called_once_with(By.CSS_SELECTOR, 'foo')
elem1.send_keys.assert_called_once_with('XXX')
elem2.send_keys.assert_called_once_with('XXX')
def test_cannot_set_multi_with_context(self, webdriver):
class TestPage(PageObject):
test_elem = MultiPageElement(css='foo', context=True)
page = TestPage(webdriver=webdriver)
with pytest.raises(ValueError) as e:
page.test_elem = 'xxx'
assert "doesn't support elements with context" in e.value.args[0]
def test_cannot_set_multi_not_found(self, webdriver):
class TestPage(PageObject):
test_elem = MultiPageElement(css='foo')
page = TestPage(webdriver=webdriver)
webdriver.find_elements.side_effect = NoSuchElementException
with pytest.raises(ValueError) as e:
page.test_elem = 'xxx'
assert "no elements found" in e.value.args[0]
class TestRootURI:
class TestPage(PageObject):
pass
def test_from_constructor(self, webdriver):
page = self.TestPage(webdriver=webdriver, root_uri="http://example.com")
assert page.root_uri == 'http://example.com'
def test_from_webdriver(self):
webdriver = mock.Mock(spec=WebDriver, root_uri="http://example.com/foo")
page = self.TestPage(webdriver=webdriver)
assert page.root_uri == 'http://example.com/foo'
def test_get(self, webdriver):
page = self.TestPage(webdriver=webdriver, root_uri="http://example.com")
page.get('/foo/bar')
assert webdriver.get.called_once_with("http://example.com/foo/bar")
def test_get_no_root(self, webdriver):
page = self.TestPage(webdriver=webdriver)
page.get('/foo/bar')
assert webdriver.get.called_once_with("/foo/bar")
|
35739
|
from metaflow import FlowSpec, step
class ForeachFlow(FlowSpec):
@step
def start(self):
self.creatures = ['bird', 'mouse', 'dog']
self.next(self.analyze_creatures, foreach='creatures')
@step
def analyze_creatures(self):
print("Analyzing", self.input)
self.creature = self.input
self.score = len(self.creature)
self.next(self.join)
@step
def join(self, inputs):
self.best = max(inputs, key=lambda x: x.score).creature
self.next(self.end)
@step
def end(self):
print(self.best, 'won!')
if __name__ == '__main__':
ForeachFlow()
|
35745
|
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional
from talon import Context, Module, actions, app, cron, ui
# XXX(nriley) actions are being returned out of order; that's a problem if we want to pop up a menu
mod = Module()
mod.list("notification_actions", desc="Notification actions")
mod.list("notification_apps", desc="Notification apps")
notification_debug = mod.setting(
"notification_debug",
type=bool,
default=False,
desc="Display macOS notification debugging information.",
)
try:
from rich.console import Console
console = Console(color_system="truecolor", soft_wrap=True)
def debug_print(obj: any, *args):
"""Pretty prints the object"""
if not notification_debug.get():
return
if args:
console.out(obj, *args)
else:
console.print(obj)
except ImportError:
def debug_print(obj: any, *args):
if not notification_debug.get():
return
print(obj, *args)
@mod.action_class
class Actions:
def notification_action(index: int, action: str) -> bool:
"""Perform the specified action on the notification (stack) at the specified index"""
return False
def notification_app_action(app_name: str, action: str) -> bool:
"""Perform the specified action on the first notification (stack) for the specified app"""
return False
def notifications_update():
"""Update notification list to reflect what is currently onscreen"""
# (poll? not try to keep up? not sure what else to do)
def notification_center():
"""Display or hide Notification Center"""
@dataclass(frozen=True)
class Notification:
identifier: int
subrole: str = field(default=None, compare=False)
app_name: str = field(default=None, compare=False)
stacking_identifier: str = field(default=None, compare=False)
title: str = field(default=None, compare=False)
subtitle: str = field(default=None, compare=False)
body: str = field(default=None, compare=False)
# action values are named "Name:<name>\nTarget:0x0\nSelector:(null)"; keys are speakable
actions: dict[str, str] = field(default=None, compare=False)
@staticmethod
def group_identifier(group):
identifier = getattr(group, "AXIdentifier", None)
if identifier is None or not str.isdigit(identifier):
return None
return int(identifier)
@staticmethod
def from_group(group, identifier):
group_actions = group.actions
if "AXScrollToVisible" in group_actions:
del group_actions["AXScrollToVisible"] # not useful
# XXX(nriley) create_spoken_forms_from_list doesn't handle apostrophes correctly
# https://github.com/knausj85/knausj_talon/issues/780
group_actions = {
name.lower().replace("’", "'"): action
for action, name in group_actions.items()
}
title = body = subtitle = None
try:
title = group.children.find_one(AXIdentifier="title").AXValue
except ui.UIErr:
pass
try:
body = group.children.find_one(AXIdentifier="body").AXValue
except ui.UIErr:
pass
try:
subtitle = group.children.find_one(AXIdentifier="subtitle").AXValue
except ui.UIErr:
pass
return Notification(
identifier=identifier,
subrole=group.AXSubrole,
app_name=group.AXDescription,
stacking_identifier=group.AXStackingIdentifier,
title=title,
subtitle=subtitle,
body=body,
actions=group_actions,
)
@staticmethod
def notifications_in_window(window):
notifications = []
for group in window.children.find(AXRole="AXGroup"):
if not (identifier := Notification.group_identifier(group)):
continue
notification = Notification.from_group(group, identifier)
notifications.append(notification)
return notifications
MONITOR = None
ctx = Context()
ctx.matches = r"""
os: mac
"""
ctx.lists["user.notification_actions"] = {}
ctx.lists["user.notification_apps"] = {}
@ctx.action_class("user")
class UserActions:
def notification_action(index: int, action: str) -> bool:
return MONITOR.perform_action(action, index=index)
def notification_app_action(app_name: str, action: str) -> bool:
return MONITOR.perform_action(action, app_name=app_name)
def notifications_update():
MONITOR.update_notifications()
def notification_center():
cc = ui.apps(bundle="com.apple.controlcenter")[0]
cc.element.children.find_one(AXRole="AXMenuBar", max_depth=0).children.find_one(
AXRole="AXMenuBarItem",
AXSubrole="AXMenuExtra",
AXIdentifier="com.apple.menuextra.clock",
max_depth=0,
).perform("AXPress")
class NotificationMonitor:
__slots__ = (
"pid",
"notifications",
)
def __init__(self, app: ui.App):
self.pid = app.pid
self.notifications = []
ui.register("win_open", self.win_open)
ui.register("win_close", self.win_close)
ui.register("app_close", self.app_closed)
self.update_notifications()
def win_open(self, window):
if not window.app.pid == self.pid:
return
notifications = Notification.notifications_in_window(window)
self.update_notifications(adding=notifications)
def notification_groups(self):
ncui = ui.apps(pid=self.pid)[0]
for window in ncui.windows():
for group in window.children.find(AXRole="AXGroup"):
if not (identifier := Notification.group_identifier(group)):
continue
yield identifier, group
def perform_action(
self, action: str, index: Optional[int] = None, app_name: str = None
):
self.update_notifications()
cron.after("500ms", self.update_notifications)
notification = None
if index is not None:
if index < 0 or index > len(self.notifications) - 1:
app.notify(f"Unable to locate notification #{index + 1}", "Try again?")
return False
notification = self.notifications[index]
elif app_name is not None:
try:
notification = next(
notification
for notification in self.notifications
if notification.app_name == app_name
)
except StopIteration:
app.notify(
f"Unable to locate notification for {app_name}", "Try again?"
)
return False
for identifier, group in self.notification_groups():
if identifier != notification.identifier:
continue
if action not in notification.actions:
# allow closing a notification stack like an individual notification
if action == "close" and "clear all" in notification.actions:
action = "clear all"
else:
app.notify(f"No such action “{action}”", "Try again?")
return False
group.perform(notification.actions[action])
return True
app.notify("Unable to locate notification", "Try again?")
return False
def update_notifications(self, adding=[]):
if adding:
self.notifications += adding
notifications = {}
for identifier, group in self.notification_groups():
y = group.AXPosition.y
try:
notifications[y] = self.notifications[
self.notifications.index(Notification(identifier=identifier))
]
except ValueError:
notifications[y] = Notification.from_group(group, identifier)
self.notifications = list(notifications.values())
if notifications:
debug_print(notifications)
notification_actions = set()
notification_apps = set()
for notification in self.notifications:
notification_actions.update(notification.actions.keys())
notification_apps.add(notification.app_name)
notification_actions = list(notification_actions)
# XXX(nriley) create_spoken_forms_from_list doesn't handle apostrophes correctly
# https://github.com/knausj85/knausj_talon/issues/780
apostrophe_words = {
word.replace("'", " "): word
for word in chain.from_iterable(
action.split() for action in notification_actions
)
if "'" in word
}
words_to_exclude = [word.split(" ")[0] for word in apostrophe_words]
notification_actions = actions.user.create_spoken_forms_from_list(
notification_actions, words_to_exclude=words_to_exclude
)
if apostrophe_words:
notification_actions = {
spoken_form.replace(mangled_word, word): action
for mangled_word, word in apostrophe_words.items()
for spoken_form, action in notification_actions.items()
if "apostrophe" not in spoken_form
}
if notification_actions:
debug_print("actions", notification_actions)
if "close" not in notification_actions and "clear all" in notification_actions:
# allow closing a notification stack like an individual notification
notification_actions["close"] = "clear all"
ctx.lists["user.notification_actions"] = notification_actions
# XXX(nriley) use app name overrides from knausj?
notification_apps = actions.user.create_spoken_forms_from_list(
notification_apps
)
ctx.lists["user.notification_apps"] = notification_apps
if notification_apps:
debug_print("apps", notification_apps)
def win_close(self, window):
if not window.app.pid == self.pid:
return
self.update_notifications()
def app_closed(self, app):
if app.pid == self.pid:
ui.unregister("app_close", self.app_closed)
def app_launched(app):
global MONITOR
if not app.bundle == "com.apple.notificationcenterui":
return
MONITOR = NotificationMonitor(app)
def monitor():
global MONITOR
apps = ui.apps(bundle="com.apple.notificationcenterui")
if apps:
MONITOR = NotificationMonitor(apps[0])
ui.register("app_launch", app_launched)
app.register("ready", monitor)
|
35753
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from transformers import BartTokenizer, BartTokenizerFast, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer
from transformers.data.datasets import GlueDataset
from transformers.data.datasets import GlueDataTrainingArguments
from transformers.data.processors.glue import glue_convert_examples_to_features
from transformers.data.processors.utils import InputFeatures
from loguru import logger
from ..processors.seq_clf import seq_clf_output_modes, seq_clf_processors, seq_clf_tasks_num_labels
class Split(Enum):
train = 'train'
dev = 'dev'
test = 'test'
class SeqClfDataset(GlueDataset):
"""
    Why does this class even exist?
    `class GlueDataset(Dataset)` has a constructor `def __init__()` with
    `processor = glue_processors[args.task_name]()`. However, I want to expand `glue_processors`
    with protein classification task names, and that line in the parent
    class doesn't accommodate this.
"""
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__(
self,
args: GlueDataTrainingArguments,
tokenizer: PreTrainedTokenizer,
limit_length: Optional[int] = None,
mode: Union[str, Split] = Split.train,
cache_dir: Optional[str] = None,
):
self.args = args
self.processor = seq_clf_processors[args.task_name]()
self.output_mode = seq_clf_output_modes[args.task_name]
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,
'cached_{}_{}_{}_{}'.format(
mode.value, tokenizer.__class__.__name__, str(args.max_seq_length), args.task_name,
),
)
label_list = self.processor.get_labels()
if args.task_name in ['mnli', 'mnli-mm'] and tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + '.lock'
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file)
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]'
                )
else:
logger.info(f'Creating features from dataset file at {args.data_dir}')
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir)
else:
examples = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
examples = examples[:limit_length]
# Load a data file into a list of ``InputFeatures``
self.features = glue_convert_examples_to_features(
examples,
tokenizer,
max_length=args.max_seq_length,
label_list=label_list,
output_mode=self.output_mode,
)
start = time.time()
torch.save(self.features, cached_features_file)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]'
                )
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def get_labels(self):
return self.label_list
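# Minimal usage sketch (not part of the original module). It assumes a task name
# that has been registered in `seq_clf_processors` / `seq_clf_output_modes`
# (the name 'protein_clf' below is hypothetical) and that `data_dir` points at
# data laid out the way the corresponding processor expects.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from transformers import RobertaTokenizer, default_data_collator

    args = GlueDataTrainingArguments(
        task_name='protein_clf',      # hypothetical registered task name
        data_dir='data/protein_clf',  # hypothetical data directory
        max_seq_length=128,
    )
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    train_dataset = SeqClfDataset(args, tokenizer, mode=Split.train)
    # InputFeatures are dataclasses, so the default transformers collator can batch them.
    loader = DataLoader(train_dataset, batch_size=32, collate_fn=default_data_collator)
    logger.info(f'Loaded {len(train_dataset)} training examples')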
|
35771
|
import os
import ipaddress
import numpy as np
import pandas as pd
import datetime
import boto3
import gzip
import json
from signal_processing import signalProcess
BUCKET_NAME = os.environ.get("BUCKET_NAME", None)
VPC_FLOW_LOGS_PATH = os.environ.get("VPC_FLOW_LOGS_PATH", None)
FINDINGS_PATH = os.environ.get("FINDINGS_PATH", None)
TMP_DOWNLOAD_DIR = "/tmp/s3_download"
FLOW_COLUMNS = [
"date",
"version",
"account-id",
"interface-id",
"srcaddr",
"dstaddr",
"srcport",
"dstport",
"protocol",
"packets",
"bytes",
"start",
"end",
"action",
"log-status",
]
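# A synthetic example of one flow-log record after splitting on spaces; it maps
# onto FLOW_COLUMNS[1:] (the default VPC flow log format), with an extra leading
# date column only when the raw files carry one:
#   2 123456789012 eni-0a1b2c3d 10.0.0.5 10.0.0.9 49152 443 6 10 840 1600000000 1600000060 ACCEPT OK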
def cloud_sniper_beaconing_detection(event, context):
bucket_name = BUCKET_NAME
vpc_flow_logs_path = VPC_FLOW_LOGS_PATH
findings_path = FINDINGS_PATH
df = load_data(bucket_name, vpc_flow_logs_path)
print(f"Number of raw records: {len(df.index)}")
version = df.version.iloc[0] # constant
account_id = df["account-id"].iloc[0] # constant
df = filter_format_data(df)
print(f"Number of records after filtering missing data: {len(df.index)}")
df = sort_data(df)
print(f"Number of records after filtering by time: {len(df.index)}")
df = filter_useless_data(df)
print(f"Number of records after filtering by port: {len(df.index)}")
df = filter_unfrequent_data(df)
print(f"Number of records after filtering unfrequent: {len(df.index)}")
res = find_beacons(df)
new_fields = {
"hits": "",
"cloud.provider": "aws",
"event.type": "beaconing",
"cloud.account.name": "",
"interface.vpc.id": "",
"protocol": "",
"version": version,
"cloud.account.id": account_id,
}
list(map(lambda x: x.update(new_fields), res))
print(f"Result: {res}")
save_results(bucket_name, findings_path, res)
return res
def load_data(s3_bucket, s3_vpc_flow_logs_path):
s3 = boto3.resource('s3')
bucket = s3.Bucket(name=s3_bucket)
prefix = s3_vpc_flow_logs_path
if prefix.startswith("/"):
prefix = prefix[1:]
if not prefix.endswith("/"):
prefix += "/"
if not os.path.exists(TMP_DOWNLOAD_DIR):
os.mkdir(TMP_DOWNLOAD_DIR)
for i, s3_file_obj in enumerate(bucket.objects.filter(Prefix=prefix)):
if s3_file_obj.key.endswith(".log.gz"):
extension = "log.gz"
elif s3_file_obj.key.endswith(".log"):
extension = "log"
else:
continue
bucket.download_file(s3_file_obj.key,
TMP_DOWNLOAD_DIR + "/%06d" % i + "." + extension)
data = []
for fname in sorted(os.listdir(TMP_DOWNLOAD_DIR)):
if fname.endswith(".log.gz"):
open_ = gzip.open
decode = True
elif fname.endswith(".log"):
open_ = open
decode = False
else:
continue
with open_(os.path.join(TMP_DOWNLOAD_DIR, fname), 'r') as fd:
first_line = True
for line in fd:
if first_line:
first_line = False
continue
if decode:
line = line.decode("utf-8").strip().split(" ")
else:
line = line.strip().split(" ")
data.append(line)
if data and (len(data[0]) == len(FLOW_COLUMNS)):
df = pd.DataFrame(data, columns=FLOW_COLUMNS)
df.drop(['date'], axis=1, inplace=True)
else:
df = pd.DataFrame(data, columns=FLOW_COLUMNS[1:])
return df
def filter_format_data(df):
df = df[df.srcaddr != "-"]
df = df[df.dstaddr != "-"]
df.drop(["version", "srcport"], axis=1, inplace=True)
    df = df.replace("-", np.nan)
df[["dstport", "protocol", "packets", "bytes", "start", "end"]] = \
df[["dstport", "protocol", "packets", "bytes", "start", "end"]] \
.apply(pd.to_numeric)
return df
def sort_data(df):
df['datetime'] = pd.to_datetime(df.start, unit='s')
# TODO: should we process just the last hours?
df = df.set_index('datetime')
df.sort_index(inplace=True)
return df.reset_index(level=0)
def filter_useless_data(df):
# Requirements
# * srcIP should be private
    # * dstport <= 1024 and != 123
if df.empty:
return df
df = df[df.srcaddr.map(lambda x: ipaddress.ip_address(x).is_private)]
df = df[df.dstport <= 1024]
df = df[df.dstport != 123]
return df
def filter_unfrequent_data(df):
# remove communications if there were less than 6 snippets
selection = df.groupby(["srcaddr", "dstaddr", "dstport"])
df = selection.filter(lambda x: len(x) >= 6)
df = df.reset_index(level=0)
return df
def find_beacons(df):
res = []
time_fmt = "%Y-%m-%dT%H:%M:%S.%f"
groups = df.groupby(["srcaddr", "dstaddr", "dstport"])
data_in = {
"data": {},
"time": {}
}
for (srcaddr, dstaddr, port), traffic in groups:
k = (srcaddr, dstaddr, port)
data_in["data"][k] = traffic.bytes
data_in["time"][k] = traffic.datetime
lrner = signalProcess(data_in, options_in=None)
output = lrner.getPrimaryPeriods()
    for (srcaddr, dstaddr, port) in output["powers"]:
        if output["powers"][(srcaddr, dstaddr, port)][0] is not None:
            k = (srcaddr, dstaddr, port)
            print(data_in["time"][k])
start_time = data_in["time"][k].iloc[0].strftime(time_fmt)[:-3] + 'Z'
end_time = data_in["time"][k].iloc[-1].strftime(time_fmt)[:-3] + 'Z'
res.append({
"source.ip": srcaddr,
"destination.ip": dstaddr,
"destination.port": int(port),
"timestamp": start_time,
"event.end": end_time,
"event.start": start_time
})
return res
def save_results(bucket_name, findings_path, res):
now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
s3_resource = boto3.resource('s3')
bucket = s3_resource.Bucket(name=bucket_name)
if findings_path.startswith("/"):
findings_path = findings_path[1:]
if findings_path.endswith("/"):
findings_path = findings_path[:-1]
(bucket.Object(key=f"{findings_path}/beaconing_detection_{now}.json")
.put(Body=bytes(json.dumps(res).encode('UTF-8'))))
if __name__ == "__main__":
print(json.dumps(cloud_sniper_beaconing_detection(None, None), indent=4))
|
35772
|
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument("--text", type=str, help="path to original text file")
parser.add_argument("--train", type=str, help="path to original training data file")
parser.add_argument("--valid", type=str, help="path to original validation data file")
parser.add_argument("--converted_text", type=str, default="Qdesc.txt", help="path to converted text file")
parser.add_argument("--converted_train", type=str, default="train.txt", help="path to converted training file")
parser.add_argument("--converted_valid", type=str, default="valid.txt", help="path to converted validation file")
if __name__=='__main__':
args = parser.parse_args()
Qid={} #Entity to id (line number in the description file)
Pid={} #Relation to id
def getNum(s):
return int(s[1:])
with open(args.text, "r") as fin:
with open(args.converted_text, "w") as fout:
lines = fin.readlines()
Cnt=0
for idx, line in enumerate(lines):
data = line.split('\t')
assert len(data) >= 2
assert data[0].startswith('Q')
desc = '\t'.join(data[1:]).strip()
if getNum(data[0])>1000:
continue
fout.write(desc+"\n")
                Qid[data[0]] = Cnt  # id = line number in the converted description file
Cnt+=1
def convert_triples(inFile, outFile):
with open(inFile, "r") as fin:
with open(outFile, "w") as fout:
lines = fin.readlines()
for line in lines:
data = line.strip().split('\t')
assert len(data) == 3
if getNum(data[0])>1000 or getNum(data[2]) > 1000:
continue
if data[1] not in Pid:
Pid[data[1]] = len(Pid)
fout.write("%d %d %d\n"%(Qid[data[0]], Pid[data[1]], Qid[data[2]]))
convert_triples(args.train, args.converted_train)
convert_triples(args.valid, args.converted_valid)
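# Illustrative walk-through with made-up data (not from the original files):
# if the text file contains the lines
#     Q1<TAB>first entity description
#     Q2<TAB>second entity description
# then Q1 and Q2 get ids 0 and 1 (their line numbers in Qdesc.txt), and a triple
# line "Q1<TAB>P10<TAB>Q2" in train/valid is rewritten as "0 0 1"
# (head entity id, relation id, tail entity id). Triples whose head or tail
# entity has a numeric part above 1000 are skipped, as are such entities'
# descriptions.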
|
35806
|
import re, logging
from django import forms
from django.forms import ModelForm
from django.utils.translation import ugettext as _
from django.contrib.localflavor.us.forms import USStateSelect,\
USPhoneNumberField
from models import Preference, ShippingWeight, ShippingPrice, ShippingItem, TaxState, DnsShop, EmailNotification
from preferences.models import ShopPolicies
from auth.models import User
from users.models import Profile
class GeneralPreferenceForm(ModelForm):
email = forms.EmailField(required=False)
phone = USPhoneNumberField(required=False)
class Meta:
model = Preference
fields = ['name_store', 'email', 'phone']
class ProfileForm(ModelForm):
state = forms.CharField(widget=USStateSelect)
class Meta:
model = Profile
fields = ['street_address', 'zip', 'city', 'state', 'country', ]
def clean_zip(self):
zip = self.cleaned_data.get("zip", "")
if zip.strip() == "": raise forms.ValidationError("Zip is a required field.")
if not (re.match("[0-9]{5}(-[0-9]{4})?$", zip)): raise forms.ValidationError("Invalid Zip code. Valid formats are XXXXX or XXXXX-XXXX")
return zip
def clean_country(self):
country = self.cleaned_data.get("country", "")
if country.strip() == "": raise forms.ValidationError("Country is a required field.")
return country
def clean_street_address(self):
street = self.cleaned_data.get("street_address", "")
if street.strip() == "": raise forms.ValidationError("Street is a required field.")
return street
def clean_city(self):
city = self.cleaned_data.get("city", "")
if city.strip() == "": raise forms.ValidationError("City is a required field.")
return city
class TaxesPreferenceForm(ModelForm):
class Meta:
model = Preference
fields = ['taxes_same_state_store', 'taxes_to_shipping_fees']
class TaxStateForm(ModelForm):
#state = forms.CharField(widget=USStateSelect)
tax = forms.DecimalField(help_text=_("Enter a state tax rate number (between 1 and 100)"))
class Meta:
model = TaxState
exclude = ['shop']
def __init__(self, shop, *args, ** kwargs):
self.shop = shop
super(TaxStateForm, self).__init__(*args, ** kwargs)
def clean_state(self):
state = self.cleaned_data['state']
try:
TaxState.objects.get(shop=self.shop, state=state)
except TaxState.DoesNotExist:
return state
raise forms.ValidationError(_("A tax for state %s already exists." % state))
def clean_tax(self):
tax = self.cleaned_data['tax']
        if tax < 0:
            raise forms.ValidationError(_("A tax has to be greater than or equal to 0%"))
        elif tax > 100:
            raise forms.ValidationError(_("A tax has to be less than or equal to 100%"))
        return tax
class TaxStateEditForm(ModelForm):
class Meta:
model = TaxState
exclude = ['shop', 'state']
def __init__(self, shop, *args, ** kwargs):
self.shop = shop
super(TaxStateEditForm, self).__init__(*args, ** kwargs)
def clean_tax(self):
tax = self.cleaned_data['tax']
        if tax < 0:
            raise forms.ValidationError(_("A tax has to be greater than or equal to 0%"))
        elif tax > 100:
            raise forms.ValidationError(_("A tax has to be less than or equal to 100%"))
        return tax
class AuctionsPreferenceForm(ModelForm):
class Meta:
model = Preference
fields = ['allow_sessions', 'allow_open_auctions', 'default_days', 'open_auto_extend', 'session_auto_extend']
class DnsShopForm(ModelForm):
class Meta:
model = DnsShop
exclude = ['shop']
def clean_dns(self):
dns = self.cleaned_data['dns']
try:
DnsShop.objects.get(dns=dns)
except DnsShop.DoesNotExist:
return dns
raise forms.ValidationError(_("A shop with that dns already exists."))
class ShippingWeightForm(ModelForm):
class Meta:
model = ShippingWeight
exclude = ['shop']
class ShippingPriceForm(ModelForm):
class Meta:
model = ShippingPrice
exclude = ['shop']
class ShippingItemForm(ModelForm):
class Meta:
model = ShippingItem
exclude = ['shop']
class EmailNotificationForm(ModelForm):
class Meta:
model = EmailNotification
fields = ['subject', 'body']
class ShopPoliciesForm(ModelForm):
class Meta:
model = ShopPolicies
fields = ['refund_policy', 'privacy_policy', 'terms_of_service']
class MarketingForm(ModelForm):
class Meta:
model = Preference
fields = ['google_analytics_account_number']
def clean_google_analytics_account_number(self):
google_analytics_account_number = self.cleaned_data['google_analytics_account_number']
if re.match(r"^\w{2}\-\d{4,8}\-\d$", google_analytics_account_number) is None:
            raise forms.ValidationError('Invalid analytics account number')
return google_analytics_account_number
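# Note: the pattern above accepts classic Google Analytics property ids of the
# form "UA-1234567-1" (two letters, a hyphen, 4-8 digits, a hyphen, one digit).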
class UsernameChangeForm(forms.ModelForm):
username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^\w+$',
help_text = _("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."),
error_message = _("This value must contain only letters, numbers and underscores."))
class Meta:
model = User
fields = ['username']
|
35826
|
import asyncio
import json
import os
from datetime import datetime, timedelta
import aiohttp
import tweepy
from dateutil.parser import parse
from fpl import FPL, utils
from pymongo import MongoClient
from constants import lineup_markers, twitter_usernames
dirname = os.path.dirname(os.path.realpath(__file__))
client = MongoClient()
database = client.team_news
def short_name_converter(team_id):
"""Converts a team's ID to their short name."""
short_name_map = {
1: "ARS",
2: "AVL",
3: "BHA",
4: "BUR",
5: "CHE",
6: "CRY",
7: "EVE",
8: "FUL",
9: "LEI",
10: "LEE",
11: "LIV",
12: "MCI",
13: "MUN",
14: "NEW",
15: "SHU",
16: "SOU",
17: "TOT",
18: "WBA",
19: "WHU",
20: "WOL",
None: None
}
return short_name_map[team_id]
async def get_current_fixtures():
async with aiohttp.ClientSession() as session:
fpl = FPL(session)
current_gameweek = await utils.get_current_gameweek(session)
fixtures = await fpl.get_fixtures_by_gameweek(current_gameweek)
min_range = timedelta(minutes=2)
return [fixture for fixture in fixtures
if fixture.team_news_time.replace(tzinfo=None) - min_range <
datetime.now() <
fixture.team_news_time.replace(tzinfo=None) + min_range]
def is_new_lineup(fixture_id, team_id):
if database.lineup.count_documents({"fixture_id": fixture_id,
"team_id": team_id}) < 1:
return True
return False
def add_lineup_to_database(fixture_id, team_id, url):
    database.lineup.update_one(
{"fixture_id": fixture_id},
{"$set": {"fixture_id": fixture_id,
"team_id": team_id,
"url": url}},
upsert=True
)
def lineup_handler(api, fixture, team_id, team_short_name):
team_name = twitter_usernames[team_short_name]
for status in api.user_timeline(screen_name=team_name,
tweet_mode="extended",
count=3):
status_split = status.full_text.lower().replace("-", " ").split()
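        # Assumption: lineup_markers (from constants.py) holds two-word phrases
        # as (word, word) tuples, e.g. ("starting", "xi"); zipping the tweet's
        # words with themselves shifted by one yields every adjacent word pair,
        # so the check below looks for one of those phrases in the tweet.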
for marker in lineup_markers:
            if marker in list(zip(status_split, status_split[1:])):
if "media" not in status.entities:
continue
media = status.entities["media"][0]
media_url = media["media_url_https"]
if is_new_lineup(fixture.id, team_id):
add_lineup_to_database(fixture.id, team_id, media_url)
return
async def main(config):
auth = tweepy.OAuthHandler(config["CONSUMER_API_KEY"],
config["CONSUMER_API_SECRET_KEY"])
auth.set_access_token(config["ACCESS_TOKEN"],
config["ACCESS_TOKEN_SECRET"])
api = tweepy.API(auth)
current_fixtures = await get_current_fixtures()
images_urls = []
for fixture in current_fixtures:
team_h_short = short_name_converter(fixture.team_h)
team_a_short = short_name_converter(fixture.team_a)
        lineup_handler(api, fixture, fixture.team_h, team_h_short)
        lineup_handler(api, fixture, fixture.team_a, team_a_short)
if __name__ == "__main__":
with open(f"{dirname}/../twitter_config.json") as file:
config = json.loads(file.read())
try:
asyncio.run(main(config))
except AttributeError:
loop = asyncio.get_event_loop()
loop.run_until_complete(main(config))
loop.close()
|
35891
|
from submission import Submission
class JulesSubmission(Submission):
def run(self, s):
# :param s: input in string format
# :return: solution flag
# your solution code goes here
def find_for_row(row):
for fi in range(len(row)):
for si in range(fi + 1, len(row)):
if row[fi] > row[si] and row[fi] % row[si] == 0:
return int(row[fi] / row[si])
elif row[si] % row[fi] == 0:
return int(row[si] / row[fi])
row_list = [[int(x) for x in row.split()] for row in s.split('\n')]
return str(sum([find_for_row(row) for row in row_list]))
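# Worked example with a made-up spreadsheet (not part of the submission): for
#     5 9 2 8
#     9 4 7 3
#     3 8 6 5
# the evenly divisible pairs per row are 8/2=4, 9/3=3 and 6/3=2, so
# run("5 9 2 8\n9 4 7 3\n3 8 6 5") returns "9".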
|
35895
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScChapecoSpider(FecamGazetteSpider):
name = "sc_chapeco"
FECAM_QUERY = "cod_entidade:71"
TERRITORY_ID = "4204202"
|
35926
|
from collections import defaultdict
from ...account.models import Address, CustomerEvent, User
from ..core.dataloaders import DataLoader
class AddressByIdLoader(DataLoader):
context_key = "address_by_id"
def batch_load(self, keys):
address_map = Address.objects.in_bulk(keys)
return [address_map.get(address_id) for address_id in keys]
class UserByUserIdLoader(DataLoader):
context_key = "user_by_id"
def batch_load(self, keys):
user_map = User.objects.in_bulk(keys)
return [user_map.get(user_id) for user_id in keys]
class CustomerEventsByUserLoader(DataLoader):
context_key = "customer_events_by_user"
def batch_load(self, keys):
events = CustomerEvent.objects.filter(user_id__in=keys)
events_by_user_map = defaultdict(list)
for event in events:
events_by_user_map[event.user_id].append(event)
return [events_by_user_map.get(user_id, []) for user_id in keys]
|
36002
|
import os
from djangular import utils
from django.test import SimpleTestCase
class SiteAndPathUtilsTest(SimpleTestCase):
site_utils = utils.SiteAndPathUtils()
def test_djangular_root(self):
current_dir = os.path.dirname(os.path.abspath(__file__))
djangular_dir = os.path.dirname(current_dir)
self.assertEqual(djangular_dir, self.site_utils.get_djangular_root())
|
36005
|
from encoded.searches.mixins import CartAggsToFacetsMixin
from snosearch.responses import BasicQueryResponseWithFacets
from snosearch.responses import BasicMatrixResponseWithFacets
class CartQueryResponseWithFacets(CartAggsToFacetsMixin, BasicQueryResponseWithFacets):
'''
Like BasicQueryResponseWithFacets but uses CartAggsToFacetsMixin instead of AggsToFacetsMixin.
'''
def __init__(self, results, query_builder, *args, **kwargs):
super().__init__(results, query_builder, *args, **kwargs)
class CartMatrixResponseWithFacets(CartAggsToFacetsMixin, BasicMatrixResponseWithFacets):
'''
Like BasicMatrixResponseWithFacets but uses CartAggsToFacetsMixin instead of AggsToFacetsMixin.
'''
def __init__(self, results, query_builder, *args, **kwargs):
super().__init__(results, query_builder, *args, **kwargs)
|
36023
|
import teek
def on_click():
print("You clicked me!")
window = teek.Window()
button = teek.Button(window, "Click me", command=on_click)
button.pack()
window.on_delete_window.connect(teek.quit)
teek.run()
|
36039
|
class Node:
def __init__(self, value):
self.value = value
self.next = None
# Have no idea how to do this
# import sys
# sys.path.insert(0, '../../data_structures')
# import node
def intersection(l1: Node, l2: Node) -> Node:
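    # Two singly linked lists that intersect share their entire tail, so they
    # must end at the same node. Find each list's tail and length, bail out if
    # the tails differ, advance the longer list by the length difference, then
    # walk both in lockstep until the pointers meet at the intersection node.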
l1_end, len1 = get_tail(l1)
l2_end, len2 = get_tail(l2)
if l1_end != l2_end:
return None
if len1 > len2:
l1 = move_head(l1, len1 - len2)
else:
l2 = move_head(l2, len2 - len1)
while l1 != l2:
l1 = l1.next
l2 = l2.next
print(l1.value, l2.value)
return l1
def move_head(head: Node, pos: int) -> Node:
current = head
while pos > 0:
current = current.next
pos -= 1
return current
def get_tail(head: Node) -> (Node, int):
current = head
length = 0
    while current.next is not None:
current = current.next
length += 1
return (current, length)
inter = Node('c')
inter.next = Node('a')
inter.next.next = Node('r')
l1 = Node('r')
l1.next = Node('a')
l1.next.next = Node('c')
l1.next.next.next = Node('e')
l1.next.next.next.next = inter
l2 = Node('r')
l2.next = Node('e')
l2.next.next = Node('d')
l2.next.next.next = inter
res = intersection(l1, l2)
print(res.value)
|
36048
|
import os
from .abstract_command import AbstractCommand
from ..services.state_utils import StateUtils
from ..services.state import StateHolder
from ..services.command_handler import CommandHandler
from ..services.console_logger import ColorPrint
class Start(AbstractCommand):
command = ["start", "up"]
args = ["[<project/plan>]"]
args_descriptions = {"[<project/plan>]": "Name of the project in the catalog and/or name of the project's plan"}
description = "Run: 'poco start nginx/default' or 'poco up nginx/default' to start nginx project (docker, helm " \
"or kubernetes) with the default plan."
run_command = "start"
need_checkout = True
def prepare_states(self):
StateUtils.calculate_name_and_work_dir()
StateUtils.prepare("compose_handler")
def resolve_dependencies(self):
if StateHolder.catalog_element is not None and not StateUtils.check_variable('repository'):
ColorPrint.exit_after_print_messages(message="Repository not found for: " + str(StateHolder.name))
self.check_poco_file()
def execute(self):
if self.need_checkout:
StateHolder.compose_handler.run_checkouts()
CommandHandler().run(self.run_command)
if hasattr(self, "end_message"):
ColorPrint.print_info(getattr(self, "end_message"))
@staticmethod
def check_poco_file():
if not StateUtils.check_variable('poco_file'):
poco_file = str(StateHolder.repository.target_dir if StateHolder.repository is not None
else os.getcwd()) + '/poco.yml'
ColorPrint.print_error(message="Poco file not found: " + poco_file)
ColorPrint.exit_after_print_messages(message="Use 'poco init " + StateHolder.name +
"', that will generate a default poco file for you",
msg_type="warn")
|