filename | text
---|---
the-stack_0_19937 | """
Code to allow this package to be pip-installed
"""
import os
import sys
from setuptools import setup, find_packages
LIBRARY_VERSION = '1.0.5'
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write('''
==========================
Unsupported Python version
==========================
This version of covid-xprize requires Python {}.{}, but you're trying to
install it on Python {}.{}.
'''.format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
CUR_DIRECTORY_PATH = os.path.abspath(os.path.dirname(__file__))
# Python doesn't allow hyphens in package names so use underscore instead
PACKAGE_NAME = 'covid_xprize'
LIB_NAME = 'covid-xprize'
def read(fname):
"""
Read file contents into a string
:param fname: File to be read
:return: String containing contents of file
"""
with open(os.path.join(os.path.dirname(__file__), fname)) as file:
return file.read()
setup(
name=LIB_NAME,
version=LIBRARY_VERSION,
python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
packages=find_packages(),
package_dir={PACKAGE_NAME: PACKAGE_NAME}, # the one line where all the magic happens
package_data={
PACKAGE_NAME: [
'covid_xprize/examples/predictors/lstm/tests/fixtures/*',
'covid_xprize/validation/data',
'examples/predictors/lstm/data/*',
],
'.': [
'LICENSE.md'
]
},
install_requires=[
'keras==2.4.3',
'neat-python==0.92',
'numpy==1.18.5',
'pandas==1.1.2',
'scikit-learn==0.23.2',
'scipy==1.5.2',
'setuptools==41.0.0',
'tensorflow==2.2.1'
],
description='Contains sample code and notebooks '
'for developing and validating entries for the Cognizant COVID X-Prize.',
long_description=read('README.md'),
author='Olivier Francon, Darren Sargent, Elliot Meyerson',
url='https://github.com/leaf-ai/covid-xprize/',
license='See LICENSE.md'
)
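# Hedged usage note (not part of the original file): a setup script like this one is
# typically consumed from the repository root with
#   pip install .
# or, for an editable development install,
#   pip install -e .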
|
the-stack_0_19938 | from google.oauth2 import service_account
from googleapiclient.discovery import build
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
SPREADSHEET_ID = '1kvHv1OBCzr9GnFxRu9RTJC7jjQjc9M4rAiDnhyak2Sg'
# cell containing the total number of entries in the sheet
# so that we know where the new entry has to be added
NUM_ENTRIES_CELL = 'N4'
CREDENTIALS_PATH = './gsheet/creds.json'
def _get_sheets_service_client():
creds = service_account.Credentials.from_service_account_file(
CREDENTIALS_PATH, scopes=SCOPES)
service = build('sheets', 'v4', credentials=creds)
return service
def write_to_google_sheet(worksheet: str, data) -> None:
"""Calls the API to update the values of a sheet.
Args:
worksheet: string, name of the worksheet to be edited appended by a "!"
NUM_ENTRIES_CELL in the worksheet should have the total number of entries
present in the worksheet
data: list of tuples/lists, data to be added to the worksheet
Raises:
HttpError: For any Google Sheets API call related errors
"""
sheets_client = _get_sheets_service_client()
spreadsheet_response = sheets_client.spreadsheets().values().get(
spreadsheetId=SPREADSHEET_ID,
range='{}{}'.format(worksheet, NUM_ENTRIES_CELL)).execute()
entries = int(spreadsheet_response.get('values', [])[0][0])
sheets_client.spreadsheets().values().update(
spreadsheetId=SPREADSHEET_ID,
valueInputOption='USER_ENTERED',
body={
'majorDimension': 'ROWS',
'values': data
},
range='{}A{}'.format(worksheet, entries+2)).execute()
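# Hedged usage sketch (not part of the original module): the worksheet name and row
# values below are assumptions; cell N4 of that worksheet must hold its current number
# of entries, and the name keeps the trailing "!" required by the docstring above.
if __name__ == '__main__':
    write_to_google_sheet('Results!', [('2021-01-01', 'nightly-run', 42)])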
|
the-stack_0_19939 | import csv
import cv2
import numpy as np
lines = []
with open("data/driving_log.csv") as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
images = []
steering_measurements = []
for line in lines:
source_path = line[0]
image = cv2.imread(source_path)
images.append(image)
steering_measurements.append(float(line[3]))
images.append(cv2.flip(image, 1))
steering_measurements.append(float(line[3]) * -1.0)
X_train = np.array(images)
y_train = np.array(steering_measurements)
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Activation, Convolution2D, Cropping2D
def get_model():
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = (160, 320, 3)))
model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320,3)))
model.add(Convolution2D(24, 5, 5, subsample=(2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(36, 5, 5, subsample=(2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(48, 5, 5, subsample=(2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, subsample=(1, 1)))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3, subsample=(1, 1)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dense(1))
model.compile(optimizer="adam", loss="mse")
return model
model = get_model()
model.fit(X_train, y_train, validation_split = 0.2, shuffle = True, nb_epoch = 5)
model.save("../../model.h5")
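# Hedged sanity check (not part of the original script): run the freshly trained model
# on the first recorded frame and compare the prediction with its steering measurement.
sample_prediction = float(model.predict(X_train[:1])[0][0])
print("predicted steering: {:.4f}, recorded: {:.4f}".format(sample_prediction, float(y_train[0])))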
|
the-stack_0_19940 | import numpy as np
import torch
import torch.nn.functional as F
from deep4cast import custom_layers
class WaveNet(torch.nn.Module):
""":param input_channels: Number of covariates in input time series.
:param output_channels: Number of covariates in target time series.
:param horizon: Number of time steps for forecast.
:param hidden_channels: Number of channels in convolutional hidden layers.
:param skip_channels: Number of channels in convolutional layers for skip
connections.
:param dense_units: Number of hidden units in final dense layer.
:param n_layers: Number of layers per Wavenet block (determines receptive
field size).
:param n_blocks: Number of Wavenet blocks.
:param dilation: Dilation factor for temporal convolution.
"""
def __init__(self,
input_channels: int,
output_channels: int,
horizon: int,
hidden_channels=64,
skip_channels=64,
dense_units=128,
n_layers=7,
n_blocks=1,
dilation=2):
super(WaveNet, self).__init__()
self.output_channels = output_channels
self.horizon = horizon
self.hidden_channels = hidden_channels
self.skip_channels = skip_channels
self.n_layers = n_layers
self.n_blocks = n_blocks
self.dilation = dilation
self.dilations = [dilation**i for i in range(n_layers)] * n_blocks
# Set up first layer for input
self.do_conv_input = custom_layers.ConcreteDropout(channel_wise=True)
self.conv_input = torch.nn.Conv1d(
in_channels=input_channels,
out_channels=hidden_channels,
kernel_size=1
)
# Set up main WaveNet layers
self.do, self.conv, self.skip, self.resi = [], [], [], []
for d in self.dilations:
self.do.append(custom_layers.ConcreteDropout(channel_wise=True))
self.conv.append(torch.nn.Conv1d(in_channels=hidden_channels,
out_channels=hidden_channels,
kernel_size=2,
dilation=d))
self.skip.append(torch.nn.Conv1d(in_channels=hidden_channels,
out_channels=skip_channels,
kernel_size=1))
self.resi.append(torch.nn.Conv1d(in_channels=hidden_channels,
out_channels=hidden_channels,
kernel_size=1))
self.do = torch.nn.ModuleList(self.do)
self.conv = torch.nn.ModuleList(self.conv)
self.skip = torch.nn.ModuleList(self.skip)
self.resi = torch.nn.ModuleList(self.resi)
# Set up nonlinear output layers
self.do_conv_post = custom_layers.ConcreteDropout(channel_wise=True)
self.conv_post = torch.nn.Conv1d(
in_channels=skip_channels,
out_channels=skip_channels,
kernel_size=1
)
self.do_linear_mean = custom_layers.ConcreteDropout()
self.do_linear_std = custom_layers.ConcreteDropout()
self.linear_mean = torch.nn.Linear(skip_channels, horizon*output_channels)
self.linear_std = torch.nn.Linear(skip_channels, horizon*output_channels)
def forward(self, inputs):
"""Returns the parameters for a Gaussian distribution."""
output, reg_e = self.encode(inputs)
output_mean, output_std, reg_d = self.decode(output)
# Regularization
regularizer = reg_e + reg_d
return {'loc': output_mean, 'scale': output_std, 'regularizer': regularizer}
def encode(self, inputs):
"""Encoder part of the architecture."""
# Input layer
output, res_conv_input = self.do_conv_input(inputs)
output = self.conv_input(output)
# Loop over WaveNet layers and blocks
regs, skip_connections = [], []
for do, conv, skip, resi in zip(self.do, self.conv, self.skip, self.resi):
layer_in = output
output, reg = do(layer_in)
output = conv(output)
output = torch.nn.functional.relu(output)
skip = skip(output)
output = resi(output)
output = output + layer_in[:, :, -output.size(2):]
regs.append(reg)
skip_connections.append(skip)
# Sum up regularizer terms and skip connections
regs = sum(r for r in regs)
output = sum([s[:, :, -output.size(2):] for s in skip_connections])
# Nonlinear output layers
output, res_conv_post = self.do_conv_post(output)
output = torch.nn.functional.relu(output)
output = self.conv_post(output)
output = torch.nn.functional.relu(output)
output = output[:, :, [-1]]
output = output.transpose(1, 2)
# Regularization terms
regularizer = res_conv_input \
+ regs \
+ res_conv_post
return output, regularizer
def decode(self, inputs):
"""Decoder part of the architecture."""
# Apply dense layer to match output length
output_mean, res_linear_mean = self.do_linear_mean(inputs)
output_std, res_linear_std = self.do_linear_std(inputs)
output_mean = self.linear_mean(output_mean)
output_std = self.linear_std(output_std).exp()
# Reshape the layer output to match targets
# Shape is (batch_size, output_channels, horizon)
batch_size = inputs.shape[0]
output_mean = output_mean.reshape(
(batch_size, self.output_channels, self.horizon)
)
output_std = output_std.reshape(
(batch_size, self.output_channels, self.horizon)
)
# Regularization terms
regularizer = res_linear_mean + res_linear_std
return output_mean, output_std, regularizer
@property
def n_parameters(self):
"""Return the number of parameters of model."""
par = list(self.parameters())
s = sum([np.prod(list(d.size())) for d in par])
return s
@property
def receptive_field_size(self):
"""Return the length of the receptive field."""
return self.dilation * max(self.dilations)
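# Hedged usage sketch (not part of the original module): channel counts, horizon and
# batch size below are illustrative assumptions; the dummy input length just covers
# the receptive field so a single forecast step is produced internally.
if __name__ == '__main__':
    net = WaveNet(input_channels=5, output_channels=1, horizon=10)
    dummy = torch.randn(8, 5, net.receptive_field_size)
    out = net(dummy)
    print(out['loc'].shape)  # expected: torch.Size([8, 1, 10])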
|
the-stack_0_19942 | import re
import os
import sys
import m3u8
import json
import requests
import subprocess
from functools import partial
from bs4 import BeautifulSoup
from utils import get_encoder_name, parallel_run, makedirs
API_URL = 'http://api.jtbc.joins.com/ad/pre/NV10173083'
BASE_URL = 'http://nsvc.jtbc.joins.com/API/News/Newapp/Default.aspx'
def soupify(text):
return BeautifulSoup(text, "html.parser")
def get_news_ids(page_id):
params = {
'NJC': 'NJC300',
'CAID': 'NC10011174',
'PGI': page_id,
}
response = requests.request(
method='GET', url=BASE_URL, params=params,
)
soup = soupify(response.text)
return [item.text for item in soup.find_all('news_id')]
def download_news_video_and_content(
news_id, base_dir, chunk_size=32*1024,
video_dir="video", asset_dir="assets", audio_dir="audio"):
video_dir = os.path.join(base_dir, video_dir)
asset_dir = os.path.join(base_dir, asset_dir)
audio_dir = os.path.join(base_dir, audio_dir)
makedirs(video_dir)
makedirs(asset_dir)
makedirs(audio_dir)
text_path = os.path.join(asset_dir, "{}.txt".format(news_id))
original_text_path = os.path.join(asset_dir, "original-{}.txt".format(news_id))
video_path = os.path.join(video_dir, "{}.ts".format(news_id))
audio_path = os.path.join(audio_dir, "{}.wav".format(news_id))
params = {
'NJC': 'NJC400',
'NID': news_id, # NB11515152
'CD': 'A0100',
}
response = requests.request(
method='GET', url=BASE_URL, params=params,
)
soup = soupify(response.text)
article_contents = soup.find_all('article_contents')
assert len(article_contents) == 1, \
"# of <article_contents> of {} should be 1: {}".format(news_id, response.text)
text = soupify(article_contents[0].text).get_text() # remove <div>
with open(original_text_path, "w", encoding='utf-8') as f:
f.write(text)
with open(text_path, "w", encoding='utf-8') as f:
from nltk import sent_tokenize
text = re.sub(r'\[.{0,80} :\s.+]', '', text) # remove quote
text = re.sub(r'☞.+http.+\)', '', text) # remove quote
text = re.sub(r'\(https?:\/\/.*[\r\n]*\)', '', text) # remove url
sentences = sent_tokenize(text)
sentences = [sent for sentence in sentences for sent in sentence.split('\n') if sent]
new_texts = []
for sent in sentences:
sent = sent.strip()
sent = re.sub(r'\([^)]*\)', '', sent)
#sent = re.sub(r'\<.{0,80}\>', '', sent)
sent = sent.replace('…', '.')
new_texts.append(sent)
f.write("\n".join([sent for sent in new_texts if sent]))
vod_paths = soup.find_all('vod_path')
assert len(vod_paths) == 1, \
"# of <vod_path> of {} should be 1: {}".format(news_id, response.text)
if not os.path.exists(video_path):
redirect_url = soup.find_all('vod_path')[0].text
list_url = m3u8.load(redirect_url).playlists[0].absolute_uri
video_urls = [segment.absolute_uri for segment in m3u8.load(list_url).segments]
with open(video_path, "wb") as f:
for url in video_urls:
response = requests.get(url, stream=True)
total_size = int(response.headers.get('content-length', 0))
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if not os.path.exists(audio_path):
encoder = get_encoder_name()
command = "{} -y -loglevel panic -i {} -ab 160k -ac 2 -ar 44100 -vn {}".\
format(encoder, video_path, audio_path)
subprocess.call(command, shell=True)
return True
if __name__ == '__main__':
news_ids = []
page_idx = 1
base_dir = os.path.dirname(os.path.realpath(__file__))
news_id_path = os.path.join(base_dir, "news_ids.json")
if not os.path.exists(news_id_path):
while True:
tmp_ids = get_news_ids(page_idx)
if len(tmp_ids) == 0:
break
news_ids.extend(tmp_ids)
print(" [*] Download page {}: {}/{}".format(page_idx, len(tmp_ids), len(news_ids)))
page_idx += 1
with open(news_id_path, "w", encoding='utf-8') as f:
json.dump(news_ids, f, indent=2, ensure_ascii=False)
else:
with open(news_id_path) as f:
news_ids = json.loads(f.read())
exceptions = ["NB10830162"]
news_ids = list(set(news_ids) - set(exceptions))
fn = partial(download_news_video_and_content, base_dir=base_dir)
results = parallel_run(
fn, news_ids, desc="Download news video+text", parallel=True)
|
the-stack_0_19945 | import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(BASE_DIR, "apps"))
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'gunicorn',
'djsupervisor',
'django_extensions',
'sekizai',
'djangobower',
'ckeditor',
'lineage',
'endless_pagination',
'website',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.core.context_processors.request',
'sekizai.context_processors.sekizai'
)
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = 'project.wsgi.application'
# i18n
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# static configuration
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
# wysiwyg settings
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'CMS, Full',
'toolbar_CMS': [
{
'name': 'basicstyles',
'groups': ['basicstyles', 'cleanup'],
'items': ['Bold', 'Italic', 'Underline', '-', 'RemoveFormat']
},
{
'name': 'paragraph',
'groups': ['list', 'indent', 'blocks'],
'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote']
},
{
'name': 'links',
'items': ['Link', 'Unlink']
},
{
'name': 'insert',
'items': ['Image', 'HorizontalRule', 'Table', 'Iframe', ]
},
{
'name': 'colors',
'items': ['TextColor', 'BGColor']
}
],
'height': 400,
'width': 1500,
'skin': 'moono',
'uiColor': '#eeeeee'
},
}
# Bower components
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'static')
BOWER_INSTALLED_APPS = (
'zurb/bower-foundation#5.5.0',
'slick.js',
)
# pagination
ENDLESS_PAGINATION_PER_PAGE = 3
# load local_settings
try:
from local_settings import *
except ImportError:
pass
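# Hedged illustration (not part of the original settings, intentionally commented out):
# a minimal local_settings.py that the import above would pick up; every value below
# is an assumption.
#
# DEBUG = True
# SECRET_KEY = 'replace-me'
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'project',
#     }
# }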
|
the-stack_0_19947 | import numpy as np
# input: coeff with shape [1,257]
def Split_coeff(coeff):
id_coeff = coeff[:,:80] # identity(shape) coeff of dim 80
ex_coeff = coeff[:,80:144] # expression coeff of dim 64
tex_coeff = coeff[:,144:224] # texture(albedo) coeff of dim 80
    angles = coeff[:,224:227] # Euler angles (x,y,z) for rotation of dim 3
gamma = coeff[:,227:254] # lighting coeff for 3 channel SH function of dim 27
translation = coeff[:,254:] # translation coeff of dim 3
return id_coeff,ex_coeff,tex_coeff,angles,gamma,translation
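# Hedged shape check (not part of the original module): splitting a zero vector of the
# documented length confirms the 80/64/80/3/27/3 layout above, e.g.
#   [p.shape for p in Split_coeff(np.zeros([1, 257]))]
#   -> [(1, 80), (1, 64), (1, 80), (1, 3), (1, 27), (1, 3)]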
# compute face shape with identity and expression coeff, based on BFM model
# input: id_coeff with shape [1,80]
# ex_coeff with shape [1,64]
# output: face_shape with shape [1,N,3], N is number of vertices
def Shape_formation(id_coeff,ex_coeff,facemodel):
face_shape = np.einsum('ij,aj->ai',facemodel.idBase,id_coeff) + \
np.einsum('ij,aj->ai',facemodel.exBase,ex_coeff) + \
facemodel.meanshape
face_shape = np.reshape(face_shape,[1,-1,3])
# re-center face shape
face_shape = face_shape - np.mean(np.reshape(facemodel.meanshape,[1,-1,3]), axis = 1, keepdims = True)
return face_shape
# compute vertex normal using one-ring neighborhood
# input: face_shape with shape [1,N,3]
# output: v_norm with shape [1,N,3]
def Compute_norm(face_shape,facemodel):
face_id = facemodel.tri # vertex index for each triangle face, with shape [F,3], F is number of faces
point_id = facemodel.point_buf # adjacent face index for each vertex, with shape [N,8], N is number of vertex
shape = face_shape
face_id = (face_id - 1).astype(np.int32)
point_id = (point_id - 1).astype(np.int32)
v1 = shape[:,face_id[:,0],:]
v2 = shape[:,face_id[:,1],:]
v3 = shape[:,face_id[:,2],:]
e1 = v1 - v2
e2 = v2 - v3
face_norm = np.cross(e1,e2) # compute normal for each face
face_norm = np.concatenate([face_norm,np.zeros([1,1,3])], axis = 1) # concat face_normal with a zero vector at the end
v_norm = np.sum(face_norm[:,point_id,:], axis = 2) # compute vertex normal using one-ring neighborhood
v_norm = v_norm/np.expand_dims(np.linalg.norm(v_norm,axis = 2),2) # normalize normal vectors
return v_norm
# compute vertex texture(albedo) with tex_coeff
# input: tex_coeff with shape [1,80]
# output: face_texture with shape [1,N,3], RGB order, range from 0-255
def Texture_formation(tex_coeff,facemodel):
face_texture = np.einsum('ij,aj->ai',facemodel.texBase,tex_coeff) + facemodel.meantex
face_texture = np.reshape(face_texture,[1,-1,3])
return face_texture
# compute rotation matrix based on 3 Euler angles
# input: angles with shape [1,3]
# output: rotation matrix with shape [1,3,3]
def Compute_rotation_matrix(angles):
angle_x = angles[:,0][0]
angle_y = angles[:,1][0]
angle_z = angles[:,2][0]
# compute rotation matrix for X,Y,Z axis respectively
rotation_X = np.array([1.0,0,0,\
0,np.cos(angle_x),-np.sin(angle_x),\
0,np.sin(angle_x),np.cos(angle_x)])
rotation_Y = np.array([np.cos(angle_y),0,np.sin(angle_y),\
0,1,0,\
-np.sin(angle_y),0,np.cos(angle_y)])
rotation_Z = np.array([np.cos(angle_z),-np.sin(angle_z),0,\
np.sin(angle_z),np.cos(angle_z),0,\
0,0,1])
rotation_X = np.reshape(rotation_X,[1,3,3])
rotation_Y = np.reshape(rotation_Y,[1,3,3])
rotation_Z = np.reshape(rotation_Z,[1,3,3])
rotation = np.matmul(np.matmul(rotation_Z,rotation_Y),rotation_X)
rotation = np.transpose(rotation, axes = [0,2,1]) #transpose row and column (dimension 1 and 2)
return rotation
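# Hedged sanity note (not part of the original module): with all three Euler angles at
# zero, each per-axis matrix above is the identity, so Compute_rotation_matrix returns
# a batch of one 3x3 identity matrix.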
# project 3D face onto image plane
# input: face_shape with shape [1,N,3]
# rotation with shape [1,3,3]
# translation with shape [1,3]
# output: face_projection with shape [1,N,2]
# z_buffer with shape [1,N,1]
def Projection_layer(face_shape,rotation,translation,focal=1015.0,center=112.0): # we choose the focal length and camera position empirically
camera_pos = np.reshape(np.array([0.0,0.0,10.0]),[1,1,3]) # camera position
reverse_z = np.reshape(np.array([1.0,0,0,0,1,0,0,0,-1.0]),[1,3,3])
p_matrix = np.concatenate([[focal],[0.0],[center],[0.0],[focal],[center],[0.0],[0.0],[1.0]],axis = 0) # projection matrix
p_matrix = np.reshape(p_matrix,[1,3,3])
# calculate face position in camera space
face_shape_r = np.matmul(face_shape,rotation)
face_shape_t = face_shape_r + np.reshape(translation,[1,1,3])
face_shape_t = np.matmul(face_shape_t,reverse_z) + camera_pos
# calculate projection of face vertex using perspective projection
aug_projection = np.matmul(face_shape_t,np.transpose(p_matrix,[0,2,1]))
face_projection = aug_projection[:,:,0:2]/np.reshape(aug_projection[:,:,2],[1,np.shape(aug_projection)[1],1])
z_buffer = np.reshape(aug_projection[:,:,2],[1,-1,1])
return face_projection,z_buffer
# compute vertex color using face_texture and SH function lighting approximation
# input: face_texture with shape [1,N,3]
# norm with shape [1,N,3]
# gamma with shape [1,27]
# output: face_color with shape [1,N,3], RGB order, range from 0-255
# lighting with shape [1,N,3], color under uniform texture
def Illumination_layer(face_texture,norm,gamma):
num_vertex = np.shape(face_texture)[1]
init_lit = np.array([0.8,0,0,0,0,0,0,0,0])
gamma = np.reshape(gamma,[-1,3,9])
gamma = gamma + np.reshape(init_lit,[1,1,9])
# parameter of 9 SH function
a0 = np.pi
a1 = 2*np.pi/np.sqrt(3.0)
a2 = 2*np.pi/np.sqrt(8.0)
c0 = 1/np.sqrt(4*np.pi)
c1 = np.sqrt(3.0)/np.sqrt(4*np.pi)
c2 = 3*np.sqrt(5.0)/np.sqrt(12*np.pi)
Y0 = np.tile(np.reshape(a0*c0,[1,1,1]),[1,num_vertex,1])
Y1 = np.reshape(-a1*c1*norm[:,:,1],[1,num_vertex,1])
Y2 = np.reshape(a1*c1*norm[:,:,2],[1,num_vertex,1])
Y3 = np.reshape(-a1*c1*norm[:,:,0],[1,num_vertex,1])
Y4 = np.reshape(a2*c2*norm[:,:,0]*norm[:,:,1],[1,num_vertex,1])
Y5 = np.reshape(-a2*c2*norm[:,:,1]*norm[:,:,2],[1,num_vertex,1])
Y6 = np.reshape(a2*c2*0.5/np.sqrt(3.0)*(3*np.square(norm[:,:,2])-1),[1,num_vertex,1])
Y7 = np.reshape(-a2*c2*norm[:,:,0]*norm[:,:,2],[1,num_vertex,1])
Y8 = np.reshape(a2*c2*0.5*(np.square(norm[:,:,0])-np.square(norm[:,:,1])),[1,num_vertex,1])
Y = np.concatenate([Y0,Y1,Y2,Y3,Y4,Y5,Y6,Y7,Y8],axis=2)
# Y shape:[batch,N,9].
lit_r = np.squeeze(np.matmul(Y,np.expand_dims(gamma[:,0,:],2)),2) #[batch,N,9] * [batch,9,1] = [batch,N]
lit_g = np.squeeze(np.matmul(Y,np.expand_dims(gamma[:,1,:],2)),2)
lit_b = np.squeeze(np.matmul(Y,np.expand_dims(gamma[:,2,:],2)),2)
# shape:[batch,N,3]
face_color = np.stack([lit_r*face_texture[:,:,0],lit_g*face_texture[:,:,1],lit_b*face_texture[:,:,2]],axis = 2)
lighting = np.stack([lit_r,lit_g,lit_b],axis = 2)*128
return face_color,lighting
# face reconstruction with coeff and BFM model
def Reconstruction(coeff,facemodel):
id_coeff,ex_coeff,tex_coeff,angles,gamma,translation = Split_coeff(coeff)
# compute face shape
face_shape = Shape_formation(id_coeff, ex_coeff, facemodel)
# compute vertex texture(albedo)
face_texture = Texture_formation(tex_coeff, facemodel)
# vertex normal
face_norm = Compute_norm(face_shape,facemodel)
# rotation matrix
rotation = Compute_rotation_matrix(angles)
face_norm_r = np.matmul(face_norm,rotation)
# compute vertex projection on image plane (with image sized 224*224)
face_projection,z_buffer = Projection_layer(face_shape,rotation,translation)
face_projection = np.stack([face_projection[:,:,0],224 - face_projection[:,:,1]], axis = 2)
# compute 68 landmark on image plane
landmarks_2d = face_projection[:,facemodel.keypoints,:]
# compute vertex color using SH function lighting approximation
face_color,lighting = Illumination_layer(face_texture, face_norm_r, gamma)
# vertex index for each face of BFM model
tri = facemodel.tri
return face_shape,face_texture,face_color,tri,face_projection,z_buffer,landmarks_2d
# def Reconstruction_for_render(coeff,facemodel):
# id_coeff,ex_coeff,tex_coeff,angles,gamma,translation = Split_coeff(coeff)
# face_shape = Shape_formation(id_coeff, ex_coeff, facemodel)
# face_texture = Texture_formation(tex_coeff, facemodel)
# face_norm = Compute_norm(face_shape,facemodel)
# rotation = Compute_rotation_matrix(angles)
# face_shape_r = np.matmul(face_shape,rotation)
# face_shape_r = face_shape_r + np.reshape(translation,[1,1,3])
# face_norm_r = np.matmul(face_norm,rotation)
# face_color,lighting = Illumination_layer(face_texture, face_norm_r, gamma)
# tri = facemodel.face_buf
# return face_shape_r,face_norm_r,face_color,tri
|
the-stack_0_19948 | """TODO(race): Add a description here."""
from __future__ import absolute_import, division, print_function
import json
import os
import nlp
# TODO(race): BibTeX citation
_CITATION = """\
@article{lai2017large,
title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
journal={arXiv preprint arXiv:1704.04683},
year={2017}
}
"""
# TODO(race):
_DESCRIPTION = """\
Race is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The
dataset is collected from English examinations in China, which are designed for middle school and high school students.
The dataset can serve as training and test sets for machine comprehension.
"""
_URL = "http://www.cs.cmu.edu/~glai1/data/race/RACE.tar.gz"
class Race(nlp.GeneratorBasedBuilder):
"""TODO(race): Short description of my dataset."""
# TODO(race): Set up version.
VERSION = nlp.Version("0.1.0")
def _info(self):
# TODO(race): Specifies the nlp.DatasetInfo object
return nlp.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# nlp.features.FeatureConnectors
features=nlp.Features(
{
"article": nlp.Value("string"),
"answer": nlp.Value("string"),
"question": nlp.Value("string"),
"options": nlp.features.Sequence(nlp.Value("string"))
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="http://www.cs.cmu.edu/~glai1/data/race/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(race): Downloads the data and defines the splits
# dl_manager is a nlp.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
return [
nlp.SplitGenerator(
name=nlp.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"files": sorted(os.listdir(os.path.join(dl_dir, "RACE/test/high"))),
"filespath": os.path.join(dl_dir, "RACE/test/high"),
},
),
nlp.SplitGenerator(
name=nlp.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"files": sorted(os.listdir(os.path.join(dl_dir, "RACE/train/high"))),
"filespath": os.path.join(dl_dir, "RACE/train/high"),
},
),
nlp.SplitGenerator(
name=nlp.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"files": sorted(os.listdir(os.path.join(dl_dir, "RACE/dev/high"))),
"filespath": os.path.join(dl_dir, "RACE/dev/high"),
},
),
]
def _generate_examples(self, files, filespath):
"""Yields examples."""
# TODO(race): Yields (key, example) tuples from the dataset
for file in files:
filepath = os.path.join(filespath, file)
with open(filepath, encoding="utf-8") as f:
data = json.load(f)
questions = data["questions"]
answers = data["answers"]
options = data["options"]
for i in range(len(questions)):
question = questions[i]
answer = answers[i]
option = options[i]
yield i, {
"article": data["article"],
"question": question,
"answer": answer,
"options": option,
}
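# Hedged usage sketch (not part of the original script): the `nlp` library can resolve
# a builder from a local script path, so loading this file directly is an assumption.
if __name__ == "__main__":
    race = nlp.load_dataset("./race.py")
    print(race)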
|
the-stack_0_19949 | #!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Exports AppSearch Androidx code to Framework
#
# NOTE: This will remove and replace all files in the
# frameworks/base/apex/appsearch path.
#
# Example usage (from root dir of androidx workspace):
# $ ./frameworks/support/appsearch/exportToFramework.py "$HOME/android/master" "<jetpack changeid>"
import os
import re
import subprocess
import sys
# Jetpack paths relative to frameworks/support/appsearch
JETPACK_API_ROOT = 'appsearch/src/main/java/androidx/appsearch'
JETPACK_API_TEST_ROOT = 'appsearch/src/androidTest/java/androidx/appsearch'
JETPACK_IMPL_ROOT = 'local-storage/src/main/java/androidx/appsearch'
JETPACK_IMPL_TEST_ROOT = 'local-storage/src/androidTest/java/androidx/appsearch'
# Framework paths relative to frameworks/base/apex/appsearch
FRAMEWORK_API_ROOT = 'framework/java/external/android/app/appsearch'
FRAMEWORK_API_TEST_ROOT = (
'../../core/tests/coretests/src/'
'android/app/appsearch/external')
FRAMEWORK_IMPL_ROOT = 'service/java/com/android/server/appsearch/external'
FRAMEWORK_IMPL_TEST_ROOT = (
'../../services/tests/servicestests/src/'
'com/android/server/appsearch/external')
GOOGLE_JAVA_FORMAT = (
'../../../../prebuilts/tools/common/google-java-format/google-java-format')
# Miscellaneous constants
CHANGEID_FILE_NAME = 'synced_jetpack_changeid.txt'
class ExportToFramework:
def __init__(self, jetpack_appsearch_root, framework_appsearch_root):
self._jetpack_appsearch_root = jetpack_appsearch_root
self._framework_appsearch_root = framework_appsearch_root
def _PruneDir(self, dir_to_prune):
for walk_path, walk_folders, walk_files in os.walk(dir_to_prune):
for walk_filename in walk_files:
abs_path = os.path.join(walk_path, walk_filename)
print('Prune: remove "%s"' % abs_path)
os.remove(abs_path)
def _TransformAndCopyFile(self, source_path, dest_path, transform_func=None):
with open(source_path, 'r') as fh:
contents = fh.read()
if '@exportToFramework:skipFile()' in contents:
print('Skipping: "%s" -> "%s"' % (source_path, dest_path), file=sys.stderr)
return
print('Copy: "%s" -> "%s"' % (source_path, dest_path), file=sys.stderr)
if transform_func:
contents = transform_func(contents)
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
with open(dest_path, 'w') as fh:
fh.write(contents)
# Run formatter
google_java_format_cmd = [GOOGLE_JAVA_FORMAT, '--aosp', '-i', dest_path]
print('$ ' + ' '.join(google_java_format_cmd))
subprocess.check_call(google_java_format_cmd, cwd=self._framework_appsearch_root)
def _TransformCommonCode(self, contents):
return (contents
.replace('androidx.appsearch.app', 'android.app.appsearch')
.replace(
'androidx.appsearch.localstorage.',
'com.android.server.appsearch.external.localstorage.')
.replace('androidx.appsearch', 'android.app.appsearch')
.replace(
'androidx.annotation.GuardedBy',
'com.android.internal.annotations.GuardedBy')
.replace(
'androidx.annotation.VisibleForTesting',
'com.android.internal.annotations.VisibleForTesting')
.replace('androidx.collection.ArrayMap', 'android.util.ArrayMap')
.replace('androidx.collection.ArraySet', 'android.util.ArraySet')
.replace(
'androidx.core.util.ObjectsCompat',
'java.util.Objects')
.replace(
'androidx.core.util.Preconditions',
'com.android.internal.util.Preconditions')
.replace('import androidx.annotation.RestrictTo;', '')
.replace('@RestrictTo(RestrictTo.Scope.LIBRARY_GROUP)', '')
.replace('ObjectsCompat.', 'Objects.')
.replace('androidx.', 'android.')
)
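    # Hedged worked example (not part of the original script): after the rewrites above,
    # a line such as `import androidx.appsearch.app.Foo;` becomes
    # `import android.app.appsearch.Foo;` (Foo is a placeholder class name).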
def _TransformTestCode(self, contents):
contents = (contents
.replace('org.junit.Assert.assertThrows',
'org.testng.Assert.expectThrows')
.replace('assertThrows(', 'expectThrows(')
)
return self._TransformCommonCode(contents)
def _TransformAndCopyFolder(self, source_dir, dest_dir, transform_func=None):
for currentpath, folders, files in os.walk(source_dir):
dir_rel_to_root = os.path.relpath(currentpath, source_dir)
for filename in files:
source_abs_path = os.path.join(currentpath, filename)
dest_path = os.path.join(dest_dir, dir_rel_to_root, filename)
self._TransformAndCopyFile(source_abs_path, dest_path, transform_func)
def _ExportApiCode(self):
api_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_API_ROOT)
api_test_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_API_TEST_ROOT)
api_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_API_ROOT)
api_test_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_API_TEST_ROOT)
# Prune existing files
self._PruneDir(api_dest_dir)
self._PruneDir(api_test_dest_dir)
# Copy api classes. We can't use _TransformAndCopyFolder here because we
# need to specially handle the 'app' package.
def _TransformApiCode(contents):
contents = contents.replace(
'package androidx.appsearch.app;',
'package android.app.appsearch;')
return self._TransformCommonCode(contents)
for currentpath, folders, files in os.walk(api_source_dir):
dir_rel_to_root = os.path.relpath(currentpath, api_source_dir)
for filename in files:
# Figure out what folder to place them into
source_abs_path = os.path.join(currentpath, filename)
if dir_rel_to_root == 'app':
# Files in the 'app' folder live in the root of the platform tree
dest_path = os.path.join(api_dest_dir, filename)
else:
dest_path = os.path.join(api_dest_dir, dir_rel_to_root, filename)
self._TransformAndCopyFile(source_abs_path, dest_path, _TransformApiCode)
# Copy api test classes.
self._TransformAndCopyFolder(
api_test_source_dir, api_test_dest_dir, transform_func=self._TransformTestCode)
def _ExportImplCode(self):
impl_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_IMPL_ROOT)
impl_test_source_dir = os.path.join(self._jetpack_appsearch_root, JETPACK_IMPL_TEST_ROOT)
impl_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_IMPL_ROOT)
impl_test_dest_dir = os.path.join(self._framework_appsearch_root, FRAMEWORK_IMPL_TEST_ROOT)
# Prune
self._PruneDir(impl_dest_dir)
self._PruneDir(impl_test_dest_dir)
# Copy impl classes
def _TransformImplCode(contents):
contents = (contents
.replace('package androidx.appsearch',
'package com.android.server.appsearch.external')
.replace('com.google.android.icing.protobuf.', 'com.google.protobuf.')
)
return self._TransformCommonCode(contents)
self._TransformAndCopyFolder(
impl_source_dir, impl_dest_dir, transform_func=_TransformImplCode)
# Copy servicestests
def _TransformImplTestCode(contents):
contents = (contents
.replace('package androidx.appsearch',
'package com.android.server.appsearch.external')
.replace('com.google.android.icing.proto.',
'com.android.server.appsearch.proto.')
.replace('com.google.android.icing.protobuf.',
'com.android.server.appsearch.protobuf.')
)
return self._TransformTestCode(contents)
self._TransformAndCopyFolder(
impl_test_source_dir, impl_test_dest_dir, transform_func=_TransformImplTestCode)
def ExportCode(self):
self._ExportApiCode()
self._ExportImplCode()
def WriteChangeIdFile(self, changeid):
"""Copies the changeid of the most recent public CL into a file on the framework side.
This file is used for tracking, to determine what framework is synced to.
You must always provide a changeid of an exported, preferably even submitted CL. If you
        abandon the CL pointed to by this changeid, the next person syncing the framework will be unable
to find what CL it is synced to.
"""
file_path = os.path.join(self._framework_appsearch_root, CHANGEID_FILE_NAME)
with open(file_path, 'w') as fh:
print(changeid, file=fh)
print('Wrote "%s"' % file_path)
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: %s <path/to/frameworks/base> <changeId of head jetpack commit>' % sys.argv[0],
file=sys.stderr)
sys.exit(1)
source_dir = os.path.normpath(os.path.dirname(sys.argv[0]))
dest_dir = os.path.normpath(sys.argv[1])
if os.path.basename(dest_dir) == 'appsearch':
pass
elif os.path.basename(dest_dir) == 'base':
dest_dir = os.path.join(dest_dir, 'apex/appsearch')
else:
dest_dir = os.path.join(dest_dir, 'frameworks/base/apex/appsearch')
if not os.path.isdir(dest_dir):
print('Destination path "%s" does not exist or is not a directory' % (
dest_dir),
file=sys.stderr)
sys.exit(1)
exporter = ExportToFramework(source_dir, dest_dir)
exporter.ExportCode()
exporter.WriteChangeIdFile(sys.argv[2])
|
the-stack_0_19950 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gslib.tests.testcase as testcase
from gslib.util import Retry
from gslib.tests.util import ObjectToURI as suri
class TestSetMeta(testcase.GsUtilIntegrationTestCase):
"""Integration tests for setmeta command."""
def test_initial_metadata(self):
objuri = suri(self.CreateObject(contents='foo'))
inpath = self.CreateTempFile()
ct = 'image/gif'
self.RunGsUtil(['-h', 'x-goog-meta-xyz:abc', '-h', 'Content-Type:%s' % ct,
'cp', inpath, objuri])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', objuri], return_stdout=True)
self.assertRegexpMatches(stdout, 'Content-Type:\s+%s' % ct)
self.assertRegexpMatches(stdout, 'x-goog-meta-xyz:\s+abc')
_Check1()
def test_overwrite_existing(self):
objuri = suri(self.CreateObject(contents='foo'))
inpath = self.CreateTempFile()
self.RunGsUtil(['-h', 'x-goog-meta-xyz:abc', '-h', 'Content-Type:image/gif',
'cp', inpath, objuri])
self.RunGsUtil(['setmeta', '-n', '-h', 'Content-Type:text/html', '-h',
'x-goog-meta-xyz', objuri])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', objuri], return_stdout=True)
self.assertRegexpMatches(stdout, 'Content-Type:\s+text/html')
self.assertNotIn('xyz', stdout)
_Check1()
def test_duplicate_header_removal(self):
stderr = self.RunGsUtil(
['setmeta', '-h', 'Content-Type:text/html', '-h', 'Content-Type',
'gs://foo/bar'], expected_status=1, return_stderr=True)
self.assertIn('Each header must appear at most once', stderr)
def test_duplicate_header(self):
stderr = self.RunGsUtil(
['setmeta', '-h', 'Content-Type:text/html', '-h', 'Content-Type:foobar',
'gs://foo/bar'], expected_status=1, return_stderr=True)
self.assertIn('Each header must appear at most once', stderr)
def test_recursion_works(self):
bucket_uri = self.CreateBucket()
object1_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
object2_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
self.RunGsUtil(['setmeta', '-R', '-h', 'content-type:footype',
suri(bucket_uri)])
for obj_uri in [object1_uri, object2_uri]:
stdout = self.RunGsUtil(['stat', suri(obj_uri)], return_stdout=True)
self.assertIn('footype', stdout)
def test_invalid_non_ascii_custom_header(self):
unicode_header = u'x-goog-meta-soufflé:5'
unicode_header_bytes = unicode_header.encode('utf-8')
stderr = self.RunGsUtil(
['setmeta', '-h', unicode_header_bytes, 'gs://foo/bar'],
expected_status=1, return_stderr=True)
self.assertIn('Invalid non-ASCII header', stderr)
def test_valid_non_ascii_custom_header(self):
objuri = self.CreateObject(contents='foo')
unicode_header = u'x-goog-meta-dessert:soufflé'
unicode_header_bytes = unicode_header.encode('utf-8')
self.RunGsUtil(['setmeta', '-h', unicode_header_bytes, suri(objuri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(objuri)], return_stdout=True)
stdout = stdout.decode('utf-8')
self.assertIn(u'x-goog-meta-dessert:\tsoufflé', stdout)
_Check1()
def test_disallowed_header(self):
stderr = self.RunGsUtil(
['setmeta', '-h', 'Content-Length:5', 'gs://foo/bar'],
expected_status=1, return_stderr=True)
self.assertIn('Invalid or disallowed header', stderr)
def test_setmeta_bucket(self):
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['setmeta', '-h', 'x-goog-meta-foo:5', suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('must name an object', stderr)
def test_setmeta_invalid_arg(self):
stderr = self.RunGsUtil(
['setmeta', '-h', 'foo:bar:baz', 'gs://foo/bar'], expected_status=1,
return_stderr=True)
self.assertIn('must be either header or header:value', stderr)
def test_invalid_non_ascii_header_value(self):
unicode_header = u'Content-Type:dessert/soufflé'
unicode_header_bytes = unicode_header.encode('utf-8')
stderr = self.RunGsUtil(
['setmeta', '-h', unicode_header_bytes, 'gs://foo/bar'],
expected_status=1, return_stderr=True)
self.assertIn('Invalid non-ASCII header', stderr)
|
the-stack_0_19951 | import os
import factory
from datetime import datetime
import json
from django.conf import settings
from django.utils.timezone import make_aware
from core.models import MatomoVisitsResource
class MatomoVisitsResourceFactory(factory.django.DjangoModelFactory):
class Meta:
model = MatomoVisitsResource
strategy = factory.BUILD_STRATEGY
class Params:
is_initial = True
is_empty = False
since = factory.Maybe(
"is_initial",
make_aware(datetime(year=2020, month=1, day=1)),
make_aware(datetime(year=2020, month=2, day=10, hour=13, minute=8, second=39, microsecond=315000))
)
status = 200
head = {
"content-type": "application/json"
}
@factory.lazy_attribute
def uri(self):
return f"webstats.surf.nl/?date={self.since:%Y-%m-%dT%H:%M:%SZ}%2C2021-12-12&" \
f"filter_offset=0&format=JSON&idSite=63&method=Live.getLastVisitsDetails&module=API&period=range"
@factory.lazy_attribute
def request(self):
return {
"args": [f"{self.since:%Y-%m-%d}"] if not self.is_initial else [],
"kwargs": {},
"method": "get",
"url": "https://" + self.uri,
"headers": {},
"data": {}
}
@factory.lazy_attribute
def body(self):
if self.is_empty:
return json.dumps([])
response_file_path = os.path.join(
settings.BASE_DIR,
"core",
"fixtures",
"matomo-visits.json"
)
with open(response_file_path, "r") as response:
return response.read()
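# Hedged usage sketch (not part of the original module; importing the model already
# requires a configured Django settings module): the factory's default strategy is
# BUILD, so calling it returns an unsaved resource; the trait names come from Params.
if __name__ == "__main__":
    unsaved = MatomoVisitsResourceFactory(is_initial=False, is_empty=True)
    print(unsaved.uri, unsaved.status)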
|
the-stack_0_19952 | #!/usr/bin/env python3
#
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import math
import numpy as np
import sys
nid_pos_dict = {
    int(ll[0]): dict(zip(("column", "row", "chassis", "slot", "node"), tuple(map(int, ll[1:]))))
    for ll in map(lambda l: l.strip().split(), open('theta_nodelist_broken.txt').readlines())
}
# dealing with the deprecated column names in the nekbone traces
keymap = {"seconds": "time",
"progress-0": "region_progress",
"runtime-0": "region_runtime",
"pkg_energy-0": "energy_package",
"dram_energy-0": "energy_dram"}
def progress(iterable, message=lambda x: x, log=sys.stdout, length=None):
if length is None:
length = len(iterable)
index = 0
for x in iterable:
yield(x)
index += 1
if log:
log.write("\r%s = %d/%d" % (message(x), index, length))
log.flush()
if log:
log.write("\n")
log.flush()
def trace_iterate(trace_file):
headers = None
last = {}
for line in open(trace_file):
if line[0] == '#':
continue
if headers is None:
raw_headers = line.strip().split("|")
headers = []
for h in raw_headers:
if h in keymap:
headers.append(keymap[h])
else:
headers.append(h)
continue
ll = line.strip().split("|")
        ldict = {h: (l if l.startswith("0x") else float(l)) for h, l in zip(headers, ll)}
delta = {}
if ldict['region_id'] == '0x2000000000000000':
continue
if ldict['region_id'] != '0x8000000000000000':
continue
if ldict['region_id'] not in last:
last[ldict['region_id']] = ldict.copy()
if ldict['region_progress'] == 1.0:
region_id = ldict['region_id']
out = {}
for k in ldict:
out[k+"_i"] = last[region_id][k]
out[k+"_f"] = ldict[k]
if type(ldict[k]) == float:
out[k + "_delta"] = ldict[k] - last[region_id][k]
del last[region_id]
# ignore epochs of 0 time (??)
if out['time_delta'] > 0:
yield out
def power_and_temperature_stats(nids, traces):
global_stats = {'power': 0, 'temperature': 0} ; global_Z = 0
per_node_stats = {}
for nid, trace_file in progress(zip(nids, traces), message=lambda n__: "Computing Stats [Node %5d]" % n__[0]):
Z = 0
per_node_stats[nid] = {'power': 0, 'temperature': 0}
for ldict in trace_iterate(trace_file):
per_node_stats[nid]['power'] += ldict['energy_package_delta']/ldict['time_delta']
per_node_stats[nid]['temperature'] += (ldict['temperature_core_i'] + ldict['temperature_core_f'])*0.5
Z += 1.
global_stats['power'] += per_node_stats[nid]['power']
global_stats['temperature'] += per_node_stats[nid]['temperature']
global_Z += Z
per_node_stats[nid]['power'] /= Z
per_node_stats[nid]['temperature'] /= Z
global_stats['power'] /= global_Z
global_stats['temperature'] /= global_Z
return global_stats, per_node_stats
def global_fit(nids, traces, dict_to_signal):
mu = None ; sigma = None ; N = 0
for nid, trace_file in progress(zip(nids, traces), message=lambda n__1: "Global Fit [Node %5d]" % n__1[0]):
allsignals = []
for ldict in trace_iterate(trace_file):
ldict.update(nid_pos_dict[nid])
allsignals.append(dict_to_signal(ldict))
for signal in allsignals[1:-1]:
if mu is None:
mu = signal
sigma = np.matmul(signal.T, signal)
N = 1
else:
mu += signal
sigma += np.matmul(signal.T, signal)
N += 1
mu /= N
sigma /= N
sigma -= np.matmul(mu.T, mu)
return mu, sigma
def compute_likelihood(nids, traces, mu, sigma, dict_to_signal):
logltable = {}
siginv = sigma.I
# TODO compute mean power and temperature
for nid, trace_file in progress(zip(nids, traces), message=lambda n__2: "Computing Likelihood [Node %5d]" % n__2[0]):
allsignals = []
for ldict in trace_iterate(trace_file):
ldict.update(nid_pos_dict[nid])
allsignals.append(ldict)
likelihood = 0 ; duration = 0
for ldict in allsignals[1:-1]:
signal = dict_to_signal(ldict)
dist = ((signal - mu) * siginv * (signal - mu).T).item(0)
likelihood += ldict['time_delta'] * dist
duration += ldict['time_delta']
logltable[nid] = likelihood/duration
return logltable
def compute_outliers(nids, traces, logltable, base_thresh, p_val=0.05):
outliers = []
for nid in logltable:
not_outlier_prob = math.exp(-(logltable[nid] - base_thresh))
if not_outlier_prob < p_val:
outliers.append((1-not_outlier_prob, nid))
return outliers
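# Hedged worked example (not part of the original script): with p_val = 0.05 a node is
# flagged once its time-averaged likelihood exceeds base_thresh (the 90th percentile
# computed below) by more than -ln(0.05) ~= 3.0, since exp(-3.0) ~= 0.0498 < 0.05.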
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Detect some outliers.')
parser.add_argument('files', metavar='tracefile', type=str, nargs='+',
help='trace files')
args = parser.parse_args()
traces = args.files
traces.sort()
nids = [int(trf[-5:]) for trf in traces]
dict_to_signal = lambda ldict: np.matrix([
math.log(ldict['energy_package_delta']/ldict['time_delta']),
math.log(ldict['cycles_thread_delta']/ldict['cycles_reference_delta']),
(ldict['temperature_core_i'] + ldict['temperature_core_f'])*0.5,
ldict['row'],
])
mu, sigma = global_fit(nids, traces, dict_to_signal=dict_to_signal)
logltable = compute_likelihood(nids, traces, mu, sigma, dict_to_signal=dict_to_signal)
base_thresh = (lambda L: L[int(len(L)*0.9)])(sorted(logltable.values()))
outliers = compute_outliers(nids, traces, logltable, base_thresh)
global_avg, node_stats = power_and_temperature_stats(nids, traces)
if len(outliers) == 0:
print("No outliers detected.")
else:
print("OUTLIERS IDENTIFIED:")
outliers.sort()
for outlier_prob, nid in outliers:
delta = {k: node_stats[nid][k] - global_avg[k] for k in global_avg}
status = {True: "Runt", False: "Pick"}[delta['power'] >= 0]
print("Node %5d, %5.2f%%, %s, %5.1fW, %5.1fC" % (nid, 100*outlier_prob, status, delta['power'], delta['temperature']))
|
the-stack_0_19953 | # The __init__() function
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
print(p1.name)
print(p1.age)
# Object Methods
p1.myfunc()
|
the-stack_0_19954 | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
import math
from collections import deque
def count_photographs(adjacency, rows, cols):
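    # Added descriptive note (not in the original submission): squares marked True hold
    # pieces; pieces that can reach one another via knight moves form one connected
    # component and are interchangeable in a photograph, so the result is the product of
    # the factorials of the component sizes, modulo 1000000007, via the BFS below.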
moves = ((2, 1), (1, 2), (2, -1), (-1, 2), (-2, 1), (1, -2), (-2, -1), (-1, -2))
res = 1
visited = set()
for i in range(rows):
for j in range(cols):
if adjacency[i][j] and (i, j) not in visited:
count = 0
queue = deque([(i, j)])
while queue:
u, v = queue.popleft()
if (u, v) not in visited:
visited.add((u, v))
count += 1
for move in moves:
move_u = u + move[0]
move_v = v + move[-1]
if 0 <= move_u < rows and 0 <= move_v < cols:
if adjacency[move_u][move_v]:
queue.append((move_u, move_v))
if count > 1:
res *= math.factorial(count)
res %= 1000000007
return res
t = int(input())
for _ in range(t):
n, m, q = map(int, input().strip().split())
chess_board = [[False] * m for _ in range(n)]
for _ in range(q):
x, y = map(int, input().strip().split())
chess_board[x - 1][y - 1] = True
print(count_photographs(chess_board, n, m))
|
the-stack_0_19956 | import warnings
import functools
import locale
import weakref
import ctypes
import llvmlite.llvmpy.core as lc
import llvmlite.llvmpy.passes as lp
import llvmlite.binding as ll
import llvmlite.ir as llvmir
from numba.core import utils, config, cgutils
from numba.core.runtime.nrtopt import remove_redundant_nrt_refct
from numba.core.runtime import rtsys
from numba.core.compiler_lock import require_global_compiler_lock
_x86arch = frozenset(['x86', 'i386', 'i486', 'i586', 'i686', 'i786',
'i886', 'i986'])
def _is_x86(triple):
arch = triple.split('-')[0]
return arch in _x86arch
def dump(header, body):
if config.HIGHLIGHT_DUMPS:
try:
import pygments
except ImportError:
msg = "Please install pygments to see highlighted dumps"
raise ValueError(msg)
else:
from pygments import highlight
from pygments.lexers import GasLexer as lexer
from pygments.formatters import Terminal256Formatter
from numba.misc.dump_style import by_colorscheme
def printer(arg):
print(highlight(arg, lexer(),
Terminal256Formatter(style=by_colorscheme())))
else:
printer = print
print('=' * 80)
print(header.center(80, '-'))
printer(body)
print('=' * 80)
class _CFG(object):
"""
Wraps the CFG graph for different display method.
Instance of the class can be stringified (``__repr__`` is defined) to get
the graph in DOT format. The ``.display()`` method plots the graph in
PDF. If in IPython notebook, the returned image can be inlined.
"""
def __init__(self, dot):
self.dot = dot
def display(self, filename=None, view=False):
"""
        Plot the CFG. In an IPython notebook, the returned image object can be
inlined.
The *filename* option can be set to a specific path for the rendered
output to write to. If *view* option is True, the plot is opened by
the system default application for the image format (PDF).
"""
return ll.view_dot_graph(self.dot, filename=filename, view=view)
def __repr__(self):
return self.dot
class CodeLibrary(object):
"""
An interface for bundling LLVM code together and compiling it.
It is tied to a *codegen* instance (e.g. JITCPUCodegen) that will
determine how the LLVM code is transformed and linked together.
"""
_finalized = False
_object_caching_enabled = False
_disable_inspection = False
def __init__(self, codegen, name):
self._codegen = codegen
self._name = name
self._linking_libraries = [] # maintain insertion order
self._final_module = ll.parse_assembly(
str(self._codegen._create_empty_module(self._name)))
self._final_module.name = cgutils.normalize_ir_text(self._name)
self._shared_module = None
# Track names of the dynamic globals
self._dynamic_globals = []
@property
def has_dynamic_globals(self):
self._ensure_finalized()
return len(self._dynamic_globals) > 0
@property
def codegen(self):
"""
The codegen object owning this library.
"""
return self._codegen
def __repr__(self):
return "<Library %r at 0x%x>" % (self._name, id(self))
def _raise_if_finalized(self):
if self._finalized:
raise RuntimeError("operation impossible on finalized object %r"
% (self,))
def _ensure_finalized(self):
if not self._finalized:
self.finalize()
def _optimize_functions(self, ll_module):
"""
Internal: run function-level optimizations inside *ll_module*.
"""
# Enforce data layout to enable layout-specific optimizations
ll_module.data_layout = self._codegen._data_layout
with self._codegen._function_pass_manager(ll_module) as fpm:
# Run function-level optimizations to reduce memory usage and improve
# module-level optimization.
for func in ll_module.functions:
fpm.initialize()
fpm.run(func)
fpm.finalize()
def _optimize_final_module(self):
"""
Internal: optimize this library's final module.
"""
self._codegen._mpm.run(self._final_module)
self._final_module = remove_redundant_nrt_refct(self._final_module)
def _get_module_for_linking(self):
"""
Internal: get a LLVM module suitable for linking multiple times
into another library. Exported functions are made "linkonce_odr"
to allow for multiple definitions, inlining, and removal of
unused exports.
See discussion in https://github.com/numba/numba/pull/890
"""
self._ensure_finalized()
if self._shared_module is not None:
return self._shared_module
mod = self._final_module
to_fix = []
nfuncs = 0
for fn in mod.functions:
nfuncs += 1
if not fn.is_declaration and fn.linkage == ll.Linkage.external:
to_fix.append(fn.name)
if nfuncs == 0:
# This is an issue which can occur if loading a module
# from an object file and trying to link with it, so detect it
# here to make debugging easier.
raise RuntimeError("library unfit for linking: "
"no available functions in %s"
% (self,))
if to_fix:
mod = mod.clone()
for name in to_fix:
# NOTE: this will mark the symbol WEAK if serialized
# to an ELF file
mod.get_function(name).linkage = 'linkonce_odr'
self._shared_module = mod
return mod
def create_ir_module(self, name):
"""
Create a LLVM IR module for use by this library.
"""
self._raise_if_finalized()
ir_module = self._codegen._create_empty_module(name)
return ir_module
def add_linking_library(self, library):
"""
Add a library for linking into this library, without losing
the original library.
"""
library._ensure_finalized()
self._linking_libraries.append(library)
def add_ir_module(self, ir_module):
"""
Add a LLVM IR module's contents to this library.
"""
self._raise_if_finalized()
assert isinstance(ir_module, llvmir.Module)
ir = cgutils.normalize_ir_text(str(ir_module))
ll_module = ll.parse_assembly(ir)
ll_module.name = ir_module.name
ll_module.verify()
self.add_llvm_module(ll_module)
def add_llvm_module(self, ll_module):
self._optimize_functions(ll_module)
# TODO: we shouldn't need to recreate the LLVM module object
ll_module = remove_redundant_nrt_refct(ll_module)
self._final_module.link_in(ll_module)
def finalize(self):
"""
Finalize the library. After this call, nothing can be added anymore.
Finalization involves various stages of code optimization and
linking.
"""
require_global_compiler_lock()
# Report any LLVM-related problems to the user
self._codegen._check_llvm_bugs()
self._raise_if_finalized()
if config.DUMP_FUNC_OPT:
dump("FUNCTION OPTIMIZED DUMP %s" % self._name, self.get_llvm_str())
# Link libraries for shared code
seen = set()
for library in self._linking_libraries:
if library not in seen:
seen.add(library)
self._final_module.link_in(
library._get_module_for_linking(), preserve=True,
)
# Optimize the module after all dependences are linked in above,
# to allow for inlining.
self._optimize_final_module()
self._final_module.verify()
self._finalize_final_module()
def _finalize_dyanmic_globals(self):
# Scan for dynamic globals
for gv in self._final_module.global_variables:
if gv.name.startswith('numba.dynamic.globals'):
self._dynamic_globals.append(gv.name)
def _verify_declare_only_symbols(self):
        # Verify that no function compiled by numba is left as a declaration only.
for fn in self._final_module.functions:
# We will only check for symbol name starting with '_ZN5numba'
if fn.is_declaration and fn.name.startswith('_ZN5numba'):
msg = 'Symbol {} not linked properly'
raise AssertionError(msg.format(fn.name))
def _finalize_final_module(self):
"""
Make the underlying LLVM module ready to use.
"""
self._finalize_dyanmic_globals()
self._verify_declare_only_symbols()
# Remember this on the module, for the object cache hooks
self._final_module.__library = weakref.proxy(self)
# It seems add_module() must be done only here and not before
# linking in other modules, otherwise get_pointer_to_function()
# could fail.
cleanup = self._codegen._add_module(self._final_module)
if cleanup:
weakref.finalize(self, cleanup)
self._finalize_specific()
self._finalized = True
if config.DUMP_OPTIMIZED:
dump("OPTIMIZED DUMP %s" % self._name, self.get_llvm_str())
if config.DUMP_ASSEMBLY:
# CUDA backend cannot return assembly this early, so don't
# attempt to dump assembly if nothing is produced.
asm = self.get_asm_str()
if asm:
dump("ASSEMBLY %s" % self._name, self.get_asm_str())
def get_defined_functions(self):
"""
Get all functions defined in the library. The library must have
been finalized.
"""
mod = self._final_module
for fn in mod.functions:
if not fn.is_declaration:
yield fn
def get_function(self, name):
return self._final_module.get_function(name)
def _sentry_cache_disable_inspection(self):
if self._disable_inspection:
warnings.warn('Inspection disabled for cached code. '
'Invalid result is returned.')
def get_llvm_str(self):
"""
Get the human-readable form of the LLVM module.
"""
self._sentry_cache_disable_inspection()
return str(self._final_module)
def get_asm_str(self):
"""
Get the human-readable assembly.
"""
self._sentry_cache_disable_inspection()
return str(self._codegen._tm.emit_assembly(self._final_module))
def get_function_cfg(self, name):
"""
Get control-flow graph of the LLVM function
"""
self._sentry_cache_disable_inspection()
fn = self.get_function(name)
dot = ll.get_function_cfg(fn)
return _CFG(dot)
#
# Object cache hooks and serialization
#
def enable_object_caching(self):
self._object_caching_enabled = True
self._compiled_object = None
self._compiled = False
def _get_compiled_object(self):
if not self._object_caching_enabled:
raise ValueError("object caching not enabled in %s" % (self,))
if self._compiled_object is None:
raise RuntimeError("no compiled object yet for %s" % (self,))
return self._compiled_object
def _set_compiled_object(self, value):
if not self._object_caching_enabled:
raise ValueError("object caching not enabled in %s" % (self,))
if self._compiled:
raise ValueError("library already compiled: %s" % (self,))
self._compiled_object = value
self._disable_inspection = True
@classmethod
def _dump_elf(cls, buf):
"""
Dump the symbol table of an ELF file.
Needs pyelftools (https://github.com/eliben/pyelftools)
"""
from elftools.elf.elffile import ELFFile
from elftools.elf import descriptions
from io import BytesIO
f = ELFFile(BytesIO(buf))
print("ELF file:")
for sec in f.iter_sections():
if sec['sh_type'] == 'SHT_SYMTAB':
symbols = sorted(sec.iter_symbols(), key=lambda sym: sym.name)
print(" symbols:")
for sym in symbols:
if not sym.name:
continue
print(" - %r: size=%d, value=0x%x, type=%s, bind=%s"
% (sym.name.decode(),
sym['st_size'],
sym['st_value'],
descriptions.describe_symbol_type(sym['st_info']['type']),
descriptions.describe_symbol_bind(sym['st_info']['bind']),
))
print()
@classmethod
def _object_compiled_hook(cls, ll_module, buf):
"""
`ll_module` was compiled into object code `buf`.
"""
try:
self = ll_module.__library
except AttributeError:
return
if self._object_caching_enabled:
self._compiled = True
self._compiled_object = buf
@classmethod
def _object_getbuffer_hook(cls, ll_module):
"""
Return a cached object code for `ll_module`.
"""
try:
self = ll_module.__library
except AttributeError:
return
if self._object_caching_enabled and self._compiled_object:
buf = self._compiled_object
self._compiled_object = None
return buf
def serialize_using_bitcode(self):
"""
Serialize this library using its bitcode as the cached representation.
"""
self._ensure_finalized()
return (self._name, 'bitcode', self._final_module.as_bitcode())
def serialize_using_object_code(self):
"""
Serialize this library using its object code as the cached
representation. We also include its bitcode for further inlining
with other libraries.
"""
self._ensure_finalized()
data = (self._get_compiled_object(),
self._get_module_for_linking().as_bitcode())
return (self._name, 'object', data)
@classmethod
def _unserialize(cls, codegen, state):
name, kind, data = state
self = codegen.create_library(name)
assert isinstance(self, cls)
if kind == 'bitcode':
# No need to re-run optimizations, just make the module ready
self._final_module = ll.parse_bitcode(data)
self._finalize_final_module()
return self
elif kind == 'object':
object_code, shared_bitcode = data
self.enable_object_caching()
self._set_compiled_object(object_code)
self._shared_module = ll.parse_bitcode(shared_bitcode)
self._finalize_final_module()
# Load symbols from cache
self._codegen._engine._load_defined_symbols(self._shared_module)
return self
else:
raise ValueError("unsupported serialization kind %r" % (kind,))
class AOTCodeLibrary(CodeLibrary):
def emit_native_object(self):
"""
Return this library as a native object (a bytestring) -- for example
ELF under Linux.
This function implicitly calls .finalize().
"""
self._ensure_finalized()
return self._codegen._tm.emit_object(self._final_module)
def emit_bitcode(self):
"""
Return this library as LLVM bitcode (a bytestring).
This function implicitly calls .finalize().
"""
self._ensure_finalized()
return self._final_module.as_bitcode()
def _finalize_specific(self):
pass
class JITCodeLibrary(CodeLibrary):
def get_pointer_to_function(self, name):
"""
Generate native code for function named *name* and return a pointer
to the start of the function (as an integer).
This function implicitly calls .finalize().
Returns
-------
pointer : int
- zero (null) if no symbol of *name* is defined by this code
library.
- non-zero if the symbol is defined.
"""
self._ensure_finalized()
ee = self._codegen._engine
if not ee.is_symbol_defined(name):
return 0
else:
return self._codegen._engine.get_function_address(name)
def _finalize_specific(self):
self._codegen._scan_and_fix_unresolved_refs(self._final_module)
self._codegen._engine.finalize_object()
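# Illustrative sketch (assumption): converting the integer address returned by
# JITCodeLibrary.get_pointer_to_function() into a Python callable via ctypes.
# The symbol name and signature below are hypothetical.
#
#     import ctypes
#     addr = jit_library.get_pointer_to_function("my_symbol")
#     if addr:
#         cfunc = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(addr)
#         result = cfunc(1.0)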
class RuntimeLinker(object):
"""
For tracking unresolved symbols generated at runtime due to recursion.
"""
PREFIX = '.numba.unresolved$'
def __init__(self):
self._unresolved = utils.UniqueDict()
self._defined = set()
self._resolved = []
def scan_unresolved_symbols(self, module, engine):
"""
Scan and track all unresolved external symbols in the module and
        allocate memory for them.
"""
prefix = self.PREFIX
for gv in module.global_variables:
if gv.name.startswith(prefix):
sym = gv.name[len(prefix):]
# Avoid remapping to existing GV
if engine.is_symbol_defined(gv.name):
continue
# Allocate a memory space for the pointer
abortfn = rtsys.library.get_pointer_to_function("nrt_unresolved_abort")
ptr = ctypes.c_void_p(abortfn)
engine.add_global_mapping(gv, ctypes.addressof(ptr))
self._unresolved[sym] = ptr
def scan_defined_symbols(self, module):
"""
Scan and track all defined symbols.
"""
for fn in module.functions:
if not fn.is_declaration:
self._defined.add(fn.name)
def resolve(self, engine):
"""
Fix unresolved symbols if they are defined.
"""
# An iterator to get all unresolved but available symbols
pending = [name for name in self._unresolved if name in self._defined]
# Resolve pending symbols
for name in pending:
# Get runtime address
fnptr = engine.get_function_address(name)
# Fix all usage
ptr = self._unresolved[name]
ptr.value = fnptr
self._resolved.append((name, ptr)) # keep ptr alive
# Delete resolved
del self._unresolved[name]
def _proxy(old):
@functools.wraps(old)
def wrapper(self, *args, **kwargs):
return old(self._ee, *args, **kwargs)
return wrapper
class JitEngine(object):
"""Wraps an ExecutionEngine to provide custom symbol tracking.
Since the symbol tracking is incomplete (doesn't consider
loaded code object), we are not putting it in llvmlite.
"""
def __init__(self, ee):
self._ee = ee
# Track symbol defined via codegen'd Module
# but not any cached object.
# NOTE: `llvm::ExecutionEngine` will catch duplicated symbols and
        # we are not going to protect against that. Proper duplicated-symbol
        # detection would need more logic to check the linkage (e.g. a `weak`
        # linkage symbol can be overridden). This `_defined_symbols` set is
        # just enough to tell if a symbol exists and will not cause the `EE`
        # symbol lookup to `exit(1)` when a symbol is not found.
self._defined_symbols = set()
def is_symbol_defined(self, name):
"""Is the symbol defined in this session?
"""
return name in self._defined_symbols
def _load_defined_symbols(self, mod):
"""Extract symbols from the module
"""
for gsets in (mod.functions, mod.global_variables):
self._defined_symbols |= {gv.name for gv in gsets
if not gv.is_declaration}
def add_module(self, module):
"""Override ExecutionEngine.add_module
to keep info about defined symbols.
"""
self._load_defined_symbols(module)
return self._ee.add_module(module)
def add_global_mapping(self, gv, addr):
"""Override ExecutionEngine.add_global_mapping
to keep info about defined symbols.
"""
self._defined_symbols.add(gv.name)
return self._ee.add_global_mapping(gv, addr)
#
# The remaining methods are re-export of the ExecutionEngine APIs
#
set_object_cache = _proxy(ll.ExecutionEngine.set_object_cache)
finalize_object = _proxy(ll.ExecutionEngine.finalize_object)
get_function_address = _proxy(ll.ExecutionEngine.get_function_address)
get_global_value_address = _proxy(
ll.ExecutionEngine.get_global_value_address
)
class BaseCPUCodegen(object):
def __init__(self, module_name):
initialize_llvm()
self._data_layout = None
self._llvm_module = ll.parse_assembly(
str(self._create_empty_module(module_name)))
self._llvm_module.name = "global_codegen_module"
self._rtlinker = RuntimeLinker()
self._init(self._llvm_module)
def _init(self, llvm_module):
assert list(llvm_module.global_variables) == [], "Module isn't empty"
target = ll.Target.from_triple(ll.get_process_triple())
tm_options = dict(opt=config.OPT)
self._tm_features = self._customize_tm_features()
self._customize_tm_options(tm_options)
tm = target.create_target_machine(**tm_options)
engine = ll.create_mcjit_compiler(llvm_module, tm)
if config.ENABLE_PROFILING:
engine.enable_jit_events()
self._tm = tm
self._engine = JitEngine(engine)
self._target_data = engine.target_data
self._data_layout = str(self._target_data)
self._mpm = self._module_pass_manager()
self._engine.set_object_cache(self._library_class._object_compiled_hook,
self._library_class._object_getbuffer_hook)
def _create_empty_module(self, name):
ir_module = lc.Module(cgutils.normalize_ir_text(name))
ir_module.triple = ll.get_process_triple()
if self._data_layout:
ir_module.data_layout = self._data_layout
return ir_module
@property
def target_data(self):
"""
The LLVM "target data" object for this codegen instance.
"""
return self._target_data
def create_library(self, name):
"""
Create a :class:`CodeLibrary` object for use with this codegen
instance.
"""
return self._library_class(self, name)
def unserialize_library(self, serialized):
return self._library_class._unserialize(self, serialized)
def _module_pass_manager(self):
pm = ll.create_module_pass_manager()
self._tm.add_analysis_passes(pm)
with self._pass_manager_builder() as pmb:
pmb.populate(pm)
return pm
def _function_pass_manager(self, llvm_module):
pm = ll.create_function_pass_manager(llvm_module)
self._tm.add_analysis_passes(pm)
with self._pass_manager_builder() as pmb:
pmb.populate(pm)
return pm
def _pass_manager_builder(self):
"""
Create a PassManagerBuilder.
Note: a PassManagerBuilder seems good only for one use, so you
should call this method each time you want to populate a module
or function pass manager. Otherwise some optimizations will be
missed...
"""
pmb = lp.create_pass_manager_builder(
opt=config.OPT, loop_vectorize=config.LOOP_VECTORIZE)
return pmb
def _check_llvm_bugs(self):
"""
Guard against some well-known LLVM bug(s).
"""
# Check the locale bug at https://github.com/numba/numba/issues/1569
# Note we can't cache the result as locale settings can change
# across a process's lifetime. Also, for this same reason,
# the check here is a mere heuristic (there may be a race condition
# between now and actually compiling IR).
ir = """
define double @func()
{
ret double 1.23e+01
}
"""
mod = ll.parse_assembly(ir)
ir_out = str(mod)
if "12.3" in ir_out or "1.23" in ir_out:
# Everything ok
return
if "1.0" in ir_out:
loc = locale.getlocale()
raise RuntimeError(
"LLVM will produce incorrect floating-point code "
"in the current locale %s.\nPlease read "
"http://numba.pydata.org/numba-doc/latest/user/faq.html#llvm-locale-bug "
"for more information."
% (loc,))
raise AssertionError("Unexpected IR:\n%s\n" % (ir_out,))
def magic_tuple(self):
"""
Return a tuple unambiguously describing the codegen behaviour.
"""
return (self._llvm_module.triple, self._get_host_cpu_name(),
self._tm_features)
def _scan_and_fix_unresolved_refs(self, module):
self._rtlinker.scan_unresolved_symbols(module, self._engine)
self._rtlinker.scan_defined_symbols(module)
self._rtlinker.resolve(self._engine)
def insert_unresolved_ref(self, builder, fnty, name):
voidptr = llvmir.IntType(8).as_pointer()
ptrname = self._rtlinker.PREFIX + name
llvm_mod = builder.module
try:
fnptr = llvm_mod.get_global(ptrname)
except KeyError:
# Not defined?
fnptr = llvmir.GlobalVariable(llvm_mod, voidptr, name=ptrname)
fnptr.linkage = 'external'
return builder.bitcast(builder.load(fnptr), fnty.as_pointer())
def _get_host_cpu_name(self):
return (ll.get_host_cpu_name()
if config.CPU_NAME is None
else config.CPU_NAME)
def _get_host_cpu_features(self):
if config.CPU_FEATURES is not None:
return config.CPU_FEATURES
return get_host_cpu_features()
class AOTCPUCodegen(BaseCPUCodegen):
"""
A codegen implementation suitable for Ahead-Of-Time compilation
(e.g. generation of object files).
"""
_library_class = AOTCodeLibrary
def __init__(self, module_name, cpu_name=None):
# By default, use generic cpu model for the arch
self._cpu_name = cpu_name or ''
BaseCPUCodegen.__init__(self, module_name)
def _customize_tm_options(self, options):
cpu_name = self._cpu_name
if cpu_name == 'host':
cpu_name = self._get_host_cpu_name()
options['cpu'] = cpu_name
options['reloc'] = 'pic'
options['codemodel'] = 'default'
options['features'] = self._tm_features
def _customize_tm_features(self):
# ISA features are selected according to the requested CPU model
# in _customize_tm_options()
return ''
def _add_module(self, module):
pass
class JITCPUCodegen(BaseCPUCodegen):
"""
A codegen implementation suitable for Just-In-Time compilation.
"""
_library_class = JITCodeLibrary
def _customize_tm_options(self, options):
# As long as we don't want to ship the code to another machine,
# we can specialize for this CPU.
options['cpu'] = self._get_host_cpu_name()
# LLVM 7 change: # https://reviews.llvm.org/D47211#inline-425406
# JIT needs static relocation on x86*
# native target is already initialized from base class __init__
arch = ll.Target.from_default_triple().name
if arch.startswith('x86'): # one of x86 or x86_64
reloc_model = 'static'
else:
reloc_model = 'default'
options['reloc'] = reloc_model
options['codemodel'] = 'jitdefault'
# Set feature attributes (such as ISA extensions)
# This overrides default feature selection by CPU model above
options['features'] = self._tm_features
# Enable JIT debug
options['jitdebug'] = True
def _customize_tm_features(self):
# For JIT target, we will use LLVM to get the feature map
return self._get_host_cpu_features()
def _add_module(self, module):
self._engine.add_module(module)
# XXX: disabling remove module due to MCJIT engine leakage in
# removeModule. The removeModule causes consistent access
# violation with certain test combinations.
# # Early bind the engine method to avoid keeping a reference to self.
# return functools.partial(self._engine.remove_module, module)
def set_env(self, env_name, env):
"""Set the environment address.
Update the GlobalVariable named *env_name* to the address of *env*.
"""
gvaddr = self._engine.get_global_value_address(env_name)
envptr = (ctypes.c_void_p * 1).from_address(gvaddr)
envptr[0] = ctypes.c_void_p(id(env))
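# Illustrative sketch (assumption; identifiers are placeholders): minimal wiring
# of the codegen classes defined above for JIT use.
#
#     codegen = JITCPUCodegen("my_module")
#     library = codegen.create_library("my_library")
#     ir_module = library.create_ir_module("my_ir_module")
#     ...                                   # emit functions into ir_module
#     library.add_ir_module(ir_module)
#     library.finalize()
#     addr = library.get_pointer_to_function(mangled_name)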
def initialize_llvm():
"""Safe to use multiple times.
"""
ll.initialize()
ll.initialize_native_target()
ll.initialize_native_asmprinter()
def get_host_cpu_features():
"""Get host CPU features using LLVM.
The features may be modified due to user setting.
See numba.config.ENABLE_AVX.
"""
try:
features = ll.get_host_cpu_features()
except RuntimeError:
return ''
else:
if not config.ENABLE_AVX:
# Disable all features with name starting with 'avx'
for k in features:
if k.startswith('avx'):
features[k] = False
# Set feature attributes
return features.flatten()
|
the-stack_0_19957 | import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(
os.path.join(BASE_DIR, 'apps')
)
REQUESTS_LOG_PATH = 'log.txt'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'rest_framework',
'rest_framework_swagger',
'rest_framework_nested',
'django_user_agents',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_user_agents.middleware.UserAgentMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.normpath(os.path.join(BASE_DIR, 'templates')),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db/development.db'),
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = os.path.join(ROOT_DIR, 'assets')
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = os.path.normpath(os.path.join(ROOT_DIR, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'file': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filters': ['require_debug_false'],
'filename': 'log/error.log',
'formatter': 'verbose'
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['console'],
},
'django.request': {
'handlers': ['file'],
'level': 'ERROR',
'propagate': True,
},
}
}
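# Illustrative note (assumption, not part of the original settings file): once
# DJANGO_SETTINGS_MODULE points at this module, other code reads these values
# through django.conf.settings, e.g.
#
#     from django.conf import settings
#     log_path = settings.REQUESTS_LOG_PATH   # 'log.txt'
#     if settings.DEBUG:
#         ...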
|
the-stack_0_19961 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
from nova import exception
from nova import flags
from nova.network import linux_net
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import netutils
from nova.virt import vif
from nova.virt.libvirt import config
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
cfg.StrOpt('libvirt_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
cfg.BoolOpt('libvirt_use_virtio_for_bridges',
default=False,
help='Use virtio for bridge interfaces'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(libvirt_vif_opts)
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.driver')
LINUX_DEV_LEN = 14
class LibvirtBridgeDriver(vif.VIFDriver):
"""VIF driver for Linux bridge."""
def _get_configurations(self, instance, network, mapping):
"""Get a dictionary of VIF configurations for bridge type."""
mac_id = mapping['mac'].replace(':', '')
conf = config.LibvirtConfigGuestInterface()
conf.net_type = "bridge"
conf.mac_addr = mapping['mac']
conf.source_dev = network['bridge']
conf.script = ""
if FLAGS.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.filtername = "nova-instance-" + instance['name'] + "-" + mac_id
conf.add_filter_param("IP", mapping['ips'][0]['ip'])
if mapping['dhcp_server']:
conf.add_filter_param("DHCPSERVER", mapping['dhcp_server'])
if FLAGS.use_ipv6:
conf.add_filter_param("RASERVER",
mapping.get('gateway_v6') + "/128")
if FLAGS.allow_same_net_traffic:
net, mask = netutils.get_net_and_mask(network['cidr'])
conf.add_filter_param("PROJNET", net)
conf.add_filter_param("PROJMASK", mask)
if FLAGS.use_ipv6:
net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
network['cidr_v6'])
conf.add_filter_param("PROJNET6", net_v6)
conf.add_filter_param("PROJMASK6", prefixlen_v6)
return conf
def plug(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
network, mapping = vif
if (not network.get('multi_host') and
mapping.get('should_create_bridge')):
if mapping.get('should_create_vlan'):
iface = FLAGS.vlan_interface or network['bridge_interface']
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
{'vlan': network['vlan'],
'bridge': network['bridge']},
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network['vlan'],
network['bridge'],
iface)
else:
iface = FLAGS.flat_interface or network['bridge_interface']
LOG.debug(_("Ensuring bridge %s"), network['bridge'],
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
iface)
return self._get_configurations(instance, network, mapping)
def unplug(self, instance, vif):
"""No manual unplugging required."""
pass
class LibvirtOpenVswitchDriver(vif.VIFDriver):
"""VIF driver for Open vSwitch that uses libivrt type='ethernet'
Used for libvirt versions that do not support
OVS virtual port XML (0.9.10 or earlier).
"""
def get_dev_name(self, iface_id):
return ("tap" + iface_id)[:LINUX_DEV_LEN]
def create_ovs_vif_port(self, dev, iface_id, mac, instance_id):
utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
FLAGS.libvirt_ovs_bridge, dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % iface_id,
'external-ids:iface-status=active',
'external-ids:attached-mac=%s' % mac,
'external-ids:vm-uuid=%s' % instance_id,
run_as_root=True)
def delete_ovs_vif_port(self, dev):
utils.execute('ovs-vsctl', 'del-port', FLAGS.libvirt_ovs_bridge,
dev, run_as_root=True)
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
def plug(self, instance, vif):
network, mapping = vif
iface_id = mapping['vif_uuid']
dev = self.get_dev_name(iface_id)
if not linux_net._device_exists(dev):
            # Older versions of the command 'ip' from the iproute2 package
# don't have support for the tuntap option (lp:882568). If it
# turns out we're on an old version we work around this by using
# tunctl.
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
run_as_root=True)
except exception.ProcessExecutionError:
# Second option: tunctl
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
self.create_ovs_vif_port(dev, iface_id, mapping['mac'],
instance['uuid'])
conf = config.LibvirtConfigGuestInterface()
if FLAGS.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.net_type = "ethernet"
conf.target_dev = dev
conf.script = ""
conf.mac_addr = mapping['mac']
return conf
def unplug(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
network, mapping = vif
self.delete_ovs_vif_port(self.get_dev_name(mapping['vif_uuid']))
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
LibvirtOpenVswitchDriver):
"""VIF driver that uses OVS + Linux Bridge for iptables compatibility.
Enables the use of OVS-based Quantum plugins while at the same
time using iptables-based filtering, which requires that vifs be
    plugged into a linux bridge, not OVS. IPtables filtering is useful
    in particular for Nova security groups.
"""
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:LINUX_DEV_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:LINUX_DEV_LEN],
("qvo%s" % iface_id)[:LINUX_DEV_LEN])
def plug(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms
"""
network, mapping = vif
iface_id = mapping['vif_uuid']
br_name = self.get_br_name(iface_id)
v1_name, v2_name = self.get_veth_pair_names(iface_id)
linux_net._create_veth_pair(v1_name, v2_name)
if not linux_net._device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
self.create_ovs_vif_port(v2_name, iface_id, mapping['mac'],
instance['uuid'])
network['bridge'] = br_name
return self._get_configurations(instance, network, mapping)
def unplug(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
network, mapping = vif
iface_id = mapping['vif_uuid']
br_name = self.get_br_name(iface_id)
v1_name, v2_name = self.get_veth_pair_names(iface_id)
utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
self.delete_ovs_vif_port(v2_name)
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver):
"""VIF driver for Open vSwitch that uses integrated libvirt
OVS virtual port XML (introduced in libvirt 0.9.11)."""
def plug(self, instance, vif):
""" Pass data required to create OVS virtual port element"""
network, mapping = vif
conf = config.LibvirtConfigGuestInterface()
conf.net_type = "bridge"
conf.source_dev = FLAGS.libvirt_ovs_bridge
conf.mac_addr = mapping['mac']
if FLAGS.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid", mapping['vif_uuid'])
return conf
def unplug(self, instance, vif):
"""No action needed. Libvirt takes care of cleanup"""
pass
class QuantumLinuxBridgeVIFDriver(vif.VIFDriver):
"""VIF driver for Linux Bridge when running Quantum."""
def get_dev_name(self, iface_id):
return ("tap" + iface_id)[:LINUX_DEV_LEN]
def plug(self, instance, vif):
network, mapping = vif
iface_id = mapping['vif_uuid']
dev = self.get_dev_name(iface_id)
if FLAGS.libvirt_type != 'xen':
linux_net.QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev)
conf = config.LibvirtConfigGuestInterface()
if FLAGS.libvirt_use_virtio_for_bridges:
conf.model = 'virtio'
conf.net_type = "ethernet"
conf.target_dev = dev
conf.script = ""
conf.mac_addr = mapping['mac']
return conf
def unplug(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
network, mapping = vif
dev = self.get_dev_name(mapping['vif_uuid'])
try:
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
except exception.ProcessExecutionError:
LOG.warning(_("Failed while unplugging vif"), instance=instance)
raise
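# Illustrative sketch (assumption): the name helpers above all truncate to
# LINUX_DEV_LEN (14) characters so the generated names fit the Linux interface
# name limit. For a made-up vif_uuid:
#
#     iface_id = "0a1b2c3d-4e5f-6789-abcd-ef0123456789"
#     ("tap" + iface_id)[:LINUX_DEV_LEN]    # -> 'tap0a1b2c3d-4e'
#     ("qbr" + iface_id)[:LINUX_DEV_LEN]    # -> 'qbr0a1b2c3d-4e'
#     ("qvb%s" % iface_id)[:LINUX_DEV_LEN]  # -> 'qvb0a1b2c3d-4e'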
|
the-stack_0_19963 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.node import PipeIndoor
log = logging.getLogger(__name__)
class TestPipeIndoor(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_pipeindoor(self):
pyidf.validation_level = ValidationLevel.error
obj = PipeIndoor()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_construction_name = "object-list|Construction Name"
obj.construction_name = var_construction_name
# node
var_fluid_inlet_node_name = "node|Fluid Inlet Node Name"
obj.fluid_inlet_node_name = var_fluid_inlet_node_name
# node
var_fluid_outlet_node_name = "node|Fluid Outlet Node Name"
obj.fluid_outlet_node_name = var_fluid_outlet_node_name
# alpha
var_environment_type = "Zone"
obj.environment_type = var_environment_type
# object-list
var_ambient_temperature_zone_name = "object-list|Ambient Temperature Zone Name"
obj.ambient_temperature_zone_name = var_ambient_temperature_zone_name
# object-list
var_ambient_temperature_schedule_name = "object-list|Ambient Temperature Schedule Name"
obj.ambient_temperature_schedule_name = var_ambient_temperature_schedule_name
# object-list
var_ambient_air_velocity_schedule_name = "object-list|Ambient Air Velocity Schedule Name"
obj.ambient_air_velocity_schedule_name = var_ambient_air_velocity_schedule_name
# real
var_pipe_inside_diameter = 0.0001
obj.pipe_inside_diameter = var_pipe_inside_diameter
# real
var_pipe_length = 0.0001
obj.pipe_length = var_pipe_length
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.pipeindoors[0].name, var_name)
self.assertEqual(idf2.pipeindoors[0].construction_name, var_construction_name)
self.assertEqual(idf2.pipeindoors[0].fluid_inlet_node_name, var_fluid_inlet_node_name)
self.assertEqual(idf2.pipeindoors[0].fluid_outlet_node_name, var_fluid_outlet_node_name)
self.assertEqual(idf2.pipeindoors[0].environment_type, var_environment_type)
self.assertEqual(idf2.pipeindoors[0].ambient_temperature_zone_name, var_ambient_temperature_zone_name)
self.assertEqual(idf2.pipeindoors[0].ambient_temperature_schedule_name, var_ambient_temperature_schedule_name)
self.assertEqual(idf2.pipeindoors[0].ambient_air_velocity_schedule_name, var_ambient_air_velocity_schedule_name)
self.assertAlmostEqual(idf2.pipeindoors[0].pipe_inside_diameter, var_pipe_inside_diameter)
        self.assertAlmostEqual(idf2.pipeindoors[0].pipe_length, var_pipe_length)
|
the-stack_0_19964 | import jwt
import os
import requests
from datetime import datetime, timedelta
from common.basedir import PERSIST
from selfdrive.version import version
from common.params import Params
if Params().get("OPKRServer", encoding="utf8") == "0":
API_HOST = os.getenv('API_HOST', 'https://api.retropilot.org')
elif Params().get("OPKRServer", encoding="utf8") == "1":
API_HOST = os.getenv('API_HOST', 'https://api.commadotai.com')
elif Params().get("OPKRServer", encoding="utf8") == "2":
API_HOST = os.getenv('API_HOST', 'http://' + Params().get("OPKRServerAPI", encoding="utf8"))
else:
API_HOST = os.getenv('API_HOST', 'https://api.retropilot.org')
class Api():
def __init__(self, dongle_id):
self.dongle_id = dongle_id
with open(PERSIST+'/comma/id_rsa') as f:
self.private_key = f.read()
def get(self, *args, **kwargs):
return self.request('GET', *args, **kwargs)
def post(self, *args, **kwargs):
return self.request('POST', *args, **kwargs)
def request(self, method, endpoint, timeout=None, access_token=None, **params):
return api_get(endpoint, method=method, timeout=timeout, access_token=access_token, **params)
def get_token(self):
now = datetime.utcnow()
payload = {
'identity': self.dongle_id,
'nbf': now,
'iat': now,
'exp': now + timedelta(hours=1)
}
token = jwt.encode(payload, self.private_key, algorithm='RS256')
if isinstance(token, bytes):
token = token.decode('utf8')
return token
def api_get(endpoint, method='GET', timeout=None, access_token=None, **params):
headers = {}
if access_token is not None:
headers['Authorization'] = "JWT "+access_token
headers['User-Agent'] = "openpilot-" + version
return requests.request(method, API_HOST + "/" + endpoint, timeout=timeout, headers=headers, params=params)
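# Illustrative sketch (assumption): typical use of the Api class above. The
# dongle id and endpoint are placeholders, and a valid id_rsa key must exist
# under PERSIST/comma/.
#
#     api = Api("0123456789abcdef")
#     token = api.get_token()       # JWT valid for one hour
#     resp = api.get("some/endpoint", timeout=10, access_token=token)
#     data = resp.json()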
|
the-stack_0_19970 | """
Copyright 2018 Google LLC
Modifications copyright 2021 Lukas Kreisköther
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from multiprocessing import dummy as multiprocessing
import numpy as np
import tensorflow as tf
from six.moves import range
from SPACE import run_params
from SPACE import utils
from SPACE.cav import CAV
from SPACE.cav import get_or_train_cav
class TCAV(object):
"""TCAV object: runs TCAV for one target and a set of concepts.
The static methods (get_direction_dir_sign, compute_tcav_score,
  get_directional_dir) involve getting directional derivatives and calculating
TCAV scores. These are static because they might be useful independently,
for instance, if you are developing a new interpretability method using
CAVs.
See https://arxiv.org/abs/1711.11279
"""
@staticmethod
def get_direction_dir_sign(mymodel, act, cav, concept, class_id, example):
"""Get the sign of directional derivative.
Args:
mymodel: a model class instance
act: activations of one bottleneck to get gradient with respect to.
cav: an instance of cav
concept: one concept
class_id: index of the class of interest (target) in logit layer.
example: example corresponding to the given activation
Returns:
sign of the directional derivative
"""
# Grad points in the direction which DECREASES probability of class
grad = np.reshape(mymodel.get_gradient(
act, [class_id], cav.bottleneck, example), -1)
dot_prod = np.dot(grad, cav.get_direction(concept))
return dot_prod < 0
@staticmethod
def compute_tcav_score(mymodel,
target_class,
concept,
cav,
class_acts,
examples,
run_parallel=False,
num_workers=20):
"""Compute TCAV score.
Args:
mymodel: a model class instance
target_class: one target class
concept: one concept
cav: an instance of cav
class_acts: activations of the examples in the target class where
examples[i] corresponds to class_acts[i]
examples: an array of examples of the target class where examples[i]
corresponds to class_acts[i]
run_parallel: run this parallel fashion
num_workers: number of workers if we run in parallel.
Returns:
      TCAV score (i.e., ratio of pictures that return a negative dot product
wrt loss).
"""
count = 0
class_id = mymodel.label_to_id(target_class)
if run_parallel:
pool = multiprocessing.Pool(num_workers)
directions = pool.map(
lambda i: TCAV.get_direction_dir_sign(
mymodel, np.expand_dims(class_acts[i], 0),
cav, concept, class_id, examples[i]),
range(len(class_acts)))
return sum(directions) / float(len(class_acts))
else:
for i in range(len(class_acts)):
act = np.expand_dims(class_acts[i], 0)
example = examples[i]
if TCAV.get_direction_dir_sign(
mymodel, act, cav, concept, class_id, example):
count += 1
return float(count) / float(len(class_acts))
@staticmethod
def get_directional_dir(
mymodel, target_class, concept, cav, class_acts, examples):
"""Return the list of values of directional derivatives.
    (Only called when the values are needed as a reference)
Args:
mymodel: a model class instance
target_class: one target class
concept: one concept
cav: an instance of cav
class_acts: activations of the examples in the target class where
examples[i] corresponds to class_acts[i]
examples: an array of examples of the target class where examples[i]
corresponds to class_acts[i]
Returns:
list of values of directional derivatives.
"""
class_id = mymodel.label_to_id(target_class)
directional_dir_vals = []
for i in range(len(class_acts)):
act = np.expand_dims(class_acts[i], 0)
example = examples[i]
grad = np.reshape(
mymodel.get_gradient(act, [class_id], cav.bottleneck, example), -1)
directional_dir_vals.append(np.dot(grad, cav.get_direction(concept)))
return directional_dir_vals
def __init__(self,
sess,
target,
concepts,
bottlenecks,
activation_generator,
alphas,
random_counterpart=None,
cav_dir=None,
num_random_exp=5,
random_concepts=None):
"""Initialze tcav class.
Args:
sess: tensorflow session.
target: one target class
concepts: A list of names of positive concept sets.
bottlenecks: the name of a bottleneck of interest.
activation_generator: an ActivationGeneratorInterface instance to return
activations.
alphas: list of hyper parameters to run
cav_dir: the path to store CAVs
random_counterpart: the random concept to run against the concepts for
statistical testing. If supplied, only this set will be
used as a positive set for calculating random TCAVs
num_random_exp: number of random experiments to compare against.
random_concepts: A list of names of random concepts for the random
experiments to draw from. Optional, if not provided, the
names will be random500_{i} for i in num_random_exp.
Relative TCAV can be performed by passing in the same
value for both concepts and random_concepts.
"""
self.target = target
self.concepts = concepts
self.bottlenecks = bottlenecks
self.activation_generator = activation_generator
self.cav_dir = cav_dir
self.alphas = alphas
self.mymodel = activation_generator.get_model()
self.model_to_run = self.mymodel.model_name
self.sess = sess
self.random_counterpart = random_counterpart
self.relative_tcav = (random_concepts is not None) and (set(concepts) == set(random_concepts))
if random_concepts:
num_random_exp = len(random_concepts)
# make pairs to test.
self._process_what_to_run_expand(num_random_exp=num_random_exp,
random_concepts=random_concepts)
# parameters
self.params = self.get_params()
    tf.logging.info('TCAV will run %s params' % len(self.params))
def run(self, num_workers=10, run_parallel=False, overwrite=False, return_proto=False):
"""Run TCAV for all parameters (concept and random), write results to html.
Args:
num_workers: number of workers to parallelize
run_parallel: run this parallel.
overwrite: if True, overwrite any saved CAV files.
return_proto: if True, returns results as a tcav.Results object; else,
return as a list of dicts.
Returns:
results: an object (either a Results proto object or a list of
dictionaries) containing metrics for TCAV results.
"""
# for random exp, a machine with cpu = 30, ram = 300G, disk = 10G and
# pool worker 50 seems to work.
tf.logging.info('running %s params' % len(self.params))
now = time.time()
if run_parallel:
pool = multiprocessing.Pool(num_workers)
results = pool.map(lambda param: self._run_single_set(param, overwrite=overwrite), self.params)
else:
results = []
for i, param in enumerate(self.params):
tf.logging.info('Running param %s of %s' % (i, len(self.params)))
results.append(self._run_single_set(param, overwrite=overwrite))
tf.logging.info('Done running %s params. Took %s seconds...' % (len(
self.params), time.time() - now))
if return_proto:
return utils.results_to_proto(results)
else:
return results
def _run_single_set(self, param, overwrite=False):
"""Run TCAV with provided for one set of (target, concepts).
Args:
param: parameters to run
overwrite: if True, overwrite any saved CAV files.
Returns:
a dictionary of results (panda frame)
"""
bottleneck = param.bottleneck
concepts = param.concepts
target_class = param.target_class
activation_generator = param.activation_generator
alpha = param.alpha
mymodel = param.model
cav_dir = param.cav_dir
# first check if target class is in model.
tf.logging.info('running %s %s' % (target_class, concepts))
# Get acts
acts = activation_generator.process_and_load_activations(
[bottleneck], concepts + [target_class])
# Get CAVs
cav_hparams = CAV.default_hparams()
cav_hparams.alpha = alpha
cav_instance = get_or_train_cav(
concepts,
bottleneck,
acts,
cav_dir=cav_dir,
cav_hparams=cav_hparams,
overwrite=overwrite)
# clean up
for c in concepts:
del acts[c]
# Hypo testing
a_cav_key = CAV.cav_key(concepts, bottleneck, cav_hparams.model_type,
cav_hparams.alpha)
target_class_for_compute_tcav_score = target_class
cav_concept = concepts[0]
i_up = self.compute_tcav_score(
mymodel, target_class_for_compute_tcav_score, cav_concept,
cav_instance, acts[target_class][cav_instance.bottleneck],
activation_generator.get_examples_for_concept(target_class))
val_directional_dirs = self.get_directional_dir(
mymodel, target_class_for_compute_tcav_score, cav_concept,
cav_instance, acts[target_class][cav_instance.bottleneck],
activation_generator.get_examples_for_concept(target_class))
result = {
'cav_key':
a_cav_key,
'cav_concept':
cav_concept,
'negative_concept':
concepts[1],
'target_class':
target_class,
'cav_accuracies':
cav_instance.accuracies,
'i_up':
i_up,
'val_directional_dirs_abs_mean':
np.mean(np.abs(val_directional_dirs)),
'val_directional_dirs_mean':
np.mean(val_directional_dirs),
'val_directional_dirs_std':
np.std(val_directional_dirs),
'val_directional_dirs':
val_directional_dirs,
'note':
'alpha_%s ' % (alpha),
'alpha':
alpha,
'bottleneck':
bottleneck
}
del acts
return result
def _process_what_to_run_expand(self, num_random_exp=100, random_concepts=None):
"""Get tuples of parameters to run TCAV with.
    TCAV builds random concepts to conduct statistical significance testing
    against the concept. To do this, we build many concept vectors, and many
random vectors. This function prepares runs by expanding parameters.
Args:
num_random_exp: number of random experiments to run to compare.
random_concepts: A list of names of random concepts for the random experiments
to draw from. Optional, if not provided, the names will be
random500_{i} for i in num_random_exp.
"""
target_concept_pairs = [(self.target, self.concepts)]
# take away 1 random experiment if the random counterpart already in random concepts
# take away 1 random experiment if computing Relative TCAV
all_concepts_concepts, pairs_to_run_concepts = (
utils.process_what_to_run_expand(
utils.process_what_to_run_concepts(target_concept_pairs),
self.random_counterpart,
num_random_exp=num_random_exp -
(1 if random_concepts and self.random_counterpart in random_concepts
else 0) - (1 if self.relative_tcav else 0),
random_concepts=random_concepts))
pairs_to_run_randoms = []
all_concepts_randoms = []
# ith random concept
def get_random_concept(i):
return (random_concepts[i] if random_concepts
else 'random500_{}'.format(i))
if self.random_counterpart is None:
# TODO random500_1 vs random500_0 is the same as 1 - (random500_0 vs random500_1)
for i in range(num_random_exp):
all_concepts_randoms_tmp, pairs_to_run_randoms_tmp = (
utils.process_what_to_run_expand(
utils.process_what_to_run_randoms(target_concept_pairs,
get_random_concept(i)),
num_random_exp=num_random_exp - 1,
random_concepts=random_concepts))
pairs_to_run_randoms.extend(pairs_to_run_randoms_tmp)
all_concepts_randoms.extend(all_concepts_randoms_tmp)
else:
      # run only random_counterpart as the positive set for random experiments
all_concepts_randoms_tmp, pairs_to_run_randoms_tmp = (
utils.process_what_to_run_expand(
utils.process_what_to_run_randoms(target_concept_pairs,
self.random_counterpart),
self.random_counterpart,
num_random_exp=num_random_exp - (1 if random_concepts and
self.random_counterpart in random_concepts else 0),
random_concepts=random_concepts))
pairs_to_run_randoms.extend(pairs_to_run_randoms_tmp)
all_concepts_randoms.extend(all_concepts_randoms_tmp)
self.all_concepts = list(set(all_concepts_concepts + all_concepts_randoms))
self.pairs_to_test = pairs_to_run_concepts if self.relative_tcav else pairs_to_run_concepts + pairs_to_run_randoms
def get_params(self):
"""Enumerate parameters for the run function.
Returns:
parameters
"""
params = []
for bottleneck in self.bottlenecks:
for target_in_test, concepts_in_test in self.pairs_to_test:
for alpha in self.alphas:
tf.logging.info('%s %s %s %s', bottleneck, concepts_in_test,
target_in_test, alpha)
params.append(
run_params.RunParams(bottleneck, concepts_in_test, target_in_test,
self.activation_generator, self.cav_dir,
alpha, self.mymodel))
return params
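# Illustrative sketch (assumption): assembling a TCAV run. The model wrapper,
# activation generator, concept/bottleneck names and paths below are
# placeholders for project-specific objects implementing the interfaces used
# above.
#
#     with tf.Session() as sess:
#         mytcav = TCAV(sess,
#                       target='zebra',
#                       concepts=['striped'],
#                       bottlenecks=['mixed4c'],
#                       activation_generator=act_gen,
#                       alphas=[0.1],
#                       cav_dir='/tmp/cavs',
#                       num_random_exp=10)
#         results = mytcav.run(run_parallel=False)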
|
the-stack_0_19976 | from elasticsearch import Elasticsearch
from geoip import open_database
es = Elasticsearch(hosts=[{'host': "127.0.0.1", 'port': 9200}])
res = es.search(index="ssh", body={
"aggs": {
"sources": {
"terms": {
"field": "sourceEntryIp",
"size": 10000011
}
}
}
})
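# Illustrative note (assumption): each bucket returned by the terms aggregation
# above is roughly shaped like {"key": "198.51.100.7", "doc_count": 42}, found
# under res['aggregations']['sources']['buckets'].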
count = 0
data = {}
with open_database('/var/lib/GeoIP/GeoLite2-Country.mmdb') as db:
    for hit in res['aggregations']['sources']['buckets']:
        # Print the GeoIP lookup for the first 12 source IPs only
        if count < 12:
            match = db.lookup(str(hit["key"]))
            print(str(hit["key"]) + " Counter: " + str(hit["doc_count"]) + " Country: " + str(match.country))
        # Count every unique source IP, not just the printed ones
        count = count + 1
print("Total number of unique IPs: " + str(count))
|
the-stack_0_19977 | #!/bin/env python3
# Copyright 2021 Rémi Bernon for CodeWeavers
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import print_function
import gdb
import re
import subprocess
import sys
class LoadSymbolFiles(gdb.Command):
'Command to load symbol files directly from /proc/<pid>/maps.'
def __init__(self):
sup = super(LoadSymbolFiles, self)
sup.__init__('load-symbol-files', gdb.COMMAND_FILES, gdb.COMPLETE_NONE,
False)
self.libs = {}
try:
gdb.execute('alias -a lsf = load-symbol-files', True)
except gdb.error:
pass
def invoke(self, arg, from_tty):
pid = gdb.selected_inferior().pid
if not pid in self.libs: self.libs[pid] = {}
def command(cmd, confirm=from_tty):
to_string = not from_tty
gdb.execute(cmd, from_tty=confirm, to_string=to_string)
def execute(cmd):
return subprocess.check_output(cmd, stderr=subprocess.STDOUT) \
.decode('utf-8')
# load mappings addresses
libs = {}
with open('/proc/{}/maps'.format(pid), 'r') as maps:
for line in maps:
addr, _, _, _, node, path = re.split(r'\s+', line, 5)
path = path.strip()
if node == '0': continue
if path in libs: continue
libs[path] = int(addr.split('-')[0], 16)
# unload symbol file if address changed
for k in set(libs) & set(self.libs[pid]):
if libs[k] != self.libs[pid][k]:
command('remove-symbol-file "{}"'.format(k), confirm=False)
                del self.libs[pid][k]
# load symbol file for new mappings
for k in set(libs) - set(self.libs[pid]):
if arg is not None and re.search(arg, k) is None: continue
addr = self.libs[pid][k] = libs[k]
offs = None
try:
out = execute(['file', k])
except:
continue
# try loading mapping as ELF
try:
out = execute(['readelf', '-l', k])
for line in out.split('\n'):
if not 'LOAD' in line: continue
base = int(line.split()[2], 16)
break
out = execute(['objdump', '-h', k])
for line in out.split('\n'):
if not '.text' in line: continue
offs = int(line.split()[3], 16) - base
break
if offs is None: continue
# try again, assuming mapping is PE
except:
try:
out = execute(['objdump', '-h', k])
for line in out.split('\n'):
if not '.text' in line: continue
offs = int(line.split()[5], 16)
break
if offs is None: continue
except:
continue
command('add-symbol-file "{}" 0x{:x}'.format(k, addr + offs),
confirm=False)
LoadSymbolFiles()
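# Illustrative usage (assumption; the script file name is a placeholder):
#   (gdb) source load_symbol_files.py
#   (gdb) load-symbol-files           # or the short alias: lsf
#   (gdb) lsf ntdll                   # only load mappings whose path matches the regex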
|
the-stack_0_19978 | import re
from inventory.general_functions import parse_uptime
from inventory.gather_inventory import GatherInventory
class CiscoGatherInventory(GatherInventory):
'''
Based-on Cisco IOS
'''
def find_vendor(self):
if 'Cisco' in self.output:
self.net_device.vendor = 'cisco'
self.net_device.save()
else:
raise ValueError("Invalid vendor")
def find_model(self):
match = re.search(r'.*bytes of memory', self.output)
if match:
model = match.group()
self.net_device.model = model.split()[1]
self.net_device.save()
def find_device_type(self):
if self.net_device.model == '881':
self.net_device.device_type = 'router'
else:
raise ValueError("Unable to find device_type from model({})".format(
self.net_device.model))
self.net_device.save()
def find_os_version(self):
'''
String in show version will be similar to the following:
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.4(2)T1,
RELEASE SOFTWARE (fc3)
'''
match = re.search(r'Cisco IOS Software, (.*)', self.output)
if match:
self.net_device.os_version = match.group(1)
self.net_device.save()
def find_serial_number(self):
'''
String in show version will be similar to the following:
Processor board ID FTX10000001
'''
match = re.search(r'Processor board ID (.*)', self.output)
if match:
self.net_device.serial_number = match.group(1)
self.net_device.save()
def find_uptime(self):
'''
String in show version will be similar to the following:
hostname uptime is 8 weeks, 2 days, 23 hours, 22 minutes
'''
match = re.search(r'uptime is (.*)', self.output)
if match:
time_str = match.group(1)
self.net_device.uptime_seconds = parse_uptime(time_str)
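# Illustrative note (assumption; values are made up): sample 'show version'
# lines that the regexes above are written against:
#
#     Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.4(2)T1
#     cisco 881 (MPC8300) processor with 236544K/25600K bytes of memory
#     Processor board ID FTX10000001
#     router1 uptime is 8 weeks, 2 days, 23 hours, 22 minutes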
|
the-stack_0_19985 | """
The actual layout for the renderer.
"""
import pathlib
import prompt_toolkit.filters
import prompt_toolkit.layout
import prompt_toolkit.widgets
import prompt_toolkit.layout.menus
import prompt_toolkit.layout.processors
__all__ = (
'EditorLayout',
)
class EditorLayout(object):
"""
The main layout class.
"""
def __init__(self, config_directory: pathlib.Path):
from ..editor_root import EditorRoot
from .report_message_toolbar import ReportMessageToolbar
from .simple_arg_toolbar import SimpleArgToolbar
from .logger import LoggerWindow
from .lsp_status import LspStatus
self.editor_root = EditorRoot(config_directory)
editor_layout = prompt_toolkit.layout.FloatContainer(
content=prompt_toolkit.layout.HSplit([
self.editor_root.tabbar,
self.editor_root,
self.editor_root.commandline,
ReportMessageToolbar(self.editor_root.commandline.has_focus),
prompt_toolkit.widgets.SystemToolbar(),
self.editor_root.window_arrangement.searchline,
]),
floats=[
prompt_toolkit.layout.Float(right=0, height=1, bottom=0, width=5,
content=SimpleArgToolbar()),
]
)
logger_window = LoggerWindow()
self.lsp_status = LspStatus()
editor_logger = prompt_toolkit.layout.HSplit([
editor_layout,
self.lsp_status,
logger_window,
])
# background color
self.container = prompt_toolkit.layout.FloatContainer(
content=prompt_toolkit.layout.Window(
char=' ',
ignore_content_width=True,
ignore_content_height=True,
),
floats=[
prompt_toolkit.layout.Float(
editor_logger,
transparent=True,
left=0,
right=0,
top=0,
bottom=0
),
],
)
def __pt_container__(self):
return self.container
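# Illustrative note (assumption): because EditorLayout implements
# __pt_container__, an instance can be passed wherever prompt_toolkit expects a
# container, e.g.
#
#     from prompt_toolkit.layout import Layout
#     layout = Layout(EditorLayout(config_directory))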
|
the-stack_0_19988 | # --------------
import pandas as pd
from collections import Counter
# Load dataset
data = pd.read_csv(path)
print(data.isnull().sum())
print(data.info())
print(data.describe())
# --------------
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style(style='darkgrid')
# Store the label values
label = data['Activity']
# plot the countplot
plt.figure(figsize=(10,6))
sns.countplot(label)
# --------------
# make the copy of dataset
data_copy = data.copy()
# Create an empty column
data_copy['duration'] = ''
# Calculate the duration
#duration_df = data.groupby('WALKING_UPSTAIRS','WALKING_DOWNSTAIRS').count()
duration_df = (data_copy.groupby([label[label.isin(['WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS'])], 'subject'])['duration'].count() * 1.28)
duration_df = pd.DataFrame(duration_df)
# Sort the values of duration
plot_data = duration_df.reset_index().sort_values('duration', ascending=False)
plot_data['Activity'] = plot_data['Activity'].map({'WALKING_UPSTAIRS':'Upstairs', 'WALKING_DOWNSTAIRS':'Downstairs'})
plt.figure(figsize=(15,5))
sns.barplot(data=plot_data, x='subject', y='duration', hue='Activity')
plt.title('Participants Compared By Their Staircase Walking Duration')
plt.xlabel('Participants')
plt.ylabel('Total Duration [s]')
plt.show()
# --------------
#exclude the Activity column and the subject column
feature_cols = data.columns[: -2]
#Calculate the correlation values
correlated_values = data[feature_cols].corr()
#stack the data and convert to a dataframe
correlated_values = (correlated_values.stack().to_frame().reset_index()
.rename(columns={'level_0': 'Feature_1', 'level_1': 'Feature_2', 0:'Correlation_score'}))
#create an abs_correlation column
correlated_values['abs_correlation'] = correlated_values.Correlation_score.abs()
#Picking most correlated features without having self correlated pairs
top_corr_fields = correlated_values.sort_values('Correlation_score', ascending = False).query('abs_correlation>0.8 ')
top_corr_fields = top_corr_fields[top_corr_fields['Feature_1'] != top_corr_fields['Feature_2']].reset_index(drop=True)
# --------------
# importing neccessary libraries
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import precision_recall_fscore_support as error_metric
from sklearn.metrics import confusion_matrix, accuracy_score
# Encoding the target variable
le = LabelEncoder()
data['Activity'] = le.fit_transform(data['Activity'])
X = data.drop('Activity',1)
y = data['Activity'].copy()
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=40)
classifier = SVC()
clf = classifier.fit(X_train, y_train)
y_pred = clf.predict(X_test)
precision, recall, f_score, _ = error_metric(y_test, y_pred, average = 'weighted')
model1_score = accuracy_score(y_test, y_pred)
print(model1_score)
print(precision, recall, f_score)
# --------------
# importing libraries
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
# Feature selection using Linear SVC
lsvc = LinearSVC(C = 0.01, penalty="l1", dual=False, random_state=42).fit(X_train, y_train)
model_2 = SelectFromModel(lsvc, prefit=True)
new_train_features = model_2.transform(X_train)
new_test_features = model_2.transform(X_test)
print(new_train_features.shape,new_test_features.shape )
# model building on reduced set of features
classifier_2 = SVC()
clf_2 = classifier_2.fit(new_train_features, y_train)
y_pred_new = clf_2.predict(new_test_features)
model2_score =accuracy_score(y_test, y_pred_new)
precision, recall, f_score, _ = error_metric(y_test, y_pred_new, average='weighted')
print(model2_score)
print(precision, recall, f_score)
# --------------
# Importing Libraries
from sklearn.model_selection import GridSearchCV
# Set the hyperparmeters
parameters = {
'kernel': ['linear', 'rbf'],
'C': [100, 20, 1, 0.1]
}
# Usage of grid search to select the best hyperparmeters
selector = GridSearchCV(SVC(), parameters, scoring='accuracy')
selector.fit(new_train_features, y_train)
print('Best parameter set found:')
print(selector.best_params_)
print('Detailed grid scores:')
means = selector.cv_results_['mean_test_score']
stds = selector.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, selector.cv_results_['params']):
print('%0.3f (+/-%0.03f) for %r' % (mean, std * 2, params))
print()
# Model building after Hyperparameter tuning
classifier_3 = SVC(kernel='rbf', C=100)
clf_3 = classifier_3.fit(new_train_features, y_train)
y_pred_final = clf_3.predict(new_test_features)
model3_score = accuracy_score(y_test, y_pred_final)
print('Accuracy score:', model3_score)
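# A minimal follow-up sketch (GridSearchCV refits the best parameters on the full
# training split by default, so the tuned model is also available directly):
# best_clf = selector.best_estimator_
# print(best_clf.score(new_test_features, y_test))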
|
the-stack_0_19989 | import mxnet as mx
import networkx as nx
import numpy as np
import scipy as sp
import pytest
import dgl
import dgl.nn.mxnet as nn
import dgl.function as fn
import backend as F
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph
from test_utils import parametrize_dtype
from mxnet import autograd, gluon, nd
def check_close(a, b):
assert np.allclose(a.asnumpy(), b.asnumpy(), rtol=1e-4, atol=1e-4)
def _AXWb(A, X, W, b):
X = mx.nd.dot(X, W.data(X.context))
Y = mx.nd.dot(A, X.reshape(X.shape[0], -1)).reshape(X.shape)
return Y + b.data(X.context)
@parametrize_dtype
def test_graph_conv(idtype):
g = dgl.from_networkx(nx.path_graph(3))
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(transpose=False, ctx=ctx)
conv = nn.GraphConv(5, 2, norm='none', bias=True)
conv.initialize(ctx=ctx)
# test#1: basic
h0 = F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))
# test#2: more-dim
h0 = F.ones((3, 5, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
check_close(h1, _AXWb(adj, h0, conv.weight, conv.bias))
conv = nn.GraphConv(5, 2)
conv.initialize(ctx=ctx)
# test#3: basic
h0 = F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
# test#4: basic
h0 = F.ones((3, 5, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
conv = nn.GraphConv(5, 2)
conv.initialize(ctx=ctx)
with autograd.train_mode():
# test#3: basic
h0 = F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
# test#4: basic
h0 = F.ones((3, 5, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
# test not override features
g.ndata["h"] = 2 * F.ones((3, 1))
h1 = conv(g, h0)
assert len(g.ndata) == 1
assert len(g.edata) == 0
assert "h" in g.ndata
check_close(g.ndata['h'], 2 * F.ones((3, 1)))
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [False])
def test_graph_conv2(idtype, g, norm, weight, bias):
g = g.astype(idtype).to(F.ctx())
conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias)
conv.initialize(ctx=F.ctx())
ext_w = F.randn((5, 2)).as_in_context(F.ctx())
nsrc = ndst = g.number_of_nodes()
h = F.randn((nsrc, 5)).as_in_context(F.ctx())
if weight:
h_out = conv(g, h)
else:
h_out = conv(g, h, ext_w)
assert h_out.shape == (ndst, 2)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [False])
def test_graph_conv2_bi(idtype, g, norm, weight, bias):
g = g.astype(idtype).to(F.ctx())
conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias)
conv.initialize(ctx=F.ctx())
ext_w = F.randn((5, 2)).as_in_context(F.ctx())
nsrc = g.number_of_src_nodes()
ndst = g.number_of_dst_nodes()
h = F.randn((nsrc, 5)).as_in_context(F.ctx())
h_dst = F.randn((ndst, 2)).as_in_context(F.ctx())
if weight:
h_out = conv(g, (h, h_dst))
else:
h_out = conv(g, (h, h_dst), ext_w)
assert h_out.shape == (ndst, 2)
def _S2AXWb(A, N, X, W, b):
X1 = X * N
X1 = mx.nd.dot(A, X1.reshape(X1.shape[0], -1))
X1 = X1 * N
X2 = X1 * N
X2 = mx.nd.dot(A, X2.reshape(X2.shape[0], -1))
X2 = X2 * N
X = mx.nd.concat(X, X1, X2, dim=-1)
Y = mx.nd.dot(X, W)
return Y + b
def test_tagconv():
g = dgl.from_networkx(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(transpose=False, ctx=ctx)
norm = mx.nd.power(g.in_degrees().astype('float32'), -0.5)
conv = nn.TAGConv(5, 2, bias=True)
conv.initialize(ctx=ctx)
print(conv)
# test#1: basic
h0 = F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
shp = norm.shape + (1,) * (h0.ndim - 1)
norm = norm.reshape(shp).as_in_context(h0.context)
assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.data(ctx), conv.h_bias.data(ctx)))
conv = nn.TAGConv(5, 2)
conv.initialize(ctx=ctx)
# test#2: basic
h0 = F.ones((3, 5))
h1 = conv(g, h0)
assert h1.shape[-1] == 2
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_gat_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gat = nn.GATConv(10, 20, 5) # n_heads = 5
gat.initialize(ctx=ctx)
print(gat)
feat = F.randn((g.number_of_nodes(), 10))
h = gat(g, feat)
assert h.shape == (g.number_of_nodes(), 5, 20)
_, a = gat(g, feat, True)
assert a.shape == (g.number_of_edges(), 5, 1)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_gat_conv_bi(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gat = nn.GATConv(5, 2, 4)
gat.initialize(ctx=ctx)
feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), 4, 2)
_, a = gat(g, feat, True)
assert a.shape == (g.number_of_edges(), 4, 1)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
def test_sage_conv(idtype, g, aggre_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
sage = nn.SAGEConv(5, 10, aggre_type)
feat = F.randn((g.number_of_nodes(), 5))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 10
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
def test_sage_conv_bi(idtype, g, aggre_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), 2, aggre_type)
feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == g.number_of_dst_nodes()
@parametrize_dtype
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
def test_sage_conv_bi2(idtype, aggre_type):
# Test the case for graphs without edges
g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3})
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
sage = nn.SAGEConv((3, 3), 2, 'gcn')
feat = (F.randn((5, 3)), F.randn((3, 3)))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == 3
for aggre_type in ['mean', 'pool']:
sage = nn.SAGEConv((3, 1), 2, aggre_type)
feat = (F.randn((5, 3)), F.randn((3, 1)))
sage.initialize(ctx=ctx)
h = sage(g, feat)
assert h.shape[-1] == 2
assert h.shape[0] == 3
def test_gg_conv():
g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
ctx = F.ctx()
gg_conv = nn.GatedGraphConv(10, 20, 3, 4) # n_step = 3, n_etypes = 4
gg_conv.initialize(ctx=ctx)
print(gg_conv)
# test#1: basic
h0 = F.randn((20, 10))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = gg_conv(g, h0, etypes)
assert h1.shape == (20, 20)
def test_cheb_conv():
g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
ctx = F.ctx()
cheb = nn.ChebConv(10, 20, 3) # k = 3
cheb.initialize(ctx=ctx)
print(cheb)
# test#1: basic
h0 = F.randn((20, 10))
h1 = cheb(g, h0)
assert h1.shape == (20, 20)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_agnn_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
agnn_conv = nn.AGNNConv(0.1, True)
agnn_conv.initialize(ctx=ctx)
print(agnn_conv)
feat = F.randn((g.number_of_nodes(), 10))
h = agnn_conv(g, feat)
assert h.shape == (g.number_of_nodes(), 10)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_agnn_conv_bi(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
agnn_conv = nn.AGNNConv(0.1, True)
agnn_conv.initialize(ctx=ctx)
print(agnn_conv)
feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
h = agnn_conv(g, feat)
assert h.shape == (g.number_of_dst_nodes(), 5)
def test_appnp_conv():
g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
ctx = F.ctx()
appnp_conv = nn.APPNPConv(3, 0.1, 0)
appnp_conv.initialize(ctx=ctx)
print(appnp_conv)
# test#1: basic
h0 = F.randn((20, 10))
h1 = appnp_conv(g, h0)
assert h1.shape == (20, 10)
def test_dense_cheb_conv():
for k in range(1, 4):
ctx = F.ctx()
g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.3)).to(F.ctx())
adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
cheb = nn.ChebConv(5, 2, k)
dense_cheb = nn.DenseChebConv(5, 2, k)
cheb.initialize(ctx=ctx)
dense_cheb.initialize(ctx=ctx)
for i in range(len(cheb.fc)):
dense_cheb.fc[i].weight.set_data(
cheb.fc[i].weight.data())
if cheb.bias is not None:
dense_cheb.bias.set_data(
cheb.bias.data())
feat = F.randn((100, 5))
out_cheb = cheb(g, feat, [2.0])
out_dense_cheb = dense_cheb(adj, feat, 2.0)
assert F.allclose(out_cheb, out_dense_cheb)
@parametrize_dtype
@pytest.mark.parametrize('norm_type', ['both', 'right', 'none'])
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_dense_graph_conv(idtype, g, norm_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
conv.initialize(ctx=ctx)
dense_conv.initialize(ctx=ctx)
dense_conv.weight.set_data(
conv.weight.data())
dense_conv.bias.set_data(
conv.bias.data())
feat = F.randn((g.number_of_src_nodes(), 5))
out_conv = conv(g, feat)
out_dense_conv = dense_conv(adj, feat)
assert F.allclose(out_conv, out_dense_conv)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'bipartite', 'block-bipartite']))
def test_dense_sage_conv(idtype, g):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
adj = g.adjacency_matrix(transpose=False, ctx=ctx).tostype('default')
sage = nn.SAGEConv(5, 2, 'gcn')
dense_sage = nn.DenseSAGEConv(5, 2)
sage.initialize(ctx=ctx)
dense_sage.initialize(ctx=ctx)
dense_sage.fc.weight.set_data(
sage.fc_neigh.weight.data())
dense_sage.fc.bias.set_data(
sage.fc_neigh.bias.data())
if len(g.ntypes) == 2:
feat = (
F.randn((g.number_of_src_nodes(), 5)),
F.randn((g.number_of_dst_nodes(), 5))
)
else:
feat = F.randn((g.number_of_nodes(), 5))
out_sage = sage(g, feat)
out_dense_sage = dense_sage(adj, feat)
assert F.allclose(out_sage, out_dense_sage)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_edge_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
edge_conv = nn.EdgeConv(5, 2)
edge_conv.initialize(ctx=ctx)
print(edge_conv)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = edge_conv(g, h0)
assert h1.shape == (g.number_of_nodes(), 2)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_edge_conv_bi(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
edge_conv = nn.EdgeConv(5, 2)
edge_conv.initialize(ctx=ctx)
print(edge_conv)
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
x0 = F.randn((g.number_of_dst_nodes(), 5))
h1 = edge_conv(g, (h0, x0))
assert h1.shape == (g.number_of_dst_nodes(), 2)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv(g, idtype, aggregator_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gin_conv = nn.GINConv(lambda x: x, aggregator_type, 0.1)
gin_conv.initialize(ctx=ctx)
print(gin_conv)
# test #1: basic
feat = F.randn((g.number_of_nodes(), 5))
h = gin_conv(g, feat)
assert h.shape == (g.number_of_nodes(), 5)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv_bi(g, idtype, aggregator_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gin_conv = nn.GINConv(lambda x: x, aggregator_type, 0.1)
gin_conv.initialize(ctx=ctx)
print(gin_conv)
# test #2: bipartite
feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
h = gin_conv(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), 5)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
def test_gmm_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gmm_conv = nn.GMMConv(5, 2, 5, 3, 'max')
gmm_conv.initialize(ctx=ctx)
h0 = F.randn((g.number_of_nodes(), 5))
pseudo = F.randn((g.number_of_edges(), 5))
h1 = gmm_conv(g, h0, pseudo)
assert h1.shape == (g.number_of_nodes(), 2)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
def test_gmm_conv_bi(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gmm_conv = nn.GMMConv((5, 4), 2, 5, 3, 'max')
gmm_conv.initialize(ctx=ctx)
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
hd = F.randn((g.number_of_dst_nodes(), 4))
pseudo = F.randn((g.number_of_edges(), 5))
h1 = gmm_conv(g, (h0, hd), pseudo)
assert h1.shape == (g.number_of_dst_nodes(), 2)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
def test_nn_conv(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
nn_conv = nn.NNConv(5, 2, gluon.nn.Embedding(3, 5 * 2), 'max')
nn_conv.initialize(ctx=ctx)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = nn_conv(g, h0, etypes)
assert h1.shape == (g.number_of_nodes(), 2)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
def test_nn_conv_bi(g, idtype):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
nn_conv = nn.NNConv((5, 4), 2, gluon.nn.Embedding(3, 5 * 2), 'max')
nn_conv.initialize(ctx=ctx)
# test #1: basic
h0 = F.randn((g.number_of_src_nodes(), 5))
hd = F.randn((g.number_of_dst_nodes(), 4))
etypes = nd.random.randint(0, 4, g.number_of_edges()).as_in_context(ctx)
h1 = nn_conv(g, (h0, hd), etypes)
assert h1.shape == (g.number_of_dst_nodes(), 2)
def test_sg_conv():
g = dgl.from_networkx(nx.erdos_renyi_graph(20, 0.3)).to(F.ctx())
g = dgl.add_self_loop(g)
ctx = F.ctx()
sgc = nn.SGConv(5, 2, 2)
sgc.initialize(ctx=ctx)
print(sgc)
# test #1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = sgc(g, h0)
assert h1.shape == (g.number_of_nodes(), 2)
def test_set2set():
g = dgl.from_networkx(nx.path_graph(10)).to(F.ctx())
ctx = F.ctx()
s2s = nn.Set2Set(5, 3, 3) # hidden size 5, 3 iters, 3 layers
s2s.initialize(ctx=ctx)
print(s2s)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = s2s(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.ndim == 2
# test#2: batched graph
bg = dgl.batch([g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h1 = s2s(bg, h0)
assert h1.shape[0] == 3 and h1.shape[1] == 10 and h1.ndim == 2
def test_glob_att_pool():
g = dgl.from_networkx(nx.path_graph(10)).to(F.ctx())
ctx = F.ctx()
gap = nn.GlobalAttentionPooling(gluon.nn.Dense(1), gluon.nn.Dense(10))
gap.initialize(ctx=ctx)
print(gap)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = gap(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.ndim == 2
# test#2: batched graph
bg = dgl.batch([g, g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h1 = gap(bg, h0)
assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.ndim == 2
def test_simple_pool():
g = dgl.from_networkx(nx.path_graph(15)).to(F.ctx())
sum_pool = nn.SumPooling()
avg_pool = nn.AvgPooling()
max_pool = nn.MaxPooling()
sort_pool = nn.SortPooling(10) # k = 10
print(sum_pool, avg_pool, max_pool, sort_pool)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = sum_pool(g, h0)
check_close(F.squeeze(h1, 0), F.sum(h0, 0))
h1 = avg_pool(g, h0)
check_close(F.squeeze(h1, 0), F.mean(h0, 0))
h1 = max_pool(g, h0)
check_close(F.squeeze(h1, 0), F.max(h0, 0))
h1 = sort_pool(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 * 5 and h1.ndim == 2
# test#2: batched graph
g_ = dgl.from_networkx(nx.path_graph(5)).to(F.ctx())
bg = dgl.batch([g, g_, g, g_, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h1 = sum_pool(bg, h0)
truth = mx.nd.stack(F.sum(h0[:15], 0),
F.sum(h0[15:20], 0),
F.sum(h0[20:35], 0),
F.sum(h0[35:40], 0),
F.sum(h0[40:55], 0), axis=0)
check_close(h1, truth)
h1 = avg_pool(bg, h0)
truth = mx.nd.stack(F.mean(h0[:15], 0),
F.mean(h0[15:20], 0),
F.mean(h0[20:35], 0),
F.mean(h0[35:40], 0),
F.mean(h0[40:55], 0), axis=0)
check_close(h1, truth)
h1 = max_pool(bg, h0)
truth = mx.nd.stack(F.max(h0[:15], 0),
F.max(h0[15:20], 0),
F.max(h0[20:35], 0),
F.max(h0[35:40], 0),
F.max(h0[40:55], 0), axis=0)
check_close(h1, truth)
h1 = sort_pool(bg, h0)
assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.ndim == 2
def test_rgcn():
ctx = F.ctx()
etype = []
g = dgl.from_scipy(sp.sparse.random(100, 100, density=0.1)).to(F.ctx())
# 5 etypes
R = 5
for i in range(g.number_of_edges()):
etype.append(i % 5)
B = 2
I = 10
O = 8
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis.initialize(ctx=ctx)
h = nd.random.randn(100, I, ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_basis(g, h, r)
assert list(h_new.shape) == [100, O]
rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
rgc_bdd.initialize(ctx=ctx)
h = nd.random.randn(100, I, ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_bdd(g, h, r)
assert list(h_new.shape) == [100, O]
# with norm
norm = nd.zeros((g.number_of_edges(), 1), ctx=ctx)
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis.initialize(ctx=ctx)
h = nd.random.randn(100, I, ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_basis(g, h, r, norm)
assert list(h_new.shape) == [100, O]
rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
rgc_bdd.initialize(ctx=ctx)
h = nd.random.randn(100, I, ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_bdd(g, h, r, norm)
assert list(h_new.shape) == [100, O]
# id input
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis.initialize(ctx=ctx)
h = nd.random.randint(0, I, (100,), ctx=ctx)
r = nd.array(etype, ctx=ctx)
h_new = rgc_basis(g, h, r)
assert list(h_new.shape) == [100, O]
def test_sequential():
ctx = F.ctx()
# test single graph
class ExampleLayer(gluon.nn.Block):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, graph, n_feat, e_feat):
graph = graph.local_var()
graph.ndata['h'] = n_feat
graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
n_feat += graph.ndata['h']
graph.apply_edges(fn.u_add_v('h', 'h', 'e'))
e_feat += graph.edata['e']
return n_feat, e_feat
g = dgl.graph(([], [])).to(F.ctx())
g.add_nodes(3)
g.add_edges([0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2])
net = nn.Sequential()
net.add(ExampleLayer())
net.add(ExampleLayer())
net.add(ExampleLayer())
net.initialize(ctx=ctx)
n_feat = F.randn((3, 4))
e_feat = F.randn((9, 4))
n_feat, e_feat = net(g, n_feat, e_feat)
assert n_feat.shape == (3, 4)
assert e_feat.shape == (9, 4)
# test multiple graphs
class ExampleLayer(gluon.nn.Block):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, graph, n_feat):
graph = graph.local_var()
graph.ndata['h'] = n_feat
graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
n_feat += graph.ndata['h']
return n_feat.reshape(graph.number_of_nodes() // 2, 2, -1).sum(1)
g1 = dgl.from_networkx(nx.erdos_renyi_graph(32, 0.05)).to(F.ctx())
g2 = dgl.from_networkx(nx.erdos_renyi_graph(16, 0.2)).to(F.ctx())
g3 = dgl.from_networkx(nx.erdos_renyi_graph(8, 0.8)).to(F.ctx())
net = nn.Sequential()
net.add(ExampleLayer())
net.add(ExampleLayer())
net.add(ExampleLayer())
net.initialize(ctx=ctx)
n_feat = F.randn((32, 4))
n_feat = net([g1, g2, g3], n_feat)
assert n_feat.shape == (4, 4)
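# Custom cross-type aggregator for HeteroGraphConv below: it weights the output of the
# i-th relation by (i + 1) before summing, exercising the user-defined-aggregate code path.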
def myagg(alist, dsttype):
rst = alist[0]
for i in range(1, len(alist)):
rst = rst + (i + 1) * alist[i]
return rst
@parametrize_dtype
@pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg])
def test_hetero_conv(agg, idtype):
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])},
idtype=idtype, device=F.ctx())
conv = nn.HeteroGraphConv({
'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)},
agg)
conv.initialize(ctx=F.ctx())
print(conv)
uf = F.randn((4, 2))
gf = F.randn((4, 4))
sf = F.randn((2, 3))
h = conv(g, {'user': uf, 'store': sf, 'game': gf})
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
block = dgl.to_block(g.to(F.cpu()), {'user': [0, 1, 2, 3], 'game': [0, 1, 2, 3], 'store': []}).to(F.ctx())
h = conv(block, ({'user': uf, 'game': gf, 'store': sf}, {'user': uf, 'game': gf, 'store': sf[0:0]}))
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
h = conv(block, {'user': uf, 'game': gf, 'store': sf})
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
# test with mod args
class MyMod(mx.gluon.nn.Block):
def __init__(self, s1, s2):
super(MyMod, self).__init__()
self.carg1 = 0
self.s1 = s1
self.s2 = s2
def forward(self, g, h, arg1=None): # mxnet does not support kwargs
if arg1 is not None:
self.carg1 += 1
return F.zeros((g.number_of_dst_nodes(), self.s2))
mod1 = MyMod(2, 3)
mod2 = MyMod(2, 4)
mod3 = MyMod(3, 4)
conv = nn.HeteroGraphConv({
'follows': mod1,
'plays': mod2,
'sells': mod3},
agg)
conv.initialize(ctx=F.ctx())
mod_args = {'follows' : (1,), 'plays' : (1,)}
h = conv(g, {'user' : uf, 'store' : sf, 'game': gf}, mod_args)
assert mod1.carg1 == 1
assert mod2.carg1 == 1
assert mod3.carg1 == 0
if __name__ == '__main__':
test_graph_conv()
test_gat_conv()
test_sage_conv()
test_gg_conv()
test_cheb_conv()
test_agnn_conv()
test_appnp_conv()
test_dense_cheb_conv()
test_dense_graph_conv()
test_dense_sage_conv()
test_edge_conv()
test_gin_conv()
test_gmm_conv()
test_nn_conv()
test_sg_conv()
test_set2set()
test_glob_att_pool()
test_simple_pool()
test_rgcn()
test_sequential()
|
the-stack_0_19990 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2021 Dinesh Pinto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import html
import json
import logging
import os
import random
import sys
import traceback
import pytz
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from spongebobcase import tospongebob
from telegram import Update, Message, ParseMode
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, Job
from config import TELEGRAM_TOKEN, TELEGRAM_CHAT_ID
from src.reddit_meme_farmer import RedditMemeFarmer
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class TelegramBotManager(RedditMemeFarmer):
def __init__(self):
super().__init__()
        # create the updater, which automatically also creates a dispatcher and an update
        # queue connecting them
self._chat_id = TELEGRAM_CHAT_ID
self._updater = Updater(token=TELEGRAM_TOKEN, use_context=True)
self.dispatcher = self._updater.dispatcher
# add handlers for start and help commands
self.dispatcher.add_handler(CommandHandler("start", self.start))
self.dispatcher.add_handler(CommandHandler("help", self.help))
self.dispatcher.add_handler(CommandHandler("meme", self.get_meme))
self.dispatcher.add_handler(CommandHandler("dailymeme", self.daily_meme_start))
self.dispatcher.add_handler(CommandHandler("dailymemestop", self.daily_meme_stop))
self.dispatcher.add_handler(CommandHandler("conversationstart", self.chatbot_start))
self.dispatcher.add_handler(CommandHandler("conversationstop", self.chatbot_stop))
        # add a handler for normal text (not commands)
self.dispatcher.add_handler(MessageHandler(Filters.text, self.text))
        # add a handler for errors
self.dispatcher.add_error_handler(self.error_handler)
bot_name, bot_username = self._updater.bot.get_me()["first_name"], self._updater.bot.get_me()["username"]
startup_text = f'{bot_name} (@{bot_username}) is now running using Telegram' \
f' chat id {self._chat_id}'
logger.info(startup_text)
# set up variables
self.chatbot = ChatBot("CryptoMemeBot")
self.chatbot_on = False
def send_message(self, message: str) -> Message:
return self._updater.bot.send_message(self._chat_id, text=message)
def send_photo(self, image_path: str, caption: str = None) -> Message:
return self._updater.bot.send_photo(self._chat_id, photo=open(image_path, "rb"), caption=caption)
def send_video(self, video_path: str, caption: str = None) -> Message:
return self._updater.bot.send_video(self._chat_id, video=open(video_path, "rb"), supports_streaming=True,
caption=caption)
def send_animation(self, animation_path: str, caption: str = None) -> Message:
return self._updater.bot.send_animation(self._chat_id, animation=open(animation_path, "rb"), caption=caption)
# function to handle the /start command
@staticmethod
def start(update: Update, _: CallbackContext):
update.message.reply_text('Start command received')
# function to handle the /help command
@staticmethod
def help(update: Update, _: CallbackContext):
msg = "The following commands are available:\n" \
"/meme: Fetch a dank meme\n" \
"/dailymeme: Fetch a meme daily at 9:30 AM\n" \
"/dailymemestop: Stop fetching daily memes\n" \
"/conversationstart: Start talking with a trained chat bot\n" \
"/conversationstop: Stop the chat bot\n" \
"/help: This help page"
update.message.reply_text(msg)
    # function to handle errors that occur in the dispatcher
    def error_handler(self, update: object, context: CallbackContext):
        """Log the error and send a telegram message to notify the developer."""
        context.bot.send_message(chat_id=self._chat_id, text=f'An error occurred: {context.error}')
# Log the error before we do anything else, so we can see it even if something breaks.
logger.error(msg="Exception while handling an update:", exc_info=context.error)
# traceback.format_exception returns the usual python message about an exception, but as a
# list of strings rather than a single string, so we have to join them together.
tb_list = traceback.format_exception(None, context.error, context.error.__traceback__)
tb_string = ''.join(tb_list)
# Build the message with some markup and additional information about what happened.
# You might need to add some logic to deal with messages longer than the 4096 character limit.
update_str = update.to_dict() if isinstance(update, Update) else str(update)
message = (
f'An exception was raised while handling an update\n'
f'<pre>update = {html.escape(json.dumps(update_str, indent=2, ensure_ascii=False))}'
'</pre>\n\n'
f'<pre>context.chat_data = {html.escape(str(context.chat_data))}</pre>\n\n'
f'<pre>context.user_data = {html.escape(str(context.user_data))}</pre>\n\n'
f'<pre>{html.escape(tb_string)}</pre>'
)
# Finally, send the message
context.bot.send_message(chat_id=self._chat_id, text=message, parse_mode=ParseMode.HTML)
def _send_meme(self, context: CallbackContext):
filepath = RedditMemeFarmer.get_crypto_meme_path(self)
filename, ext = os.path.splitext(os.path.basename(filepath))
# Check if function is called by a Job or directly, and set chat id accordingly
if isinstance(context.job, Job):
chat_id = context.job.context
else:
chat_id = self._chat_id
if ext == ".jpg" or ext == ".png":
context.bot.send_photo(chat_id=chat_id, photo=open(filepath, "rb"), caption=filename)
elif ext == ".gif":
context.bot.send_animation(chat_id=chat_id, animation=open(filepath, "rb"), caption=filename)
elif ext == ".mp4":
context.bot.send_video(chat_id=chat_id, video=open(filepath, "rb"),
supports_streaming=True, caption=filename)
else:
text = f"Unknown file extension '{ext}' in filepath"
context.bot.send_message(chat_id=chat_id, text=text)
logger.warning(text)
def get_meme(self, update: Update, context: CallbackContext):
update.message.reply_text(f'Fetching a dank meme, just for you...')
self._send_meme(context)
def daily_meme_start(self, update: Update, context: CallbackContext):
daily_meme_time = datetime.time(hour=9, minute=30, second=00, tzinfo=pytz.timezone('Europe/Vienna'))
self._updater.bot.send_message(chat_id=update.message.chat_id,
text=f"Daily meme has been set! You'll be sent a meme at "
f"{daily_meme_time.strftime('%H:%M')} daily")
context.job_queue.run_daily(self._send_meme, context=update.message.chat_id,
days=(0, 1, 2, 3, 4, 5, 6), time=daily_meme_time)
def daily_meme_stop(self, update: Update, context: CallbackContext):
self._updater.bot.send_message(chat_id=update.message.chat_id,
text=f"Daily meme stopped")
context.job_queue.stop()
def chatbot_start(self, update: Update, _: CallbackContext):
self._updater.bot.send_message(chat_id=update.message.chat_id,
text="Starting and training chatbot in English")
# Create a new trainer for the chatbot
trainer = ChatterBotCorpusTrainer(self.chatbot)
# Train the chatbot based on the english corpus
trainer.train("chatterbot.corpus.english")
self._updater.bot.send_message(chat_id=update.message.chat_id,
text="Training complete, chatbot is ready!")
self.chatbot_on = True
def chatbot_stop(self, update: Update, _: CallbackContext):
self._updater.bot.send_message(chat_id=update.message.chat_id,
text="Chatbot stopped")
self.chatbot_on = False
# function to handle normal text
def text(self, update: Update, _: CallbackContext):
msg_text = update.message.text
antagonistics = ["annoying", "sad", "boring", "poor"]
if not self.chatbot_on:
if "bad bot" in msg_text.lower():
responses = [
f"I'm sorry {update.effective_user.first_name}, I will try harder next time 😭",
f"But please {update.effective_user.first_name}, I am but a mere bot...😓"
]
update.message.reply_text(random.choice(responses))
if "good bot" in msg_text.lower():
responses = [
f"Woohoo, thanks {update.effective_user.first_name}, I aim to please 😊",
f"Thank you very much {update.effective_user.first_name}! 😉"
]
update.message.reply_text(random.choice(responses))
if any(antagonistic in msg_text.lower() for antagonistic in antagonistics):
update.message.reply_text(tospongebob(msg_text))
else:
response = self.chatbot.get_response(msg_text).text
update.message.reply_text(response)
def start_polling(self):
# start your shiny new bot
self._updater.start_polling()
logger.info(f"Started polling...")
# run the bot until Ctrl-C
self._updater.idle()
def exit(self, update: Update, _: CallbackContext):
try:
text = f'Shutting down bot'
logger.info(text)
update.message.reply_text(text)
self._updater.stop()
except Exception as e:
text = "Failed to shut down bot"
update.message.reply_text(text + f"{e}")
logger.warning(text + f"{e}")
else:
text = "Bot stopped successfully"
update.message.reply_text(text)
logger.info(text)
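# Example entry point (a minimal sketch; assumes config.py supplies a valid
# TELEGRAM_TOKEN / TELEGRAM_CHAT_ID and that RedditMemeFarmer needs no arguments):
# if __name__ == "__main__":
#     bot = TelegramBotManager()
#     bot.start_polling()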
|
the-stack_0_19991 | """
This file is part of the repo: https://github.com/tencent-ailab/hifi3dface
If you find the code useful, please cite our paper:
"High-Fidelity 3D Digital Human Creation from RGB-D Selfies."
Xiangkai Lin*, Yajing Chen*, Linchao Bao*, Haoxian Zhang, Sheng Wang, Xuefei Zhe, Xinwei Jiang, Jue Wang, Dong Yu, and Zhengyou Zhang.
arXiv: https://arxiv.org/abs/2010.05562
Copyright (c) [2020] [Tencent AI Lab]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
import numpy as np
import cv2
import sys
import os
sys.path.append("..")
from third_party.rasterize_triangles import rasterize_clip_space
from .basis import load_3dmm_basis, get_geometry, get_region_uv_texture, get_texture
class Shader(object):
def __init__(self):
pass
@staticmethod
def _lambertian_attentuation():
""" constant weight from sfsnet matlab """
return np.pi * np.array([1, 2.0 / 3, 1.0 / 4])
@staticmethod
def _harmonics(ver_norm, order=2):
"""compute the spherical harmonics function for 3D vertices.
:param:
ver_norm: [batch, N, 3], vertex normal
:return:
H: [batch, 9], 2-order harmonic basis
"""
lam_attn = Shader._lambertian_attentuation()
x, y, z = tf.split(ver_norm, 3, -1)
x2 = tf.square(x)
y2 = tf.square(y)
z2 = tf.square(z)
xy = x * y
yz = y * z
xz = x * z
PI = np.pi
l0 = np.sqrt(1.0 / (4 * PI)) * tf.ones_like(x)
l1x = np.sqrt(3.0 / (4 * PI)) * x
l1y = np.sqrt(3.0 / (4 * PI)) * y
l1z = np.sqrt(3.0 / (4 * PI)) * z
l2xy = np.sqrt(15.0 / (4 * PI)) * xy
l2yz = np.sqrt(15.0 / (4 * PI)) * yz
l2xz = np.sqrt(15.0 / (4 * PI)) * xz
l2z2 = np.sqrt(5.0 / (16 * PI)) * (3 * z2 - 1)
l2x2_y2 = np.sqrt(15.0 / (16 * PI)) * (x2 - y2)
H = tf.concat(
[l0, l1z, l1x, l1y, l2z2, l2xz, l2yz, l2x2_y2, l2xy],
-1,
name="hamonics_basis_order2",
)
if order == 3:
b9 = 1.0 / 4.0 * np.sqrt(35.0 / (2 * PI)) * (3 * x2 - z2) * z
b10 = 1.0 / 2.0 * np.sqrt(105.0 / PI) * x * yz
b11 = 1.0 / 4 * np.sqrt(21.0 / (2 * PI)) * z * (4 * y2 - x2 - z2)
b12 = 1.0 / 4 * np.sqrt(7.0 / PI) * y * (2 * y2 - 3 * x2 - 3 * z2)
b13 = 1.0 / 4 * np.sqrt(21.0 / (2 * PI)) * x * (4 * y2 - x2 - z2)
b14 = 1.0 / 4 * np.sqrt(105.0 / PI) * (x2 - z2) * y
b15 = 1.0 / 4 * np.sqrt(35.0 / (2 * PI)) * (x2 - 3 * z2) * x
H = tf.concat(
[H, b9, b10, b11, b12, b13, b14, b15], -1, name="harmonics_basis_order3"
)
batch_size, img_height, img_width, _ = ver_norm.get_shape().as_list()
        H.set_shape([batch_size, img_height, img_width, 9 if order == 2 else 16])
return H
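    # Sanity check (order 2): for a normal facing the camera, n = (0, 0, 1), the basis is
    # approximately [0.282, 0.489, 0, 0, 0.631, 0, 0, 0, 0]
    # (l0 = sqrt(1/4pi), l1z = sqrt(3/4pi), l2z2 = 2*sqrt(5/16pi); all other terms vanish).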
@staticmethod
def sh_shader(normals, alphas, background_images, sh_coefficients, diffuse_colors):
"""
render mesh into image space and return all intermediate results.
:param:
normals: [batch,300,300,3], vertex normals in image space
alphas: [batch,H,W,1], alpha channels
background_images: [batch,H,W,3], background images for rendering results
sh_coefficient: [batch,27], 2-order SH coefficient
diffuse_colors: [batch,H,W,3], vertex colors in image space
sh_coefficient: [batch_size, 27] spherical harmonics coefficients.
"""
batch_size, image_height, image_width = [s.value for s in normals.shape[:-1]]
sh_coef_count = sh_coefficients.get_shape().as_list()[-1]
if sh_coef_count == 27:
init_para_illum = tf.constant([1] + [0] * 8, tf.float32, name="init_illum")
init_para_illum = tf.reshape(
init_para_illum, [1, 9], name="init_illum_reshape"
)
init_para_illum = tf.concat(
[init_para_illum] * 3, axis=1, name="init_illum_concat"
)
sh_coefficients = sh_coefficients + init_para_illum # batch x 27
order = 2
else:
init_para_illum = tf.constant([1.0] * 2 + [0] * 14, tf.float32)
init_para_illum = tf.reshape(init_para_illum, [1, 16])
init_para_illum = tf.concat([init_para_illum] * 3, axis=1)
sh_coefficients = sh_coefficients + init_para_illum
sh_coefficients = tf.tile(
tf.reshape(sh_coefficients, [-1, 1, 1, 3, 16]),
[1, image_height, image_width, 1, 1],
)
order = 3
batch_size = diffuse_colors.get_shape().as_list()[0]
sh_kernels = tf.split(sh_coefficients, batch_size, axis=0)
harmonic_output = Shader._harmonics(normals, order)
harmonic_output_list = tf.split(harmonic_output, batch_size, axis=0)
results = []
for ho, shk in zip(harmonic_output_list, sh_kernels):
shk = tf.reshape(tf.transpose(a=tf.reshape(shk, [3, 9])), [1, 1, 9, 3])
res = tf.nn.conv2d(input=ho, filters=shk, strides=[1, 1, 1, 1], padding="SAME")
results.append(res)
shading = tf.concat(results, axis=0)
rgb_images = shading * diffuse_colors
alpha_images = tf.reshape(
alphas, [-1, image_height, image_width, 1], name="alpha_images"
)
valid_rgb_values = tf.concat(
3 * [alpha_images > 0.5], axis=3, name="valid_rgb_values"
)
rgb_images = tf.compat.v1.where(
valid_rgb_values, rgb_images, background_images, name="rgb_images"
)
return rgb_images, shading
@staticmethod
def remove_shading(images, image_normals, sh_coefficients):
init_para_illum = tf.constant([1] + [0] * 8, tf.float32)
init_para_illum = tf.reshape(init_para_illum, [1, 9])
init_para_illum = tf.concat([init_para_illum] * 3, axis=1)
sh_coefficients = sh_coefficients + init_para_illum # careful
_, image_height, image_width = [s.value for s in image_normals.shape[:-1]]
sh_coefficients = tf.tile(
tf.reshape(sh_coefficients, [-1, 1, 1, 3, 9]),
[1, image_height, image_width, 1, 1],
)
harmonic_output = tf.expand_dims(Shader._harmonics(image_normals), -1)
shading = tf.squeeze(tf.matmul(sh_coefficients, harmonic_output))
diffuse_maps = images / (shading + 1e-18)
return diffuse_maps
class Projector(object):
def __init__(self):
pass
@staticmethod
def get_ver_norm(ver_xyz, tri, scope_name="normal"):
"""
Compute vertex normals.
:param:
ver_xyz: [batch, N, 3], vertex geometry
tri: [M, 3], mesh triangles definition
:return:
ver_normals: [batch, N, 3], vertex normals
"""
with tf.compat.v1.variable_scope(scope_name):
v1_idx, v2_idx, v3_idx = tf.unstack(tri, 3, axis=-1)
v1 = tf.gather(ver_xyz, v1_idx, axis=1, name="v1_tri")
v2 = tf.gather(ver_xyz, v2_idx, axis=1, name="v2_tri")
v3 = tf.gather(ver_xyz, v3_idx, axis=1, name="v3_tri")
EPS = 1e-8
tri_normals = tf.linalg.cross(v2 - v1, v3 - v1)
tri_normals = tf.compat.v1.div(
tri_normals,
(tf.norm(tensor=tri_normals, axis=-1, keepdims=True) + EPS),
name="norm_tri",
)
tri_normals = tf.tile(tf.expand_dims(tri_normals, 2), [1, 1, 3, 1])
tri_normals = tf.reshape(tri_normals, [-1, 3])
tri_votes = tf.cast(tf.greater(tri_normals[:, 2:], float(0.1)), tf.float32)
tri_cnts = tf.ones_like(tri_votes)
B = v1.get_shape().as_list()[0] # batch size
batch_indices = tf.reshape(
tf.tile(tf.expand_dims(tf.range(B), axis=1), [1, len(tri) * 3]),
[-1],
name="batch_indices",
)
tri_inds = tf.stack(
[
batch_indices,
tf.concat([tf.reshape(tri, [len(tri) * 3])] * B, axis=0),
],
axis=1,
)
ver_shape = ver_xyz.get_shape().as_list()
ver_normals = tf.compat.v1.get_variable(
shape=ver_shape,
dtype=tf.float32,
initializer=tf.compat.v1.zeros_initializer(),
name="ver_norm",
trainable=False,
)
init_normals = tf.zeros(shape=ver_shape, dtype=tf.float32)
assign_op = tf.compat.v1.assign(ver_normals, init_normals)
with tf.control_dependencies([assign_op]):
ver_normals = tf.compat.v1.scatter_nd_add(ver_normals, tri_inds, tri_normals)
ver_normals = ver_normals / (
tf.norm(tensor=ver_normals, axis=2, keepdims=True) + EPS
)
votes = tf.reshape(
tf.concat([tri_votes, tri_votes, tri_votes], axis=-1), [-1, 1]
)
cnts = tf.reshape(
tf.concat([tri_cnts, tri_cnts, tri_cnts], axis=-1), [-1, 1]
)
ver_votes = tf.compat.v1.get_variable(
shape=ver_shape[:-1] + [1],
dtype=tf.float32,
initializer=tf.compat.v1.zeros_initializer(),
name="ver_vote",
trainable=False,
)
ver_cnts = tf.compat.v1.get_variable(
shape=ver_shape[:-1] + [1],
dtype=tf.float32,
initializer=tf.compat.v1.zeros_initializer(),
name="ver_cnt",
trainable=False,
)
init_votes = tf.zeros(shape=ver_shape[:-1] + [1], dtype=tf.float32)
assign_op2 = tf.compat.v1.assign(ver_votes, init_votes)
assign_op3 = tf.compat.v1.assign(ver_cnts, init_votes)
with tf.control_dependencies([assign_op2, assign_op3]):
ver_votes = tf.compat.v1.scatter_nd_add(ver_votes, tri_inds, tri_votes)
ver_cnts = tf.compat.v1.scatter_nd_add(ver_cnts, tri_inds, tri_cnts)
ver_votes = ver_votes / (ver_cnts + EPS)
ver_votes1 = tf.less(ver_votes, float(1.0))
ver_votes2 = tf.greater(ver_votes, float(0.0))
ver_votes = tf.cast(tf.logical_and(ver_votes1, ver_votes2), tf.float32)
return ver_normals, ver_votes
@staticmethod
def generate_base_information(basis3dmm, para_shape, para_tex):
vt_list = basis3dmm["vt_list"]
tri = basis3dmm["tri"]
tri_vt = basis3dmm["tri_vt"]
tri = tri.astype(np.int32)
tri_vt = tri_vt.astype(np.int32)
ver_xyz = get_geometry(basis3dmm, para_shape) # 1, 20481, 3
uv_rgb, uv_mask = get_region_uv_texture(basis3dmm["uv"], para_tex, uv_size=512)
uv_rgb = uv_rgb / 255.0
return tri, tri_vt, vt_list, uv_rgb, uv_mask, ver_xyz
@staticmethod
def generate_base_information_BFM(basis3dmm, para_shape, para_tex):
tri = basis3dmm["tri"]
tri = tri.astype(np.int32)
ver_xyz = get_geometry(basis3dmm, para_shape) # 1, 20481, 3
ver_rgb = get_texture(basis3dmm, para_tex) # 1, 20481, 3
ver_rgb = tf.clip_by_value(ver_rgb / 255.0, 0.0, 1.0)
batch_size, _, _ = ver_xyz.get_shape().as_list()
ver_mask = tf.concat(
[np.reshape(basis3dmm["mask_face"], [1, -1, 1])] * batch_size,
axis=0,
name="ver_face_mask",
) # 1, 20481, 1
ver_mask = tf.cast(ver_mask, tf.float32)
return tri, ver_rgb, ver_mask, ver_xyz
@staticmethod
def generate_proj_information(
ver_xyz,
trans_Mat,
K_img,
imageH,
imageW,
tri,
project_type="Pers",
name="ver_norm_and_ver_depth",
):
ver_w = tf.ones_like(ver_xyz[:, :, 0:1], name="ver_w")
ver_xyzw = tf.concat([ver_xyz, ver_w], axis=2) # 1, 20481, 4
vertex_img = tf.matmul(ver_xyzw, trans_Mat) # 1 x 20481 x 4
cam_xyz = vertex_img[:, :, 0:3] # 1 x 20481 x 3
K_img = tf.transpose(a=K_img, perm=[0, 2, 1]) # 1 x 3 x 3
proj_xyz_batch = tf.matmul(cam_xyz, K_img) # 1 x 20481 x 3
proj_xyz_depth_batch = tf.matmul(cam_xyz, K_img) # 1 x 20481 x 3
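        # Map projected points to normalized device coordinates in [-1, 1] for the rasterizer:
        # orthographic just recenters by the image size, perspective divides by depth first.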
if project_type == "Orth":
clip_x = tf.expand_dims(
(proj_xyz_batch[:, :, 0] + imageW / 2) / imageW * 2 - 1, axis=2
) # 1 x 20481 x 1
clip_y = tf.expand_dims(
(proj_xyz_batch[:, :, 1] + imageH / 2) / imageH * 2 - 1, axis=2
) # 1 x 20481 x 1
else:
clip_x = tf.expand_dims(
(proj_xyz_batch[:, :, 0] / proj_xyz_batch[:, :, 2]) / imageW * 2 - 1,
axis=2,
) # 1 x 20481 x 1
clip_y = tf.expand_dims(
(proj_xyz_batch[:, :, 1] / proj_xyz_batch[:, :, 2]) / imageH * 2 - 1,
axis=2,
) # 1 x 20481 x 1
clip_z = tf.expand_dims(
tf.nn.l2_normalize(proj_xyz_batch[:, :, 2], axis=1, epsilon=1e-10), axis=2
)
clip_xyz = tf.concat([clip_x, clip_y, clip_z], axis=2) # 1, 20481, 3
clip_w = tf.ones_like(clip_xyz[:, :, 0:1], name="clip_w")
clip_xyzw = tf.concat([clip_xyz, clip_w], axis=2) # 1, 20481, 4
if project_type == "Orth":
proj_x = tf.expand_dims(
proj_xyz_batch[:, :, 0] + imageW / 2, axis=2
) # 1 x 20481 x 1
proj_y = tf.expand_dims(proj_xyz_batch[:, :, 1] + imageH / 2, axis=2)
else:
proj_x = tf.expand_dims(
proj_xyz_batch[:, :, 0] / proj_xyz_batch[:, :, 2], axis=2
) # 1 x 20481 x 1
proj_y = tf.expand_dims(
proj_xyz_batch[:, :, 1] / proj_xyz_batch[:, :, 2], axis=2
) # 1 x 20481 x 1
proj_z = tf.expand_dims(proj_xyz_batch[:, :, 2], axis=2)
proj_xy = tf.concat([proj_x, proj_y], axis=2) # 1, 20481, 2
depth_infor = tf.expand_dims(
proj_xyz_depth_batch[:, :, 2], axis=2
) # 1 x 20481 x 1
with tf.compat.v1.variable_scope(name):
ver_norm, ver_contour_mask = Projector.get_ver_norm(cam_xyz, tri)
norm_depth_infro = tf.concat(
[ver_norm, depth_infor, ver_contour_mask], axis=2
            )  # 1, 20481, 5
norm_depth_image, alphas = rasterize_clip_space(
clip_xyzw, norm_depth_infro, tri, imageW, imageH, 0.0
)
norm_image = norm_depth_image[:, :, :, 0:3] # (300,300)
depth_image = tf.expand_dims(norm_depth_image[:, :, :, 3], 3) # (300,300)
ver_contour_mask_image = tf.expand_dims(
norm_depth_image[:, :, :, 4], 3
) # (300,300)
return (
norm_image,
ver_norm,
alphas,
clip_xyzw,
proj_xy,
proj_z,
depth_image,
ver_contour_mask,
ver_contour_mask_image,
)
@staticmethod
def project_uv_render(
ori_img,
norm_image,
clip_xyzw,
tri,
tri_vt,
vt_list,
imageH,
imageW,
uv_rgb,
uv_mask,
para_illum,
var_scope_name,
):
batch_size, _, _ = clip_xyzw.get_shape().as_list()
# get uv coordinates
V, U = tf.split(vt_list, 2, axis=1)
uv_size = uv_rgb.get_shape().as_list()[1]
U = (1.0 - U) * uv_size
V = V * uv_size
UV = tf.concat([U, V], axis=1)
batch_UV = tf.tile(UV, [batch_size, 1])
# get clip_xyzw for ver_uv (according to the correspondence between tri and tri_vt)
# gather and scatter
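        # (UV seams duplicate mesh vertices, so each UV vertex averages the clip-space position
        #  of every triangle corner mapped to it: scatter-add the sums and counts, then divide.)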
EPS = 1e-12
batch_tri_indices = tf.reshape(
tf.tile(tf.expand_dims(tf.range(batch_size), axis=1), [1, len(tri_vt) * 3]),
[-1],
name="batch_tri_indices",
)
tri_inds = tf.stack(
[
batch_tri_indices,
tf.concat([tf.reshape(tri, [len(tri) * 3])] * batch_size, axis=0),
],
axis=1,
)
tri_vt_inds = tf.stack(
[
batch_tri_indices,
tf.concat([tf.reshape(tri_vt, [len(tri_vt) * 3])] * batch_size, axis=0),
],
axis=1,
)
tri_clip_xyzw = tf.gather_nd(clip_xyzw, tri_inds, name="tri_clip_xyzw")
ver_uv_clip_xyzw_sum = tf.compat.v1.get_variable(
shape=[batch_size, len(vt_list), 4],
dtype=tf.float32,
initializer=tf.compat.v1.zeros_initializer(),
name=var_scope_name + "ver_uv_clip_xyzw_sum",
trainable=False,
)
ver_uv_clip_xyzw_cnt = tf.compat.v1.get_variable(
shape=[batch_size, len(vt_list), 4],
dtype=tf.float32,
initializer=tf.compat.v1.zeros_initializer(),
name=var_scope_name + "ver_uv_clip_xyzw_cnt",
trainable=False,
)
init_ver_uv = tf.zeros(shape=[batch_size, len(vt_list), 4], dtype=tf.float32)
assign_op1 = tf.compat.v1.assign(ver_uv_clip_xyzw_sum, init_ver_uv)
assign_op2 = tf.compat.v1.assign(ver_uv_clip_xyzw_cnt, init_ver_uv)
with tf.control_dependencies([assign_op1, assign_op2]):
ver_uv_clip_xyzw_sum = tf.compat.v1.scatter_nd_add(
ver_uv_clip_xyzw_sum, tri_vt_inds, tri_clip_xyzw
)
ver_uv_clip_xyzw_cnt = tf.compat.v1.scatter_nd_add(
ver_uv_clip_xyzw_cnt, tri_vt_inds, tf.ones_like(tri_clip_xyzw)
)
ver_uv_clip_xyzw = tf.compat.v1.div(ver_uv_clip_xyzw_sum, ver_uv_clip_xyzw_cnt + EPS)
uv_image, uv_alphas = rasterize_clip_space(
ver_uv_clip_xyzw, batch_UV, tri_vt, imageW, imageH, -1.0
)
uv_image = tf.clip_by_value(
tf.cast(uv_image, tf.int32), 0, 511
) # should be integer
batch_vt_indices = tf.reshape(
tf.tile(
tf.expand_dims(tf.range(batch_size), axis=1), [1, imageW * imageH]
),
[-1, 1],
name="batch_indices",
)
batch_vt_indices = tf.concat(
[batch_vt_indices, tf.reshape(uv_image, [-1, 2])], axis=1
)
# careful
diffuse_image = tf.reshape(
tf.gather_nd(uv_rgb, batch_vt_indices), [batch_size, imageH, imageW, 3]
)
uv_alphas = (
tf.reshape(
tf.gather_nd(uv_mask[:, :, :, 0], batch_vt_indices),
[batch_size, imageH, imageW, 1],
)
* uv_alphas
)
# Have shading
para_light = para_illum
background = ori_img
rgb_images, shading_image = Shader.sh_shader(
norm_image, uv_alphas, background, para_light, diffuse_image
)
ori_img_remove_shading = ori_img / shading_image
diffuse_image = tf.clip_by_value(diffuse_image, 0, 1)
rgb_images = tf.clip_by_value(rgb_images, 0, 1)
uv_attrs_image = tf.clip_by_value(uv_alphas, 0, 1)
ori_img_remove_shading = tf.clip_by_value(ori_img_remove_shading, 0, 1)
render_image = rgb_images
render_image = render_image * uv_attrs_image + ori_img * (1 - uv_attrs_image)
return render_image, uv_attrs_image, ori_img_remove_shading
@staticmethod
def project_vertex_render(
ori_img,
norm_image,
clip_xyzw,
tri,
imageH,
imageW,
ver_rgb,
ver_mask,
para_illum,
var_scope_name,
):
with tf.compat.v1.variable_scope(var_scope_name):
batch_size, _, _ = clip_xyzw.get_shape().as_list()
aug_ver_attrs = tf.concat([ver_rgb, ver_mask], axis=2)
attrs, _ = rasterize_clip_space(
clip_xyzw, aug_ver_attrs, tri, imageW, imageH, -1.0
)
# Have shading
diffuse_image = tf.reshape(
attrs[:, :, :, :3], [batch_size, imageH, imageW, 3]
)
alphas = tf.reshape(attrs[:, :, :, 3:], [batch_size, imageH, imageW, 1])
rgb_images, shading_image = Shader.sh_shader(
norm_image, alphas, ori_img, para_illum, diffuse_image
)
ori_img_remove_shading = ori_img / shading_image
diffuse_image = tf.clip_by_value(diffuse_image, 0, 1)
rgb_images = tf.clip_by_value(rgb_images, 0, 1)
attrs_image = tf.clip_by_value(alphas, 0, 1)
ori_img_remove_shading = tf.clip_by_value(ori_img_remove_shading, 0, 1)
render_image = rgb_images * attrs_image + ori_img * (1 - attrs_image)
return render_image, attrs_image, ori_img_remove_shading
# @staticmethod
# def render_fake_view(ori_img, norm_image, alphas, imageH, imageW, uv_rgb, para_illum, batch_vt_indices):
# batch_size,_,_,_ = ori_img.get_shape().as_list()
# diffuse_image = tf.reshape(tf.gather_nd(uv_rgb,batch_vt_indices),[batch_size,imageH,imageW,3])
# # Have shading
# para_light = para_illum
# background = ori_img
# rgb_images, shading_image = Shader.sh_shader(norm_image, alphas, background, para_light, diffuse_image)
# diffuse_image = tf.clip_by_value(diffuse_image,0,1)
# rgb_images = tf.clip_by_value(rgb_images,0,1)
# uv_attrs_image = tf.clip_by_value(alphas,0,1)
# shading_image = tf.clip_by_value(shading_image,0,1)
# render_image = rgb_images
# render_image = render_image * uv_attrs_image + ori_img * (1 - uv_attrs_image)
# return render_image
@staticmethod
def tf_rotationVector_2_trans(pose6, project_type="Pers", scale=1.0):
"""
:param:
pose6: [B, 6, 1], pose paramters
:return:
rr :[B, 3, 3] , tt:[B, 3, 1]
"""
batch_size = pose6.shape[0]
a, b, c, tx, ty, sth = tf.split(pose6, 6, axis=1) # B x 1 x 1
a = a + 0.00001
b = b + 0.00001
c = c + 0.00001
theta = tf.sqrt(tf.multiply(a, a) + tf.multiply(b, b) + tf.multiply(c, c))
zeros = tf.zeros_like(theta)
ones = tf.ones_like(theta)
def tf_Rodrigues(a, b, c, theta):
kx = a / theta
ky = b / theta
kz = c / theta
n = tf.concat([kx, ky, kz], axis=1) # B x 3 x 1
sin_theta = tf.sin(theta) # B x 1 x 1
cos_theta = tf.cos(theta) # B x 1 x 1
zeros = tf.zeros_like(sin_theta)
ones = tf.ones_like(sin_theta)
n_hat = tf.concat(
[zeros, -1 * kz, ky, kz, zeros, -1 * kx, -1 * ky, kx, zeros], axis=2
) # B x 1 x 9
n_hat = tf.reshape(n_hat, [-1, 3, 3]) # B x 3 x 3
I = tf.eye(3, 3, batch_shape=[batch_size]) # B x 3 x 3
# rr0 = cos_theta * I + (1 - cos_theta) * (n * tf.transpose(n)) + sin_theta * n_hat
cos_theta = tf.tile(cos_theta, [1, 3, 3]) # B x 3 x 3
sin_theta = tf.tile(sin_theta, [1, 3, 3]) # B x 3 x 3
rr0 = (
tf.multiply(cos_theta, I)
+ tf.multiply((1 - cos_theta), tf.matmul(n, tf.transpose(a=n, perm=[0, 2, 1])))
+ tf.multiply(sin_theta, n_hat)
)
return rr0
if project_type == "Pers":
rr = tf_Rodrigues(a, b, c, theta) # B x 3 x 3
tt = tf.concat([tx, ty, sth], axis=1) # B x 3 x 1
else:
print("Orth")
rr = tf_Rodrigues(a, b, c, theta) * tf.abs(sth) # B x 3 x 3
tt = tf.concat([tx, ty, ones * 50], axis=1) # B x 3 x 1
T = tf.concat([rr, tt], axis=2) * scale # B * 3 * 4
w = tf.concat([zeros, zeros, zeros, ones], axis=2) # B * 1 * 4
T = tf.concat([T, w], axis=1) # B,4,4
T = tf.transpose(a=T, perm=[0, 2, 1])
return T
@staticmethod
def gen_fix_multi_pose(batch_size, project_type):
"""
generate frontal, left side and right side pose for each sample
"""
if project_type == "Pers":
frontal = tf.constant([0.0, np.pi, 0.0, 0.0, 0.0, 50.0], tf.float32)
left = tf.constant(
[0.0, np.pi + np.pi / 8, 0.0, 0.0, 0.0, 50.0], tf.float32
)
right = tf.constant(
[0.0, np.pi - np.pi / 8, 0.0, 0.0, 0.0, 50.0], tf.float32
)
else:
frontal = tf.constant([0.0, np.pi, 0.0, 0.0, 0.0, 10.0], tf.float32)
left = tf.constant(
[0.0, np.pi + np.pi / 8, 0.0, 0.0, 0.0, 10.0], tf.float32
)
right = tf.constant(
[0.0, np.pi - np.pi / 8, 0.0, 0.0, 0.0, 10.0], tf.float32
)
frontal = tf.stack([frontal] * batch_size, axis=0)
frontal = tf.reshape(frontal, [batch_size, 6, 1])
left = tf.stack([left] * batch_size, axis=0)
left = tf.reshape(left, [batch_size, 6, 1])
right = tf.stack([right] * batch_size, axis=0)
right = tf.reshape(right, [batch_size, 6, 1])
return frontal, left, right
@staticmethod
def gen_fix_multi_light(batch_size):
"""
generate frontal, left and right side illumination parameters.
"""
frontal = tf.reshape(
tf.constant([0.8, -1.2, 0.0, 0.8, 0.0, 0.0, 0.0, 0.0, 0.0], tf.float32),
[1, 9],
)
frontal = tf.tile(frontal, [batch_size, 3])
left = tf.reshape(
tf.constant([0.6, -0.8, 0.8, 0.8, 0.0, 0.0, 0.0, 0.0, 0.0], tf.float32),
[1, 9],
)
left = tf.tile(left, [batch_size, 3])
right = tf.reshape(
tf.constant([0.6, -0.8, -0.8, 0.8, 0.0, 0.0, 0.0, 0.0, 0.0], tf.float32),
[1, 9],
)
right = tf.tile(right, [batch_size, 3])
return frontal, left, right
|
the-stack_0_19992 | import itertools
from discord import Embed
from discord.ext import commands
from discord.ext.commands import HelpCommand, DefaultHelpCommand
from discord.ext.commands.core import Group
class KingHelp(HelpCommand):
def __init__(self, **options):
super().__init__(**options)
self.paginator = None
self.spacer = "\u1160 "
    async def send_pages(self, header=False, footer=False, title=None, destination=None):
        if destination is None:
destination = self.get_destination()
embed = Embed(
color=0x2ECC71,
)
if title:
embed.title = title[0]
embed.description = title[1]
if header:
embed.set_author(
name=self.context.bot.description,
icon_url=self.context.bot.user.avatar_url
)
embed.description = f"• To change your prefix or your server's prefix use `{self.clean_prefix}help prefix`\n• Suggestions or support? Join my [Support Server](https://discord.gg/cWKZAMc)!\n• Like what I do? Consider donating to my patreon at https://www.patreon.com/kingbot.\n• Don't like my commands? `blacklist` them!\n• Use `{self.clean_prefix}help help` to see available commands in your channel."
for category, entries in self.paginator:
embed.add_field(
name=category,
value=entries,
inline=False
)
if footer:
embed.set_footer(
text=f'Prefix: {self.clean_prefix} | Server Prefix: {self.clean_prefix}'
)
await destination.send(embed=embed)
async def send_bot_help(self, destination=None):
ctx = self.context
bot = ctx.bot
def get_category(command):
cog = command.cog
return cog.qualified_name + ':' if cog is not None else 'Help:'
filtered = await self.filter_commands(
bot.commands,
sort=True,
key=get_category
)
to_iterate = itertools.groupby(filtered, key=get_category)
for cog_name, command_grouper in to_iterate:
cmds = sorted(command_grouper, key=lambda c: c.name)
category = f'❯ {cog_name.upper()}'
if len(cmds) == 1:
entries = f'{self.spacer}{cmds[0].name} → {cmds[0].short_doc}'
else:
entries = ''
while len(cmds) > 0:
entries += self.spacer
entries += ' **|** '.join([cmd.name for cmd in cmds[0:8]])
cmds = cmds[8:]
entries += '\n' if cmds else ''
self.paginator.append((category, entries))
await self.send_pages(header=True, footer=True, destination=destination)
async def send_command_help(self, command):
# add the usage text [REQUIRED]
usage = command.usage if command.usage else "`None`"
self.paginator.append(
("❯ Usage", usage)
)
# add examples text [REQUIRED]
examples = command.brief if command.brief else "`None`"
self.paginator.append(
("❯ Examples", examples)
)
# add aliases text [REQUIRED]
aliases = " **|** ".join(
f'`{alias}`' for alias in command.aliases) if command.aliases else "None"
self.paginator.append(
("❯ Aliases", aliases)
)
await self.send_pages(title=(command.name.title(), command.help), footer=True)
async def prepare_help_command(self, ctx, command=None):
self.paginator = []
await super().prepare_help_command(ctx, command)
async def send_error_message(self):
"""This is the override of the default error message method and we consider not passing a command as a parameter an error. Much like Bongo Bot's functionality, it will DM you the list of commands if you do not specify the command as an inference that you do not know the commands.
"""
destination = self.context.author
await self.send_bot_help(destination=destination)
await self.get_destination().send('`✅` I have DMed you my commands!')
async def on_help_command_error(self, ctx, error):
print('Error in {0.command.qualified_name}: {1}'.format(ctx, error))
async def command_callback(self, ctx, *, command=None):
await self.prepare_help_command(ctx, command)
bot = ctx.bot
if command is None:
return await self.send_error_message()
if command.lower() == 'help':
return await self.send_bot_help()
# Check if it's a cog
cog = bot.get_cog(command)
if cog is not None:
return await self.send_cog_help(cog)
# If it's not a cog then it's a command.
# Since we want to have detailed errors when someone
# passes an invalid subcommand, we need to walk through
# the command group chain ourselves.
keys = command.split(' ')
cmd = bot.all_commands.get(keys[0])
if cmd is None:
return await self.send_error_message()
for key in keys[1:]:
try:
found = cmd.all_commands.get(key)
except AttributeError:
return await self.send_error_message()
else:
if found is None:
return await self.send_error_message()
cmd = found
if isinstance(cmd, Group):
return await self.send_group_help(cmd)
else:
return await self.send_command_help(cmd)
class Help(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.bot.help_command = KingHelp(
command_attrs={
'aliases': ['halp'],
'help': 'Shows help about the bot, a command, or a category',
'hidden': True
}
)
async def cog_check(self, ctx):
return self.bot.user_is_admin(ctx.author)
def cog_unload(self):
self.bot.get_command('help').hidden = False
self.bot.help_command = DefaultHelpCommand()
@commands.command(
aliases=['halpall'],
hidden=True
)
async def helpall(self, ctx, *, text=None):
"""Print bot help including all hidden commands."""
self.bot.help_command = KingHelp(show_hidden=True)
if text:
await ctx.send_help(text)
else:
await ctx.send_help()
self.bot.help_command = KingHelp()
def setup(bot):
bot.add_cog(Help(bot))
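# Hedged usage sketch (the module path is an assumption): if this cog lives in
# cogs/help.py of a discord.py 1.x project, it is registered with
#   bot.load_extension("cogs.help")
# which calls the setup(bot) hook defined above.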
|
the-stack_0_19993 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Iterable
from cirq.circuits import TextDiagramDrawer
from cirq.devices import GridQubit
LineSequence = List[GridQubit]
class NotFoundError(Exception):
pass
class GridQubitLineTuple(tuple):
"""A contiguous non-overlapping sequence of adjacent grid qubits."""
@staticmethod
def best_of(lines: Iterable[LineSequence],
length: int) -> 'GridQubitLineTuple':
lines = list(lines)
longest = max(lines, key=len) if lines else []
if len(longest) < length:
raise NotFoundError('No line placement with desired length found.')
return GridQubitLineTuple(longest[:length])
def __str__(self) -> str:
diagram = TextDiagramDrawer()
dx = min(q.col for q in self)
dy = min(q.row for q in self)
for q in self:
diagram.write(q.col - dx, q.row - dy, str(q))
for q1, q2 in zip(self, self[1:]):
diagram.grid_line(q1.col - dx, q1.row - dy,
q2.col - dx, q2.row - dy,
True)
return diagram.render(horizontal_spacing=2,
vertical_spacing=1,
use_unicode_characters=True)
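if __name__ == "__main__":
    # Hedged usage sketch: the candidate line below is an illustrative assumption,
    # not real device data; printing renders the chosen qubits as a text diagram.
    candidates = [[GridQubit(0, 0), GridQubit(0, 1), GridQubit(0, 2)]]
    print(GridQubitLineTuple.best_of(candidates, length=2))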
|
the-stack_0_19994 | import utils
import bubbles
import networkx as nx
import os
import logging
def unzip(args):
if not args.graph[0].endswith(".gfa"):
logging.fatal("Invalid gfa file.")
return
# G=nx.MultiDiGraph()
G=nx.DiGraph()
utils.read_gfa(args.graph[0], None, None, G, remap=False)
if args.source==None and args.sink==None:
unzip_graph(G,args,minunzip=args.minunzip)
else:
b=bubbles.Bubble(G,args.source,args.sink)
unzip_bubble(G,b,minunzip=args.minunzip,idoffset=max([n for n in G.nodes() if type(n)==int])+1)
if args.output==None:
of=os.path.splitext(args.graph[0])[0]+".unzipped.gfa"
else:
of=args.output+".gfa"
utils.write_gfa(G,None,outputfile=of)
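# Hedged usage sketch (file name and thresholds are assumptions): args normally come
# from the project's CLI parser, e.g.
#   import argparse
#   unzip(argparse.Namespace(graph=['pangenome.gfa'], source=None, sink=None,
#                            minunzip=0, mindiff=0, maxdiff=None, output=None))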
#determine uncertainty about bubble positions
def unzip_graph(G,args,minunzip=0):
nid=max([n for n in G.nodes() if type(n)==int])
nid+=1
for b in bubbles.bubbles(G):
if b.maxsize-b.minsize<args.mindiff:
logging.debug("Skipping bubble %s, diff between smallest and largest allele (%dbp) is smaller than mindiff=%d."%(str(b.nodes),b.maxsize-b.minsize,args.mindiff))
continue
if args.maxdiff and b.maxsize-b.minsize>args.maxdiff:
logging.debug("Skipping bubble %s, diff between smallest and largest allele (%dbp) is larger than maxdiff=%d."%(str(b.nodes),b.maxsize-b.minsize,args.maxdiff))
continue
if isinstance(b,bubbles.Bubble):
nid=unzip_bubble(G,b,minunzip=minunzip,idoffset=nid)
def unzip_bubble(G,b,minunzip=0,idoffset=0):
wiggle=b.getwiggle(minwiggle=minunzip)
if type(b.sink)==str:
wiggle=(wiggle[0],0)
if type(b.source)==str:
wiggle=(0,wiggle[1])
if wiggle!=(0,0):
logging.debug("Unzipping bubble between %s and %s"%(b.source,b.sink))
srcl=len(G.node[b.source]['seq'])
snkl=len(G.node[b.sink]['seq'])
maxlw=int(round((srcl-2 if srcl>2 else 0)/float(2)))
maxrw=int(round((snkl-2 if snkl>2 else 0)/float(2)))
if wiggle[0]>maxlw:
wiggle=(maxlw,wiggle[1])
if wiggle[1]>maxrw:
wiggle=(wiggle[0],maxrw)
if wiggle[0]>0:
ls=G.node[b.source]['seq'][-wiggle[0]:]
assert(G.node[b.source]['seq'][:-wiggle[0]]!="")
G.node[b.source]['seq']=G.node[b.source]['seq'][:-wiggle[0]]
else:
ls=""
if wiggle[1]>0:
rs=G.node[b.sink]['seq'][:wiggle[1]]
assert(G.node[b.sink]['seq'][wiggle[1]:]!="")
G.node[b.sink]['seq']=G.node[b.sink]['seq'][wiggle[1]:]
G.node[b.sink]['offsets']={k:G.node[b.sink]['offsets'][k]+len(rs) for k in G.node[b.sink]['offsets']}
else:
rs=""
successors=list(G.successors(b.source))
predecessors=list(G.predecessors(b.sink))
if ls!="":
for n in successors:
if len(list(G.predecessors(n)))>1:
# G.add_node(idoffset,seq=ls if n!=b.sink else ls+rs,offsets={p:(G.node[b.source]['offsets'][p]+srcl)-len(ls) for p in G[b.source][n].values()[0]['paths']})
G.add_node(idoffset,seq=ls if n!=b.sink else ls+rs,offsets={p:(G.node[b.source]['offsets'][p]+srcl)-len(ls) for p in G[b.source][n]['paths']})
# props=G[b.source][n].values()[0].copy() #TODO: consider possibilty of structural variant paths!
props=G[b.source][n]
G.remove_edge(b.source,n)
G.add_edge(b.source,idoffset,**props)
G.add_edge(idoffset,n,**props)
idoffset+=1
else:
G.node[n]['seq']=ls+G.node[n]['seq']
G.node[n]['offsets']={k:G.node[n]['offsets'][k]-len(ls) for k in G.node[n]['offsets']}
if rs!="":
for n in predecessors:
if n==b.source and ls!="":
continue #was already handled by looping over successors
if len(list(G.successors(n)))>1:
# G.add_node(idoffset,seq=rs if n!=b.source else ls+rs,offsets={p:(G.node[b.sink]['offsets'][p])-len(rs) for p in G[n][b.sink].values()[0]['paths']})
G.add_node(idoffset,seq=rs if n!=b.source else ls+rs,offsets={p:(G.node[b.sink]['offsets'][p])-len(rs) for p in G[n][b.sink]['paths']})
# props=G[n][b.sink].values()[0].copy() #TODO: consider possibilty of structural variant paths!
props=G[n][b.sink]
G.remove_edge(n,b.sink)
G.add_edge(n,idoffset,**props)
G.add_edge(idoffset,b.sink,**props)
idoffset+=1
else:
G.node[n]['seq']=G.node[n]['seq']+rs
return idoffset |
the-stack_0_19995 | """
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from cafe.drivers.unittest.decorators import tags
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.images.common.types import ImageContainerFormat, ImageDiskFormat
from cloudcafe.images.config import ImagesConfig
from cloudroast.images.fixtures import ImagesFixture
images_config = ImagesConfig()
allow_post_images = images_config.allow_post_images
class TestUpdateImage(ImagesFixture):
@classmethod
def setUpClass(cls):
super(TestUpdateImage, cls).setUpClass()
cls.image = cls.images_behavior.create_new_image()
@unittest.skipUnless(allow_post_images, 'Endpoint has incorrect access')
@tags(type='positive', regression='true', skipable='true')
def test_update_image_replace_core_properties(self):
"""
@summary: Replace values of core properties
1) Given a previously created image, update image replacing all allowed
core properties
2) Verify that the response code is 200
3) Verify that the updated properties are correct
4) Revert protected property
5) Verify that the response code is 200
"""
updated_container_format = ImageContainerFormat.AKI
updated_disk_format = ImageDiskFormat.ISO
updated_name = rand_name('updated_image')
updated_tags = rand_name('updated_tag')
errors = []
image = self.image
response = self.images_client.update_image(
image.id_, replace={'container_format': updated_container_format,
'disk_format': updated_disk_format,
'name': updated_name,
'protected': True,
'tags': [updated_tags]})
self.assertEqual(response.status_code, 200)
updated_image = response.entity
attributes = ['checksum', 'created_at', 'file_', 'id_', 'min_disk',
'min_ram', 'schema', 'self_', 'size', 'status',
'visibility']
for attribute in attributes:
if getattr(updated_image, attribute) != getattr(image, attribute):
errors.append(self.error_msg.format(
attribute, getattr(image, attribute),
getattr(updated_image, attribute)))
attributes = ['container_format', 'disk_format', 'name', 'protected',
'tags']
for attribute in attributes:
if getattr(updated_image, attribute) == getattr(image, attribute):
errors.append(self.error_msg.format(
attribute, getattr(image, attribute),
getattr(updated_image, attribute)))
if updated_image.updated_at < image.updated_at:
errors.append(self.error_msg.format(
'updated_at', image.updated_at, updated_image.updated_at))
# Need to revert protected property so that the image can be torn down
response = self.images_client.update_image(
image.id_, replace={'protected': False})
self.assertEqual(response.status_code, 200)
self.assertListEqual(errors, [])
|
the-stack_0_19996 | import sys
import io
import yaml
import json
from pymongo import MongoClient
def outputPrefixes():
prefixes = [('rdfs', 'http://www.w3.org/2000/01/rdf-schema#'), ('wdt', 'http://www.wikidata.org/prop/direct/'), ('wd','http://www.wikidata.org/entity/')]
prefixesForOutput = u''
for prefix in prefixes:
prefixesForOutput += u'@prefix {0}: <{1}> .\n'.format(prefix[0], prefix[1])
prefixesForOutput += u'\n'
return prefixesForOutput
def main(inputFileName, outputFileName):
seiyuus = yaml.load(io.open(inputFileName, 'r', encoding="utf-8"))
outputFile = io.open(outputFileName, 'w', encoding="utf-8")
outputFile.write(outputPrefixes())
client = MongoClient()
db = client.seiyuuData
seiyuuCollection = db.seiyuu
for seiyuu in seiyuus:
seiyuuUri = seiyuu['seiyu_uri']['value']
seiyuuData = seiyuuCollection.find_one({"id":seiyuuUri})
# seiyu_uri wdt:instance_of wd:human
outputFile.write(u'<{0}> {1} {2} .\n'.format(seiyuuUri, 'wdt:P31', 'wd:Q5'))
# seiyu_uri wdt:occupation wd:seiyu
outputFile.write(u'<{0}> {1} {2} .\n'.format(seiyuuUri, 'wdt:P106', 'wd:Q622807'))
# seiyu_uri rdfs:label name
outputFile.write(u'<{0}> {1} "{2}"@{3} .\n'.format(seiyuuUri, "rdfs:label", seiyuu['seiyu_label']['value'], seiyuu['seiyu_label']['xml:lang']))
if seiyuuData != None:
# seiyu_uri wdt:mal_id mal_id
outputFile.write(u'<{0}> {1} <{2}> .\n'.format(seiyuuUri, "wdt:P4084", 'https://api.jikan.moe/person/' + str(seiyuuData['mal_id'])))
if 'voice_acting_role' in seiyuuData:
for work in seiyuuData['voice_acting_role']:
animeURI = 'https://api.jikan.moe/anime/' + str(work['anime']['mal_id'])
# anime_uri wdt:voice_actor seiyu_uri
outputFile.write(u'<{0}> {1} <{2}> .\n'.format(animeURI, "wdt:P725", seiyuuUri))
outputFile.write(u'\n')
if __name__ == '__main__':
inputFileName = 'output.json'
outputFileName = 'output.ttl'
if len(sys.argv) >= 3:
inputFileName = sys.argv[1]
outputFileName = sys.argv[2] + '.ttl'
main(inputFileName, outputFileName) |
the-stack_0_19998 | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_smapi_model.v1.skill.experiment.update_experiment_input import UpdateExperimentInput as UpdateExperimentInput_2a15a389
class UpdateExperimentRequest(object):
"""
Defines the request body for updating an experiment.
:param experiment:
:type experiment: (optional) ask_smapi_model.v1.skill.experiment.update_experiment_input.UpdateExperimentInput
"""
deserialized_types = {
'experiment': 'ask_smapi_model.v1.skill.experiment.update_experiment_input.UpdateExperimentInput'
} # type: Dict
attribute_map = {
'experiment': 'experiment'
} # type: Dict
supports_multiple_types = False
def __init__(self, experiment=None):
# type: (Optional[UpdateExperimentInput_2a15a389]) -> None
"""Defines the request body for updating an experiment.
:param experiment:
:type experiment: (optional) ask_smapi_model.v1.skill.experiment.update_experiment_input.UpdateExperimentInput
"""
self.__discriminator_value = None # type: str
self.experiment = experiment
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateExperimentRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
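if __name__ == "__main__":
    # Hedged usage sketch: an empty update request serializes its single field to None.
    print(UpdateExperimentRequest().to_dict())  # {'experiment': None}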
|
the-stack_0_19999 | import pandas as pd
import yaml
from stl_rules.rss_lat_safety import RSSLateralSafetyRule
from stl_rules.utils import monitor_trace
# load data
with open("../data/rss_params.yaml", 'r') as stream:
rss_params = yaml.safe_load(stream)
trace = pd.read_csv("../data/toy_examples/example2.csv")
# create rss rule
rss2 = RSSLateralSafetyRule(rss_params=rss_params)
stl_spec = rss2.spec
words = stl_spec.split(" ")
line = ""
for i, word in enumerate(words):
if i % 5 == 0:
line += "\n"
line += f" {word}"
print(line)
# process data to produce monitorable signals
signals = rss2.generate_signals(trace)
# compute robustness
robustness = [r for t, r in monitor_trace(rss2.spec, rss2.variables, rss2.types, signals)]
# plot
import matplotlib.pyplot as plt
plt.title("Monitoring RSS Lateral Safety")
plt.xlabel("time steps")
plt.ylabel("value")
plt.plot(signals['a_lat_l'], label="a_lat_l")
plt.plot(signals['a_lat_r'], label="a_lat_r")
plt.plot(signals['d_lat_lr'], label="d_lat_lr")
plt.plot(signals['d_lat_min'], label="d_lat_min")
plt.plot(robustness, label="robustness")
plt.legend()
plt.show()
|
the-stack_0_20001 | """Training of a Fully Connected Network on All Modes
('sequential', 'vectorized' and 'gpu').
Train a simple Feed-forward neural network to recognize the mnist data set.
Authors:
Lucas David -- <[email protected]>
Paulo Finardi -- <[email protected]>
License: MIT License 2016 (c)
"""
import numpy as np
import matplotlib.pyplot as plt
from convolutional import networks, Device
from convolutional.utils import Timer, dataset_loader
NN_PARAMS = {
'epochs': 10,
'n_batch': 10,
'eta': .1,
'regularization': 0,
'verbose': True,
}
TEST_SIZE = 100
OPERATION_MODES = ('sequential', 'gpu', 'vectorized')
def main():
times, scores = [], []
t = Timer()
try:
print('Loading MNIST dataset...')
train, _, test = dataset_loader.load_data()
test = test[0][:TEST_SIZE], test[1][:TEST_SIZE]
print('Done (%s).' % t.get_time_hhmmss())
for mode in OPERATION_MODES:
print('Training our model with %s operations...' % mode)
t.restart()
with Device(mode):
nn = (networks
.FullyConnected([784, 392, 10], **NN_PARAMS)
.fit(*train))
times.append(t.elapsed())
print('Done (%s).' % t.get_time_hhmmss())
scores.append(nn.score(*test))
print('Score on test data-set: %.2f' % scores[-1])
save_figures(OPERATION_MODES, scores, times)
except KeyboardInterrupt:
print('Interrupted by user (%s)' % t.get_time_hhmmss())
def save_figures(modes, scores, times):
width = .5
fig = plt.figure()
ax = plt.subplot(111)
fig.subplots_adjust(top=1)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.yticks(fontsize=10)
plt.xticks(np.arange(len(modes)) + width / 2, modes, fontsize=10)
plt.title('Time Elapsed on Training and Testing', y=1.05)
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off",
labelleft="on")
ax.set_xlabel('operators')
ax.set_ylabel('time elapsed (sec)')
plt.bar(range(len(modes)), times, width, color=(.4, 0, .2))
fig.savefig('reports/fc_training_times.png', bbox_inches='tight')
if __name__ == '__main__':
print(__doc__)
main()
|
the-stack_0_20002 |
from collections import namedtuple
_attribute_list = [
'scheduler_config',
'job_directory',
'project_name',
'entrypoint',
'params',
'ram',
'num_gpus',
'stream_job_logs',
'command',
]
_deployment_arguments = namedtuple('_deployment_arguments', _attribute_list)
_deployment_arguments.__new__.__defaults__ = (None,) * len(_attribute_list)
def submit(**kwargs):
from foundations_core_cli.job_submission.submit_job import submit
from foundations_contrib.global_state import push_state, pop_state
arguments = _deployment_arguments(**kwargs)
try:
push_state()
return submit(arguments)
finally:
pop_state()
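# Hedged usage sketch (all values are illustrative): any subset of the namedtuple
# fields above may be passed as keyword arguments, e.g.
#   submit(job_directory=".", entrypoint="main.py", project_name="demo",
#          num_gpus=0, stream_job_logs=True)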
|
the-stack_0_20004 | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import dask_cudf
import numpy as np
from dask import delayed
from dask_cuml.core import new_ipc_thread, parse_host_port
from dask_cuml.core import device_of_devicendarray, build_host_dict
from dask.distributed import wait, default_client
from math import ceil
from numba import cuda
from toolz import first
from tornado import gen
class LinearRegression(object):
"""
Model-Parallel Multi-GPU Linear Regression Model. Single Process Multi GPU
supported currently
"""
def __init__(self, fit_intercept=True, normalize=False):
"""
Initializes the linear regression class.
Parameters
----------
        fit_intercept: boolean. For more information, see `scikit-learn's OLS
            <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html>`_.
        normalize: boolean. For more information, see `scikit-learn's OLS
            <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html>`_.
"""
self.coef_ = None
self.intercept_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self._model_fit = False
self._consec_call = 0
def _build_params_map(self):
return {"fit_intercept": self.fit_intercept,
"normalize": self.normalize}
def fit(self, X, y):
"""
        Fits a multi-GPU linear regression model such that the resulting
        coefficients are also distributed across the GPUs.
        :param X: dask-cudf dataframe of features, one partition per GPU worker
        :param y: dask-cudf series/dataframe of labels, partitioned to match X
"""
client = default_client()
self.dtype = X[X.columns[0]].compute().dtype
coef, intercept, locations = client.sync(self._do_fit, X, y,
self.dtype)
self.intercept = intercept
self._locations = locations
self._model_fit = True
self._ncols = X.shape[1]
self.coef_ = dask_cudf.from_delayed(coef)
@gen.coroutine
def _do_fit(self, X_df, y_df, dtype):
client = default_client()
# Finding location of parts of y_df to distribute columns of X_df
loc_dict = {}
yield wait(y_df)
tt = yield client.who_has(y_df)
location = tuple(tt.values())
for i in range(X_df.npartitions):
part_number = eval(list(tt.keys())[i])[1]
loc_dict[part_number] = parse_host_port(str(location[i])[:-3])
# Lets divide the columns evenly, matching the order of the labels
part_size = ceil(X_df.shape[1] / X_df.npartitions)
# We scatter delayed operations to gather columns on the workers
scattered = []
coefs = []
for i in range(X_df.npartitions):
up_limit = min((i+1)*part_size, X_df.shape[1])
cols = X_df.columns.values[i*part_size:up_limit]
loc_cudf = X_df[cols]
yield wait(loc_cudf)
scattered.append(client.submit(preprocess_on_worker,
loc_cudf,
workers=[loc_dict[i]]))
yield wait(scattered)
coefs.append(client.submit(dev_array_on_worker,
up_limit - i*part_size,
dtype=dtype,
unique=np.random.randint(0, 1e6),
workers=[loc_dict[i]]))
yield wait(coefs)
del(loc_cudf)
# Break apart Dask.array/dataframe into chunks/parts
# data_parts = map(delayed, scattered)
data_parts = scattered
label_parts = y_df.to_delayed()
coef_parts = coefs
# Arrange parts into pairs. This enforces co-locality
parts = list(map(delayed, zip(data_parts, label_parts, coef_parts)))
parts = client.compute(parts) # Start computation in the background
yield wait(parts)
for part in parts:
if part.status == 'error':
yield part # trigger error locally
# A dict in the form of { part_key: part }
key_to_part_dict = dict([(str(part.key), part) for part in parts])
who_has = yield client.who_has(parts)
worker_parts = {}
for key, workers in who_has.items():
worker = parse_host_port(first(workers))
if worker not in worker_parts:
worker_parts[worker] = []
worker_parts[worker].append(key_to_part_dict[key])
"""
Create IP Handles on each worker hosting input data
"""
# Format of input_devarrays = ([(X, y)..], dev)
input_devarrays = [(worker, client.submit(fit_to_device_arrays,
part, workers=[worker]))
for worker, part in worker_parts.items()]
yield wait(input_devarrays)
"""
Gather IPC handles for each worker and call _fit() on each worker
containing data.
"""
# Last worker is the only one that can have less items.
exec_node = loc_dict[X_df.npartitions-1]
# Need to fetch parts on worker
on_worker = list(filter(lambda x: x[0] == exec_node, input_devarrays))
not_on_worker = list(filter(lambda x: x[0] != exec_node,
input_devarrays))
ipc_handles = [client.submit(get_input_ipc_handles, future,
workers=[a_worker])
for a_worker, future in not_on_worker]
raw_arrays = [future for a_worker, future in on_worker]
# IPC Handles are loaded in separate threads on worker so they can be
# used to make calls through cython
# Calls _fit_on_worker defined in the bottom
intercept = client.submit(_fit_on_worker, (ipc_handles, raw_arrays),
self._build_params_map(),
workers=[exec_node])
yield wait(intercept)
coef_series = [client.submit(coef_on_worker, coefs[i], i,
X_df.shape[1],
X_df.npartitions, loc_dict[i],
workers=[loc_dict[i]])
for i in range(len(loc_dict))]
# coef_on_worker(self, coef, locations, ncols, nparts, worker):
raise gen.Return((coef_series, intercept, loc_dict))
def predict(self, X):
"""
Predict values for the multi-gpu linear regression model by making
calls to the predict function with dask-cudf objects.
:param df:
a dask-cudf with data distributed one worker per GPU
:return:
a dask-cudf containing outputs of the linear regression
"""
if self._model_fit:
client = default_client()
ret = client.sync(self._do_predict, X, self.coef_,
self._locations, self.intercept, self.dtype)
ret = dask_cudf.from_delayed(ret)
return ret
else:
raise ValueError('Model coefficients have not been fit. You need '
'to run the fit() method first. ')
@gen.coroutine
def _do_predict(self, X_df, coefs, loc_dict, intercept, dtype):
client = default_client()
part_size = ceil(X_df.shape[1] / X_df.npartitions)
# We scatter delayed operations to gather columns on the workers
scattered = []
for i in range(X_df.npartitions):
up_limit = min((i+1)*part_size, X_df.shape[1])
cols = X_df.columns.values[i*part_size:up_limit]
loc_cudf = X_df[cols]
yield wait(loc_cudf)
scattered.append(client.submit(preprocess_predict,
loc_cudf,
workers=[loc_dict[i]]))
yield wait(scattered)
del(loc_cudf)
# Break apart Dask.array/dataframe into chunks/parts
data_parts = scattered
coef_parts = coefs.to_delayed()
# Arrange parts into pairs. This enforces co-locality
parts = list(map(delayed, zip(data_parts, coef_parts)))
parts = client.compute(parts) # Start computation in the background
yield wait(parts)
for part in parts:
if part.status == 'error':
yield part # trigger error locally
# A dict in the form of { part_key: part }
key_to_part_dict = dict([(str(part.key), part) for part in parts])
who_has = yield client.who_has(parts)
worker_parts = {}
for key, workers in who_has.items():
worker = parse_host_port(first(workers))
if worker not in worker_parts:
worker_parts[worker] = []
worker_parts[worker].append(key_to_part_dict[key])
"""
Create IP Handles on each worker hosting input data
"""
# Format of input_devarrays = ([(X, y)..], dev)
input_devarrays = [(worker, client.submit(predict_to_device_arrays,
part, worker, loc_dict,
X_df.npartitions,
dtype=dtype,
workers=[worker]))
for worker, part in worker_parts.items()]
yield wait(input_devarrays)
"""
        Gather IPC handles for each worker and call _predict() on each worker
containing data.
"""
exec_node = loc_dict[X_df.npartitions-1]
# Need to fetch parts on worker
on_worker = list(filter(lambda x: x[0] == exec_node, input_devarrays))
not_on_worker = list(filter(lambda x: x[0] != exec_node,
input_devarrays))
ipc_handles = [client.submit(get_input_ipc_handles, future,
unique=np.random.randint(0, 1e6),
workers=[a_worker])
for a_worker, future in not_on_worker]
raw_arrays = [future for a_worker, future in on_worker]
# IPC Handles are loaded in separate threads on worker so they can be
# used to make calls through cython
# Calls _predict_on_worker defined in the bottom
ret = client.submit(_predict_on_worker, (ipc_handles, raw_arrays),
self.intercept, self._build_params_map(),
workers=[exec_node])
yield wait(ret)
dfs = [client.submit(series_on_worker, f, worker, loc_dict,
X_df.npartitions, X_df, workers=[worker])
for worker, f in input_devarrays]
return dfs
def _build_host_dict(self, gpu_futures, client):
who_has = client.who_has(gpu_futures)
key_to_host_dict = {}
for key in who_has:
key_to_host_dict[key] = parse_host_port(who_has[key][0])
hosts_to_key_dict = {}
for key, host in key_to_host_dict.items():
if host not in hosts_to_key_dict:
hosts_to_key_dict[host] = set([key])
else:
hosts_to_key_dict[host].add(key)
workers = [key[0] for key in list(who_has.values())]
return build_host_dict(workers)
def _fit_on_worker(data, params):
ipc_dev_list, devarrs_dev_list = data
# Open 1 ipc thread per device
open_ipcs = []
for p, dev in ipc_dev_list:
arrs = []
for x, y, coef in p:
arrs.append(x)
arrs.append(y)
arrs.append(coef)
ipct = new_ipc_thread(arrs, dev)
open_ipcs.append(ipct)
alloc_info = []
for t in open_ipcs:
outsiders = t.info()
triplet = []
for i in range(0, len(outsiders), 3):
triplet.append(outsiders[i])
triplet.append(outsiders[i+1])
triplet.append(outsiders[i+2])
alloc_info.append(triplet)
for p, dev in devarrs_dev_list:
locals = []
for X, coef, pred in p:
locals.append(build_alloc_info(X)[0])
locals.append(build_alloc_info(coef)[0])
locals.append(build_alloc_info(pred)[0])
alloc_info.append(locals)
try:
from cuml.linear_model.linear_regression_mg import LinearRegressionMG as cuOLS
ols = cuOLS()
intercept = ols._fit_mg(alloc_info, params)
except Exception as e:
print("FAILURE in FIT: " + str(e))
[t.close() for t in open_ipcs]
[t.join() for t in open_ipcs]
return intercept
def _predict_on_worker(data, intercept, params):
ipc_dev_list, devarrs_dev_list = data
open_ipcs = []
for p, dev in ipc_dev_list:
arrs = []
for mat, coef, pred in p:
arrs.append(mat)
arrs.append(coef)
arrs.append(pred)
ipct = new_ipc_thread(arrs, dev)
open_ipcs.append(ipct)
alloc_info = []
for t in open_ipcs:
outsiders = t.info()
triplet = []
for i in range(0, len(outsiders), 3):
triplet.append(outsiders[i])
triplet.append(outsiders[i+1])
triplet.append(outsiders[i+2])
alloc_info.append(triplet)
for p, dev in devarrs_dev_list:
locals = []
for X, y, coef in p:
locals.append(
build_alloc_info(X, unique=np.random.randint(0, 1e6))[0])
locals.append(
build_alloc_info(y, unique=np.random.randint(0, 1e6))[0])
locals.append(
build_alloc_info(coef, unique=np.random.randint(0, 1e6))[0])
alloc_info.append(locals)
try:
from cuml.linear_model.linear_regression_mg import LinearRegressionMG as cuOLS
ols = cuOLS()
ols._predict_mg(alloc_info, intercept, params)
except Exception as e:
print("Failure in predict(): " + str(e))
[t.close() for t in open_ipcs]
[t.join() for t in open_ipcs]
def group(lst, n):
for i in range(0, len(lst), n):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
def build_alloc_info(p, unique=0): return [p.__cuda_array_interface__]
def get_input_ipc_handles(arr, unique=0):
arrs, dev = arr
ret = [(X.get_ipc_handle(),
y.get_ipc_handle(),
coef.get_ipc_handle()) for X, y, coef in arrs]
return ret, dev
def as_gpu_matrix(arr):
blap = arr.compute()
mat = blap.as_gpu_matrix(order="F")
dev = device_of_devicendarray(mat)
# Return canonical device id as string
return mat, dev
def to_gpu_array(arr):
mat = arr.to_gpu_array()
dev = device_of_devicendarray(mat)
# Return canonical device id as string
return mat, dev
def fit_to_device_arrays(arr):
"""
:param arr:
A tuple in the form of (X, y, coef)
:return:
"""
mats = [(X.compute().as_gpu_matrix(order='F'),
y.to_gpu_array(),
coef) for X, y, coef in arr]
dev = device_of_devicendarray(mats[0][0])
# Return canonical device id as string
return mats, dev
def predict_to_device_arrays(arr, worker, loc_dict, nparts, dtype):
"""
:param arr:
A tuple in the form of (X, y, coef)
:return:
"""
part_number = list(loc_dict.keys())[list(loc_dict.values()).index(worker)]
mats = []
for X, coef in arr:
nrows = len(X)
part_size = ceil(nrows / nparts)
up_limit = min((part_number+1)*part_size, nrows)
mat = X.compute().as_gpu_matrix(order='F')
pred = cuda.to_device(np.zeros(up_limit-(part_number*part_size),
dtype=dtype))
mats.append([mat, coef.to_gpu_array(), pred])
dev = device_of_devicendarray(mats[0][0])
# Return canonical device id as string
return mats, dev
def extract_part(data, part):
return data[part]
def preprocess_on_worker(arr):
return arr
def dev_array_on_worker(rows, dtype=np.float64, unique=0):
return cuda.to_device(np.zeros(rows, dtype=dtype))
# Need to have a differently named function for predict to avoid
# dask key collision in case of same rows and columns between
# different arrays
def pred_array_on_worker(rows, cols, dtype=np.float64, unique=0):
return cuda.to_device(np.zeros((rows, cols), dtype=dtype))
def preprocess_predict(arr):
return arr
def series_on_worker(ary, worker, loc_dict, nparts, X):
nrows = len(X)
part_number = list(loc_dict.keys())[list(loc_dict.values()).index(worker)]
part_size = ceil(nrows / nparts)
up_limit = min((part_number+1)*part_size, nrows)
if len(ary) == nparts - 1:
idx = (part_number*len(ary[0][0][2]),
(part_number+1)*len(ary[0][0][2]))
else:
idx = (up_limit-len(ary[0][0][2]), up_limit)
ret = cudf.Series(ary[0][0][2], index=cudf.dataframe.RangeIndex(idx[0],
idx[1]))
return ret
def get_meta(df):
ret = df.iloc[:0]
return ret
def coef_on_worker(coef, part_number, ncols, nparts, worker):
part_size = ceil(ncols / nparts)
up_limit = min((part_number+1)*part_size, ncols)
idx = (part_number*part_size, up_limit)
ret = cudf.Series(coef, index=cudf.dataframe.RangeIndex(idx[0],
idx[1]))
return ret
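# Hedged usage sketch (data sources and cluster setup are assumptions): with a
# dask.distributed Client whose workers each own one GPU, and dask_cudf objects
# X (features) and y (labels) already distributed across those workers,
#   lr = LinearRegression()
#   lr.fit(X, y)
#   predictions = lr.predict(X)
# yields distributed predictions, with lr.coef_ spread across the GPUs.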
|
the-stack_0_20006 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
##################################################################################
# File: c:\Projects\KENYA ONE PROJECT\CORE\engines\weights.py #
# Project: c:\Projects\KENYA ONE PROJECT\CORE\engines #
# Created Date: Thursday, January 9th 2020, 8:56:55 pm #
# Author: Geoffrey Nyaga Kinyua ( <[email protected]> ) #
# ----- #
# Last Modified: Thursday January 9th 2020 8:56:55 pm #
# Modified By: Geoffrey Nyaga Kinyua ( <[email protected]> ) #
# ----- #
# MIT License #
# #
# Copyright (c) 2020 KENYA ONE PROJECT #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of#
# this software and associated documentation files (the "Software"), to deal in #
# the Software without restriction, including without limitation the rights to #
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #
# of the Software, and to permit persons to whom the Software is furnished to do #
# so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# ----- #
# Copyright (c) 2020 KENYA ONE PROJECT #
##################################################################################
import sys
sys.path.append("../")
from CORE.API.db_API import write_to_db, read_from_db
import numpy as np # type: ignore
import matplotlib.pylab as plt # type: ignore
print(" EMPTY WEIGHT BREAKDOWN ")
# initial percentage weights as given by Kundu
mtow = read_from_db("finalMTOW")
wfus = 0.085 * mtow
wwing = 0.09 * mtow
whtail = 0.02 * mtow
wvtail = 0.016 * mtow
wnacelle = 0.016 * mtow
wundercarriage = 0.05 * mtow
wengine = 0.185 * mtow
wenginecontrol = 0.02 * mtow
wfuelsystem = 0.015 * mtow
woilsystem = 0.003 * mtow
wapu = 0 * mtow
wflightcontsys = 0.015 * mtow
whydpneu = 0.0055 * mtow
welectrical = 0.025 * mtow
winstrument = 0.008 * mtow
wavionics = 0.02 * mtow
wecs = 0.004 * mtow
woxyg = 0 * mtow
wfurnishings = 0.04 * mtow
wmiscelleneous = 0.0015 * mtow
wcontigency = 0.01 * mtow
print(" A) FUSELAGE " + str(wfus) + " lb")
print(" B) WING " + str(wwing) + " lb")
print(" C) PROPULSION ")
print(" a) engine dry weight " + str(wengine) + " lb")
print(" b) nacelle " + str(wnacelle) + " lb")
print(" c) engine control " + str(wenginecontrol) + " lb")
print(" D) UNDERCARRIAGE " + str(wundercarriage) + " lb")
print(" E) TAIL ")
print(" a) horizontal tail " + str(whtail) + " lb")
print(" b) verticall tail " + str(wvtail) + " lb")
print(" F) SYSTEMS ")
print(" a) fuel system " + str(wfuelsystem) + " lb")
print(" b) oil system " + str(woilsystem) + " lb")
print(" c) a.p.u " + str(wapu) + " lb")
print(" d) flight contr. sys " + str(wflightcontsys) + " lb")
print(" e) hyd & pneu sys " + str(whydpneu) + " lb")
print(" f) electrical system " + str(welectrical) + " lb")
print(" g) instruments " + str(winstrument) + " lb")
print(" h) avionics " + str(wavionics) + " lb")
print(" i) ecs " + str(wecs) + " lb")
print(" j) oxygen system " + str(woxyg) + " lb")
print(" G) FURNISHINGS " + str(wfurnishings) + " lb")
print(" H) CONTIGENCY " + str(wcontigency) + " lb")
print(" I)MISCELLLENEOUS " + str(wmiscelleneous) + " lb")
print(" _________ ")
calcemptyw = (
wfus
+ wwing
+ whtail
+ wvtail
+ wnacelle
+ wundercarriage
+ wengine
+ wenginecontrol
+ wfuelsystem
+ woilsystem
+ wapu
+ wflightcontsys
+ whydpneu
+ welectrical
+ winstrument
+ wavionics
+ wecs
+ woxyg
+ wfurnishings
+ wmiscelleneous
+ wcontigency
)
We = read_from_db("emptyWeight")
error = ((calcemptyw - We) / We) * 100
print(" TOTAL CALCULATED EMPTY WEIGHT " + str(calcemptyw) + " lb")
print(" ")
print(" INITIAL ESTIMATED EMPTY WEIGHT " + str(We) + " lb")
print(" PERCENTAGE ERROR " + str(error), " %")
|
the-stack_0_20007 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for dealing with docker."""
import os
import sys
# pylint: disable=wrong-import-position,import-error
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import utils
BASE_BUILDER_TAG = 'gcr.io/oss-fuzz-base/base-builder'
BASE_RUNNER_TAG = 'gcr.io/oss-fuzz-base/base-runner'
MSAN_LIBS_BUILDER_TAG = 'gcr.io/oss-fuzz-base/msan-libs-builder'
PROJECT_TAG_PREFIX = 'gcr.io/oss-fuzz/'
# Default fuzz configuration.
DEFAULT_ENGINE = 'libfuzzer'
DEFAULT_ARCHITECTURE = 'x86_64'
_DEFAULT_DOCKER_RUN_ARGS = [
'--cap-add', 'SYS_PTRACE', '-e', 'FUZZING_ENGINE=' + DEFAULT_ENGINE, '-e',
'ARCHITECTURE=' + DEFAULT_ARCHITECTURE, '-e', 'CIFUZZ=True'
]
EXTERNAL_PROJECT_IMAGE = 'external-project'
_DEFAULT_DOCKER_RUN_COMMAND = [
'docker',
'run',
'--rm',
'--privileged',
]
def get_project_image_name(project):
"""Returns the name of the project builder image for |project_name|."""
# TODO(ochang): We may need unique names to support parallel fuzzing.
if project:
return PROJECT_TAG_PREFIX + project
return EXTERNAL_PROJECT_IMAGE
def delete_images(images):
"""Deletes |images|."""
command = ['docker', 'rmi', '-f'] + images
utils.execute(command)
utils.execute(['docker', 'builder', 'prune', '-f'])
def get_base_docker_run_args(workspace, sanitizer='address', language='c++'):
"""Returns arguments that should be passed to every invocation of 'docker
run'."""
docker_args = _DEFAULT_DOCKER_RUN_ARGS.copy()
docker_args += [
'-e', f'SANITIZER={sanitizer}', '-e', f'FUZZING_LANGUAGE={language}',
'-e', 'OUT=' + workspace.out
]
docker_container = utils.get_container_name()
if docker_container:
# Don't map specific volumes if in a docker container, it breaks when
# running a sibling container.
docker_args += ['--volumes-from', docker_container]
else:
docker_args += _get_args_mapping_host_path_to_container(workspace.workspace)
return docker_args, docker_container
def get_base_docker_run_command(workspace, sanitizer='address', language='c++'):
"""Returns part of the command that should be used everytime 'docker run' is
invoked."""
docker_args, docker_container = get_base_docker_run_args(
workspace, sanitizer, language)
command = _DEFAULT_DOCKER_RUN_COMMAND.copy() + docker_args
return command, docker_container
def _get_args_mapping_host_path_to_container(host_path, container_path=None):
"""Get arguments to docker run that will map |host_path| a path on the host to
a path in the container. If |container_path| is specified, that path is mapped
to. If not, then |host_path| is mapped to itself in the container."""
# WARNING: Do not use this function when running in production (and
# --volumes-from) is used for mapping volumes. It will break production.
container_path = host_path if container_path is None else container_path
return ['-v', f'{host_path}:{container_path}']
class Workspace:
"""Class representing the workspace directory."""
def __init__(self, config):
self.workspace = config.workspace
def initialize_dir(self, directory): # pylint: disable=no-self-use
"""Creates directory if it doesn't already exist, otherwise does nothing."""
os.makedirs(directory, exist_ok=True)
@property
def out(self):
"""The out directory used for storing the fuzzer build built by
build_fuzzers."""
# Don't use 'out' because it needs to be used by artifacts.
return os.path.join(self.workspace, 'build-out')
@property
def work(self):
"""The directory used as the work directory for the fuzzer build/run."""
return os.path.join(self.workspace, 'work')
@property
def artifacts(self):
"""The directory used to store artifacts for download by CI-system users."""
# This is hardcoded by a lot of clients, so we need to use this.
return os.path.join(self.workspace, 'out', 'artifacts')
@property
def clusterfuzz_build(self):
"""The directory where builds from ClusterFuzz are stored."""
return os.path.join(self.workspace, 'cifuzz-prev-build')
@property
def clusterfuzz_coverage(self):
"""The directory where builds from ClusterFuzz are stored."""
return os.path.join(self.workspace, 'cifuzz-prev-coverage')
@property
def coverage_report(self):
"""The directory where coverage reports generated by cifuzz are put."""
return os.path.join(self.workspace, 'cifuzz-coverage')
@property
def corpora(self):
"""The directory where corpora from ClusterFuzz are stored."""
return os.path.join(self.workspace, 'cifuzz-corpus')
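# Hedged usage sketch (the config object is an assumption; anything exposing a
# `workspace` attribute works):
#   from types import SimpleNamespace
#   ws = Workspace(SimpleNamespace(workspace='/tmp/cifuzz-workspace'))
#   docker_args, container = get_base_docker_run_args(ws, sanitizer='undefined',
#                                                     language='c')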
|
the-stack_0_20008 | import numpy as np
from signal_processing_algorithms.e_divisive.base import EDivisiveCalculator
from signal_processing_algorithms.e_divisive.calculators import numpy_calculator as EDivisive
class OriginalCalculator(EDivisiveCalculator):
"""Only used for profiling, don't use this in production!"""
@staticmethod
def calculate_diffs(series: np.ndarray) -> np.ndarray:
return EDivisive.calculate_diffs(series)
@staticmethod
def calculate_qhat_values(diffs: np.ndarray) -> np.ndarray:
length = len(diffs)
qhat_values = np.zeros(len(diffs), dtype=np.float)
if length < 5:
return qhat_values
for n in range(2, length - 2):
m = length - n
term1 = sum(diffs[i][j] for i in range(n) for j in range(n, length))
term2 = sum(diffs[i][k] for i in range(n) for k in range(i + 1, n))
term3 = sum(diffs[j][k] for j in range(n, length) for k in range(j + 1, length))
qhat_values[n] = EDivisive._calculate_q(term1, term2, term3, m, n)
return qhat_values
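# Hedged profiling sketch (series values are illustrative): time the pure-Python
# triple loop above on a short synthetic series, e.g. from an IPython session:
#   series = np.random.default_rng(0).normal(size=40)
#   diffs = OriginalCalculator.calculate_diffs(series)
#   %timeit OriginalCalculator.calculate_qhat_values(diffs)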
|
the-stack_0_20009 | R, C = map(int, input().split())
start = list(map(int, input().split()))
goal = list(map(int, input().split()))
graph = [[] for _ in range(R)]
for i in range(R):
tmp = list(input())
graph[i] = tmp
visited = [[False for _ in range(C)] for _ in range(R)]
dist = [[0 for _ in range(C)] for _ in range(R)]
def bfs(start_r, start_c):
q = [[start_r, start_c]]
visited[start_r][start_c] = True
while len(q) != 0:
next_p = q.pop(0)
r, c = next_p
if r + 1 < R and not visited[r + 1][c] and graph[r + 1][c] == ".":
q.append([r + 1, c])
dist[r + 1][c] = dist[r][c] + 1
visited[r + 1][c] = True
        if r - 1 >= 0 and not visited[r - 1][c] and graph[r - 1][c] == ".":  # >= 0 keeps row 0 reachable
q.append([r - 1, c])
dist[r - 1][c] = dist[r][c] + 1
visited[r - 1][c] = True
if c + 1 < C and not visited[r][c + 1] and graph[r][c + 1] == ".":
q.append([r, c + 1])
dist[r][c + 1] = dist[r][c] + 1
visited[r][c + 1] = True
        if c - 1 >= 0 and not visited[r][c - 1] and graph[r][c - 1] == ".":  # >= 0 keeps column 0 reachable
q.append([r, c - 1])
dist[r][c - 1] = dist[r][c] + 1
visited[r][c - 1] = True
bfs(start[0] - 1, start[1] - 1)
print(dist[goal[0] - 1][goal[1] - 1])
|
the-stack_0_20012 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for logging during training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import json
import pickle
import numpy as np
import tensorflow.compat.v1 as tf
def custom_clip(vec, low, high):
new_vec = []
for i in range(len(vec)):
new_val = min(vec[i], high)
new_val = max(new_val, low)
new_vec.append(new_val)
return np.array(new_vec)
def log_row(csv_file, row):
with tf.gfile.Open(csv_file, 'ab') as csvfile:
cw = csv.writer(
csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
cw.writerow(row)
class NumpyEncoder(json.JSONEncoder):
"""Special json encoder for numpy types."""
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)): #### This is the fix
return obj.tolist()
return json.JSONEncoder.default(self, obj)
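# Hedged usage sketch (values are illustrative): NumpyEncoder lets json.dumps handle
# numpy scalars and arrays directly, e.g.
#   json.dumps({'weights': np.arange(3), 'bias': np.float32(0.5)}, cls=NumpyEncoder)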
class AlgorithmState(object):
"""Saves the algorithm state into a pickle.
Particularly useful for resuming real robot experiments.
"""
def __init__(self):
self.fresh = True
self.meta_eval_passed = False
self.single_values = []
self.query_index = 0
self.temp_perturbations = []
def load(self, load_dir):
attr_dict = pickle.load(tf.gfile.Open(load_dir, 'r'))
for k, v in attr_dict.items():
setattr(self, k, v)
self.fresh = False
def save(self, save_dir):
pickle.dump(self.__dict__, tf.gfile.GFile(save_dir, 'w'))
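# Hedged usage sketch (the path is illustrative): persist progress and resume later.
#   state = AlgorithmState()
#   state.query_index = 3
#   state.save('/tmp/algo_state.pkl')
#   resumed = AlgorithmState()
#   resumed.load('/tmp/algo_state.pkl')  # resumed.fresh is now False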
|
the-stack_0_20014 | # Calcular o tempo de viagem v.0.1
# Por Dorcival Leite 202003362174
import time
print("CALCULAR O TEMPO DE VIAGEM\n")
dist = float(input("Digite a distância percorrida (km): "))
velm = float(input("Digite a Velocidade Média: "))
t = float(dist / velm)
min = t * 60
print("\nO tempo de viagem foi de", t, "hora(s) ou", min, "minutos")
time.sleep(20) |
the-stack_0_20016 |
from datetime import *
from django import forms
from django.core.validators import MaxValueValidator, MinValueValidator
from applications.academic_information.models import Constants as Con
from applications.globals.models import DepartmentInfo
from django.forms import CheckboxSelectMultiple, MultiWidget, Select
from .models import Constants, NotifyStudent, Skill, Role
class AddProfile(forms.ModelForm):
"""
    The form is used to change the user's profile picture.
@variables:
pic - chosen picture
"""
pic = forms.ImageField()
class AddEducation(forms.Form):
"""
    The form is used to add the user's education details.
@variables:
institute - name of institute of previous education
degree - name of previous degree
grade - obtained grade
stream - chosen stream for respective education
sdate - start date of respective education
edate - end date of respective education
"""
institute = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'field'}),
label="institute")
degree = forms.CharField(widget=forms.TextInput(attrs={'max_length': 40,
'class': 'field'}),
label="degree")
grade = forms.CharField(widget=forms.TextInput(attrs={'max_length': 10,
'class': 'form-control'}),
label="grade")
stream = forms.CharField(widget=forms.TextInput(attrs={'max_length': 150,
'class': 'form-control'}),
label="stream", required=False)
sdate = forms.DateField(label='sdate', widget=forms.DateInput(attrs={'class':'datepicker'}))
edate = forms.DateField(label='edate', widget=forms.DateInput(attrs={'class':'datepicker'}))
def clean(self):
sdate = self.cleaned_data.get("sdate")
edate = self.cleaned_data.get("edate")
grade = self.cleaned_data.get("grade")
if (sdate> edate):
raise forms.ValidationError("Start Date but me before End Date")
if (len(grade)>3):
raise forms.ValidationError("Invalid")
return self.cleaned_data
class AddSkill(forms.Form):
"""
    The form is used to add skills to the user's profile.
@variables:
skill - name of the skill user knows
skill_rating - weightage of the skill he knows
"""
skill = forms.CharField(widget=forms.TextInput(attrs={'max_length': 30,
'class': 'field'}),
label="skill")
skill_rating = forms.IntegerField(widget=forms.NumberInput(attrs={'min':0, 'max':100}), label="skill_rating")
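# Hedged usage sketch (sample data is illustrative): plain forms in this module are
# validated the usual Django way, e.g.
#   form = AddSkill(data={'skill': 'Python', 'skill_rating': 80})
#   form.is_valid()  # True; cleaned values end up in form.cleaned_data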
class AddCourse(forms.Form):
"""
    The form is used to add external courses that the user has completed.
@variables:
course_name - name of the course
description - description of the course
license_no - licence number of the course
sdate - start date of the course
edate - end date of the course
"""
course_name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="course_name")
description = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'field'}),
label="description", required=False)
license_no = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'field'}),
label="license_no", required=False)
sdate = forms.DateField(label='sdate', widget=forms.DateInput(attrs={'class':'datepicker'}))
edate = forms.DateField(label='edate', widget=forms.DateInput(attrs={'class':'datepicker'}))
def clean(self):
sdate = self.cleaned_data.get("sdate")
edate = self.cleaned_data.get("edate")
if (sdate > edate):
raise forms.ValidationError("Start Date but me before End Date")
return self.cleaned_data
class AddConference(forms.Form):
"""
    The form is used to add conferences that the user has attended.
    @variables:
        conference_name - name of the conference
        description - description of the conference
        sdate - start date of the conference
        edate - end date of the conference
"""
conference_name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="course_name")
description = forms.CharField(widget=forms.Textarea(attrs={'max_length': 1000,
'class': 'form-control'}),
label="description", required=False)
sdate = forms.DateField(label='sdate', widget=forms.DateInput(attrs={'class':'datepicker'}))
edate = forms.DateField(label='edate', widget=forms.DateInput(attrs={'class':'datepicker'}))
def clean(self):
sdate = self.cleaned_data.get("sdate")
edate = self.cleaned_data.get("edate")
if (sdate > edate):
raise forms.ValidationError("Start Date cant be after End Date")
return self.cleaned_data
class AddExperience(forms.Form):
"""
    The form is used to add work experience that the user has gained.
@variables:
title - title of the experience
status - status of experience (ongoing/ended)
description - description of the experience
company - name of company where experience is gained
location - location of the company
sdate - start date of the company experience
edate - end date of the company experience
"""
title = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="title")
status = forms.ChoiceField(choices = Constants.RESUME_TYPE, label="status",
widget=forms.Select(attrs={'style': "height:45px"}))
description = forms.CharField(widget=forms.Textarea(attrs={'max_length': 500,
'class': 'form-control'}),
label="description", required=False)
company = forms.CharField(widget=forms.TextInput(attrs={'max_length': 200,
'class': 'form-control'}),
label="company")
location = forms.CharField(widget=forms.TextInput(attrs={'max_length': 200,
'class': 'form-control'}),
label="location")
sdate = forms.DateField(label='sdate', widget=forms.DateInput(attrs={'class':'datepicker'}))
edate = forms.DateField(label='edate', widget=forms.DateInput(attrs={'class':'datepicker'}))
def clean(self):
sdate = self.cleaned_data.get("sdate")
edate = self.cleaned_data.get("edate")
if (sdate > edate):
raise forms.ValidationError("Start Date cant be after End Date")
return self.cleaned_data
class AddProject(forms.Form):
"""
The form is used to add project that user has done.
@variables:
project_name - name of the project
project_status - status of the project (ongoing/ended)
summary - summary of the project
project_link - link of the project
sdate - start date of the project
edate - end date of the project
"""
project_name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 50,
'class': 'field'}),
label="title")
project_status = forms.ChoiceField(choices = Constants.RESUME_TYPE, label="project_status",
widget=forms.Select())
summary = forms.CharField(widget=forms.Textarea(attrs={'max_length': 1000,
'class': 'form-control'}),
label="summary", required=False)
project_link = forms.CharField(widget=forms.TextInput(attrs={'max_length': 200,
'class': 'form-control'}),
label="project_link", required=False)
sdate = forms.DateField(label='sdate', widget=forms.DateInput(attrs={'class':'datepicker'}))
edate = forms.DateField(label='edate', widget=forms.DateInput(attrs={'class':'datepicker'}))
def clean(self):
sdate = self.cleaned_data.get("sdate")
edate = self.cleaned_data.get("edate")
if (sdate > edate):
raise forms.ValidationError("Start Date cant be after End Date")
return self.cleaned_data
class AddAchievement(forms.Form):
"""
    The form is used to add an achievement that the user has earned.
@variables:
achievement - name of the achievement
achievement_type - type of achievement (educational/others)
description - description of achievement
issuer - issuer of achievement
date_earned - date of earning of achievement
"""
achievement = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="achievement")
achievement_type = forms.ChoiceField(choices = Constants.ACHIEVEMENT_TYPE,
label="achievement_type", widget=forms.Select(attrs={'style': "height:45px"}))
description = forms.CharField(widget=forms.Textarea(attrs={'max_length': 1000,
'class': 'form-control'}),
label="description", required=False)
issuer = forms.CharField(widget=forms.TextInput(attrs={'max_length': 200,
'class': 'form-control'}),
label="issuer")
date_earned = forms.DateField(label='date_earned', widget=forms.DateInput(attrs={'class':'datepicker'}))
class AddExtracurricular(forms.Form):
"""
    The form is used to add a social activity that the user has participated in.
    @variables:
        event_name - name of the event
        event_type - type of the event
        description - description of the activity
        name_of_position - name of the position held
        date_earned - date associated with the activity
"""
event_name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="event_name")
event_type = forms.ChoiceField(choices = Constants.EVENT_TYPE,
label="event_type", widget=forms.Select(attrs={'style': "height:45px"}))
description = forms.CharField(widget=forms.Textarea(attrs={'max_length': 1000,
'class': 'form-control'}),
label="description", required=False)
name_of_position = forms.CharField(widget=forms.TextInput(attrs={'max_length': 200,
'class': 'form-control'}),
label="name_of_position")
date_earned = forms.DateField(label='date_earned', widget=forms.DateInput(attrs={'class':'datepicker'}))
class AddPublication(forms.Form):
"""
    The form is used to add publications that the user has published.
@variables:
publication_title - title of publication
description - description of publication
publisher - name of publisher
publication_date - date of publication
"""
publication_title = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="publication_title")
description = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'form-control'}),
label="description", required=False)
publisher = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'form-control'}),
label="publisher")
publication_date = forms.DateField(label='publication_date', widget=forms.DateInput(attrs={'class':'datepicker'}))
class AddReference(forms.Form):
"""
The form is used to add reference.
@variables:
reference_name - name of the referenced person
post - post of the referenced person
email - email of the referenced person
mobile_number - mobile number/phone number of the referenced person
"""
reference_name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'id': 'reference_name'}),
label="reference_name")
post = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'form-control',
'id': 'reference_post'}),
label="post", required=False)
email = forms.CharField(widget=forms.TextInput(attrs={'max_length': 50,
'class': 'form-control',
'id': 'reference_email',
}),
label="email")
mobile_number = forms.CharField(widget=forms.TextInput(attrs={'max_length': 10,
'class': 'field',
'id': 'reference_mobile',
'type': 'number'}),
label="mobile_number")
def clean(self):
mobile_number = self.cleaned_data.get("mobile_number")
        if mobile_number and len(mobile_number) > 10:
raise forms.ValidationError("Invalid Number")
return self.cleaned_data
class AddPatent(forms.Form):
"""
    The form is used to add patents that the user has filed.
    @variables:
        patent_name - name of the patent
        description - description of the patent
        patent_office - office with which the patent was filed
        patent_date - date of patent
"""
patent_name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="patent_name")
description = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'form-control'}),
label="description", required=False)
patent_office = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'form-control'}),
label="patent_office")
patent_date = forms.DateField(label='patent_date', widget=forms.DateInput(attrs={'class':'datepicker'}))
class AddProfile(forms.Form):
"""
The form is used to change profile section of user.
@variables:
about_me - about me about the user
age - age of user
address - address of user
"""
about_me = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'field'}),
label="about_me", required=False)
age = forms.IntegerField(widget=forms.NumberInput(attrs={'min': 0}), label="age")
address = forms.CharField(widget=forms.TextInput(attrs={'max_length': 250,
'class': 'form-control'}),
label="address")
class AddChairmanVisit(forms.Form):
"""
    The form is used to add the chairman's visit schedule.
@variables:
company_name - name of company
location - location of company
description - description of company
visiting_date - date of visiting
"""
company_name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="company_name")
location = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="location")
description = forms.CharField(widget=forms.Textarea(attrs={'max_length': 1000,
'class': 'form-control'}),
label="description")
visiting_date = forms.DateField(label='visiting_date', widget=forms.DateInput(attrs={'class':'datepicker'}))
class SearchStudentRecord(forms.Form):
"""
    The form is used to search the student records based on various parameters.
@variables:
name - name of the student
rollno - roll no of student
programme - programme of student
department - department of student
cpi - cpi of student
skill - skill of student
debar - debarred or not debarred
placed_type - type of placement
"""
name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100, 'class': 'field'}),
label="name", required=False)
rollno = forms.IntegerField(label="rollno", widget=forms.NumberInput(attrs={'min': 0}), required=False)
programme = forms.ChoiceField(choices = Con.PROGRAMME, required=False,
label="programme", widget=forms.Select(attrs={'style': "height:45px",
'onchange': "changeDeptForSearch()",
'id': "id_programme_search"}))
dep_btech = forms.MultipleChoiceField(choices = Constants.BTECH_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
dep_bdes = forms.MultipleChoiceField(choices = Constants.BDES_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
dep_mtech = forms.MultipleChoiceField(choices = Constants.MTECH_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
dep_mdes = forms.MultipleChoiceField(choices = Constants.MDES_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
dep_phd = forms.MultipleChoiceField(choices = Constants.PHD_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
cpi = forms.DecimalField(label="cpi", required=False)
skill = forms.ModelMultipleChoiceField(required=False, widget=forms.SelectMultiple(),
queryset=Skill.objects.all(), label="skill")
debar = forms.ChoiceField(widget=forms.Select(attrs={'style': "height:45px"}), label="debar", required=False,
choices=Constants.DEBAR_TYPE)
placed_type = forms.ChoiceField(widget=forms.Select(attrs={'style': "height:45px"}), label="placed_type", required=False,
choices=Constants.PLACED_TYPE)
# new_field = DepartmentWidget(attrs={})
class SendInvite(forms.Form):
"""
    The form is used to send invites to students about upcoming placement or pbi events.
@variables:
company - name of company
"""
company = forms.ModelChoiceField(required=True, queryset=NotifyStudent.objects.all(), label="company")
rollno = forms.IntegerField(label="rollno", widget=forms.NumberInput(attrs={'min': 0}), required=False)
programme = forms.ChoiceField(choices = Con.PROGRAMME, required=False,
label="programme", widget=forms.Select(attrs={'style': "height:45px",
'onchange': "changeDeptForSend()",
'id': "id_programme_send"}))
dep_btech = forms.MultipleChoiceField(choices = Constants.BTECH_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
dep_bdes = forms.MultipleChoiceField(choices = Constants.BDES_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
dep_mtech = forms.MultipleChoiceField(choices = Constants.MTECH_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
dep_mdes = forms.MultipleChoiceField(choices = Constants.MDES_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
dep_phd = forms.MultipleChoiceField(choices = Constants.PHD_DEP, required=False, label="department",
widget=forms.CheckboxSelectMultiple)
cpi = forms.DecimalField(label="cpi", required=False)
no_of_days = forms.CharField(required=True, widget=forms.NumberInput(attrs={ 'min':0,
'max':30,
'max_length': 10,
'class': 'form-control'}))
class AddSchedule(forms.Form):
"""
    The form is used to add a placement or pbi schedule.
@variables:
time - time of placement activity
ctc - salary
company_name - name of company
placement_type - placement type (placement/pbi)
location - location of company
description - description of company
placement_date - date of placement activity
"""
time = forms.TimeField(label='time', widget=forms.widgets.TimeInput(attrs={'type': "time",
'value':"00:00",
'min':"0:00",
'max':"24:00"}))
ctc = forms.DecimalField(label="ctc", widget=forms.NumberInput(attrs={ 'min':0, 'step': 0.25}) )
company_name = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'list': 'company_dropdown1',
'id': 'company_input'}),
label="company_name")
placement_type = forms.ChoiceField(widget=forms.Select(attrs={'style': "height:45px"}), label="placement_type",
choices=Constants.PLACEMENT_TYPE)
location = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="location")
description = forms.CharField(widget=forms.Textarea(attrs={'max_length': 1000,
'class': 'form-control'}),
label="description", required=False)
attached_file = forms.FileField(required=False)
placement_date = forms.DateField(label='placement_date', widget=forms.DateInput(attrs={'class':'datepicker'}))
def clean_ctc(self):
ctc = self.cleaned_data['ctc']
# print('form validation \n\n\n\n', ctc)
if ctc <= 0:
raise forms.ValidationError("CTC must be positive value")
return ctc
def clean_company_name(self):
company_name = self.cleaned_data['company_name']
# print('form validation \n\n\n\n', ctc)
if NotifyStudent.objects.filter(company_name=company_name):
raise forms.ValidationError("company_name must be unique")
return company_name
def current_year():
return date.today().year
def max_value_current_year(value):
return MaxValueValidator(current_year())(value)
def year_choices():
return [(r,r) for r in range(1984, datetime.date.today().year+1)]
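# Illustrative note (not part of the original module): year_choices() produces integer
# pairs suitable for a ChoiceField, which is why the year fields below use
# TypedChoiceField(coerce=int, choices=year_choices).
#
#   year_choices()[:2]   # [(1984, 1984), (1985, 1985)]
#   current_year()       # e.g. 2021; the upper bound enforced by max_value_current_year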
class SearchPlacementRecord(forms.Form):
"""
    The form is used to search the placement records based on various parameters.
@variables:
stuname - name of the student
year - year of placement
ctc - salary
roll - roll no of student
cname - name of company
"""
stuname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'id': 'add_placement_stuname'}),
label="stuname", required=False)
year = forms.TypedChoiceField(coerce=int, choices=year_choices, initial=current_year, label="year", required=False, widget=forms.NumberInput(attrs={'id': 'add_placement_year'}))
ctc = forms.CharField(label="ctc", required=False, widget=forms.NumberInput(attrs={ 'min':0, 'id': 'add_placement_ctc', 'step': 0.25}))
roll = forms.IntegerField(widget=forms.NumberInput(attrs={ 'min':0,
'max_length': 10,
'class': 'form-control',
'id': 'add_placement_roll'}),
label="roll", required=False)
cname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'id': 'add_placement_cname'}),
label="cname", required=False)
class SearchPbiRecord(forms.Form):
"""
    The form is used to search the pbi records.
@variables:
stuname - name of student
year - year of pbi
ctc - stipend
roll - roll no of student
cname - name of company
"""
stuname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'id': 'add_pbi_stuname'}),
label="stuname", required=False)
year = forms.TypedChoiceField(coerce=int, choices=year_choices, initial=current_year, label="year", required=False, widget=forms.NumberInput(attrs={'id': 'add_pbi_year'}))
ctc = forms.DecimalField(label="ctc", required=False, widget=forms.NumberInput(attrs={ 'min':0, 'id': 'add_pbi_ctc', 'step': 0.25}))
roll = forms.IntegerField(widget=forms.NumberInput(attrs={ 'min':0,
'max_length': 10,
'class': 'form-control',
'id': 'add_pbi_roll'}),
label="roll", required=False)
cname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'id': 'add_pbi_cname'}),
label="cname", required=False)
class SearchHigherRecord(forms.Form):
"""
    The form is used to search the higher study records based on various parameters.
@variables:
roll - roll no of the student
stuname - name of the student
test_type - type of test for higher study
test_score - score in the test
        year - year of clearing the test
uname - name of the university
"""
roll = forms.IntegerField(widget=forms.NumberInput(attrs={ 'min':0, 'max_length': 10,
'class': 'form-control',
'id': 'add_higher_roll'}),
label="roll", required=False)
stuname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'id': 'add_higher_stuname'}),
label="stuname", required=False,
help_text="Only for searching records")
test_type = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'id': 'add_higher_test_type'}),
label="test_type", required=False)
test_score = forms.IntegerField(label="test_score", required=False, widget=forms.NumberInput(attrs={'id': 'add_higher_test_score'}))
year = forms.TypedChoiceField(coerce=int, choices=year_choices, initial=current_year, label="year", required=False, widget=forms.NumberInput(attrs={'id': 'add_higher_year'}))
uname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field',
'id': 'add_higher_uname'}),
label="uname", required=False)
class ManagePlacementRecord(forms.Form):
"""
The form is used to manage placement records in the database by searching based on given parameters.
@variables:
stuname - name of the student
roll - roll no of student
company - company name
ctc - salary
"""
stuname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="stuname", required=False)
roll = forms.IntegerField(widget=forms.NumberInput(attrs={ 'min':0,
'max_length': 10,
'class': 'form-control'}),
label="roll", required=False)
company = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="company", required=False)
ctc = forms.IntegerField(widget=forms.NumberInput(attrs={ 'min':0, 'step': 0.25}), label="ctc", required=False)
class ManagePbiRecord(forms.Form):
"""
The form is used to manage pbi records in the database by searching based on given parameters.
@variables:
stuname - name of student
roll - roll no of student
company - company name
        ctc - stipend that the company is giving
"""
stuname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="stuname", required=False)
roll = forms.IntegerField(widget=forms.NumberInput(attrs={ 'min':0,
'max_length': 10,
'class': 'form-control'}),
label="roll", required=False)
company = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="company", required=False)
ctc = forms.IntegerField(widget=forms.NumberInput(attrs={ 'min':0, 'step': 0.25}), label="ctc", required=False)
class ManageHigherRecord(forms.Form):
"""
The form is used to manage Higher Study records in the database by searching based on given parameters.
@variables:
stuname - name of student
roll - roll no of student
test_type - type of test
company - name of university
test_score - score in the test
"""
stuname = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="stuname", required=False)
roll = forms.IntegerField(widget=forms.NumberInput(attrs={ 'min':0,
'max_length': 10,
'class': 'form-control'}),
label="roll", required=False)
test_type = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="test_type", required=False)
company = forms.CharField(widget=forms.TextInput(attrs={'max_length': 100,
'class': 'field'}),
label="company", required=False)
test_score = forms.IntegerField(label="test_score", required=False)
|
the-stack_0_20018 | from time import perf_counter as clock  # time.clock was removed in Python 3.8
from Refinement_based_extraction.ObservationTable import TableTimedOut
from Refinement_based_extraction.DFA import DFA
from Refinement_based_extraction.Teacher import Teacher
from Refinement_based_extraction.Lstar import run_lstar
def extract(rnn,time_limit = 50,initial_split_depth = 10,starting_examples=None):
print("provided counterexamples are:",starting_examples)
guided_teacher = Teacher(rnn,num_dims_initial_split=initial_split_depth,starting_examples=starting_examples)
    start = clock()
    learning_rounds = None  # remains None if the extraction is interrupted before L* completes
    try:
        _, learning_rounds = run_lstar(guided_teacher, time_limit)
    except KeyboardInterrupt:  # you can press the stop button in the notebook to stop the extraction any time
        print("lstar extraction terminated by user")
    except TableTimedOut:
        print("observation table timed out during refinement")
end = clock()
extraction_time = end-start
dfa = guided_teacher.dfas[-1]
print("overall guided extraction time took: " + str(extraction_time))
print("generated counterexamples were: (format: (counterexample, counterexample generation time))")
print('\n'.join([str(a) for a in guided_teacher.counterexamples_with_times]))
return dfa, learning_rounds |
the-stack_0_20022 | #!/usr/bin/env python3
# coding: utf-8
import io
import json
import logging
from collections import OrderedDict
from copy import deepcopy
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, NoReturn, Optional, Union, cast
import ruamel.yaml
from cwl_utils.parser import load_document_by_string
from cwl_utils.parser.cwl_v1_2 import (CommandInputArraySchema,
CommandInputEnumSchema,
CommandInputParameter,
CommandInputRecordSchema,
CommandLineTool, ExpressionTool,
InputArraySchema, InputRecordSchema,
Workflow)
from cwltool.main import RuntimeContext, arg_parser, argcomplete
from cwltool.main import fetch_document as cwltool_fetch_document
from cwltool.main import (generate_input_template, get_default_args, make_tool,
resolve_and_validate_document, resolve_tool_uri,
setup_loadingContext)
from requests import get
from ruamel.yaml.main import YAML
CWLUtilObj = Union[CommandLineTool, Workflow, ExpressionTool]
CWLUtilLoadResult = Union[List[CWLUtilObj], CWLUtilObj]
def download_file(remote_url: str) -> str:
"""Downloads a file from a URL and returns the content."""
response = get(remote_url)
if response.status_code != 200:
raise Exception(f"Failed to download file: {remote_url}")
return response.text
def is_remote_url(location: str) -> bool:
"""Returns True if the path is a remote URL."""
return location.startswith("http://") or location.startswith("https://")
def fetch_document(location: Union[str, Path]) -> str:
"""Fetches a CWL document from a file path or a remote URL."""
if isinstance(location, str):
if is_remote_url(location):
return download_file(location)
location = Path(location)
if location.is_absolute():
return location.read_text(encoding="utf-8")
return Path().cwd().joinpath(location).read_text(encoding="utf-8")
def as_uri(location: Union[str, Path]) -> str:
"""Converts a location to a URI."""
if isinstance(location, str):
if is_remote_url(location):
return location
location = Path(location)
if not location.is_absolute():
location = Path().cwd().joinpath(location)
return location.as_uri()
def extract_main_tool(cwl_obj: CWLUtilLoadResult) -> CWLUtilObj:
"""Extracts the main tool from a CWL object."""
if isinstance(cwl_obj, list):
for obj in cwl_obj:
if obj.class_ == "Workflow" or str(obj.id).rsplit("#", maxsplit=1)[-1] == "main": # noqa: E501
return obj
return cast(CWLUtilObj, cwl_obj)
class UnsupportedValueError(Exception):
"""Raised when an unsupported value is encountered."""
@dataclass
class SecondaryFile:
"""SecondaryFile"""
pattern: Optional[str] = None
required: Optional[bool] = True
@dataclass
class InputField:
"""
InputField
example:
{
"default": null,
"doc": "doc example",
"id": "id example",
"label": "label example",
"type": "File",
"required": true,
"secondaryFiles": [
{
"pattern": "pattern example",
"required": true,
}
],
}
"""
default: Optional[Any] = None
doc: Optional[str] = None
id: Optional[str] = None
label: Optional[str] = None
type: Optional[str] = None
array: bool = False
required: bool = True
secondaryFiles: Optional[List[SecondaryFile]] = None
class Inputs:
"""Generates InputField from a cwl-utils object."""
def __init__(self, cwl_obj: CWLUtilLoadResult) -> None:
self.ori_cwl_obj = deepcopy(cwl_obj)
self.cwl_obj = deepcopy(extract_main_tool(cwl_obj))
self.fields: List[InputField] = []
self._parse()
def as_json(self) -> str:
"""Dump as json."""
def encode_default(item: Any) -> Dict[str, Any]:
if isinstance(item, object) and hasattr(item, '__dict__'):
return item.__dict__
else:
raise TypeError
return json.dumps(self.fields, default=encode_default, indent=2)
def as_dict(self) -> Any:
"""Dump as dict."""
str_json = self.as_json()
return json.loads(str_json)
def _parse(self) -> None:
"""Parses inputs field from the CWL object."""
for inp_obj in self.cwl_obj.inputs:
if isinstance(inp_obj.type, str):
inp_field = self._typical_field(inp_obj)
elif isinstance(inp_obj.type, list):
if len(inp_obj.type) == 1:
tmp_obj = deepcopy(inp_obj)
tmp_obj.type = inp_obj.type[0]
if isinstance(tmp_obj.type, str):
inp_field = self._typical_field(tmp_obj)
elif isinstance(tmp_obj.type, CommandInputArraySchema):
inp_field = self._command__input_array_field(tmp_obj) # noqa: E501
elif isinstance(tmp_obj.type, InputArraySchema):
inp_field = self._input_array_field(tmp_obj)
else:
raise UnsupportedValueError("The type field contains an unsupported format") # noqa: E501
elif len(inp_obj.type) == 2:
if 'null' in inp_obj.type:
tmp_obj = deepcopy(inp_obj)
for t in inp_obj.type:
if t != 'null':
tmp_obj.type = t
inp_field = self._typical_field(tmp_obj)
inp_field.required = False
else:
# [TODO] not support
raise UnsupportedValueError("The union field does not support by cwl-inputs-parser") # noqa: E501
else:
# [TODO] not support
raise UnsupportedValueError("The union field does not support by cwl-inputs-parser") # noqa: E501
elif isinstance(inp_obj.type, CommandInputArraySchema):
if inp_obj.type.items not in ["boolean", "int", "string", "File", "Directory", "Any"]: # noqa: E501
raise UnsupportedValueError("The type field contains an unsupported format") # noqa: E501
inp_field = self._command__input_array_field(inp_obj)
elif isinstance(inp_obj.type, CommandInputEnumSchema):
# [TODO] not support
# inp_field = self._command_input_enum_field(inp_obj)
raise UnsupportedValueError("The CommandInputEnumSchema field does not support by cwl-inputs-parser") # noqa: E501
elif isinstance(inp_obj.type, CommandInputRecordSchema):
# [TODO] not support
# inp_field = self._command_input_record_field(inp_obj)
raise UnsupportedValueError("The CommandInputRecordSchema field does not support by cwl-inputs-parser") # noqa: E501
elif isinstance(inp_obj.type, InputArraySchema):
if isinstance(inp_obj.type.items, InputRecordSchema):
# [TODO] not support
raise UnsupportedValueError("The InputRecordSchema field in the InputArraySchema field does not support by cwl-inputs-parser") # noqa: E501
if inp_obj.type.items not in ["boolean", "int", "string", "File", "Directory", "Any"]: # noqa: E501
raise UnsupportedValueError("The type field contains an unsupported format") # noqa: E501
inp_field = self._input_array_field(inp_obj)
elif isinstance(inp_obj.type, InputRecordSchema):
# [TODO] not support
raise UnsupportedValueError("The InputRecordSchema field does not support by cwl-inputs-parser") # noqa: E501
else:
# [TODO] not support
raise UnsupportedValueError("The type field contains an unsupported format") # noqa: E501
if inp_field.type == "File":
if inp_obj.secondaryFiles:
inp_field.secondaryFiles = []
for secondary_file in inp_obj.secondaryFiles:
required = secondary_file.required
pattern = secondary_file.pattern
if pattern.endswith("?"):
required = False
pattern = pattern.rstrip("?")
if required is None:
required = True
inp_field.secondaryFiles.append(
SecondaryFile(
pattern=pattern,
required=required
)
)
self.fields.append(inp_field)
def _typical_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a typical fields
like: boolean, int, string, File, stdin, Directory, Any
"""
if inp_obj.type == "boolean":
return self._boolean_field(inp_obj)
elif inp_obj.type == "int":
return self._int_field(inp_obj)
elif inp_obj.type == "string":
return self._string_field(inp_obj)
elif inp_obj.type == "File":
return self._file_field(inp_obj)
elif inp_obj.type == "stdin":
return self._stdin_field(inp_obj)
elif inp_obj.type == "Directory":
return self.directory_field(inp_obj)
elif inp_obj.type == "Any":
return self.any_field(inp_obj)
else:
# [TODO] not support
raise UnsupportedValueError("The type field contains an unsupported format") # noqa: E501
@staticmethod
def _clean_val(val: Optional[Any]) -> Optional[Any]:
"""Cleans a value field."""
if isinstance(val, str):
return deepcopy(val).replace("\n", " ").strip()
return deepcopy(val)
def _template_field(self, inp_obj: CommandInputParameter) -> InputField:
"""Generates a InputField template from a CWL InputParameter."""
id_ = self._clean_val(inp_obj.id)
if isinstance(id_, str):
id_ = id_.split("#")[-1]
return InputField(
default=deepcopy(inp_obj.default),
doc=self._clean_val(inp_obj.doc),
id=id_,
label=self._clean_val(inp_obj.label),
type=self._clean_val(inp_obj.type),
)
def _boolean_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a InputField from a CWL InputParameter.
inp_obj example from 'v1.2/revsort-packed.cwl'
{
'default': True,
'doc': 'If true, reverse (decending) sort',
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#main/reverse_sort',
'inputBinding': None,
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7feb232f7be0>, # noqa: E501
'secondaryFiles': None,
'streamable': None,
'type': 'boolean'
}
make-template result:
reverse_sort: true # default value of type "boolean".
"""
return self._template_field(inp_obj)
def _int_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a InputField from a CWL InputParameter.
inp_obj example from 'v1.2/bwa-mem-tool.cwl'
{
'default': None,
'doc': None,
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#minimum_seed_length', # noqa: E501
'inputBinding': <cwl_utils.parser.cwl_v1_2.CommandLineBinding object at 0x7f3c1af41220>, # noqa: E501
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f3c1cdec730>, # noqa: E501
'secondaryFiles': None,
'streamable': None,
'type': 'int'
}
make-template result:
minimum_seed_length: 0 # type "int"
"""
return self._template_field(inp_obj)
def _string_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a InputField from a CWL InputParameter.
inp_obj example from 'v1.2/pass-unconnected.cwl'
{
'default': 'hello inp2',
'doc': None,
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#inp2',
'inputBinding': None,
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7fd904cfe370>, # noqa: E501
'secondaryFiles': None,
'streamable': None,
'type': 'string'
}
make-template result:
inp2: hello inp2 # default value of type "string".
"""
return self._template_field(inp_obj)
def _file_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a InputField from a CWL InputParameter.
inp_obj example from 'v1.2/count-lines5-wf.cwl'
{
'default': ordereddict([('class', 'File'), ('location', 'hello.txt')]),
'doc': None,
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#file1',
'inputBinding': None,
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7fa708c4b0d0>, # noqa: E501
'secondaryFiles': None,
'streamable': None,
'type': 'File'
}
make-template result:
file1: {class: File, location: file:///app/tests/cwl_conformance_test/v1.2/hello.txt} # default value of type "File".
Basically, all path and location can be treated as location.
> As a special case, if the path field is provided but the
location field is not, an implementation may assign the value
of the path field to location, and remove the path field.
"""
field = self._template_field(inp_obj)
if isinstance(inp_obj.default, OrderedDict) and len(inp_obj.default) != 0: # noqa: E501
if "location" in inp_obj.default:
field.default = inp_obj.default["location"]
elif "path" in inp_obj.default:
field.default = inp_obj.default["path"]
return field
def _stdin_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a InputField from a CWL InputParameter.
inp_obj example from 'v1.2/cat-tool-shortcut.cwl'
{
'default': None,
'doc': None,
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#file1',
'inputBinding': None,
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f3f40a59d60>, # noqa: E501
'secondaryFiles': None,
'streamable': None,
'type': 'stdin'
}
make-template result:
file1: # type "File"
class: File
path: a/file/path
"""
field = self._file_field(inp_obj)
field.type = "File"
return field
def directory_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a InputField from a CWL InputParameter.
inp_obj example from 'v1.2/dir.cwl'
{
'default': None,
'doc': None,
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#indir',
'inputBinding': None,
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f44a93efca0>, # noqa: E501
'secondaryFiles': None,
'streamable': None,
'type': 'Directory'
}
make-template result:
indir: # type "Directory"
class: Directory
path: a/directory/path
"""
return self._template_field(inp_obj)
def any_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a InputField from a CWL InputParameter.
inp_obj example from 'v1.2/null-expression1-tool.cwl'
{
'default': 'the-default',
'doc': None,
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#i1',
'inputBinding': None,
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f8774eba220>, # noqa: E501
'secondaryFiles': None,
'streamable': None,
'type': 'Any'
}
make-template result:
i1: "the-default" # default value of type "Any".
"""
return self._template_field(inp_obj)
def _command__input_array_field(self, inp_obj: CommandInputParameter) -> InputField: # noqa: E501
"""
Generates a InputField from a CWL InputParameter.
[TODO] more check
inp_obj example from 'v1.2/docker-array-secondaryfiles.cwl'
{
'default': None,
'doc': None,
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#fasta_path',
'inputBinding': None,
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7fd3104d2130>, # noqa: E501
'secondaryFiles': [<cwl_utils.parser.cwl_v1_2.SecondaryFileSchema object at 0x7fd3104e27c0>, # noqa: E501
<cwl_utils.parser.cwl_v1_2.SecondaryFileSchema object at 0x7fd3104e2940>, # noqa: E501
<cwl_utils.parser.cwl_v1_2.SecondaryFileSchema object at 0x7fd3104e2a60>], # noqa: E501
'streamable': None,
'type': <cwl_utils.parser.cwl_v1_2.CommandInputArraySchema object at 0x7fd3104e2dc0>
}
make-template result:
require_dat: false # type "boolean" (optional)
fasta_path: # array of type "File"
- class: File
path: a/file/path
secondaryFiles:
{'extension_fields': ordereddict(),
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f634a456460>,
'pattern': '.bai',
'required': False}
{'extension_fields': ordereddict(),
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f634a456460>,
'pattern': "${ if (inputs.require_dat) {return '.dat'} else {return null} }",
'required': None}
{'extension_fields': ordereddict(),
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f634a456460>,
'pattern': '${ return null; }',
'required': None}
"""
field = self._template_field(inp_obj)
field.type = inp_obj.type.items
field.array = True
return field
def _command_input_enum_field(self, inp_obj: CommandInputParameter) -> NoReturn: # noqa: E501
"""
Generates a InputField from a CWL InputParameter.
[TODO] do not know how to handle enum field in CWL.
"""
def _command_input_record_field(self, inp_obj: CommandInputParameter) -> NoReturn: # noqa: E501
"""
Generates a InputField from a CWL InputParameter.
v1.2/record-output.cwl
v1.2/anon_enum_inside_array.cwl
v1.2/record-in-secondaryFiles.cwl
v1.2/record-in-format.cwl
v1.2/record-out-format.cwl
[TODO] do not know how to handle record field in CWL.
"""
def _input_array_field(self, inp_obj: CommandInputParameter) -> InputField:
"""
Generates a InputField from a CWL InputParameter.
inp_obj example from 'v1.2/count-lines3-wf.cwl'
{
'default': None,
'doc': None,
'extension_fields': ordereddict(),
'format': None,
'id': 'file:///app/tests/cwl_conformance_test/#file1',
'inputBinding': None,
'label': None,
'loadContents': None,
'loadListing': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f26ca1b2160>, # noqa: E501
'secondaryFiles': None,
'streamable': None,
'type': <cwl_utils.parser.cwl_v1_2.InputArraySchema object at 0x7f26ca1422e0>
}
type:
{
'doc': None,
'extension_fields': ordereddict(),
'items': 'File',
'label': None,
'loadingOptions': <cwl_utils.parser.cwl_v1_2.LoadingOptions object at 0x7f26ca1b2160>,
'name': '_:7db2ff08-5fed-4261-b514-fe5eccc43048',
'type': 'array'
}
make-template result:
file1: # array of type "File"
- class: File
path: a/file/path
"""
field = self._template_field(inp_obj)
field.type = inp_obj.type.items
field.array = True
if field.label is None:
field.label = self._clean_val(inp_obj.type.label)
if field.doc is None:
field.doc = self._clean_val(inp_obj.type.doc)
return field
def wf_location_to_inputs(wf_location: Union[str, Path]) -> Inputs:
"""
Generates Inputs from a location of CWL Workflow.
"""
wf_docs = fetch_document(wf_location)
wf_obj = load_document_by_string(wf_docs, uri=as_uri(wf_location))
return Inputs(wf_obj)
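# Illustrative usage sketch (not part of the original module). The workflow path below is
# a placeholder; any local path or http(s) URL accepted by fetch_document() works.
#
#   inputs = wf_location_to_inputs("path/to/workflow.cwl")
#   print(inputs.as_json())    # the parsed InputField entries, serialized to JSON
#   fields = inputs.as_dict()  # the same entries as plain Python dicts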
def cwl_make_template(wf_location: Union[str, Path]) -> str:
"""Returns the results of cwltool --make-template."""
logging.getLogger("cwltool").setLevel(logging.ERROR)
logging.getLogger("salad").setLevel(logging.ERROR)
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(["--make-template", str(wf_location)])
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
runtimeContext = RuntimeContext(vars(args))
loadingContext = setup_loadingContext(None, runtimeContext, args)
uri, _ = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
loadingContext, workflowobj, uri = cwltool_fetch_document(
uri,
loadingContext
)
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
)
def my_represent_none(self: Any, data: Any) -> Any:
"""Force clean representation of 'null'."""
return self.represent_scalar("tag:yaml.org,2002:null", "null")
ruamel.yaml.representer.RoundTripRepresenter.add_representer(
type(None), my_represent_none
)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
yaml.block_seq_indent = 2
buf = io.BytesIO()
yaml.dump(generate_input_template(make_tool(uri, loadingContext)), buf)
yaml_str = buf.getvalue().decode("utf-8")
return yaml_str
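# Illustrative usage sketch (not part of the original module): cwl_make_template() returns
# the YAML text that `cwltool --make-template <workflow>` would print, which can be compared
# against the JSON produced by wf_location_to_inputs() for the same (placeholder) workflow.
#
#   print(cwl_make_template("path/to/workflow.cwl"))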
|
the-stack_0_20023 | """
This module defines celery tasks and their applicable schedules. The celery beat server and workers will start
when invoked. Please add internal-only celery tasks to the celery_tasks plugin.
When run in development mode (CONFIG_LOCATION=<location of the development.yaml configuration file>). To run both the celery
beat scheduler and a worker simultaneously, and to have jobs kick off starting at the next minute, run the following
command: celery -A consoleme.celery.celery_tasks worker --loglevel=info -l DEBUG -B
"""
import json # We use a separate SetEncoder here so we cannot use ujson
import sys
import time
from datetime import datetime, timedelta
from typing import Dict, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import get_enabled_regions_for_account
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
)
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.timeout import Timeout
asynpool.PROC_ALIVE_TIMEOUT = config.get("celery.asynpool_proc_alive_timeout", 60.0)
default_retry_kwargs = {
"autoretry_for": (Exception,),
"retry_backoff": True,
"retry_kwargs": {"max_retries": config.get("celery.default_max_retries", 5)},
}
class Celery(celery.Celery):
def on_configure(self) -> None:
sentry_dsn = config.get("sentry.dsn")
if sentry_dsn:
sentry_sdk.init(
sentry_dsn,
integrations=[
TornadoIntegration(),
CeleryIntegration(),
AioHttpIntegration(),
RedisIntegration(),
],
)
app = Celery(
"tasks",
broker=config.get(f"celery.broker.{config.region}", "redis://127.0.0.1:6379/1"),
backend=config.get(f"celery.backend.{config.region}", "redis://127.0.0.1:6379/2"),
)
app.conf.result_expires = config.get("celery.result_expires", 60)
app.conf.worker_prefetch_multiplier = config.get("celery.worker_prefetch_multiplier", 4)
app.conf.task_acks_late = config.get("celery.task_acks_late", True)
if config.get("celery.purge"):
# Useful to clear celery queue in development
with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
app.control.purge()
log = config.get_logger()
red = async_to_sync(RedisHandler().redis)()
aws = get_plugin_by_name(config.get("plugins.aws", "default_aws"))
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
group_mapping = get_plugin_by_name(
config.get("plugins.group_mapping", "default_group_mapping")
)()
internal_celery_tasks = get_plugin_by_name(
config.get("plugins.internal_celery_tasks", "default_celery_tasks")
)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
internal_policies = get_plugin_by_name(
config.get("plugins.internal_policies", "default_policies")
)()
REDIS_IAM_COUNT = 1000
@app.task(soft_time_limit=20)
def report_celery_last_success_metrics() -> bool:
"""
For each celery task, this will determine the number of seconds since it has last been successful.
    Celery tasks should be emitting redis stats with a deterministic key (in our case, `f"{task}.last_success"`).
    report_celery_last_success_metrics should be run periodically to emit metrics on when a task was last successful.
    We can then alert when tasks are not run when intended. We should also alert when no metrics are emitted
from this function.
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {"function": function}
current_time = int(time.time())
global schedule
for _, t in schedule.items():
task = t.get("task")
last_success = int(red.get(f"{task}.last_success") or 0)
if last_success == 0:
log_data["message"] = "Last Success Value is 0"
log_data["task_last_success_key"] = f"{task}.last_success"
log.error(log_data)
stats.gauge(f"{task}.time_since_last_success", current_time - last_success)
red.set(f"{task}.time_since_last_success", current_time - last_success)
red.set(
f"{function}.last_success", int(time.time())
) # Alert if this metric is not seen
stats.count(f"{function}.success")
stats.timer("worker.healthy")
return True
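# For orientation (hypothetical entry, not part of the original module): the global
# `schedule` dict iterated above is defined elsewhere in this module and follows celery-beat
# conventions, where each value carries the dotted task name under the "task" key, e.g.
#
#   schedule_example = {
#       "cache_roles_across_accounts": {
#           "task": "consoleme.celery.celery_tasks.cache_roles_across_accounts",
#           "options": {"expires": 180},        # assumed option, shown for illustration only
#           "schedule": timedelta(minutes=45),  # timedelta is imported at the top of this module
#       },
#   }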
def get_celery_request_tags(**kwargs):
request = kwargs.get("request")
sender_hostname = "unknown"
sender = kwargs.get("sender")
if sender:
try:
sender_hostname = sender.hostname
except AttributeError:
sender_hostname = vars(sender.request).get("origin", "unknown")
if request and not isinstance(
request, Context
): # unlike others, task_revoked sends a Context for `request`
task_name = request.name
task_id = request.id
receiver_hostname = request.hostname
else:
try:
task_name = sender.name
except AttributeError:
task_name = kwargs.pop("name", "")
try:
task_id = sender.request.id
except AttributeError:
task_id = kwargs.pop("id", "")
try:
receiver_hostname = sender.request.hostname
except AttributeError:
receiver_hostname = ""
tags = {
"task_name": task_name,
"task_id": task_id,
"sender_hostname": sender_hostname,
"receiver_hostname": receiver_hostname,
}
tags["expired"] = kwargs.get("expired", False)
exception = kwargs.get("exception")
if not exception:
exception = kwargs.get("exc")
if exception:
tags["error"] = repr(exception)
if isinstance(exception, SoftTimeLimitExceeded):
tags["timed_out"] = True
return tags
@task_prerun.connect
def refresh_dynamic_config_in_worker(**kwargs):
tags = get_celery_request_tags(**kwargs)
log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
dynamic_config = red.get("DYNAMIC_CONFIG_CACHE")
if not dynamic_config:
log.error({**log_data, "error": "Unable to retrieve Dynamic Config from Redis"})
return
dynamic_config_j = json.loads(dynamic_config)
if config.CONFIG.config.get("dynamic_config", {}) != dynamic_config_j:
log.debug(
{
**log_data,
**tags,
"message": "Refreshing dynamic configuration on Celery Worker",
}
)
config.CONFIG.config["dynamic_config"] = dynamic_config_j
@task_received.connect
def report_number_pending_tasks(**kwargs):
"""
Report the number of pending tasks to our metrics broker every time a task is published. This metric can be used
for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-received
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
tags = get_celery_request_tags(**kwargs)
tags.pop("task_id", None)
stats.timer("celery.new_pending_task", tags=tags)
@task_success.connect
def report_successful_task(**kwargs):
"""
    Report a generic success metric to our metrics broker every time a task finishes correctly.
This metric can be used for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-success
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
tags = get_celery_request_tags(**kwargs)
red.set(f"{tags['task_name']}.last_success", int(time.time()))
tags.pop("error", None)
tags.pop("task_id", None)
stats.timer("celery.successful_task", tags=tags)
@task_retry.connect
def report_task_retry(**kwargs):
"""
    Report a generic retry metric to our metrics broker every time a task is retried.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-retry
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Retry",
}
# Add traceback if exception info is in the kwargs
einfo = kwargs.get("einfo")
if einfo:
log_data["traceback"] = einfo.traceback
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.retried_task", tags=error_tags)
@task_failure.connect
def report_failed_task(**kwargs):
"""
    Report a generic failure metric to our metrics broker every time a task fails. This is also called when
    a task has hit a SoftTimeLimit.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-failure
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Failure",
}
# Add traceback if exception info is in the kwargs
einfo = kwargs.get("einfo")
if einfo:
log_data["traceback"] = einfo.traceback
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.failed_task", tags=error_tags)
@task_unknown.connect
def report_unknown_task(**kwargs):
"""
    Report a generic failure metric to our metrics broker every time a worker receives an unknown task.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-unknown
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Unknown",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.unknown_task", tags=error_tags)
@task_rejected.connect
def report_rejected_task(**kwargs):
"""
    Report a generic failure metric to our metrics broker every time a task is rejected.
    The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-rejected
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Rejected",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.rejected_task", tags=error_tags)
@task_revoked.connect
def report_revoked_task(**kwargs):
"""
    Report a generic failure metric to our metrics broker every time a task is revoked.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-revoked
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Revoked",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.revoked_task", tags=error_tags)
@retry(
stop_max_attempt_number=4,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
)
def _add_role_to_redis(redis_key: str, role_entry: dict) -> None:
"""
This function will add IAM role data to redis so that policy details can be quickly retrieved by the policies
endpoint.
IAM role data is stored in the `redis_key` redis key by the role's ARN.
Parameters
----------
redis_key : str
The redis key (hash)
role_entry : dict
The role entry
Example: {'name': 'nameOfRole', 'accountId': '123456789012', 'arn': 'arn:aws:iam::123456789012:role/nameOfRole',
'templated': None, 'ttl': 1562510908, 'policy': '<json_formatted_policy>'}
"""
try:
red.hset(redis_key, role_entry["arn"], json.dumps(role_entry))
except Exception as e: # noqa
stats.count(
"cache_roles_for_account.error",
tags={"redis_key": redis_key, "error": str(e), "role_entry": role_entry},
)
log_data = {
"message": "Error syncing Account's IAM roles to Redis",
"account_id": role_entry["account_id"],
"arn": role_entry["arn"],
"role_entry": role_entry,
}
log.error(log_data, exc_info=True)
raise
@app.task(soft_time_limit=180)
def cache_audit_table_details() -> bool:
d = UserDynamoHandler("consoleme")
entries = async_to_sync(d.get_all_audit_logs)()
topic = config.get("redis.audit_log_key", "CM_AUDIT_LOGS")
s3_bucket = None
s3_key = None
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("cache_audit_table_details.s3.bucket")
s3_key = config.get("cache_audit_table_details.s3.file")
async_to_sync(store_json_results_in_redis_and_s3)(
entries, topic, s3_bucket=s3_bucket, s3_key=s3_key
)
return True
@app.task(soft_time_limit=3600)
def cache_cloudtrail_errors_by_arn() -> Dict:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data: Dict = {"function": function}
cloudtrail_errors: Dict = internal_policies.error_count_by_arn()
if not cloudtrail_errors:
cloudtrail_errors = {}
red.setex(
config.get(
"celery.cache_cloudtrail_errors_by_arn.redis_key",
"CLOUDTRAIL_ERRORS_BY_ARN",
),
86400,
json.dumps(cloudtrail_errors),
)
log_data["number_of_roles_with_errors"]: len(cloudtrail_errors.keys())
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800)
def cache_policies_table_details() -> bool:
iam_role_redis_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
all_iam_roles = red.hgetall(iam_role_redis_key)
items = []
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
cloudtrail_errors = {}
cloudtrail_errors_j = red.get(
config.get(
"celery.cache_cloudtrail_errors_by_arn.redis_key",
"CLOUDTRAIL_ERRORS_BY_ARN",
)
)
if cloudtrail_errors_j:
cloudtrail_errors = json.loads(cloudtrail_errors_j)
del cloudtrail_errors_j
s3_error_topic = config.get("redis.s3_errors", "S3_ERRORS")
all_s3_errors = red.get(s3_error_topic)
s3_errors = {}
if all_s3_errors:
s3_errors = json.loads(all_s3_errors)
for arn, role_details_j in all_iam_roles.items():
role_details = ujson.loads(role_details_j)
error_count = cloudtrail_errors.get(arn, 0)
s3_errors_for_arn = s3_errors.get(arn, [])
for error in s3_errors_for_arn:
error_count += int(error.get("count"))
account_id = arn.split(":")[4]
account_name = accounts_d.get(str(account_id), "Unknown")
resource_id = role_details.get("resourceId")
items.append(
{
"account_id": account_id,
"account_name": account_name,
"arn": arn,
"technology": "iam",
"templated": red.hget(
config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
arn.lower(),
),
"errors": error_count,
"config_history_url": async_to_sync(
get_aws_config_history_url_for_resource
)(account_id, resource_id, "AWS::IAM::Role"),
}
)
s3_bucket_key: str = config.get("redis.s3_bucket_key", "S3_BUCKETS")
s3_accounts = red.hkeys(s3_bucket_key)
if s3_accounts:
for account in s3_accounts:
account_name = accounts_d.get(str(account), "Unknown")
buckets = json.loads(red.hget(s3_bucket_key, account))
for bucket in buckets:
bucket_arn = f"arn:aws:s3:::{bucket}"
s3_errors_for_arn = s3_errors.get(bucket_arn, [])
error_count = 0
for error in s3_errors_for_arn:
error_count += int(error.get("count"))
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": f"arn:aws:s3:::{bucket}",
"technology": "s3",
"templated": None,
"errors": error_count,
}
)
sns_topic_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
sns_accounts = red.hkeys(sns_topic_key)
if sns_accounts:
for account in sns_accounts:
account_name = accounts_d.get(str(account), "Unknown")
topics = json.loads(red.hget(sns_topic_key, account))
for topic in topics:
error_count = 0
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": topic,
"technology": "sns",
"templated": None,
"errors": error_count,
}
)
sqs_queue_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
sqs_accounts = red.hkeys(sqs_queue_key)
if sqs_accounts:
for account in sqs_accounts:
account_name = accounts_d.get(str(account), "Unknown")
queues = json.loads(red.hget(sqs_queue_key, account))
for queue in queues:
error_count = 0
items.append(
{
"account_id": account,
"account_name": account_name,
"arn": queue,
"technology": "sqs",
"templated": None,
"errors": error_count,
}
)
resources_from_aws_config_redis_key: str = config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
)
resources_from_aws_config = red.hgetall(resources_from_aws_config_redis_key)
if resources_from_aws_config:
for arn, value in resources_from_aws_config.items():
resource = json.loads(value)
technology = resource["resourceType"]
# Skip technologies that we retrieve directly
if technology in [
"AWS::IAM::Role",
"AWS::SQS::Queue",
"AWS::SNS::Topic",
"AWS::S3::Bucket",
]:
continue
account_id = arn.split(":")[4]
account_name = accounts_d.get(account_id, "Unknown")
items.append(
{
"account_id": account_id,
"account_name": account_name,
"arn": arn,
"technology": technology,
"templated": None,
"errors": 0,
}
)
s3_bucket = None
s3_key = None
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("cache_policies_table_details.s3.bucket")
s3_key = config.get("cache_policies_table_details.s3.file")
async_to_sync(store_json_results_in_redis_and_s3)(
items,
redis_key=config.get("policies.redis_policies_key", "ALL_POLICIES"),
s3_bucket=s3_bucket,
s3_key=s3_key,
)
stats.count(
"cache_policies_table_details.success",
tags={"num_roles": len(all_iam_roles.keys())},
)
return True
@app.task(name="cache_roles_for_account", soft_time_limit=2700, **default_retry_kwargs)
def cache_roles_for_account(account_id: str) -> bool:
# Get the DynamoDB handler:
dynamo = IAMRoleDynamoHandler()
cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
# Only query IAM and put data in Dynamo if we're in the active region
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
client = boto3_cached_conn(
"iam",
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
)
paginator = client.get_paginator("get_account_authorization_details")
response_iterator = paginator.paginate()
all_iam_resources = {}
for response in response_iterator:
if not all_iam_resources:
all_iam_resources = response
else:
all_iam_resources["UserDetailList"].extend(response["UserDetailList"])
all_iam_resources["GroupDetailList"].extend(response["GroupDetailList"])
all_iam_resources["RoleDetailList"].extend(response["RoleDetailList"])
all_iam_resources["Policies"].extend(response["Policies"])
for k in response.keys():
if k not in [
"UserDetailList",
"GroupDetailList",
"RoleDetailList",
"Policies",
"ResponseMetadata",
"Marker",
"IsTruncated",
]:
# Fail hard if we find something unexpected
raise RuntimeError("Unexpected key {0} in response".format(k))
# Store entire response in S3
async_to_sync(store_json_results_in_redis_and_s3)(
all_iam_resources,
s3_bucket=config.get("cache_iam_resources_for_account.s3.bucket"),
s3_key=config.get("cache_iam_resources_for_account.s3.file", "").format(
account_id=account_id
),
)
iam_roles = all_iam_resources["RoleDetailList"]
async_to_sync(store_json_results_in_redis_and_s3)(
iam_roles,
s3_bucket=config.get("cache_roles_for_account.s3.bucket"),
s3_key=config.get("cache_roles_for_account.s3.file", "").format(
resource_type="iam_roles", account_id=account_id
),
)
ttl: int = int((datetime.utcnow() + timedelta(hours=36)).timestamp())
# Save them:
for role in iam_roles:
role_entry = {
"arn": role.get("Arn"),
"name": role.get("RoleName"),
"resourceId": role.get("RoleId"),
"accountId": account_id,
"ttl": ttl,
"policy": dynamo.convert_role_to_json(role),
"templated": red.hget(
config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"),
role.get("Arn").lower(),
),
}
# DynamoDB:
dynamo.sync_iam_role_for_account(role_entry)
# Redis:
_add_role_to_redis(cache_key, role_entry)
# Run internal function on role. This can be used to inspect roles, add managed policies, or other actions
aws().handle_detected_role(role)
# Maybe store all resources in git
if config.get("cache_iam_resources_for_account.store_in_git.enabled"):
store_iam_resources_in_git(all_iam_resources, account_id)
stats.count("cache_roles_for_account.success", tags={"account_id": account_id})
return True
@app.task(soft_time_limit=3600)
def cache_roles_across_accounts() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
cache_key = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
log_data = {"function": function, "cache_key": cache_key}
num_accounts = 0
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
# First, get list of accounts
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") in ["prod", "dev"]:
cache_roles_for_account.delay(account_id)
num_accounts += 1
else:
if account_id in config.get("celery.test_account_ids", []):
cache_roles_for_account.delay(account_id)
num_accounts += 1
else:
dynamo = IAMRoleDynamoHandler()
# In non-active regions, we just want to sync DDB data to Redis
roles = dynamo.fetch_all_roles()
for role_entry in roles:
_add_role_to_redis(cache_key, role_entry)
# Delete roles in Redis cache with expired TTL
all_roles = red.hgetall(cache_key)
roles_to_delete_from_cache = []
for arn, role_entry_j in all_roles.items():
role_entry = json.loads(role_entry_j)
if datetime.fromtimestamp(role_entry["ttl"]) < datetime.utcnow():
roles_to_delete_from_cache.append(arn)
if roles_to_delete_from_cache:
red.hdel(cache_key, *roles_to_delete_from_cache)
for arn in roles_to_delete_from_cache:
all_roles.pop(arn, None)
log_data["num_roles"] = len(all_roles)
# Store full list of roles in a single place
async_to_sync(store_json_results_in_redis_and_s3)(
all_roles,
s3_bucket=config.get(
"cache_roles_across_accounts.all_roles_combined.s3.bucket"
),
s3_key=config.get("cache_roles_across_accounts.all_roles_combined.s3.file"),
)
stats.count(f"{function}.success")
log_data["num_accounts"] = num_accounts
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_managed_policies_for_account(account_id: str) -> Dict[str, Union[str, int]]:
managed_policies: list[dict] = get_all_managed_policies(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
)
all_policies: list = []
for policy in managed_policies:
all_policies.append(policy.get("Arn"))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_managed_policies": len(all_policies),
}
log.debug(log_data)
stats.count(
"cache_managed_policies_for_account",
tags={"account_id": account_id, "num_managed_policies": len(all_policies)},
)
policy_key = config.get("redis.iam_managed_policies_key", "IAM_MANAGED_POLICIES")
red.hset(policy_key, account_id, json.dumps(all_policies))
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get("account_resource_cache.s3.file", "").format(
resource_type="managed_policies", account_id=account_id
)
async_to_sync(store_json_results_in_redis_and_s3)(
all_policies, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=120)
def cache_managed_policies_across_accounts() -> bool:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_managed_policies_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_managed_policies_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_s3_buckets_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d: list = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_s3_buckets_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_s3_buckets_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_sqs_queues_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d: list = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_sqs_queues_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_sqs_queues_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=120)
def cache_sns_topics_across_accounts() -> bool:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
# First, get list of accounts
accounts_d: list = async_to_sync(get_account_id_to_name_mapping)()
for account_id in accounts_d.keys():
if config.get("environment") == "prod":
cache_sns_topics_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_sns_topics_for_account.delay(account_id)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_sqs_queues_for_account(account_id: str) -> Dict[str, Union[str, int]]:
all_queues: set = set()
enabled_regions = get_enabled_regions_for_account(account_id)
for region in enabled_regions:
client = boto3_cached_conn(
"sqs",
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=region,
read_only=True,
sts_client_kwargs=dict(
region_name=config.region,
endpoint_url=f"https://sts.{config.region}.amazonaws.com",
),
)
paginator = client.get_paginator("list_queues")
response_iterator = paginator.paginate(PaginationConfig={"PageSize": 1000})
for res in response_iterator:
for queue in res.get("QueueUrls", []):
arn = f"arn:aws:sqs:{region}:{account_id}:{queue.split('/')[4]}"
all_queues.add(arn)
sqs_queue_key: str = config.get("redis.sqs_queues_key", "SQS_QUEUES")
red.hset(sqs_queue_key, account_id, json.dumps(list(all_queues)))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_sqs_queues": len(all_queues),
}
log.debug(log_data)
stats.count(
"cache_sqs_queues_for_account",
tags={"account_id": account_id, "number_sqs_queues": len(all_queues)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get("account_resource_cache.s3.file", "").format(
resource_type="sqs_queues", account_id=account_id
)
async_to_sync(store_json_results_in_redis_and_s3)(
all_queues, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_sns_topics_for_account(account_id: str) -> Dict[str, Union[str, int]]:
# Make sure it is regional
all_topics: set = set()
enabled_regions = get_enabled_regions_for_account(account_id)
for region in enabled_regions:
topics = list_topics(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=region,
read_only=True,
)
for topic in topics:
all_topics.add(topic["TopicArn"])
sns_topic_key: str = config.get("redis.sns_topics_key", "SNS_TOPICS")
red.hset(sns_topic_key, account_id, json.dumps(list(all_topics)))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_sns_topics": len(all_topics),
}
log.debug(log_data)
stats.count(
"cache_sns_topics_for_account",
tags={"account_id": account_id, "number_sns_topics": len(all_topics)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get("account_resource_cache.s3.file", "").format(
resource_type="sns_topics", account_id=account_id
)
async_to_sync(store_json_results_in_redis_and_s3)(
all_topics, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_s3_buckets_for_account(account_id: str) -> Dict[str, Union[str, int]]:
s3_buckets: list = list_buckets(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
read_only=True,
)
buckets: list = []
for bucket in s3_buckets["Buckets"]:
buckets.append(bucket["Name"])
s3_bucket_key: str = config.get("redis.s3_buckets_key", "S3_BUCKETS")
red.hset(s3_bucket_key, account_id, json.dumps(buckets))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"account_id": account_id,
"number_s3_buckets": len(buckets),
}
log.debug(log_data)
stats.count(
"cache_s3_buckets_for_account",
tags={"account_id": account_id, "number_sns_topics": len(buckets)},
)
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
s3_bucket = config.get("account_resource_cache.s3.bucket")
s3_key = config.get("account_resource_cache.s3.file", "").format(
resource_type="s3_buckets", account_id=account_id
)
async_to_sync(store_json_results_in_redis_and_s3)(
buckets, s3_bucket=s3_bucket, s3_key=s3_key
)
return log_data
@retry(
stop_max_attempt_number=4,
wait_exponential_multiplier=1000,
wait_exponential_max=1000,
)
def _scan_redis_iam_cache(
cache_key: str, index: int, count: int
) -> Tuple[int, Dict[str, str]]:
return red.hscan(cache_key, index, count=count)
@app.task(soft_time_limit=1800)
def clear_old_redis_iam_cache() -> bool:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
# Do not run if this is not in the active region:
if config.region != config.get("celery.active_region", config.region):
return False
# Need to loop over all items in the set:
cache_key: str = config.get("aws.iamroles_redis_key", "IAM_ROLE_CACHE")
index: int = 0
expire_ttl: int = int((datetime.utcnow() - timedelta(hours=6)).timestamp())
roles_to_expire = []
# We will loop over REDIS_IAM_COUNT items at a time:
try:
while True:
results = _scan_redis_iam_cache(cache_key, index, REDIS_IAM_COUNT)
index = results[0]
# Verify if the role is too old:
for arn, role in results[1].items():
role = json.loads(role)
if role["ttl"] <= expire_ttl:
roles_to_expire.append(arn)
# We will be complete if the next index is 0:
if not index:
break
except: # noqa
log_data = {
"function": function,
"message": "Error retrieving roles from Redis for cache cleanup.",
}
log.error(log_data, exc_info=True)
raise
# Delete all the roles that we need to delete:
try:
if roles_to_expire:
red.hdel(cache_key, *roles_to_expire)
except: # noqa
log_data = {
"function": function,
"message": "Error deleting roles from Redis for cache cleanup.",
}
log.error(log_data, exc_info=True)
raise
stats.count(f"{function}.success", tags={"expired_roles": len(roles_to_expire)})
return True
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_resources_from_aws_config_for_account(account_id) -> dict:
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
s3_bucket = config.get("aws_config_cache.s3.bucket")
s3_key = config.get("aws_config_cache.s3.file", "").format(account_id=account_id)
dynamo = UserDynamoHandler()
# Only query in active region, otherwise get data from DDB
if config.region == config.get("celery.active_region", config.region) or config.get(
"environment"
) in ["dev", "test"]:
results = aws_config.query(
config.get(
"cache_all_resources_from_aws_config.aws_config.all_resources_query",
"select * where accountId = '{account_id}'",
).format(account_id=account_id),
use_aggregator=False,
account_id=account_id,
)
ttl: int = int((datetime.utcnow() + timedelta(hours=36)).timestamp())
redis_result_set = {}
for result in results:
result["ttl"] = ttl
if result.get("arn"):
if redis_result_set.get(result["arn"]):
continue
redis_result_set[result["arn"]] = json.dumps(result)
if redis_result_set:
async_to_sync(store_json_results_in_redis_and_s3)(
redis_result_set,
redis_key=config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
),
redis_data_type="hash",
s3_bucket=s3_bucket,
s3_key=s3_key,
)
dynamo.write_resource_cache_data(results)
else:
redis_result_set = async_to_sync(retrieve_json_data_from_redis_or_s3)(
s3_bucket=s3_bucket, s3_key=s3_key
)
async_to_sync(store_json_results_in_redis_and_s3)(
redis_result_set,
redis_key=config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
),
redis_data_type="hash",
)
log_data = {
"function": function,
"account_id": account_id,
"number_resources_synced": len(redis_result_set),
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800)
def cache_resources_from_aws_config_across_accounts() -> bool:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
resource_redis_cache_key = config.get(
"aws_config_cache.redis_key", "AWSCONFIG_RESOURCE_CACHE"
)
# First, get list of accounts
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
# Second, call tasks to enumerate all the roles across all accounts
for account_id in accounts_d.keys():
if config.get("environment") in ["prod", "dev"]:
cache_resources_from_aws_config_for_account.delay(account_id)
else:
if account_id in config.get("celery.test_account_ids", []):
cache_resources_from_aws_config_for_account.delay(account_id)
# Delete roles in Redis cache with expired TTL
all_resources = red.hgetall(resource_redis_cache_key)
if all_resources:
expired_arns = []
for arn, resource_entry_j in all_resources.items():
resource_entry = ujson.loads(resource_entry_j)
if datetime.fromtimestamp(resource_entry["ttl"]) < datetime.utcnow():
expired_arns.append(arn)
if expired_arns:
red.hdel(resource_redis_cache_key, *expired_arns)
# Cache all resource ARNs into a single file. Note: This runs synchronously with this task. This task triggers
# resource collection on all accounts to happen asynchronously. That means when we store or delete data within
# this task, we're always going to be caching the results from the previous task.
if config.region == config.get(
"celery.active_region", config.region
) or config.get("environment") in ["dev"]:
# Refresh all resources after deletion of expired entries
all_resources = red.hgetall(resource_redis_cache_key)
s3_bucket = config.get("aws_config_cache_combined.s3.bucket")
s3_key = config.get("aws_config_cache_combined.s3.file")
async_to_sync(store_json_results_in_redis_and_s3)(
all_resources, s3_bucket=s3_bucket, s3_key=s3_key
)
stats.count(f"{function}.success")
return True
@app.task(soft_time_limit=1800)
def get_iam_role_limit() -> dict:
"""
This function will gather the number of existing IAM Roles and IAM Role quota in all owned AWS accounts.
"""
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
num_accounts = 0
num_roles = 0
if not config.get("celery.get_iam_role_limit.enabled"):
return {}
success_message = "Not running - Inactive region"
if config.region == config.get(
"celery.active_region", config.region
) and config.get("environment") in ["prod", "dev"]:
@sts_conn("iam")
        def _get_delivery_channels(**kwargs) -> dict:
            """Gets the IAM account summary for the account -- calls are wrapped with CloudAux"""
return kwargs.pop("client").get_account_summary(**kwargs)
success_message = "Task successfully completed"
# First, get list of accounts
accounts_d: dict = async_to_sync(get_account_id_to_name_mapping)()
num_accounts = len(accounts_d.keys())
for account_id, account_name in accounts_d.items():
try:
iam_summary = _get_delivery_channels(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
)
num_iam_roles = iam_summary["SummaryMap"]["Roles"]
iam_role_quota = iam_summary["SummaryMap"]["RolesQuota"]
iam_role_quota_ratio = num_iam_roles / iam_role_quota
num_roles += num_iam_roles
log_data = {
"function": function,
"message": "IAM role quota for account",
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"iam_role_quota_ratio": iam_role_quota_ratio,
"account_id": account_id,
"account_name": account_name,
}
stats.gauge(
f"{function}.quota_ratio_gauge",
iam_role_quota_ratio,
tags={
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"account_id": account_id,
"account_name": account_name,
},
)
log.debug(log_data)
except ClientError as e:
log_data = {
"function": function,
"message": "Error retrieving IAM quota",
"account_id": account_id,
"account_name": account_name,
"error": e,
}
stats.count(f"{function}.error", tags={"account_id": account_id})
log.error(log_data, exc_info=True)
sentry_sdk.capture_exception()
raise
log_data = {
"function": function,
"num_accounts": num_accounts,
"num_roles": num_roles,
"message": success_message,
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=300)
def cache_policy_requests() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
requests = async_to_sync(cache_all_policy_requests)()
log_data = {
"function": function,
"num_requests": len(requests),
"message": "Successfully cached requests",
}
return log_data
@app.task(soft_time_limit=300)
def cache_cloud_account_mapping() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
account_mapping = async_to_sync(cache_cloud_accounts)()
log_data = {
"function": function,
"num_accounts": len(account_mapping.accounts),
"message": "Successfully cached cloud account mapping",
}
log.debug(log_data)
return log_data
@app.task(soft_time_limit=1800, **default_retry_kwargs)
def cache_credential_authorization_mapping() -> Dict:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
authorization_mapping = async_to_sync(
generate_and_store_credential_authorization_mapping
)()
log_data = {
"function": function,
"message": "Successfully cached cloud credential authorization mapping",
"num_group_authorizations": len(authorization_mapping),
}
log.debug(log_data)
return log_data
schedule_30_minute = timedelta(seconds=1800)
schedule_45_minute = timedelta(seconds=2700)
schedule_6_hours = timedelta(hours=6)
schedule_minute = timedelta(minutes=1)
schedule_5_minutes = timedelta(minutes=5)
schedule_24_hours = timedelta(hours=24)
schedule_1_hour = timedelta(hours=1)
if config.get("development", False):
# If debug mode, we will set up the schedule to run the next minute after the job starts
time_to_start = datetime.utcnow() + timedelta(minutes=1)
dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
schedule_30_minute = dev_schedule
schedule_45_minute = dev_schedule
schedule_1_hour = dev_schedule
schedule_6_hours = dev_schedule
schedule_5_minutes = dev_schedule
schedule = {
"cache_roles_across_accounts": {
"task": "consoleme.celery.celery_tasks.cache_roles_across_accounts",
"options": {"expires": 1000},
"schedule": schedule_45_minute,
},
"clear_old_redis_iam_cache": {
"task": "consoleme.celery.celery_tasks.clear_old_redis_iam_cache",
"options": {"expires": 180},
"schedule": schedule_6_hours,
},
"cache_policies_table_details": {
"task": "consoleme.celery.celery_tasks.cache_policies_table_details",
"options": {"expires": 1000},
"schedule": schedule_30_minute,
},
"report_celery_last_success_metrics": {
"task": "consoleme.celery.celery_tasks.report_celery_last_success_metrics",
"options": {"expires": 60},
"schedule": schedule_minute,
},
"cache_managed_policies_across_accounts": {
"task": "consoleme.celery.celery_tasks.cache_managed_policies_across_accounts",
"options": {"expires": 1000},
"schedule": schedule_45_minute,
},
"cache_s3_buckets_across_accounts": {
"task": "consoleme.celery.celery_tasks.cache_s3_buckets_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"cache_sqs_queues_across_accounts": {
"task": "consoleme.celery.celery_tasks.cache_sqs_queues_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"cache_sns_topics_across_accounts": {
"task": "consoleme.celery.celery_tasks.cache_sns_topics_across_accounts",
"options": {"expires": 300},
"schedule": schedule_45_minute,
},
"cache_audit_table_details": {
"task": "consoleme.celery.celery_tasks.cache_audit_table_details",
"options": {"expires": 300},
"schedule": schedule_5_minutes,
},
"get_iam_role_limit": {
"task": "consoleme.celery.celery_tasks.get_iam_role_limit",
"options": {"expires": 300},
"schedule": schedule_24_hours,
},
"cache_cloudtrail_errors_by_arn": {
"task": "consoleme.celery.celery_tasks.cache_cloudtrail_errors_by_arn",
"options": {"expires": 300},
"schedule": schedule_1_hour,
},
"cache_resources_from_aws_config_across_accounts": {
"task": "consoleme.celery.celery_tasks.cache_resources_from_aws_config_across_accounts",
"options": {"expires": 300},
"schedule": schedule_1_hour,
},
"cache_policy_requests": {
"task": "consoleme.celery.celery_tasks.cache_policy_requests",
"options": {"expires": 1000},
"schedule": schedule_5_minutes,
},
"cache_cloud_account_mapping": {
"task": "consoleme.celery.celery_tasks.cache_cloud_account_mapping",
"options": {"expires": 1000},
"schedule": schedule_1_hour,
},
"cache_credential_authorization_mapping": {
"task": "consoleme.celery.celery_tasks.cache_credential_authorization_mapping",
"options": {"expires": 1000},
"schedule": schedule_5_minutes,
},
}
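# Illustrative sketch of how an extra beat entry could be appended before the
# internal tasks are merged in below; the task path and interval here are
# assumptions, not part of the shipped schedule:
#   schedule["cache_example_task"] = {
#       "task": "consoleme.celery.celery_tasks.cache_example_task",
#       "options": {"expires": 300},
#       "schedule": schedule_1_hour,
#   }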
if internal_celery_tasks and isinstance(internal_celery_tasks, dict):
schedule = {**schedule, **internal_celery_tasks}
if config.get("celery.clear_tasks_for_development", False):
schedule = {}
app.conf.beat_schedule = schedule
app.conf.timezone = "UTC"
|
the-stack_0_20024 | import asyncio
import logging
from pyinjective.composer import Composer as ProtoMsgComposer
from pyinjective.client import Client
from pyinjective.transaction import Transaction
from pyinjective.constant import Network
from pyinjective.wallet import PrivateKey, PublicKey, Address
async def main() -> None:
# select network: local, testnet, mainnet
network = Network.testnet()
composer = ProtoMsgComposer(network=network.string())
# initialize grpc client
client = Client(network, insecure=False)
# load account
priv_key = PrivateKey.from_hex("f9db9bf330e23cb7839039e944adef6e9df447b90b503d5b4464c90bea9022f3")
pub_key = priv_key.to_public_key()
address = pub_key.to_address().init_num_seq(network.lcd_endpoint)
subaccount_id = address.get_subaccount_id(index=0)
# prepare tx msg
market_id = "0xa508cb32923323679f29a032c70342c147c17d0145625922b0ef22e955c844c0"
grantee = "inj1hkhdaj2a2clmq5jq6mspsggqs32vynpk228q3r"
granter_inj_address = "inj14au322k9munkmx5wrchz9q30juf5wjgz2cfqku"
granter_address = Address.from_acc_bech32(granter_inj_address)
granter_subaccount_id = granter_address.get_subaccount_id(index=0)
msg0 = composer.MsgCreateSpotLimitOrder(
sender=granter_inj_address,
market_id=market_id,
subaccount_id=granter_subaccount_id,
fee_recipient=grantee,
price=7.523,
quantity=0.01,
is_buy=True
)
msg = composer.MsgExec(
grantee=grantee,
msgs=[msg0]
)
# build sim tx
tx = (
Transaction()
.with_messages(msg)
.with_sequence(address.get_sequence())
.with_account_num(address.get_number())
.with_chain_id(network.chain_id)
)
sim_sign_doc = tx.get_sign_doc(pub_key)
sim_sig = priv_key.sign(sim_sign_doc.SerializeToString())
sim_tx_raw_bytes = tx.get_tx_data(sim_sig, pub_key)
# simulate tx
(sim_res, success) = client.simulate_tx(sim_tx_raw_bytes)
if not success:
print(sim_res)
return
# We need to unpack 2 layers of response when using MsgExec
# response bytes -> response msgs
# exec msg response -> grantee msg response
sim_res_msg = ProtoMsgComposer.MsgResponses(sim_res.result.data, simulation=True)
unpacked_msg_res = ProtoMsgComposer.UnpackMsgExecResponse(
msg_type=msg0.__class__.__name__,
data=sim_res_msg[0].results[0]
)
print(unpacked_msg_res)
# build tx
gas_price = 500000000
gas_limit = sim_res.gas_info.gas_used + 20000 # add 20k for gas, fee computation
fee = [composer.Coin(
amount=gas_price * gas_limit,
denom=network.fee_denom,
)]
tx = tx.with_gas(gas_limit).with_fee(fee).with_memo("").with_timeout_height(0)
sign_doc = tx.get_sign_doc(pub_key)
sig = priv_key.sign(sign_doc.SerializeToString())
tx_raw_bytes = tx.get_tx_data(sig, pub_key)
# broadcast tx: send_tx_async_mode, send_tx_sync_mode, send_tx_block_mode
res = client.send_tx_block_mode(tx_raw_bytes)
# print tx response
print(res)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
asyncio.get_event_loop().run_until_complete(main())
|
the-stack_0_20027 | import sys
import pdb
import random
import logging
import json
import time
import traceback
from multiprocessing import Process
from pymilvus import Milvus, DataType
import numpy as np
import utils as util
import config
from milvus_benchmark.runners import utils
logger = logging.getLogger("milvus_benchmark.client")
# yaml file and code file comparison table of Index parameters
INDEX_MAP = {
"flat": "FLAT",
"ivf_flat": "IVF_FLAT",
"ivf_sq8": "IVF_SQ8",
"nsg": "NSG",
"ivf_sq8h": "IVF_SQ8_HYBRID",
"ivf_pq": "IVF_PQ",
"hnsw": "HNSW",
"annoy": "ANNOY",
"bin_flat": "BIN_FLAT",
"bin_ivf_flat": "BIN_IVF_FLAT",
"rhnsw_pq": "RHNSW_PQ",
"rhnsw_sq": "RHNSW_SQ"
}
epsilon = 0.1
DEFAULT_WARM_QUERY_TOPK = 1
DEFAULT_WARM_QUERY_NQ = 1
def time_wrapper(func):
"""
This decorator prints the execution time for the decorated function.
"""
def wrapper(*args, **kwargs):
"""
        log: if True (default), emit a debug log line with the call's elapsed time
        rps: if not False, also return the elapsed time in seconds alongside the result
"""
start = time.time()
# logger.debug("Milvus {} start".format(func.__name__))
log = kwargs.get("log", True)
kwargs.pop("log", None)
rps = kwargs.get("rps", False)
kwargs.pop("rps", None)
result = func(*args, **kwargs)
end = time.time()
if log:
_rps = round(end - start, 2)
logger.debug("Milvus {} run in {}s".format(func.__name__, _rps))
if rps is not False:
return result, _rps
return result
return wrapper
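# Illustrative sketch (not part of the original module) of how the decorator's
# extra keyword arguments are consumed by a decorated method; the calls below
# are hypothetical:
#   client = MilvusClient(collection_name="demo")
#   client.flush(log=False)              # suppress the timing log line
#   _, seconds = client.flush(rps=True)  # also get the elapsed time in seconds back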
class MilvusClient(object):
def __init__(self, collection_name=None, host=None, port=None, timeout=300):
self._collection_name = collection_name
self._collection_info = None
start_time = time.time()
if not host:
host = config.SERVER_HOST_DEFAULT
if not port:
port = config.SERVER_PORT_DEFAULT
# retry connect remote server
i = 0
while time.time() < start_time + timeout:
try:
self._milvus = Milvus(
host=host,
port=port,
try_connect=False,
pre_ping=False)
break
except Exception as e:
logger.error(str(e))
logger.error("Milvus connect failed: %d times" % i)
i = i + 1
time.sleep(30)
if time.time() > start_time + timeout:
raise Exception("Server connect timeout")
# self._metric_type = None
def __str__(self):
return 'Milvus collection %s' % self._collection_name
def set_collection(self, collection_name):
""" seting collection name """
self._collection_name = collection_name
# TODO: server not support
# def check_status(self, status):
# if not status.OK():
# logger.error(status.message)
# logger.error(self._milvus.server_status())
# logger.error(self.count())
# raise Exception("Status not ok")
def check_result_ids(self, result):
for index, item in enumerate(result):
if item[0].distance >= epsilon:
logger.error(index)
logger.error(item[0].distance)
raise Exception("Distance wrong")
@property
def collection_name(self):
return self._collection_name
# only support the given field name
def create_collection(self, dimension, data_type=DataType.FLOAT_VECTOR, auto_id=False,
collection_name=None, other_fields=None):
self._dimension = dimension
if not collection_name:
collection_name = self._collection_name
vec_field_name = utils.get_default_field_name(data_type)
fields = [
{"name": vec_field_name, "type": data_type, "params": {"dim": dimension}},
{"name": "id", "type": DataType.INT64, "is_primary": True}
]
if other_fields:
other_fields = other_fields.split(",")
for other_field_name in other_fields:
if other_field_name.startswith("int"):
field_type = DataType.INT64
elif other_field_name.startswith("float"):
field_type = DataType.FLOAT
elif other_field_name.startswith("double"):
field_type = DataType.DOUBLE
else:
raise Exception("Field name not supported")
fields.append({"name": other_field_name, "type": field_type})
create_param = {
"fields": fields,
"auto_id": auto_id}
try:
self._milvus.create_collection(collection_name, create_param)
logger.info("Create collection: <%s> successfully" % collection_name)
except Exception as e:
logger.error(str(e))
raise
def create_partition(self, tag, collection_name=None):
if not collection_name:
collection_name = self._collection_name
self._milvus.create_partition(collection_name, tag)
@time_wrapper
def insert(self, entities, collection_name=None, timeout=None):
tmp_collection_name = self._collection_name if collection_name is None else collection_name
try:
insert_res = self._milvus.insert(tmp_collection_name, entities, timeout=timeout)
return insert_res.primary_keys
except Exception as e:
logger.error(str(e))
@time_wrapper
def insert_flush(self, entities, _async=False, collection_name=None):
# the method that included insert and flush
tmp_collection_name = self._collection_name if collection_name is None else collection_name
try:
insert_res = self._milvus.insert(tmp_collection_name, entities)
return insert_res.primary_keys
except Exception as e:
logger.error(str(e))
self._milvus.flush([tmp_collection_name], _async=_async)
def get_dimension(self):
info = self.get_info()
for field in info["fields"]:
if field["type"] in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:
return field["params"]["dim"]
def get_rand_ids(self, length):
segment_ids = []
while True:
stats = self.get_stats()
segments = stats["partitions"][0]["segments"]
# random choice one segment
segment = random.choice(segments)
try:
segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["id"])
except Exception as e:
logger.error(str(e))
if not len(segment_ids):
continue
elif len(segment_ids) > length:
return random.sample(segment_ids, length)
else:
logger.debug("Reset length: %d" % len(segment_ids))
return segment_ids
# def get_rand_ids_each_segment(self, length):
# res = []
# status, stats = self._milvus.get_collection_stats(self._collection_name)
# self.check_status(status)
# segments = stats["partitions"][0]["segments"]
# segments_num = len(segments)
# # random choice from each segment
# for segment in segments:
# status, segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["name"])
# self.check_status(status)
# res.extend(segment_ids[:length])
# return segments_num, res
# def get_rand_entities(self, length):
# ids = self.get_rand_ids(length)
# status, get_res = self._milvus.get_entity_by_id(self._collection_name, ids)
# self.check_status(status)
# return ids, get_res
@time_wrapper
def get_entities(self, get_ids):
get_res = self._milvus.get_entity_by_id(self._collection_name, get_ids)
return get_res
@time_wrapper
def delete(self, ids, collection_name=None):
# delete entity by id
tmp_collection_name = self._collection_name if collection_name is None else collection_name
self._milvus.delete_entity_by_id(tmp_collection_name, ids)
def delete_rand(self):
delete_id_length = random.randint(1, 100)
count_before = self.count()
logger.debug("%s: length to delete: %d" % (self._collection_name, delete_id_length))
delete_ids = self.get_rand_ids(delete_id_length)
self.delete(delete_ids)
self.flush()
logger.info("%s: count after delete: %d" % (self._collection_name, self.count()))
get_res = self._milvus.get_entity_by_id(self._collection_name, delete_ids)
for item in get_res:
assert not item
# if count_before - len(delete_ids) < self.count():
# logger.error(delete_ids)
# raise Exception("Error occured")
@time_wrapper
def flush(self, _async=False, collection_name=None, timeout=None):
tmp_collection_name = self._collection_name if collection_name is None else collection_name
self._milvus.flush([tmp_collection_name], _async=_async, timeout=timeout)
@time_wrapper
def compact(self, collection_name=None):
tmp_collection_name = self._collection_name if collection_name is None else collection_name
        # check_status() is commented out above (not supported by the server), so return the raw status
        status = self._milvus.compact(tmp_collection_name)
        return status
# only support "in" in expr
@time_wrapper
def get(self, ids, collection_name=None, timeout=None):
tmp_collection_name = self._collection_name if collection_name is None else collection_name
# res = self._milvus.get(tmp_collection_name, ids, output_fields=None, partition_names=None)
ids_expr = "id in %s" % (str(ids))
res = self._milvus.query(tmp_collection_name, ids_expr, output_fields=None, partition_names=None, timeout=timeout)
return res
@time_wrapper
    def create_index(self, field_name, index_type, metric_type, _async=False, index_param=None,
                     collection_name=None):
        # Accept an optional collection_name (as the other methods do) so callers such as
        # scene_test() can build an index on a collection other than self._collection_name
        tmp_collection_name = self._collection_name if collection_name is None else collection_name
        index_type = INDEX_MAP[index_type]
        metric_type = utils.metric_type_trans(metric_type)
        logger.info("Building index start, collection_name: %s, index_type: %s, metric_type: %s" % (
            tmp_collection_name, index_type, metric_type))
        if index_param:
            logger.info(index_param)
        index_params = {
            "index_type": index_type,
            "metric_type": metric_type,
            "params": index_param
        }
        self._milvus.create_index(tmp_collection_name, field_name, index_params, _async=_async)
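    # Illustrative parameter sketch (values are examples, not defaults): building an
    # "ivf_sq8" index with the L2 metric would look like
    #   client.create_index("float_vector", "ivf_sq8", "l2", index_param={"nlist": 2048})
    # where index_param carries the index-type specific build parameters.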
# TODO: need to check
def describe_index(self, field_name, collection_name=None):
# stats = self.get_stats()
tmp_collection_name = self._collection_name if collection_name is None else collection_name
info = self._milvus.describe_index(tmp_collection_name, field_name)
logger.info(info)
index_info = {"index_type": "flat", "metric_type": None, "index_param": None}
if info:
index_info = {"index_type": info["index_type"], "metric_type": info["metric_type"], "index_param": info["params"]}
# transfer index type name
for k, v in INDEX_MAP.items():
if index_info['index_type'] == v:
index_info['index_type'] = k
return index_info
def drop_index(self, field_name):
logger.info("Drop index: %s" % self._collection_name)
return self._milvus.drop_index(self._collection_name, field_name)
@time_wrapper
def query(self, vector_query, filter_query=None, collection_name=None, timeout=300):
""" This method corresponds to the search method of milvus """
tmp_collection_name = self._collection_name if collection_name is None else collection_name
params = util.search_param_analysis(vector_query, filter_query)
params.update({"timeout": timeout})
logger.debug("Params of search : %s" % str(params))
result = self._milvus.search(tmp_collection_name, **params)
# must_params = [vector_query]
# if filter_query:
# must_params.extend(filter_query)
# query = {
# "bool": {"must": must_params}
# }
# result = self._milvus.search(tmp_collection_name, query, timeout=timeout)
return result
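    # Illustrative shape of the vector_query argument (the field name and values are
    # examples drawn from the helpers below, not fixed requirements):
    #   {"vector": {"float_vector": {"topk": 10,
    #                                "query": [[0.1, ...], ...],
    #                                "metric_type": "L2",
    #                                "params": {"nprobe": 16}}}}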
@time_wrapper
def warm_query(self, index_field_name, search_param, metric_type, times=2):
query_vectors = [[random.random() for _ in range(self._dimension)] for _ in range(DEFAULT_WARM_QUERY_NQ)]
# index_info = self.describe_index(index_field_name)
vector_query = {"vector": {index_field_name: {
"topk": DEFAULT_WARM_QUERY_TOPK,
"query": query_vectors,
"metric_type": metric_type,
"params": search_param}
}}
must_params = [vector_query]
query = {
"bool": {"must": must_params}
}
logger.debug("Start warm up query")
for i in range(times):
params = util.search_param_analysis(vector_query, None)
self._milvus.search(self._collection_name, **params)
# self._milvus.search(self._collection_name, query)
logger.debug("End warm up query")
@time_wrapper
def load_and_query(self, vector_query, filter_query=None, collection_name=None, timeout=120):
tmp_collection_name = self._collection_name if collection_name is None else collection_name
must_params = [vector_query]
if filter_query:
must_params.extend(filter_query)
query = {
"bool": {"must": must_params}
}
self.load_collection(tmp_collection_name)
params = util.search_param_analysis(vector_query, filter_query)
params.update({"timeout": timeout})
result = self._milvus.search(tmp_collection_name, **params)
# result = self._milvus.search(tmp_collection_name, query, timeout=timeout)
return result
def get_ids(self, result):
# idss = result._entities.ids
ids = []
# len_idss = len(idss)
# len_r = len(result)
# top_k = len_idss // len_r
# for offset in range(0, len_idss, top_k):
# ids.append(idss[offset: min(offset + top_k, len_idss)])
for res in result:
ids.append(res.ids)
return ids
def query_rand(self, nq_max=100, timeout=None):
# for ivf search
dimension = 128
top_k = random.randint(1, 100)
nq = random.randint(1, nq_max)
nprobe = random.randint(1, 100)
search_param = {"nprobe": nprobe}
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
metric_type = random.choice(["l2", "ip"])
logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d" % (self._collection_name, nq, top_k, nprobe))
vec_field_name = utils.get_default_field_name()
vector_query = {"vector": {vec_field_name: {
"topk": top_k,
"query": query_vectors,
"metric_type": utils.metric_type_trans(metric_type),
"params": search_param}
}}
self.query(vector_query, timeout=timeout)
def load_query_rand(self, nq_max=100, timeout=None):
# for ivf search
dimension = 128
top_k = random.randint(1, 100)
nq = random.randint(1, nq_max)
nprobe = random.randint(1, 100)
search_param = {"nprobe": nprobe}
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
metric_type = random.choice(["l2", "ip"])
logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d" % (self._collection_name, nq, top_k, nprobe))
vec_field_name = utils.get_default_field_name()
vector_query = {"vector": {vec_field_name: {
"topk": top_k,
"query": query_vectors,
"metric_type": utils.metric_type_trans(metric_type),
"params": search_param}
}}
self.load_and_query(vector_query, timeout=timeout)
# TODO: need to check
def count(self, collection_name=None):
if collection_name is None:
collection_name = self._collection_name
row_count = self._milvus.get_collection_stats(collection_name)["row_count"]
logger.debug("Row count: %d in collection: <%s>" % (row_count, collection_name))
return row_count
def drop(self, timeout=120, collection_name=None):
"""
drop steps:
        1.drop the collection
        2.poll until the collection no longer exists
        3.give up and log an error when the timeout is reached
"""
timeout = int(timeout)
if collection_name is None:
collection_name = self._collection_name
logger.info("Start delete collection: %s" % collection_name)
self._milvus.drop_collection(collection_name)
i = 0
while i < timeout:
try:
res = self._milvus.has_collection(collection_name)
if res:
time.sleep(1)
i = i + 1
continue
else:
break
except Exception as e:
logger.warning("Collection count failed: {}".format(str(e)))
break
if i >= timeout:
logger.error("Delete collection timeout")
def get_stats(self):
return self._milvus.get_collection_stats(self._collection_name)
def get_info(self, collection_name=None):
if collection_name is None:
collection_name = self._collection_name
return self._milvus.describe_collection(collection_name)
def show_collections(self):
return self._milvus.list_collections()
def exists_collection(self, collection_name=None):
if collection_name is None:
collection_name = self._collection_name
res = self._milvus.has_collection(collection_name)
return res
def clean_db(self):
collection_names = self.show_collections()
for name in collection_names:
self.drop(collection_name=name)
@time_wrapper
def load_collection(self, collection_name=None, timeout=3000):
if collection_name is None:
collection_name = self._collection_name
return self._milvus.load_collection(collection_name, timeout=timeout)
@time_wrapper
def release_collection(self, collection_name=None, timeout=3000):
if collection_name is None:
collection_name = self._collection_name
return self._milvus.release_collection(collection_name, timeout=timeout)
@time_wrapper
def load_partitions(self, tag_names, collection_name=None, timeout=3000):
if collection_name is None:
collection_name = self._collection_name
return self._milvus.load_partitions(collection_name, tag_names, timeout=timeout)
@time_wrapper
def release_partitions(self, tag_names, collection_name=None, timeout=3000):
if collection_name is None:
collection_name = self._collection_name
return self._milvus.release_partitions(collection_name, tag_names, timeout=timeout)
@time_wrapper
def scene_test(self, collection_name=None, vectors=None, ids=None):
"""
Scene test steps:
1.create collection with the specified collection name
2.insert data
3.flush data
4.create index
5.drop collection
"""
logger.debug("[scene_test] Start scene test : %s" % collection_name)
self.create_collection(dimension=128, collection_name=collection_name)
time.sleep(1)
collection_info = self.get_info(collection_name)
entities = utils.generate_entities(collection_info, vectors, ids)
logger.debug("[scene_test] Start insert : %s" % collection_name)
self.insert(entities, collection_name=collection_name)
logger.debug("[scene_test] Start flush : %s" % collection_name)
self.flush(collection_name=collection_name)
logger.debug("[scene_test] Start create index : %s" % collection_name)
self.create_index(field_name='float_vector', index_type="ivf_sq8", metric_type='l2',
collection_name=collection_name, index_param={'nlist': 2048})
time.sleep(59)
logger.debug("[scene_test] Start drop : %s" % collection_name)
self.drop(collection_name=collection_name)
logger.debug("[scene_test]Scene test close : %s" % collection_name)
# TODO: remove
# def get_server_version(self):
# return self._milvus.server_version()
# def get_server_mode(self):
# return self.cmd("mode")
# def get_server_commit(self):
# return self.cmd("build_commit_id")
# def get_server_config(self):
# return json.loads(self.cmd("get_milvus_config"))
# def get_mem_info(self):
# result = json.loads(self.cmd("get_system_info"))
# result_human = {
# # unit: Gb
# "memory_used": round(int(result["memory_used"]) / (1024 * 1024 * 1024), 2)
# }
# return result_human
# def cmd(self, command):
# res = self._milvus._cmd(command)
# logger.info("Server command: %s, result: %s" % (command, res))
# return res
# @time_wrapper
# def set_config(self, parent_key, child_key, value):
# self._milvus.set_config(parent_key, child_key, value)
# def get_config(self, key):
# return self._milvus.get_config(key)
|
the-stack_0_20028 | """Test the FreeDNS component."""
import pytest
from homeassistant.components import freedns
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
ACCESS_TOKEN = "test_token"
UPDATE_INTERVAL = freedns.DEFAULT_INTERVAL
UPDATE_URL = freedns.UPDATE_URL
@pytest.fixture
def setup_freedns(hass, aioclient_mock):
"""Fixture that sets up FreeDNS."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(
UPDATE_URL, params=params, text="Successfully updated 1 domains."
)
hass.loop.run_until_complete(
async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
)
async def test_setup(hass, aioclient_mock):
"""Test setup works if update passes."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(
UPDATE_URL, params=params, text="ERROR: Address has not changed."
)
result = await async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
assert result
assert aioclient_mock.call_count == 1
async_fire_time_changed(hass, utcnow() + UPDATE_INTERVAL)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 2
async def test_setup_fails_if_wrong_token(hass, aioclient_mock):
"""Test setup fails if first update fails through wrong token."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(UPDATE_URL, params=params, text="ERROR: Invalid update URL (2)")
result = await async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
assert not result
assert aioclient_mock.call_count == 1
|
the-stack_0_20029 | import urllib
import urllib.parse
class SlackEndpoints(object):
base = 'https://slack.com/api/'
def url(self, *args):
"""Returns the URL corresponding the API endpoint specified by the
arguments.
"""
endpoint = '.'.join(args)
return urllib.parse.urljoin(self.base, endpoint)
endpoints = SlackEndpoints()
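# Illustrative usage (the API method names are examples, not a complete list):
#   endpoints.url('chat', 'postMessage')   # -> 'https://slack.com/api/chat.postMessage'
#   endpoints.url('users', 'list')         # -> 'https://slack.com/api/users.list'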
|
the-stack_0_20030 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TensorInfo helpers.
@tf_export(v1=["saved_model.build_tensor_info",
"saved_model.utils.build_tensor_info"])
@deprecation.deprecated(
None,
"This function will only be available through the v1 compatibility "
"library as tf.compat.v1.saved_model.utils.build_tensor_info or "
"tf.compat.v1.saved_model.build_tensor_info.")
def build_tensor_info(tensor):
"""Utility function to build TensorInfo proto from a Tensor.
Args:
tensor: Tensor or SparseTensor whose name, dtype and shape are used to
build the TensorInfo. For SparseTensors, the names of the three
constituent Tensors are used.
Returns:
A TensorInfo protocol buffer constructed based on the supplied argument.
Raises:
RuntimeError: If eager execution is enabled.
@compatibility(TF2)
This API is not compatible with eager execution as `tensor` needs to be a
graph tensor, and there is no replacement for it in TensorFlow 2.x. To start
writing programs using TensorFlow 2.x, please refer to the [Effective
TensorFlow 2](https://www.tensorflow.org/guide/effective_tf2) guide.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError("`build_tensor_info` is not supported in eager "
"execution.")
return build_tensor_info_internal(tensor)
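# Illustrative sketch (graph mode only; the tensor below is an example, not from
# this module):
#   with tf.Graph().as_default():
#     x = tf.compat.v1.placeholder(tf.float32, shape=[None, 784], name="x")
#     info = tf.compat.v1.saved_model.build_tensor_info(x)
#     # info.name == "x:0"; dtype and tensor_shape are copied from `x`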
def build_tensor_info_internal(tensor):
"""Utility function to build TensorInfo proto from a Tensor."""
if (isinstance(tensor, composite_tensor.CompositeTensor) and
not isinstance(tensor, sparse_tensor.SparseTensor)):
return _build_composite_tensor_info_internal(tensor)
tensor_info = meta_graph_pb2.TensorInfo(
dtype=dtypes.as_dtype(tensor.dtype).as_datatype_enum,
tensor_shape=tensor.get_shape().as_proto())
if isinstance(tensor, sparse_tensor.SparseTensor):
tensor_info.coo_sparse.values_tensor_name = tensor.values.name
tensor_info.coo_sparse.indices_tensor_name = tensor.indices.name
tensor_info.coo_sparse.dense_shape_tensor_name = tensor.dense_shape.name
else:
tensor_info.name = tensor.name
return tensor_info
def _build_composite_tensor_info_internal(tensor):
"""Utility function to build TensorInfo proto from a CompositeTensor."""
spec = tensor._type_spec # pylint: disable=protected-access
tensor_info = meta_graph_pb2.TensorInfo()
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_coder.encode_structure(spec)
tensor_info.composite_tensor.type_spec.CopyFrom(spec_proto.type_spec_value)
for component in nest.flatten(tensor, expand_composites=True):
tensor_info.composite_tensor.components.add().CopyFrom(
build_tensor_info_internal(component))
return tensor_info
def build_tensor_info_from_op(op):
"""Utility function to build TensorInfo proto from an Op.
Note that this function should be used with caution. It is strictly restricted
to TensorFlow internal use-cases only. Please make sure you do need it before
using it.
This utility function overloads the TensorInfo proto by setting the name to
the Op's name, dtype to DT_INVALID and tensor_shape as None. One typical usage
is for the Op of the call site for the defunned function:
```python
@function.defun
def some_variable_initialization_fn(value_a, value_b):
a = value_a
b = value_b
value_a = constant_op.constant(1, name="a")
value_b = constant_op.constant(2, name="b")
op_info = utils.build_op_info(
some_variable_initialization_fn(value_a, value_b))
```
Args:
op: An Op whose name is used to build the TensorInfo. The name that points
to the Op could be fetched at run time in the Loader session.
Returns:
A TensorInfo protocol buffer constructed based on the supplied argument.
Raises:
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError(
"`build_tensor_info_from_op` is not supported in eager execution.")
return meta_graph_pb2.TensorInfo(
dtype=types_pb2.DT_INVALID,
tensor_shape=tensor_shape.unknown_shape().as_proto(),
name=op.name)
@tf_export(v1=["saved_model.get_tensor_from_tensor_info",
"saved_model.utils.get_tensor_from_tensor_info"])
@deprecation.deprecated(
None,
"This function will only be available through the v1 compatibility "
"library as tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info or "
"tf.compat.v1.saved_model.get_tensor_from_tensor_info.")
def get_tensor_from_tensor_info(tensor_info, graph=None, import_scope=None):
"""Returns the Tensor or CompositeTensor described by a TensorInfo proto.
Args:
tensor_info: A TensorInfo proto describing a Tensor or SparseTensor or
CompositeTensor.
graph: The tf.Graph in which tensors are looked up. If None, the
current default graph is used.
import_scope: If not None, names in `tensor_info` are prefixed with this
string before lookup.
Returns:
The Tensor or SparseTensor or CompositeTensor in `graph` described by
`tensor_info`.
Raises:
KeyError: If `tensor_info` does not correspond to a tensor in `graph`.
ValueError: If `tensor_info` is malformed.
"""
graph = graph or ops.get_default_graph()
def _get_tensor(name):
return graph.get_tensor_by_name(
ops.prepend_name_scope(name, import_scope=import_scope))
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
return _get_tensor(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
_get_tensor(tensor_info.coo_sparse.indices_tensor_name),
_get_tensor(tensor_info.coo_sparse.values_tensor_name),
_get_tensor(tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = struct_coder.decode_proto(spec_proto)
components = [_get_tensor(component.name) for component in
tensor_info.composite_tensor.components]
return nest.pack_sequence_as(spec, components, expand_composites=True)
else:
raise ValueError(f"Invalid TensorInfo.encoding: {encoding}. Expected `"
"coo_sparse`, `composite_tensor`, or `name` for a dense "
"tensor.")
def get_element_from_tensor_info(tensor_info, graph=None, import_scope=None):
"""Returns the element in the graph described by a TensorInfo proto.
Args:
tensor_info: A TensorInfo proto describing an Op or Tensor by name.
graph: The tf.Graph in which tensors are looked up. If None, the current
default graph is used.
import_scope: If not None, names in `tensor_info` are prefixed with this
string before lookup.
Returns:
Op or tensor in `graph` described by `tensor_info`.
Raises:
KeyError: If `tensor_info` does not correspond to an op or tensor in `graph`
"""
graph = graph or ops.get_default_graph()
return graph.as_graph_element(
ops.prepend_name_scope(tensor_info.name, import_scope=import_scope))
# Path helpers.
def get_or_create_variables_dir(export_dir):
"""Return variables sub-directory, or create one if it doesn't exist."""
variables_dir = get_variables_dir(export_dir)
file_io.recursive_create_dir(variables_dir)
return variables_dir
def get_variables_dir(export_dir):
"""Return variables sub-directory in the SavedModel."""
return os.path.join(
compat.as_text(export_dir),
compat.as_text(constants.VARIABLES_DIRECTORY))
def get_variables_path(export_dir):
"""Return the variables path, used as the prefix for checkpoint files."""
return os.path.join(
compat.as_text(get_variables_dir(export_dir)),
compat.as_text(constants.VARIABLES_FILENAME))
def get_or_create_assets_dir(export_dir):
"""Return assets sub-directory, or create one if it doesn't exist."""
assets_destination_dir = get_assets_dir(export_dir)
file_io.recursive_create_dir(assets_destination_dir)
return assets_destination_dir
def get_assets_dir(export_dir):
"""Return path to asset directory in the SavedModel."""
return os.path.join(
compat.as_text(export_dir),
compat.as_text(constants.ASSETS_DIRECTORY))
def get_or_create_debug_dir(export_dir):
"""Returns path to the debug sub-directory, creating if it does not exist."""
debug_dir = get_debug_dir(export_dir)
file_io.recursive_create_dir(debug_dir)
return debug_dir
def get_saved_model_pbtxt_path(export_dir):
return os.path.join(
compat.as_bytes(compat.path_to_str(export_dir)),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
def get_saved_model_pb_path(export_dir):
return os.path.join(
compat.as_bytes(compat.path_to_str(export_dir)),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
def get_debug_dir(export_dir):
"""Returns path to the debug sub-directory in the SavedModel."""
return os.path.join(
compat.as_text(export_dir), compat.as_text(constants.DEBUG_DIRECTORY))
# Based on tensor_bundle/byte_swap.cc
byte_swappable = [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,
dtypes.complex64, dtypes.complex128, dtypes.uint16, dtypes.uint32,
dtypes.uint64, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.qint16,
dtypes.quint16, dtypes.qint32
]
def swap_function_tensor_content(meta_graph_def, from_endiness, to_endiness):
functions = meta_graph_def.graph_def.library.function
for function in functions:
node_def = function.node_def
for node in node_def:
if node.op == "Const":
tensor = node.attr["value"].tensor
byte_swap_tensor_content(tensor, from_endiness, to_endiness)
def byte_swap_tensor_content(tensor, from_endiness, to_endiness):
"""Byte swaps."""
if tensor.dtype in byte_swappable:
tshape = tensor.tensor_shape.dim
tensor_bytes = tensor.tensor_content
if tensor_bytes:
tensor_size = 1
for sz in tshape:
tensor_size = tensor_size * sz.size
chunksize = int(len(tensor_bytes) / tensor_size)
# Split tensor_data into chunks for byte swapping.
to_swap = [
tensor_bytes[i:i + chunksize]
for i in range(0, len(tensor_bytes), chunksize)
]
# Swap and replace tensor_content.
tensor.tensor_content = b"".join([
int.from_bytes(byteswap,
from_endiness).to_bytes(chunksize, to_endiness)
for byteswap in to_swap
])
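# Illustrative sketch (assumed call site, not from this module): byte-swapping the
# Const tensors of a little-endian graph's functions for use on a big-endian host:
#   swap_function_tensor_content(meta_graph_def, "little", "big")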
|
the-stack_0_20033 | import numpy as np
import tensorflow as tf
import vgg16
import utils
img1 = utils.load_image("./test_data/tiger.jpeg")
print((img1.shape))
batch1 = img1.reshape((1, 224, 224, 3))
print(batch1.shape)
img2 = utils.load_image("./test_data/test.jpg")
batch2 = img2.reshape((1, 224, 224, 3))
x = np.empty((0, 224, 224, 3))
batch = np.append(x, batch2, axis=0)
print(batch.shape)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
batch = np.concatenate((batch, batch1), 0).astype(np.float32)
print(batch.shape)
# img1 = utils.load_image("./test_data/tiger.jpeg")
# batch1 = img1.reshape((1, 224, 224, 3))
# batch = np.concatenate((batch, batch1), 0)
# print(batch.shape)
# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:
# with tf.device('/cpu:0'):
vgg = vgg16.Vgg16()
with tf.Session() as sess:
for i in range(0, 5):
images = tf.placeholder("float", [None, 224, 224, 3])
feed_dict = {images: batch}
with tf.name_scope("content_vgg"):
vgg.build(images)
prob = sess.run(vgg.fc7, feed_dict=feed_dict)
print(prob.shape)
# conv5_3 = sess.run(vgg.conv5_3, feed_dict=feed_dict)
# print("CONV5_3: ")
# print(sess.run(vgg.conv5_3, feed_dict=feed_dict).shape)
# print("========================================")
# print("POOL5: ")
# print(sess.run(vgg.pool5, feed_dict=feed_dict).shape)
# print("========================================")
# print("POOL5: ")
# print(sess.run(vgg.pool5, feed_dict=feed_dict).shape)
# print("========================================")
# print("FC6: ")
# print(sess.run(vgg.fc6, feed_dict=feed_dict).shape)
# print("========================================")
# print("RELU6: ")
# print(sess.run(vgg.relu6, feed_dict=feed_dict).shape)
# print("========================================")
# print("FC7: ")
# print(sess.run(vgg.fc7, feed_dict=feed_dict).shape)
# print("========================================")
# print("RELU7: ")
# print(sess.run(vgg.relu7, feed_dict=feed_dict).shape)
# print("========================================")
# print("FC8: ")
# print(sess.run(vgg.fc8, feed_dict=feed_dict).shape)
# print("========================================")
# utils.print_prob(prob[0], './synset.txt')
# utils.print_prob(prob[1], './synset.txt') |
the-stack_0_20034 | import csv
from models.Price import Price
class PriceRepo:
def __init__(self):
self.__price = []
def get_price_list(self):
self.__price = []
with open ("data/prices.csv", "r") as csv_file:
            csv_reader = csv.DictReader(csv_file) # DictReader is needed so rows can be filtered by their column keys
if self.__price == []:
for line in csv_reader:
self.__price.append(line)
return self.__price
def change_price(self, new_value):
self.__new_value = new_value
with open ("data/prices.csv", "w", encoding = "utf-8") as changed_csv:
fieldnames = ["Category", "Price", "Insurance"]
csv_writer = csv.DictWriter(changed_csv, fieldnames = fieldnames, lineterminator = "\n")
csv_writer.writeheader()
for line in new_value:
csv_writer.writerow(line)
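
# Minimal usage sketch (not part of the original class): assumes a
# data/prices.csv file with the columns Category, Price and Insurance exists.
if __name__ == "__main__":
    repo = PriceRepo()
    prices = repo.get_price_list()   # list of dicts keyed by the CSV header
    print(prices[:1])                # peek at the first row, if any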
|
the-stack_0_20036 | # Copyright (c) 2020 Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Malek Musleh, modified by Tim Rogers to work with gem5 20.x
### The following file was referenced from the following site:
### http://www.m5sim.org/SPEC_CPU2006_benchmarks
###
### and subsequent changes were made
from __future__ import print_function
from __future__ import absolute_import
import optparse
import sys
import os
import spec2k6
import shutil
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.params import NULL
from m5.util import addToPath, fatal, warn
addToPath('../')
from ruby import Ruby
from common import Options
from common import Simulation
from common import CacheConfig
from common import CpuConfig
from common import ObjectList
from common import MemConfig
from common.FileSystemConfig import config_filesystem
from common.Caches import *
from common.cpu2000 import *
# from newCacheConfig import *
def get_processes(options):
if options.benchmark == 'perlbench':
process = spec2k6.perlbench
elif options.benchmark == 'bzip2':
process = spec2k6.bzip2
elif options.benchmark == 'gcc':
process = spec2k6.gcc
elif options.benchmark == 'bwaves':
process = spec2k6.bwaves
elif options.benchmark == 'gamess':
process = spec2k6.gamess
elif options.benchmark == 'mcf':
process = spec2k6.mcf
elif options.benchmark == 'milc':
process = spec2k6.milc
elif options.benchmark == 'zeusmp':
process = spec2k6.zeusmp
elif options.benchmark == 'gromacs':
process = spec2k6.gromacs
shutil.copy(os.path.join(process.cwd,"gromacs.tpr"),os.getcwd())
elif options.benchmark == 'cactusADM':
process = spec2k6.cactusADM
elif options.benchmark == 'leslie3d':
process = spec2k6.leslie3d
elif options.benchmark == 'namd':
process = spec2k6.namd
elif options.benchmark == 'gobmk':
process = spec2k6.gobmk;
elif options.benchmark == 'dealII':
process = spec2k6.dealII
elif options.benchmark == 'soplex':
process = spec2k6.soplex
elif options.benchmark == 'povray':
process = spec2k6.povray
elif options.benchmark == 'calculix':
process = spec2k6.calculix
elif options.benchmark == 'hmmer':
process = spec2k6.hmmer
elif options.benchmark == 'sjeng':
process = spec2k6.sjeng
elif options.benchmark == 'GemsFDTD':
process = spec2k6.GemsFDTD
elif options.benchmark == 'libquantum':
process = spec2k6.libquantum
elif options.benchmark == 'h264ref':
process = spec2k6.h264ref
elif options.benchmark == 'tonto':
process = spec2k6.tonto
elif options.benchmark == 'lbm':
process = spec2k6.lbm
elif options.benchmark == 'omnetpp':
process = spec2k6.omnetpp
elif options.benchmark == 'astar':
process = spec2k6.astar
elif options.benchmark == 'wrf':
process = spec2k6.wrf
elif options.benchmark == 'sphinx3':
process = spec2k6.sphinx3
elif options.benchmark == 'xalancbmk':
process = spec2k6.xalancbmk
elif options.benchmark == 'specrand_i':
process = spec2k6.specrand_i
elif options.benchmark == 'specrand_f':
process = spec2k6.specrand_f
return [process], 1
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
parser.add_option("-b", "--benchmark", default="",
help="The benchmark to be loaded.")
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print("Error: script doesn't take any positional arguments")
sys.exit(1)
multiprocesses = []
numThreads = 1
if options.bench:
apps = options.bench.split("-")
if len(apps) != options.num_cpus:
print("number of benchmarks not equal to set num_cpus!")
sys.exit(1)
for app in apps:
try:
if buildEnv['TARGET_ISA'] == 'arm':
exec("workload = %s('arm_%s', 'linux', '%s')" % (
app, options.arm_iset, options.spec_input))
else:
exec("workload = %s(buildEnv['TARGET_ISA', 'linux', '%s')" % (
app, options.spec_input))
multiprocesses.append(workload.makeProcess())
except:
print("Unable to find workload for %s: %s" %
(buildEnv['TARGET_ISA'], app),
file=sys.stderr)
sys.exit(1)
elif options.benchmark:
multiprocesses, numThreads = get_processes(options)
else:
print("No workload specified. Exiting!\n", file=sys.stderr)
sys.exit(1)
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.numThreads = numThreads
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in range(np)],
mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size,
workload = NULL)
if numThreads > 1:
system.multi_thread = True
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
system.cpu_voltage_domain)
# If elastic tracing is enabled, then configure the cpu and attach the elastic
# trace probe
if options.elastic_trace_en:
CpuConfig.config_etrace(CPUClass, system.cpu, options)
# All cpus belong to a common cpu_clk_domain, therefore running at a common
# frequency.
for cpu in system.cpu:
cpu.clk_domain = system.cpu_clk_domain
if ObjectList.is_kvm_cpu(CPUClass) or ObjectList.is_kvm_cpu(FutureClass):
if buildEnv['TARGET_ISA'] == 'x86':
system.kvm_vm = KvmVM()
for process in multiprocesses:
process.useArchPT = True
process.kvmInSE = True
else:
fatal("KvmCPU can only be used in SE mode with x86")
# Sanity check
if options.simpoint_profile:
if not ObjectList.is_noncaching_cpu(CPUClass):
fatal("SimPoint/BPProbe should be done with an atomic cpu")
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
for i in range(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
if options.simpoint_profile:
system.cpu[i].addSimPointProbe(options.simpoint_interval)
if options.checker:
system.cpu[i].addCheckerCpu()
if options.bp_type:
bpClass = ObjectList.bp_list.get(options.bp_type)
system.cpu[i].branchPred = bpClass()
if options.indirect_bp_type:
indirectBPClass = \
ObjectList.indirect_bp_list.get(options.indirect_bp_type)
system.cpu[i].branchPred.indirectBranchPred = indirectBPClass()
system.cpu[i].createThreads()
if options.ruby:
Ruby.create_system(options, False, system)
assert(options.num_cpus == len(system.ruby._cpu_ports))
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
for i in range(np):
ruby_port = system.ruby._cpu_ports[i]
# Create the interrupt controller and connect its ports to Ruby
# Note that the interrupt controller is always present but only
# in x86 does it have message ports that need to be connected
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts[0].pio = ruby_port.master
system.cpu[i].interrupts[0].int_master = ruby_port.slave
system.cpu[i].interrupts[0].int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
MemClass = Simulation.setMemClass(options)
system.membus = SystemXBar()
system.system_port = system.membus.slave
CacheConfig.config_cache(options, system)
MemConfig.config_mem(options, system)
config_filesystem(system, options)
if options.wait_gdb:
for cpu in system.cpu:
cpu.wait_for_remote_gdb = True
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
|
the-stack_0_20037 | from typing import List
class ScaleLimitError(Exception):
"""Raise when the application couldn't scale for all needed jobs."""
pass
class TaskTerminationError(Exception):
"""Raise when something goes wrong while trying to terminate a task"""
def __init__(self, message=None, task_name=None, *args, **kwargs):
"""
:param message: A non-default message
:param task_name: The name of the task that was unsuccessfully terminated
"""
self.message = message
if not self.message:
if task_name:
self.message = f"Failed to terminate task with name: {task_name}"
else:
self.message = "Failed to terminate task with unknown name"
super(TaskTerminationError, self).__init__(self.message, *args, **kwargs)
class MultipleTaskTerminationErrors(TaskTerminationError):
"""Raise when multiple task termination errors occurred"""
def __init__(self, errors: List[TaskTerminationError]) -> None:
"""
:param errors: All of the task termination errors that occurred
"""
self.errors = errors
super().__init__(self.errors)
def __str__(self) -> str:
return "The following errors occurred while trying to terminate multiple tasks\n" + "\n".join(
[str(err) for err in self.errors]
)
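
# Minimal usage sketch (not part of the original module; task names are made up
# for illustration): aggregate two per-task failures and inspect the combined
# message.
if __name__ == "__main__":
    failures = [
        TaskTerminationError(task_name="worker-1"),
        TaskTerminationError(task_name="worker-2"),
    ]
    print(MultipleTaskTerminationErrors(failures))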
|
the-stack_0_20040 | import re
import json
import pickle
import textwrap
import unittest
import warnings
import importlib
import importlib_metadata
import pyfakefs.fake_filesystem_unittest as ffs
from . import fixtures
from importlib_metadata import (
Distribution,
EntryPoint,
MetadataPathFinder,
PackageNotFoundError,
distributions,
entry_points,
metadata,
packages_distributions,
version,
)
class BasicTests(fixtures.DistInfoPkg, unittest.TestCase):
version_pattern = r'\d+\.\d+(\.\d)?'
def test_retrieves_version_of_self(self):
dist = Distribution.from_name('distinfo-pkg')
assert isinstance(dist.version, str)
assert re.match(self.version_pattern, dist.version)
def test_for_name_does_not_exist(self):
with self.assertRaises(PackageNotFoundError):
Distribution.from_name('does-not-exist')
def test_package_not_found_mentions_metadata(self):
"""
When a package is not found, that could indicate that the
        package is not installed or that it is installed without
metadata. Ensure the exception mentions metadata to help
guide users toward the cause. See #124.
"""
with self.assertRaises(PackageNotFoundError) as ctx:
Distribution.from_name('does-not-exist')
assert "metadata" in str(ctx.exception)
def test_new_style_classes(self):
self.assertIsInstance(Distribution, type)
self.assertIsInstance(MetadataPathFinder, type)
class ImportTests(fixtures.DistInfoPkg, unittest.TestCase):
def test_import_nonexistent_module(self):
# Ensure that the MetadataPathFinder does not crash an import of a
# non-existent module.
with self.assertRaises(ImportError):
importlib.import_module('does_not_exist')
def test_resolve(self):
ep = entry_points(group='entries')['main']
self.assertEqual(ep.load().__name__, "main")
def test_entrypoint_with_colon_in_name(self):
ep = entry_points(group='entries')['ns:sub']
self.assertEqual(ep.value, 'mod:main')
def test_resolve_without_attr(self):
ep = EntryPoint(
name='ep',
value='importlib_metadata',
group='grp',
)
assert ep.load() is importlib_metadata
class NameNormalizationTests(fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase):
@staticmethod
def pkg_with_dashes(site_dir):
"""
Create minimal metadata for a package with dashes
in the name (and thus underscores in the filename).
"""
metadata_dir = site_dir / 'my_pkg.dist-info'
metadata_dir.mkdir()
metadata = metadata_dir / 'METADATA'
with metadata.open('w', encoding='utf-8') as strm:
strm.write('Version: 1.0\n')
return 'my-pkg'
def test_dashes_in_dist_name_found_as_underscores(self):
"""
For a package with a dash in the name, the dist-info metadata
uses underscores in the name. Ensure the metadata loads.
"""
pkg_name = self.pkg_with_dashes(self.site_dir)
assert version(pkg_name) == '1.0'
@staticmethod
def pkg_with_mixed_case(site_dir):
"""
Create minimal metadata for a package with mixed case
in the name.
"""
metadata_dir = site_dir / 'CherryPy.dist-info'
metadata_dir.mkdir()
metadata = metadata_dir / 'METADATA'
with metadata.open('w', encoding='utf-8') as strm:
strm.write('Version: 1.0\n')
return 'CherryPy'
def test_dist_name_found_as_any_case(self):
"""
Ensure the metadata loads when queried with any case.
"""
pkg_name = self.pkg_with_mixed_case(self.site_dir)
assert version(pkg_name) == '1.0'
assert version(pkg_name.lower()) == '1.0'
assert version(pkg_name.upper()) == '1.0'
class NonASCIITests(fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase):
@staticmethod
def pkg_with_non_ascii_description(site_dir):
"""
Create minimal metadata for a package with non-ASCII in
the description.
"""
metadata_dir = site_dir / 'portend.dist-info'
metadata_dir.mkdir()
metadata = metadata_dir / 'METADATA'
with metadata.open('w', encoding='utf-8') as fp:
fp.write('Description: pôrˈtend')
return 'portend'
@staticmethod
def pkg_with_non_ascii_description_egg_info(site_dir):
"""
Create minimal metadata for an egg-info package with
non-ASCII in the description.
"""
metadata_dir = site_dir / 'portend.dist-info'
metadata_dir.mkdir()
metadata = metadata_dir / 'METADATA'
with metadata.open('w', encoding='utf-8') as fp:
fp.write(
textwrap.dedent(
"""
Name: portend
pôrˈtend
"""
).strip()
)
return 'portend'
def test_metadata_loads(self):
pkg_name = self.pkg_with_non_ascii_description(self.site_dir)
meta = metadata(pkg_name)
assert meta['Description'] == 'pôrˈtend'
def test_metadata_loads_egg_info(self):
pkg_name = self.pkg_with_non_ascii_description_egg_info(self.site_dir)
meta = metadata(pkg_name)
assert meta['Description'] == 'pôrˈtend'
class DiscoveryTests(fixtures.EggInfoPkg, fixtures.DistInfoPkg, unittest.TestCase):
def test_package_discovery(self):
dists = list(distributions())
assert all(isinstance(dist, Distribution) for dist in dists)
assert any(dist.metadata['Name'] == 'egginfo-pkg' for dist in dists)
assert any(dist.metadata['Name'] == 'distinfo-pkg' for dist in dists)
def test_invalid_usage(self):
with self.assertRaises(ValueError):
list(distributions(context='something', name='else'))
class DirectoryTest(fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase):
def test_egg_info(self):
# make an `EGG-INFO` directory that's unrelated
self.site_dir.joinpath('EGG-INFO').mkdir()
# used to crash with `IsADirectoryError`
with self.assertRaises(PackageNotFoundError):
version('unknown-package')
def test_egg(self):
egg = self.site_dir.joinpath('foo-3.6.egg')
egg.mkdir()
with self.add_sys_path(egg):
with self.assertRaises(PackageNotFoundError):
version('foo')
class MissingSysPath(fixtures.OnSysPath, unittest.TestCase):
site_dir = '/does-not-exist'
def test_discovery(self):
"""
Discovering distributions should succeed even if
there is an invalid path on sys.path.
"""
importlib_metadata.distributions()
class InaccessibleSysPath(fixtures.OnSysPath, ffs.TestCase):
site_dir = '/access-denied'
def setUp(self):
super().setUp()
self.setUpPyfakefs()
self.fs.create_dir(self.site_dir, perm_bits=000)
def test_discovery(self):
"""
Discovering distributions should succeed even if
there is an invalid path on sys.path.
"""
list(importlib_metadata.distributions())
class TestEntryPoints(unittest.TestCase):
def __init__(self, *args):
super().__init__(*args)
self.ep = importlib_metadata.EntryPoint(
name='name', value='value', group='group'
)
def test_entry_point_pickleable(self):
revived = pickle.loads(pickle.dumps(self.ep))
assert revived == self.ep
def test_positional_args(self):
"""
Capture legacy (namedtuple) construction, discouraged.
"""
EntryPoint('name', 'value', 'group')
def test_immutable(self):
"""EntryPoints should be immutable"""
with self.assertRaises(AttributeError):
self.ep.name = 'badactor'
def test_repr(self):
assert 'EntryPoint' in repr(self.ep)
assert 'name=' in repr(self.ep)
assert "'name'" in repr(self.ep)
def test_hashable(self):
"""EntryPoints should be hashable"""
hash(self.ep)
def test_json_dump(self):
"""
json should not expect to be able to dump an EntryPoint
"""
with self.assertRaises(Exception):
with warnings.catch_warnings(record=True):
json.dumps(self.ep)
def test_module(self):
assert self.ep.module == 'value'
def test_attr(self):
assert self.ep.attr is None
def test_sortable(self):
"""
EntryPoint objects are sortable, but result is undefined.
"""
sorted(
[
EntryPoint(name='b', value='val', group='group'),
EntryPoint(name='a', value='val', group='group'),
]
)
class FileSystem(
fixtures.OnSysPath, fixtures.SiteDir, fixtures.FileBuilder, unittest.TestCase
):
def test_unicode_dir_on_sys_path(self):
"""
Ensure a Unicode subdirectory of a directory on sys.path
does not crash.
"""
fixtures.build_files(
{self.unicode_filename(): {}},
prefix=self.site_dir,
)
list(distributions())
class PackagesDistributionsPrebuiltTest(fixtures.ZipFixtures, unittest.TestCase):
def test_packages_distributions_example(self):
self._fixture_on_path('example-21.12-py3-none-any.whl')
assert packages_distributions()['example'] == ['example']
def test_packages_distributions_example2(self):
"""
Test packages_distributions on a wheel built
by trampolim.
"""
self._fixture_on_path('example2-1.0.0-py3-none-any.whl')
assert packages_distributions()['example2'] == ['example2']
class PackagesDistributionsTest(
fixtures.OnSysPath, fixtures.SiteDir, unittest.TestCase
):
def test_packages_distributions_neither_toplevel_nor_files(self):
"""
Test a package built without 'top-level.txt' or a file list.
"""
fixtures.build_files(
{
'trim_example-1.0.0.dist-info': {
'METADATA': """
Name: trim_example
Version: 1.0.0
""",
}
},
prefix=self.site_dir,
)
packages_distributions()
|
the-stack_0_20041 |
# importing the required libraries and modules
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from sklearn import svm, metrics
from sklearn import decomposition
import time
#reading the training and the testing images data
data = pd.read_csv('D://Spring 2018/CS 559 Machine Learning/Project/train.txt', header = None)
testdata = pd.read_csv('D://Spring 2018/CS 559 Machine Learning/Project/test.txt', header = None)
#normalizing the training data
X_train=data.iloc[:,0:64]/16.0
y_train=data.iloc[:,64]
X_test=testdata.iloc[:,0:64]/16.0
y_test=testdata.iloc[:,64]
######################################################
######################################################
######################################################
#SVM without PCA
parameters = {'kernel':'rbf', 'C':1, 'gamma': 1,'degree':3}
classifier = svm.SVC(kernel=parameters['kernel'],gamma=parameters['gamma'],C = parameters['C'],degree=parameters['degree'])
classifier.fit(X_train,y_train)
predicted = classifier.predict(X_test)
count=0
for i,j in zip(predicted,y_test):
if (i == j):
count+=1
print("\n")
print("Accuracy using SVM without using PCA is: ",count/len(y_test) )
######################################################
######################################################
######################################################
# Applying PCA
# finding the important principal components using the variance explained by them
pca=PCA()
pca.fit(X_train)
with plt.style.context('fivethirtyeight'):
plt.show()
plt.xlabel("Principal components ")
plt.ylabel("Variance")
plt.plot(pca.explained_variance_ratio_)
plt.title('Variance Explained by Extracted Componenent')
plt.show()
time.sleep(5)
######################################################
######################################################
######################################################
#PCA with 15 components
pca=PCA(n_components=15)
train_ext=pca.fit_transform(X_train)
#Gaussian radial basis kernel is used
parameters = {'kernel':'rbf', 'C':1, 'gamma': 1,'degree':3}
classifier = svm.SVC(kernel=parameters['kernel'],gamma=parameters['gamma'],C = parameters['C'],degree=parameters['degree'])
classifier.fit(train_ext,y_train)
test_ext=pca.transform(X_test)
predicted = classifier.predict(test_ext)
count=0
for i,j in zip(predicted,y_test):
if (i == j):
count+=1
print("\n")
print("Accuracy using SVM with PCA 15 components is: ",count/len(y_test) )
######################################################
######################################################
######################################################
#PCA with 20 components
pca=PCA(n_components=20)
pca.fit(X_train)
train_ext=pca.fit_transform(X_train)
parameters = {'kernel':'rbf', 'C':1, 'gamma': 1,'degree':3}
classifier = svm.SVC(kernel=parameters['kernel'],gamma=parameters['gamma'],C = parameters['C'],degree=parameters['degree'])
classifier.fit(train_ext,y_train)
test_ext=pca.transform(X_test)
predicted = classifier.predict(test_ext)
count=0
for i,j in zip(predicted,y_test):
if (i == j):
count+=1
print("\n")
print("Accuracy using SVM with PCA 20 components is: ",count/len(y_test) )
######################################################
######################################################
######################################################
#PCA with 25 components
pca=PCA(n_components=25)
pca.fit(X_train)
train_ext=pca.fit_transform(X_train)
parameters = {'kernel':'rbf', 'C':1, 'gamma': 1,'degree':3}
classifier = svm.SVC(kernel=parameters['kernel'],gamma=parameters['gamma'],C = parameters['C'],degree=parameters['degree'])
classifier.fit(train_ext,y_train)
test_ext=pca.transform(X_test)
predicted = classifier.predict(test_ext)
count=0
for i,j in zip(predicted,y_test):
if (i == j):
count+=1
print("\n")
print("Accuracy using SVM with PCA 25 components is: ",count/len(y_test) )
#Hence, found SVM with PCA of 20 components is a good result with 97.77 % Accuracy |
the-stack_0_20042 | import argparse
import json
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn
def get_distribution(arg):
return dict(
uniform=np.random.uniform,
poisson=np.random.poisson,
normal=np.random.normal,
exponential=np.random.exponential,
chi_square=np.random.chisquare,
pareto=np.random.pareto,
lognormal=np.random.lognormal,
)[arg]
def get_estimates(values, num_steps, num_seeds, exploration_bonus, noise_scale):
u = np.ones(values.size)
logits_list = [u.copy() for _ in range(num_seeds)]
def sample(logits, adaptive):
probs = logits / logits.sum()
index = np.random.choice(values.size, p=probs)
choice = values[index]
if adaptive:
logits += exploration_bonus
logits[index] = np.abs(choice)
return choice / (values.size * probs[index])
for i in range(num_steps):
true = values.mean()
adaptive = [sample(l, adaptive=True) for l in logits_list]
baseline = [sample(u, adaptive=False) for _ in logits_list]
yield true, adaptive, baseline
values += noise_scale * np.random.normal(1, 1, values.size)
def main(
distribution,
stats,
num_values,
num_steps,
seed,
num_samples,
exploration_bonus,
num_seeds,
noise_scale,
noise_mean,
noise_std,
distribution_plot_name,
estimate_plot_name,
):
if seed is not None:
np.random.seed(seed)
def get_estimates(values):
u = np.ones(values.size)
logits_list = [u.copy() for _ in range(num_seeds)]
def sample(logits, adaptive):
probs = logits / logits.sum()
indices = np.random.choice(values.size, size=num_samples, p=probs)
choice = values[indices]
if adaptive:
logits[indices] = np.abs(choice)
logits += exploration_bonus
weight = 1 / (values.size * probs[indices])
return np.mean(choice * weight)
for i in range(num_steps):
for logits in logits_list:
yield i, sample(logits, adaptive=True), "adaptive"
yield i, sample(u, adaptive=False), "baseline"
yield i, values.mean(), "truth"
values += noise_scale * np.random.normal(noise_mean, noise_std, values.size)
initial_values = distribution(*stats, size=num_values)
seaborn.distplot(initial_values)
plt.savefig(distribution_plot_name)
plt.close()
print("initial values:")
print(initial_values)
estimates = get_estimates(initial_values.astype(float))
data = pd.DataFrame(data=estimates, columns=["steps", "estimate", "type"])
seaborn.lineplot(x="steps", y="estimate", hue="type", data=data)
plt.savefig(estimate_plot_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--distribution", type=get_distribution)
parser.add_argument("--stats", nargs="*", type=float)
parser.add_argument("--num-values", type=int, default=20)
parser.add_argument("--num-steps", type=int, default=50)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--exploration-bonus", type=float, default=0.0)
parser.add_argument("--num-seeds", type=int, default=100)
parser.add_argument("--noise-scale", type=float, default=0)
parser.add_argument("--noise-std", type=float, default=2)
parser.add_argument("--noise-mean", type=float, default=1)
parser.add_argument("--num-samples", type=float, default=1)
parser.add_argument("--estimate-plot-name", default="estimates")
parser.add_argument("--distribution-plot-name", default="distribution")
if len(sys.argv) == 2:
with open(sys.argv.pop(1)) as f:
parser.set_defaults(**json.load(f))
main(**vars(parser.parse_args()))
|
the-stack_0_20044 | #!/usr/bin/python
import os
import re
import sys
from collections import OrderedDict
def main():
file_GTH = 'HFX_BASIS'
basis_sets = OrderedDict()
with open(file_GTH,'r') as searchfile:
for line in searchfile:
if line.startswith('#'):
continue
elif 'GTH' in line:
bas_type = line.split()[1]
if bas_type not in basis_sets:
basis_sets[bas_type] = []
basis_sets[bas_type].append(line)
else:
basis_sets[bas_type].append(line)
for basis_set in basis_sets:
with open('gth-%s.dat'%(basis_set.lower().replace('-gth','')),'w') as f:
lines = basis_sets[basis_set]
for line in lines:
if 'GTH' in line:
f.write('#BASIS SET\n')
f.write(line)
f.write('END\n')
f.close()
if __name__ == '__main__':
main()
|
the-stack_0_20045 | from citrination_client.base.errors import CitrinationClientError
class Target(object):
"""
The optimization target for a design run. Consists of
the name of the output column to optimize and the objective
(either "Max" or "Min", or a scalar value (such as "5.0"))
"""
def __init__(self, name, objective):
"""
Constructor.
:param name: The name of the target output column
:type name: str
:param objective: The optimization objective; "Min", "Max", or a scalar value (such as "5.0")
:type objective: str
"""
try:
self._objective = float(objective)
except ValueError:
if objective.lower() not in ["max", "min"]:
raise CitrinationClientError(
"Target objective must either be \"min\" or \"max\""
)
self._objective = objective
self._name = name
def to_dict(self):
return {
"descriptor": self._name,
"objective": self._objective
}
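
# Minimal usage sketch (not part of the original module; column names are
# hypothetical). Requires citrination_client to be importable, as the module
# itself does.
if __name__ == "__main__":
    maximize = Target(name="Band gap", objective="Max")
    scalar = Target(name="Density", objective="5.0")
    print(maximize.to_dict())  # {'descriptor': 'Band gap', 'objective': 'Max'}
    print(scalar.to_dict())    # {'descriptor': 'Density', 'objective': 5.0}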
|
the-stack_0_20048 | # -*- coding: utf-8 -*-
# MIT License
# Copyright (c) 2018 David Rodrigues Parrini
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime as dt
import errno
import io
import math
import os
import re
import struct
import sys
import warnings
# COMTRADE standard revisions
REV_1991 = "1991"
REV_1999 = "1999"
REV_2013 = "2013"
# DAT file format types
TYPE_ASCII = "ASCII"
TYPE_BINARY = "BINARY"
TYPE_BINARY32 = "BINARY32"
TYPE_FLOAT32 = "FLOAT32"
# Special values
TIMESTAMP_MISSING = 0xFFFFFFFF
# CFF headers
CFF_HEADER_REXP = r"(?i)--- file type: ([a-z]+)(?:\s+([a-z0-9]+)(?:\s*\:\s*([0-9]+))?)? ---$"
# common separator character of data fields of CFG and ASCII DAT files
SEPARATOR = ","
# timestamp regular expression
re_date = re.compile(r"([0-9]{1,2})/([0-9]{1,2})/([0-9]{2,4})")
re_time = re.compile(r"([0-9]{1,2}):([0-9]{2}):([0-9]{2})(\.([0-9]{1,12}))?")
# Non-standard revision warning
WARNING_UNKNOWN_REVISION = "Unknown standard revision \"{}\""
# Date time with nanoseconds resolution warning
WARNING_DATETIME_NANO = "Unsupported datetime objects with nanoseconds \
resolution. Using truncated values."
# Date time with year 0, month 0 and/or day 0.
WARNING_MINDATE = "Missing date values. Using minimum values: {}."
def _read_sep_values(line, expected: int = -1, default: str = ''):
values = tuple(map(lambda cell: cell.strip(), line.split(SEPARATOR)))
if expected == -1 or len(values) == expected:
return values
return [values[i] if i < len(values) else default
for i in range(expected)]
def _prevent_null(str_value: str, value_type: type, default_value):
if len(str_value.strip()) == 0:
return default_value
else:
return value_type(str_value)
def _get_date(date_str: str) -> tuple:
m = re_date.match(date_str)
if m is not None:
day = int(m.group(1))
month = int(m.group(2))
year = int(m.group(3))
return day, month, year
return 0, 0, 0
def _get_time(time_str: str, ignore_warnings: bool = False) -> tuple:
m = re_time.match(time_str)
if m is not None:
hour = int(m.group(1))
minute = int(m.group(2))
second = int(m.group(3))
fracsec_str = m.group(5)
# Pad fraction of seconds with 0s to the right
if len(fracsec_str) <= 6:
fracsec_str = fill_with_zeros_to_the_right(fracsec_str, 6)
else:
fracsec_str = fill_with_zeros_to_the_right(fracsec_str, 9)
frac_second = int(fracsec_str)
in_nanoseconds = len(fracsec_str) > 6
microsecond = frac_second
if in_nanoseconds:
# Nanoseconds resolution is not supported by datetime module, so it's
# converted to integer below.
if not ignore_warnings:
warnings.warn(Warning(WARNING_DATETIME_NANO))
microsecond = int(microsecond * 1E-3)
return hour, minute, second, microsecond, in_nanoseconds
def fill_with_zeros_to_the_right(number_str: str, width: int):
actual_len = len(number_str)
if actual_len < width:
difference = width - actual_len
fill_chars = "0"*difference
return number_str + fill_chars
return number_str
def _read_timestamp(timestamp_line: str, ignore_warnings=False) -> tuple:
"""Process comma separated fields and returns a tuple containing the timestamp
and a boolean value indicating whether nanoseconds are used.
Can possibly return the timestamp 00/00/0000 00:00:00.000 for empty strings
or empty pairs."""
day, month, year, hour, minute, second, microsecond = (0,)*7
nanosec = False
if len(timestamp_line.strip()) > 0:
values = _read_sep_values(timestamp_line, 2)
if len(values) >= 2:
date_str, time_str = values[0:2]
if len(date_str.strip()) > 0:
day, month, year = _get_date(date_str)
if len(time_str.strip()) > 0:
hour, minute, second, microsecond, \
nanosec = _get_time(time_str, ignore_warnings)
using_min_data = False
if year <= 0:
year = dt.MINYEAR
using_min_data = True
if month <= 0:
month = 1
using_min_data = True
if day <= 0:
day = 1
using_min_data = True
# Timezone info unsupported
tzinfo = None
timestamp = dt.datetime(year, month, day, hour, minute, second,
microsecond, tzinfo)
if not ignore_warnings and using_min_data:
warnings.warn(Warning(WARNING_MINDATE.format(str(timestamp))))
return timestamp, nanosec
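
# Example (illustrative, not part of the original module): a 1999-style
# "dd/mm/yyyy,hh:mm:ss.ffffff" pair parsed into a datetime plus a flag telling
# whether nanosecond resolution was used.
#
#   ts, nano = _read_timestamp("01/01/2020,10:30:00.500000")
#   # ts == datetime.datetime(2020, 1, 1, 10, 30, 0, 500000); nano is False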
def _file_is_utf8(file_path):
if os.path.exists(file_path):
with open(file_path, "r") as file:
return _stream_is_utf8(file)
return False
def _stream_is_utf8(stream):
try:
contents = stream.readlines()
except UnicodeDecodeError as exception:
return True
return False
class Cfg:
"""Parses and stores Comtrade's CFG data."""
# time base units
TIME_BASE_NANOSEC = 1E-9
TIME_BASE_MICROSEC = 1E-6
def __init__(self, **kwargs):
"""
Cfg object constructor.
Keyword arguments:
ignore_warnings -- whether warnings are displayed in stdout
(default: False)
"""
self.filename = ""
# implicit data
self._time_base = self.TIME_BASE_MICROSEC
# Default CFG data
self._station_name = ""
self._rec_dev_id = ""
self._rev_year = 2013
self._channels_count = 0
self._analog_channels = []
self._status_channels = []
self._analog_count = 0
self._status_count = 0
self._frequency = 0.0
self._nrates = 1
self._sample_rates = []
self._timestamp_critical = False
self._start_timestamp = dt.datetime(1900, 1, 1)
self._trigger_timestamp = dt.datetime(1900, 1, 1)
self._ft = TYPE_ASCII
self._time_multiplier = 1.0
# 2013 standard revision information
# time_code,local_code = 0,0 means local time is UTC
self._time_code = 0
self._local_code = 0
# tmq_code,leapsec
self._tmq_code = 0
self._leap_second = 0
if "ignore_warnings" in kwargs:
self.ignore_warnings = kwargs["ignore_warnings"]
else:
self.ignore_warnings = False
@property
def station_name(self) -> str:
"""Return the recording device's station name."""
return self._station_name
@property
def rec_dev_id(self) -> str:
"""Return the recording device id."""
return self._rec_dev_id
@property
def rev_year(self) -> int:
"""Return the COMTRADE revision year."""
return self._rev_year
@property
def channels_count(self) -> int:
"""Return the number of channels, total."""
return self._channels_count
@property
def analog_channels(self) -> list:
"""Return the analog channels list with complete channel description."""
return self._analog_channels
@property
def status_channels(self) -> list:
"""Return the status channels list with complete channel description."""
return self._status_channels
@property
def analog_count(self) -> int:
"""Return the number of analog channels."""
return self._analog_count
@property
def status_count(self) -> int:
"""Return the number of status channels."""
return self._status_count
@property
def time_base(self) -> float:
"""Return the time base."""
return self._time_base
@property
def frequency(self) -> float:
"""Return the measured line frequency in Hertz."""
return self._frequency
@property
def ft(self) -> str:
"""Return the expected DAT file format."""
return self._ft
@property
def timemult(self) -> float:
"""Return the DAT time multiplier (Default = 1)."""
return self._time_multiplier
@property
def timestamp_critical(self) -> bool:
"""Returns whether the DAT file must contain non-zero
timestamp values."""
return self._timestamp_critical
@property
def start_timestamp(self) -> dt.datetime:
"""Return the recording start time stamp as a datetime object."""
return self._start_timestamp
@property
def trigger_timestamp(self) -> dt.datetime:
"""Return the trigger time stamp as a datetime object."""
return self._trigger_timestamp
@property
def nrates(self) -> int:
"""Return the number of different sample rates within the DAT file."""
return self._nrates
@property
def sample_rates(self) -> list:
"""
Return a list with pairs describing the number of samples for a given
sample rate.
"""
return self._sample_rates
# Deprecated properties - Changed "Digital" for "Status"
@property
def digital_channels(self) -> list:
"""Returns the status channels bidimensional values list."""
if not self.ignore_warnings:
warnings.warn(FutureWarning("digital_channels is deprecated, "
"use status_channels instead."))
return self._status_channels
@property
def digital_count(self) -> int:
"""Returns the number of status channels."""
if not self.ignore_warnings:
warnings.warn(FutureWarning("digital_count is deprecated, "
"use status_count instead."))
return self._status_count
def load(self, filepath):
"""Load and read a CFG file contents."""
self.filepath = filepath
if os.path.isfile(self.filepath):
kwargs = {}
if _file_is_utf8(self.filepath):
kwargs["encoding"] = "utf-8"
with open(self.filepath, "r", **kwargs) as cfg:
self._read_io(cfg)
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
self.filepath)
def read(self, cfg_lines):
"""Read CFG-format data of a FileIO or StringIO object."""
if type(cfg_lines) is str:
self._read_io(io.StringIO(cfg_lines))
else:
self._read_io(cfg_lines)
def _read_io(self, cfg):
"""Read CFG-format lines and stores its data."""
line_count = 0
self._nrates = 1
self._sample_rates = []
self._analog_channels = []
self._status_channels = []
# First line
line = cfg.readline()
# station, device, and comtrade standard revision information
packed = _read_sep_values(line)
if 3 == len(packed):
# only 1999 revision and above has the standard revision year
self._station_name, self._rec_dev_id, self._rev_year = packed
self._rev_year = self._rev_year.strip()
if self._rev_year not in (REV_1991, REV_1999, REV_2013):
if not self.ignore_warnings:
msg = WARNING_UNKNOWN_REVISION.format(self._rev_year)
warnings.warn(Warning(msg))
else:
self._station_name, self._rec_dev_id = packed
self._rev_year = REV_1999
line_count = line_count + 1
# Second line
line = cfg.readline()
# number of channels and its type
totchn, achn, schn = _read_sep_values(line, 3, '0')
self._channels_count = int(totchn)
self._analog_count = int(achn[:-1])
self._status_count = int(schn[:-1])
self._analog_channels = [None]*self._analog_count
self._status_channels = [None]*self._status_count
line_count = line_count + 1
# Analog channel description lines
for ichn in range(self._analog_count):
line = cfg.readline()
packed = _read_sep_values(line, 13, '0')
# unpack values
n, name, ph, ccbm, uu, a, b, skew, cmin, cmax, \
primary, secondary, pors = packed
# type conversion
n = int(n)
a = float(a)
b = _prevent_null(b, float, 0.0)
skew = _prevent_null(skew, float, 0.0)
cmin = float(cmin)
cmax = float(cmax)
primary = float(primary)
secondary = float(secondary)
self.analog_channels[ichn] = AnalogChannel(n, a, b, skew,
cmin, cmax, name, uu, ph, ccbm, primary, secondary, pors)
line_count = line_count + 1
# Status channel description lines
for ichn in range(self._status_count):
line = cfg.readline()
# unpack values
packed = _read_sep_values(line, 5, '0')
n, name, ph, ccbm, y = packed
# type conversion
n = int(n)
            y = _prevent_null(y, int, 0)  # TODO: this is actually critical data; add a warning in the future.
self.status_channels[ichn] = StatusChannel(n, name, ph, ccbm, y)
line_count = line_count + 1
# Frequency line
line = cfg.readline()
if len(line.strip()) > 0:
self._frequency = float(line.strip())
line_count = line_count + 1
# Nrates line
# number of different sample rates
line = cfg.readline()
self._nrates = int(line.strip())
if self._nrates == 0:
self._nrates = 1
self._timestamp_critical = True
else:
self._timestamp_critical = False
line_count = line_count + 1
for inrate in range(self._nrates):
line = cfg.readline()
# each sample rate
samp, endsamp = _read_sep_values(line)
samp = float(samp)
endsamp = int(endsamp)
self.sample_rates.append([samp, endsamp])
line_count = line_count + 1
# First data point time and time base
line = cfg.readline()
ts_str = line.strip()
self._start_timestamp, nanosec = _read_timestamp(ts_str, self.ignore_warnings)
self._time_base = self._get_time_base(nanosec)
line_count = line_count + 1
# Event data point and time base
line = cfg.readline()
ts_str = line.strip()
self._trigger_timestamp, nanosec = _read_timestamp(ts_str, self.ignore_warnings)
self._time_base = min([self.time_base, self._get_time_base(nanosec)])
line_count = line_count + 1
# DAT file type
line = cfg.readline()
self._ft = line.strip()
line_count = line_count + 1
# Timestamp multiplication factor
if self._rev_year in (REV_1999, REV_2013):
line = cfg.readline().strip()
if len(line) > 0:
self._time_multiplier = float(line)
else:
self._time_multiplier = 1.0
line_count = line_count + 1
# time_code and local_code
if self._rev_year == REV_2013:
line = cfg.readline()
if line:
self._time_code, self._local_code = _read_sep_values(line)
line_count = line_count + 1
line = cfg.readline()
# time_code and local_code
self._tmq_code, self._leap_second = _read_sep_values(line)
line_count = line_count + 1
def _get_time_base(self, using_nanoseconds: bool):
"""
Return the time base, which is based on the fractionary part of the
seconds in a timestamp (00.XXXXX).
"""
if using_nanoseconds:
return self.TIME_BASE_NANOSEC
else:
return self.TIME_BASE_MICROSEC
class Comtrade:
"""Parses and stores Comtrade data."""
# extensions
EXT_CFG = "cfg"
EXT_DAT = "dat"
EXT_INF = "inf"
EXT_HDR = "hdr"
# format specific
ASCII_SEPARATOR = ","
def __init__(self, **kwargs):
"""
Comtrade object constructor.
Keyword arguments:
ignore_warnings -- whether warnings are displayed in stdout
(default: False).
"""
self.file_path = ""
self._cfg = Cfg(**kwargs)
# Default CFG data
self._analog_channel_ids = []
self._analog_phases = []
self._status_channel_ids = []
self._status_phases = []
self._timestamp_critical = False
# DAT file data
self._time_values = []
self._analog_values = []
self._status_values = []
# Additional CFF data (or additional comtrade files)
self._hdr = None
self._inf = None
if "ignore_warnings" in kwargs:
self.ignore_warnings = kwargs["ignore_warnings"]
else:
self.ignore_warnings = False
@property
def station_name(self) -> str:
"""Return the recording device's station name."""
return self._cfg.station_name
@property
def rec_dev_id(self) -> str:
"""Return the recording device id."""
return self._cfg.rec_dev_id
@property
def rev_year(self) -> int:
"""Return the COMTRADE revision year."""
return self._cfg.rev_year
@property
def cfg(self) -> Cfg:
"""Return the underlying CFG class instance."""
return self._cfg
@property
def hdr(self):
"""Return the HDR file contents."""
return self._hdr
@property
def inf(self):
"""Return the INF file contents."""
return self._inf
@property
def analog_channel_ids(self) -> list:
"""Returns the analog channels name list."""
return self._analog_channel_ids
@property
def analog_phases(self) -> list:
"""Returns the analog phase name list."""
return self._analog_phases
@property
def status_channel_ids(self) -> list:
"""Returns the status channels name list."""
return self._status_channel_ids
@property
def status_phases(self) -> list:
"""Returns the status phase name list."""
return self._status_phases
@property
def time(self) -> list:
"""Return the time values list."""
return self._time_values
@property
def analog(self) -> list:
"""Return the analog channel values bidimensional list."""
return self._analog_values
@property
def status(self) -> list:
"""Return the status channel values bidimensional list."""
return self._status_values
@property
def total_samples(self) -> int:
"""Return the total number of samples (per channel)."""
return self._total_samples
@property
def frequency(self) -> float:
"""Return the measured line frequency in Hertz."""
return self._cfg.frequency
@property
def start_timestamp(self):
"""Return the recording start time stamp as a datetime object."""
return self._cfg.start_timestamp
@property
def trigger_timestamp(self):
"""Return the trigger time stamp as a datetime object."""
return self._cfg.trigger_timestamp
@property
def channels_count(self) -> int:
"""Return the number of channels, total."""
return self._cfg.channels_count
@property
def analog_count(self) -> int:
"""Return the number of analog channels."""
return self._cfg.analog_count
@property
def status_count(self) -> int:
"""Return the number of status channels."""
return self._cfg.status_count
@property
def trigger_time(self) -> float:
"""Return relative trigger time in seconds."""
stt = self._cfg.start_timestamp
trg = self._cfg.trigger_timestamp
tdiff = trg - stt
tsec = (tdiff.days*60*60*24) + tdiff.seconds + (tdiff.microseconds*1E-6)
return tsec
@property
def time_base(self) -> float:
"""Return the time base."""
return self._cfg.time_base
@property
def ft(self) -> str:
"""Return the expected DAT file format."""
return self._cfg.ft
# Deprecated properties - Changed "Digital" for "Status"
@property
def digital_channel_ids(self) -> list:
"""Returns the status channels name list."""
if not self.ignore_warnings:
warnings.warn(FutureWarning("digital_channel_ids is deprecated, use status_channel_ids instead."))
return self._status_channel_ids
@property
def digital(self) -> list:
"""Returns the status channels bidimensional values list."""
if not self.ignore_warnings:
warnings.warn(FutureWarning("digital is deprecated, use status instead."))
return self._status_values
@property
def digital_count(self) -> int:
"""Returns the number of status channels."""
if not self.ignore_warnings:
warnings.warn(FutureWarning("digital_count is deprecated, use status_count instead."))
return self._cfg.status_count
def _get_dat_reader(self):
# case insensitive comparison of file format
dat = None
ft_upper = self.ft.upper()
if ft_upper == TYPE_ASCII:
dat = AsciiDatReader()
elif ft_upper == TYPE_BINARY:
dat = BinaryDatReader()
elif ft_upper == TYPE_BINARY32:
dat = Binary32DatReader()
elif ft_upper == TYPE_FLOAT32:
dat = Float32DatReader()
else:
dat = None
raise Exception("Not supported data file format: {}".format(self.ft))
return dat
def read(self, cfg_lines, dat_lines_or_bytes) -> None:
"""
Read CFG and DAT files contents. Expects FileIO or StringIO objects.
"""
self._cfg.read(cfg_lines)
# channel ids
self._cfg_extract_channels_ids(self._cfg)
# channel phases
self._cfg_extract_phases(self._cfg)
dat = self._get_dat_reader()
dat.read(dat_lines_or_bytes, self._cfg)
# copy dat object information
self._dat_extract_data(dat)
def _cfg_extract_channels_ids(self, cfg) -> None:
self._analog_channel_ids = [channel.name for channel in cfg.analog_channels]
self._status_channel_ids = [channel.name for channel in cfg.status_channels]
def _cfg_extract_phases(self, cfg) -> None:
self._analog_phases = [channel.ph for channel in cfg.analog_channels]
self._status_phases = [channel.ph for channel in cfg.status_channels]
def _dat_extract_data(self, dat) -> None:
self._time_values = dat.time
self._analog_values = dat.analog
self._status_values = dat.status
self._total_samples = dat.total_samples
def load(self, cfg_file, dat_file = None, **kwargs) -> None:
"""
Load CFG, DAT, INF, and HDR files. Each must be a FileIO or StringIO
object. dat_file, inf_file, and hdr_file are optional (Default: None).
cfg_file is the cfg file path, including its extension.
dat_file is optional, and may be set if the DAT file name differs from
the CFG file name.
Keyword arguments:
inf_file -- optional INF file path (Default = None)
hdr_file -- optional HDR file path (Default = None)
"""
if "inf_file" in kwargs:
inf_file = kwargs["inf_file"]
else:
inf_file = None
if "hdr_file" in kwargs:
hdr_file = kwargs["hdr_file"]
else:
hdr_file = None
# which extension: CFG or CFF?
file_ext = cfg_file[-3:].upper()
if file_ext == "CFG":
basename = cfg_file[:-3]
# if not informed, infer dat_file with cfg_file
if dat_file is None:
dat_file = cfg_file[:-3] + self.EXT_DAT
if inf_file is None:
inf_file = basename + self.EXT_INF
if hdr_file is None:
hdr_file = basename + self.EXT_HDR
# load both cfg and dat
self._load_cfg_dat(cfg_file, dat_file)
# Load additional inf and hdr files, if they exist.
self._load_inf(inf_file)
self._load_hdr(hdr_file)
elif file_ext == "CFF":
# check if the CFF file exists
self._load_cff(cfg_file)
else:
raise Exception(r"Expected CFG file path, got intead \"{}\".".format(cfg_file))
def _load_cfg_dat(self, cfg_filepath, dat_filepath):
self._cfg.load(cfg_filepath)
# channel ids
self._cfg_extract_channels_ids(self._cfg)
# channel phases
self._cfg_extract_phases(self._cfg)
dat = self._get_dat_reader()
dat.load(dat_filepath, self._cfg)
# copy dat object information
self._dat_extract_data(dat)
def _load_inf(self, inf_file):
if os.path.exists(inf_file):
kwargs = {}
if _file_is_utf8(self.file_path):
kwargs["encoding"] = "utf-8"
with open(inf_file, 'r', **kwargs) as file:
self._inf = file.read()
if len(self._inf) == 0:
self._inf = None
else:
self._inf = None
def _load_hdr(self, hdr_file):
if os.path.exists(hdr_file):
kwargs = {}
if _file_is_utf8(self.file_path):
kwargs["encoding"] = "utf-8"
with open(hdr_file, 'r', **kwargs) as file:
self._hdr = file.read()
if len(self._hdr) == 0:
self._hdr = None
else:
self._hdr = None
def _load_cff(self, cff_file_path: str):
# stores each file type lines
cfg_lines = []
dat_lines = []
hdr_lines = []
inf_lines = []
# file type: CFG, HDR, INF, DAT
ftype = None
# file format: ASCII, BINARY, BINARY32, FLOAT32
fformat = None
# Number of bytes for binary/float dat
fbytes = 0
with open(cff_file_path, "r") as file:
header_re = re.compile(CFF_HEADER_REXP)
last_match = None
line_number = 0
line = file.readline()
while line != "":
line_number += 1
mobj = header_re.match(line.strip().upper())
if mobj is not None:
last_match = mobj
groups = last_match.groups()
ftype = groups[0]
if len(groups) > 1:
fformat = last_match.groups()[1]
fbytes_obj = last_match.groups()[2]
fbytes = int(fbytes_obj) if fbytes_obj is not None else 0
elif last_match is not None and ftype == "CFG":
cfg_lines.append(line.strip())
elif last_match is not None and ftype == "DAT":
if fformat == TYPE_ASCII:
dat_lines.append(line.strip())
else:
break
elif last_match is not None and ftype == "HDR":
hdr_lines.append(line.strip())
elif last_match is not None and ftype == "INF":
inf_lines.append(line.strip())
line = file.readline()
if fformat == TYPE_ASCII:
# process ASCII CFF data
self.read("\n".join(cfg_lines), "\n".join(dat_lines))
else:
# read dat bytes
total_bytes = os.path.getsize(cff_file_path)
cff_bytes_read = total_bytes - fbytes
with open(cff_file_path, "rb") as file:
file.read(cff_bytes_read)
dat_bytes = file.read(fbytes)
self.read("\n".join(cfg_lines), dat_bytes)
# stores additional data
self._hdr = "\n".join(hdr_lines)
if len(self._hdr) == 0:
self._hdr = None
self._inf = "\n".join(inf_lines)
if len(self._inf) == 0:
self._inf = None
def cfg_summary(self):
"""Returns the CFG attributes summary string."""
header_line = "Channels (total,A,D): {}A + {}D = {}"
sample_line = "Sample rate of {} Hz to the sample #{}"
interval_line = "From {} to {} with time mult. = {}"
format_line = "{} format"
lines = [header_line.format(self.analog_count, self.status_count,
self.channels_count),
"Line frequency: {} Hz".format(self.frequency)]
for i in range(self._cfg.nrates):
rate, points = self._cfg.sample_rates[i]
lines.append(sample_line.format(rate, points))
lines.append(interval_line.format(self.start_timestamp,
self.trigger_timestamp,
self._cfg.timemult))
lines.append(format_line.format(self.ft))
return "\n".join(lines)
class Channel:
"""Holds common channel description data."""
def __init__(self, n=1, name='', ph='', ccbm=''):
"""Channel abstract class constructor."""
self.n = n
self.name = name
self.ph = ph
self.ccbm = ccbm
def __str__(self):
return ','.join([str(self.n), self.name, self.ph, self.ccbm])
class StatusChannel(Channel):
"""Holds status channel description data."""
def __init__(self, n: int, name='', ph='', ccbm='', y=0):
"""StatusChannel class constructor."""
super().__init__(n, name, ph, ccbm)
self.name = name
self.n = n
self.name = name
self.ph = ph
self.ccbm = ccbm
self.y = y
    def __str__(self):
        fields = [str(self.n), self.name, self.ph, self.ccbm, str(self.y)]
        return ','.join(fields)
class AnalogChannel(Channel):
"""Holds analog channel description data."""
def __init__(self, n: int, a: float, b=0.0, skew=0.0, cmin=-32767,
cmax=32767, name='', uu='', ph='', ccbm='', primary=1.0,
secondary=1.0, pors='P'):
"""AnalogChannel class constructor."""
super().__init__(n, name, ph, ccbm)
self.name = name
self.uu = uu
self.n = n
self.a = a
self.b = b
self.skew = skew
self.cmin = cmin
self.cmax = cmax
# misc
self.uu = uu
self.ph = ph
self.ccbm = ccbm
self.primary = primary
self.secondary = secondary
self.pors = pors
def __str__(self):
fields = [str(self.n), self.name, self.ph, self.ccbm, self.uu,
str(self.a), str(self.b), str(self.skew), str(self.cmin),
str(self.cmax), str(self.primary), str(self.secondary), self.pors]
return ','.join(fields)
class DatReader:
"""Abstract DatReader class. Used to parse DAT file contents."""
read_mode = "r"
def __init__(self):
"""DatReader class constructor."""
self.file_path = ""
self._content = None
self._cfg = None
self.time = []
self.analog = []
self.status = []
self._total_samples = 0
@property
def total_samples(self):
"""Return the total samples (per channel)."""
return self._total_samples
def load(self, dat_filepath, cfg):
"""Load a DAT file and parse its contents."""
self.file_path = dat_filepath
self._content = None
if os.path.isfile(self.file_path):
# extract CFG file information regarding data dimensions
self._cfg = cfg
self._preallocate()
with open(self.file_path, self.read_mode) as contents:
self.parse(contents)
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
self.file_path)
def read(self, dat_lines, cfg):
"""
Read a DAT file contents, expecting a list of string or FileIO object.
"""
self.file_path = None
self._content = dat_lines
self._cfg = cfg
self._preallocate()
self.parse(dat_lines)
def _preallocate(self):
# read from the cfg file the number of samples in the dat file
steps = self._cfg.sample_rates[-1][1] # last samp field
self._total_samples = steps
# analog and status count
analog_count = self._cfg.analog_count
status_count = self._cfg.status_count
# preallocate analog and status values
self.time = [0.0] * steps
self.analog = [None] * analog_count
self.status = [None] * status_count
# preallocate each channel values with zeros
for i in range(analog_count):
self.analog[i] = [0.0] * steps
for i in range(status_count):
self.status[i] = [0] * steps
def _get_samp(self, n) -> float:
"""Get the sampling rate for a sample n (1-based index)."""
# TODO: make tests.
last_sample_rate = 1.0
for samp, endsamp in self._cfg.sample_rates:
if n <= endsamp:
return samp
return last_sample_rate
def _get_time(self, n: int, ts_value: float, time_base: float,
time_multiplier: float):
# TODO: add option to enforce dat file timestamp, when available.
# TODO: make tests.
ts = 0
sample_rate = self._get_samp(n)
if not self._cfg.timestamp_critical or ts_value == TIMESTAMP_MISSING:
# if the timestamp is missing, use calculated.
if sample_rate != 0.0:
return (n - 1) / sample_rate
else:
raise Exception("Missing timestamp and no sample rate "
"provided.")
else:
# Use provided timestamp if its not missing
return ts_value * time_base * time_multiplier
def parse(self, contents):
"""Virtual method, parse DAT file contents."""
pass
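# Illustrative sketch of the timestamp selection rule shared by the DatReader
# subclasses below: a missing in-file timestamp falls back to a value computed
# from the sample rate, otherwise the stored timestamp is scaled by the time
# base and multiplier.  All numbers here are made up.
def _example_timestamp_selection(n=3, ts_value=None, sample_rate=4800.0,
                                 time_base=1e-6, time_mult=1.0):
    if ts_value is None:                        # stands in for TIMESTAMP_MISSING
        return (n - 1) / sample_rate            # calculated: 2 / 4800 s
    return ts_value * time_base * time_mult     # provided timestamp, scaled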
class AsciiDatReader(DatReader):
"""ASCII format DatReader subclass."""
def __init__(self):
# Call the initialization for the inherited class
super().__init__()
self.ASCII_SEPARATOR = SEPARATOR
self.DATA_MISSING = ""
def parse(self, contents):
"""Parse a ASCII file contents."""
analog_count = self._cfg.analog_count
status_count = self._cfg.status_count
time_mult = self._cfg.timemult
time_base = self._cfg.time_base
# auxiliary vectors (channels gains and offsets)
a = [x.a for x in self._cfg.analog_channels]
b = [x.b for x in self._cfg.analog_channels]
# extract lines
if type(contents) is str:
lines = contents.splitlines()
else:
lines = contents
line_number = 0
for line in lines:
line_number = line_number + 1
if line_number <= self._total_samples:
values = line.strip().split(self.ASCII_SEPARATOR)
n = int(values[0])
# Read time
ts_val = float(values[1])
ts = self._get_time(n, ts_val, time_base, time_mult)
avalues = [float(x)*a[i] + b[i] for i, x in enumerate(values[2:analog_count+2])]
svalues = [int(x) for x in values[len(values)-status_count:]]
# store
self.time[line_number-1] = ts
for i in range(analog_count):
self.analog[i][line_number - 1] = avalues[i]
for i in range(status_count):
self.status[i][line_number - 1] = svalues[i]
class BinaryDatReader(DatReader):
"""16-bit binary format DatReader subclass."""
def __init__(self):
# Call the initialization for the inherited class
super().__init__()
self.ANALOG_BYTES = 2
self.STATUS_BYTES = 2
self.TIME_BYTES = 4
self.SAMPLE_NUMBER_BYTES = 4
# maximum negative value
self.DATA_MISSING = 0xFFFF
self.read_mode = "rb"
if struct.calcsize("L") == 4:
self.STRUCT_FORMAT = "LL {acount:d}h {dcount:d}H"
self.STRUCT_FORMAT_ANALOG_ONLY = "LL {acount:d}h"
self.STRUCT_FORMAT_STATUS_ONLY = "LL {dcount:d}H"
else:
self.STRUCT_FORMAT = "II {acount:d}h {dcount:d}H"
self.STRUCT_FORMAT_ANALOG_ONLY = "II {acount:d}h"
self.STRUCT_FORMAT_STATUS_ONLY = "II {dcount:d}H"
def get_reader_format(self, analog_channels, status_bytes):
# Number of status fields of 2 bytes based on the total number of
# bytes.
dcount = math.floor(status_bytes / 2)
# Check the file configuration
if int(status_bytes) > 0 and int(analog_channels) > 0:
return self.STRUCT_FORMAT.format(acount=analog_channels,
dcount=dcount)
elif int(analog_channels) > 0:
# Analog channels only.
return self.STRUCT_FORMAT_ANALOG_ONLY.format(acount=analog_channels)
else:
# Status channels only.
            return self.STRUCT_FORMAT_STATUS_ONLY.format(dcount=dcount)
def parse(self, contents):
"""Parse DAT binary file contents."""
time_mult = self._cfg.timemult
time_base = self._cfg.time_base
achannels = self._cfg.analog_count
schannel = self._cfg.status_count
        # auxiliary vectors (channels gains and offsets)
a = [x.a for x in self._cfg.analog_channels]
b = [x.b for x in self._cfg.analog_channels]
sample_id_bytes = self.SAMPLE_NUMBER_BYTES + self.TIME_BYTES
abytes = achannels*self.ANALOG_BYTES
dbytes = self.STATUS_BYTES * math.ceil(schannel / 16.0)
bytes_per_row = sample_id_bytes + abytes + dbytes
groups_of_16bits = math.floor(dbytes / self.STATUS_BYTES)
# Struct format.
row_reader = struct.Struct(self.get_reader_format(achannels, dbytes))
# Row reading function.
next_row = None
if isinstance(contents, io.TextIOBase) or \
isinstance(contents, io.BufferedIOBase) or \
isinstance(contents, bytes):
if isinstance(contents, bytes):
contents = io.BytesIO(contents)
def next_row(offset: int):
return contents.read(bytes_per_row)
elif isinstance(contents, str):
def next_row(offset: int):
return contents[offset:offset + bytes_per_row]
else:
raise TypeError("Unsupported content type: {}".format(
type(contents)))
# Get next row.
buffer_offset = 0
row = next_row(buffer_offset)
irow = 0
while row != b'':
values = row_reader.unpack(row)
# Sample number
n = values[0]
# Time stamp
ts_val = values[1]
ts = self._get_time(n, ts_val, time_base, time_mult)
self.time[irow] = ts
# Extract analog channel values.
for ichannel in range(achannels):
yint = values[ichannel + 2]
y = a[ichannel] * yint + b[ichannel]
self.analog[ichannel][irow] = y
# Extract status channel values.
for igroup in range(groups_of_16bits):
group = values[achannels + 2 + igroup]
# for each group of 16 bits, extract the status channels
maxchn = min([ (igroup+1) * 16, schannel])
for ichannel in range(igroup * 16, maxchn):
chnindex = ichannel - igroup*16
mask = int('0b01', 2) << chnindex
extract = (group & mask) >> chnindex
self.status[ichannel][irow] = extract
# Get the next row
irow += 1
buffer_offset += bytes_per_row
row = next_row(buffer_offset)
class Binary32DatReader(BinaryDatReader):
"""32-bit binary format DatReader subclass."""
def __init__(self):
# Call the initialization for the inherited class
super().__init__()
self.ANALOG_BYTES = 4
if struct.calcsize("L") == 4:
self.STRUCT_FORMAT = "LL {acount:d}l {dcount:d}H"
self.STRUCT_FORMAT_ANALOG_ONLY = "LL {acount:d}l"
else:
self.STRUCT_FORMAT = "II {acount:d}i {dcount:d}H"
self.STRUCT_FORMAT_ANALOG_ONLY = "II {acount:d}i"
# maximum negative value
self.DATA_MISSING = 0xFFFFFFFF
class Float32DatReader(BinaryDatReader):
"""Single precision (float) binary format DatReader subclass."""
def __init__(self):
# Call the initialization for the inherited class
super().__init__()
self.ANALOG_BYTES = 4
if struct.calcsize("L") == 4:
self.STRUCT_FORMAT = "LL {acount:d}f {dcount:d}H"
self.STRUCT_FORMAT_ANALOG_ONLY = "LL {acount:d}f"
else:
self.STRUCT_FORMAT = "II {acount:d}f {dcount:d}H"
self.STRUCT_FORMAT_ANALOG_ONLY = "II {acount:d}f"
# Maximum negative value
self.DATA_MISSING = sys.float_info.min
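# Illustrative sketch of the status-word unpacking performed in
# BinaryDatReader.parse: each 16-bit group packs up to 16 status channels,
# which are recovered with a shift and mask.  The group value is hypothetical.
def _example_status_bit_extraction(group=0b0101, n_channels=3):
    """Return the individual status bits of one hypothetical 16-bit group."""
    bits = []
    for channel_index in range(n_channels):
        mask = 1 << channel_index
        bits.append((group & mask) >> channel_index)
    return bits  # [1, 0, 1] for the sample group above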
|
the-stack_0_20050 | #!/usr/bin/env python3
# Copyright 2016 Creative Commons
# Written by Rob Myers <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path, re, sys
from pathlib import Path
class AddCC4Links(object):
"""Adds a link to a license, specified by language code and name, to all
existing CC 4.0 license legalcodes where they do not already contain a
link to it.
Make sure to run it in a checked out version of the creativecommons.org
repository, either in the tools directory, docroot,
or docroot/legalcode.
Note that this code modifies files inline. Commit any other changes
before running it."""
def usage(self):
print("add-cc4-links.py LANGUAGE_CODE LANGUAGE_NAME")
print(" e.g. add-cc4-links.py nl Nederlands")
print(
" LANGUAGE_CODE must be 2 letters or 2-hyphen-N, the same used in filename."
)
print(" LANGUAGE_NAME must be in the relevant language")
print(" if it contains whitespace, enclose in quotes.")
def get_args(self):
# Make sure there are enough args
# Make sure arg 2 is a language code
# Make sure arg 3 is not a language code
self.args_ok = (
(len(sys.argv) == 3) and (len(sys.argv[1]) >= 2) and (len(sys.argv[2]) >= 2)
)
if self.args_ok:
self.language_code = sys.argv[1]
self.language_name = sys.argv[2]
self.exclude_pattern = "*_4.0_" + self.language_code + ".html"
else:
self.usage()
return self.args_ok
def get_path(self):
"""Where are the licenses?"""
self.path = False
path = Path.cwd()
pathdir = path.name
if pathdir == "legalcode":
self.path = path
if pathdir == "docroot":
self.path = path / "legalcode"
if pathdir == "tools":
self.path = path.parent / "docroot" / "legalcode"
if not self.path:
print("Please run from within the checked-out project.")
return self.path != False
def get_files(self):
"""Get all the 4.0 files *except* those we are linking to"""
self.files = [
f
for f in self.path.glob("*_4.0*.html")
if (not os.path.islink(f) and not f.match(self.exclude_pattern))
]
self.files.sort()
def process_files(self):
"""Add links to all the license files"""
for filepath in self.files:
self.process_file(filepath)
def file_license_and_language(self, filepath):
"""Get the license number and language code from the file path"""
elements = filepath.stem.split("_")
# Un-translated deeds don't have a language code, so set to English
if len(elements) != 3:
elements += ["en"]
return elements[0], elements[2]
def links_in_page(self, content):
"""Find the translated license links at the bottom of the page"""
return re.findall(
r'//creativecommons\.org/licenses/[^/]+/4\.0/legalcode(\.[^"]{2,})?">([^>]+)</a>',
content,
)
def is_rtl(self, content):
"""Determine whether the page is in a right-to-left script"""
return (re.search(r' dir="rtl"', content) != None) or (
re.search(r'class="rtl"', content) != None
)
def insert_at_index_rtl(self, links):
index = -1
for i, match in reversed(list(enumerate(links))):
if self.language_name.casefold() < match[1].casefold():
index = i
break
return index
def insert_at_index_ltr(self, links):
index = -1
for match in links:
if self.language_name.casefold() < match[1].casefold():
break
else:
index += 1
return index
def insert_at_index(self, links, rtl):
"""Find the alphabetic position in the list of translated license links
to insert the link at"""
if rtl:
return self.insert_at_index_rtl(links)
else:
return self.insert_at_index_ltr(links)
def insert_link(self, content, lic, links, index):
"""Insert the link to the correct version of the license
in the correct position in the list of links at the bottom of the
page"""
link = (
'<a href="//creativecommons.org/licenses/'
+ lic
+ "/4.0/legalcode."
+ self.language_code
+ '">'
+ self.language_name
+ "</a>"
)
if index == -1:
target = '<a href="//creativecommons.org/licenses/' + lic
replace = link + ", " + target
else:
lang = links[index][1]
target = ">" + lang + "</a>"
replace = target + ", " + link
return content.replace(target, replace, 1)
def file_contains_link_already(self, links):
"""Did we already add a link to this page?"""
return (
next(
(
code
for code, name in links
if name == self.language_name or code == self.language_code
),
False,
)
!= False
)
def process_file(self, filepath):
"""Get the file's details and insert a link to the translated version
into it"""
lic, lang = self.file_license_and_language(filepath)
with filepath.open() as infile:
content = infile.read()
links = self.links_in_page(content)
if not self.file_contains_link_already(links):
rtl = self.is_rtl(content)
index = self.insert_at_index(links, rtl)
# print(links)
# print(index)
# print(links[index])
updated_content = self.insert_link(content, lic, links, index)
with filepath.open("w") as outfile:
outfile.write(updated_content)
direction = "ltr"
if rtl:
direction = "rtl"
print("Added link to " + direction + " file: " + filepath.name)
# else:
# print('File already contains link: ' + filepath.name)
def main(self):
"""Get the command line arguments, find the files, and process them"""
if self.get_args() and self.get_path():
self.get_files()
self.process_files()
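# Illustrative sketch of the alphabetical-insertion logic above, using made-up
# language names.  The tuples mimic what links_in_page() returns: (language
# code suffix, display name).
def _example_insert_position():
    adder = AddCC4Links()
    adder.language_name = "Nederlands"  # hypothetical language being added
    links = [(".de", "Deutsch"), (".fr", "français"), (".pl", "polski")]
    # For a left-to-right page the new link is inserted after index 1
    # ("français"), i.e. between "français" and "polski".
    return adder.insert_at_index(links, rtl=False)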
if __name__ == "__main__":
link_adder = AddCC4Links()
link_adder.main()
|
the-stack_0_20051 | AR = '/usr/bin/ar'
ARFLAGS = 'rcs'
CCFLAGS = ['-g']
CCFLAGS_MACBUNDLE = ['-fPIC']
CCFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CC_VERSION = ('4', '2', '1')
COMPILER_CXX = 'g++'
CPP = '/usr/bin/cpp'
CPPFLAGS_NODE = ['-D_GNU_SOURCE']
CPPPATH_NODE = '/src/nvm/v0.6.10/include/node'
CPPPATH_ST = '-I%s'
CXX = ['/usr/bin/g++']
CXXDEFINES_ST = '-D%s'
CXXFLAGS = ['-g']
CXXFLAGS_DEBUG = ['-g']
CXXFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CXXFLAGS_RELEASE = ['-O2']
CXXLNK_SRC_F = ''
CXXLNK_TGT_F = ['-o', '']
CXX_NAME = 'gcc'
CXX_SRC_F = ''
CXX_TGT_F = ['-c', '-o', '']
DEST_CPU = 'x86_64'
DEST_OS = 'darwin'
FULLSTATIC_MARKER = '-static'
LIBDIR = '/Users/alex/.node_libraries'
LIBPATH_NODE = '/src/nvm/v0.6.10/lib'
LIBPATH_ST = '-L%s'
LIB_ST = '-l%s'
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINK_CXX = ['/usr/bin/g++']
NODE_PATH = '/Users/alex/.node_libraries'
PREFIX = '/usr/local'
PREFIX_NODE = '/src/nvm/v0.6.10'
RANLIB = '/usr/bin/ranlib'
RPATH_ST = '-Wl,-rpath,%s'
SHLIB_MARKER = ''
SONAME_ST = ''
STATICLIBPATH_ST = '-L%s'
STATICLIB_MARKER = ''
STATICLIB_ST = '-l%s'
macbundle_PATTERN = '%s.bundle'
program_PATTERN = '%s'
shlib_CXXFLAGS = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
shlib_LINKFLAGS = ['-dynamiclib']
shlib_PATTERN = 'lib%s.dylib'
staticlib_LINKFLAGS = []
staticlib_PATTERN = 'lib%s.a'
|
the-stack_0_20053 | def c_1():
L = [4, 10, 8]
x = L.sort()
L.append(20)
L2 = L[1:]
# output:
# >>> x
# None
# >>> L
# [4, 8, 10, 20]
# >>> id(L) == id(L2)
# False
def repeat_word(word, num_times):
''' (str, int) -> str
'''
print(__name__)
word = word * num_times
print('Repeated word is:', word)
return word
if __name__ == '__main__':
word = 'Yes'
print('Original word is:', word)
repeat_word(word, 3)
print('New word is:', word)
word = repeat_word(word, 2) + '!'
print('New word is:', word)
# output:
# Original word is: Yes
# __main__
# Repeated word is: YesYesYes
# New word is: Yes
# __main__
# Repeated word is: YesYes
# New word is: YesYes! |
the-stack_0_20054 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: lenovo
@file: File.py
@time: 2021/4/12 13:20
"""
def write_to_file(filename, content):
    # Open in append mode; the file is created automatically if it does not
    # exist (mode 'w' would instead clear any existing data before writing).
    with open(filename, 'a') as f:
        for line in content:
            f.write(line)
f.write("\n") |
the-stack_0_20055 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 15:27:02 2012
@author: Craig
"""
# standard modules
from datetime import datetime, timedelta
from string import Template
import uuid
import types
# site modules
from scrapy.item import Field
from scrapy.contrib.loader import ItemLoader, XPathItemLoader
from scrapy.contrib.loader.processor import TakeFirst, MapCompose, Join
from scrapy.contrib.loader.processor import Identity, Compose
from scrapy.http import Request, Response, TextResponse, FormRequest
from scrapy.shell import inspect_response
from scrapy import log
from scrapy.selector import HtmlXPathSelector
# local modules
from nrc.NrcBot import NrcBot
from nrc.items import NrcItem, FeedEntry, FeedEntryTag
from nrc.spiders.CogisScraper import CogisInspection, CogisSpill
# CONSTANTS
insp_title_template = Template(
"Violation reported at well $api operated by $operator")
spill_title_template = Template(
"Operator $operator reports spill at well $api")
insp_summ_template = Template("""\
A field inspection by the Colorado Oil and Gas Conservation Commission on $date
reported a violation at well $api operated by $operator.
""")
spill_summ_template = Template("""\
A spill was reported at well $api operated by $operator.
""")
content_template = Template("""\
<b>Report Details</b><br/>
Report: <a href="$doc_href">$doc_num</a><br/>
Well API Record: <a href="$well_href">$api</a><br/>
Operator: $operator<br/>
Report Date: $date<br/>
County: $county<br/>
$notestring
<a href="$doc_href">View original report</a><br/>
""")
# COGIS Records Geolocator
class CogisLocator (NrcBot):
name = 'CogisLocator'
task_conditions = {'CogisScraper':'NEW'}
local_task_params = {
'task_id':'1001',
'source_task_id':'124',
'feedsource_id':'1001',
'Item':'CogisInspection',
'loc_key_field':'insp_api_num',
'target_fields':'site_lat, site_lng, operator',
'url_template':
'http://cogcc.state.co.us/cogis/FacilityDetail.asp?facid={0}&type=WELL',
}
allowed_domains = None
def process_item(self, task):
if not isinstance(task, dict):
keyval = task
task = dict(self.local_task_params.items())
item = self.get_cogis_item(task, keyval)
if item is None: return
api_key = item[task["loc_key_field"]].replace("-","")
url = task["url_template"].format(api_key)
req = Request(url,
callback=self.parse_well,
dont_filter=True,
errback=self.error_callback)
req.meta['task'] = task
req.meta['task_keyval'] = keyval
yield req
        else:
            # this is for the new task structure only
            self.log('CogisLocator.process_item got task parameter dictionary',
                     log.ERROR)
            return
def parse_well(self, response):
task = response.meta['task']
keyval = response.meta['task_keyval']
hxs = HtmlXPathSelector(response)
fields = hxs.select('//td//text()')
lat, lng = None, None
for field in fields:
# Note that there may be two instances of lat/lng in the record,
# one is 'as planned' and the 2nd is 'as built'.
# We want the 2nd if it's there.
l1, l2 = self.extract_lat_lng(field.extract())
if l1 is not None:
lat, lng = l1, l2
operator = None
take_next = False
for field in fields:
if field.extract().strip() == 'Operator:':
take_next = True
continue
elif take_next:
operator = ''.join([c for c in
field.extract().strip()[:-7].strip()
if ord(c)<127])
if not operator: continue
break
if operator or lat is not None:
item = self.get_cogis_item(task, keyval)
if item is None: return
target_fields = [f.strip()
for f in task['target_fields'].split(',')]
if item[target_fields[2]]:
operator = item[target_fields[2]]
update_fields = dict(zip(target_fields, (lat, lng, operator)))
self.db.updateItem (task['Item'],
item['st_id'],
update_fields,
id_field='st_id')
for key, val in update_fields.items():
item[key] = val
if lat is not None:
self.log('set lat/lng for cogis %s to %s/%s' % (keyval, lat, lng),
log.INFO)
self.item_completed(keyval)
return self.screen_feed_entry(item, task)
else:
self.log('lat/lng values not found for cogis %s' % (keyval,),
log.INFO)
self.item_dropped(keyval)
def get_cogis_item(self, task, keyval):
item = globals()[task['Item']]()
cogis_rec = self.db.loadItem (item,
match_fields={'doc_num':str(keyval)})
if cogis_rec is None:
self.log("No {0} record for doc_num {1}."
.format(task['Item'], keyval),
log.ERROR)
            return None
for field_name, value in cogis_rec.items():
if field_name == 'timestamp':
continue
item[field_name] = value
return item
def extract_lat_lng(self, s):
if "lat/long" in s.lower():
s2 = ''.join([c for c in s if c in "0123456789-./"]).split('/')
if len(s2) == 3:
try:
float(s2[1])
float(s2[2])
return s2[1:]
except ValueError:
pass
return None, None
def screen_feed_entry(self, item, task):
if (isinstance(item, CogisInspection)):
if item["violation"] == 'Y':
return self.create_insp_feed_entry(item, task)
        if isinstance(item, CogisSpill):
            return self.create_spill_feed_entry(item, task)
def create_insp_feed_entry (self, item, task):
params = {}
params['lat'] = item['site_lat']
params['lng'] = item['site_lng']
params['operator'] = item['operator']
params['api'] = item['insp_api_num']
params['title_tmpl'] = insp_title_template
params['summ_tmpl'] = insp_summ_template
params['notestring'] = ""
params['notes'] = []
return self.create_feed_entry(item, task, params)
def create_spill_feed_entry (self, item, task):
params = {}
params['lat'] = item['spill_lat']
params['lng'] = item['spill_lng']
params['operator'] = item['company_name']
params['api'] = item['facility_id']
params['title_tmpl'] = spill_title_template
params['summ_tmpl'] = spill_summ_template
params['notestring'] = ""
params['notes'] = []
if item['groundwater'] and item['groundwater'].upper() == 'Y':
params['notes'].append('groundwater affected')
if item['surfacewater'] and item['surfacewater'].upper() == 'Y':
params['notes'].append('surfacewater affected')
return self.create_feed_entry(item, task, params)
def create_feed_entry (self, item, task, params):
params['county'] = item['county_name']
params['doc_num'] = item['doc_num']
params['doc_href'] = item['doc_href']
params['date'] = item['date']
facility_id = ''.join([c for c in params['api']
if c in ('0123456789')])
params['well_href'] = ("http://cogcc.state.co.us/cogis/"
"FacilityDetail.asp?facid={0}&type=WELL"
.format(facility_id))
params['content_tmpl'] = content_template
if params['notes']:
params['notestring'] = ("Notes: %s<br/>"
% (', '.join(params['notes']),))
# create a new feed item
l=ItemLoader (FeedEntry())
url = params['doc_href']
#feed_entry_id = uuid.uuid3(uuid.NAMESPACE_URL, url.encode('ASCII'))
feed_entry_id = self.db.uuid3_str(name=url.encode('ASCII'))
l.add_value ('id', feed_entry_id)
l.add_value ('title', params['title_tmpl'].substitute(params))
l.add_value ('incident_datetime', params['date'])
l.add_value ('link', params['doc_href'])
l.add_value ('summary', params['summ_tmpl'].substitute(params))
l.add_value ('content', params['content_tmpl'].substitute(params))
l.add_value ('lat', params['lat'])
l.add_value ('lng', params['lng'])
l.add_value ('source_id', task['feedsource_id'])
feed_item = l.load_item()
if (feed_item.get('lat')
and feed_item.get('lng')
and feed_item.get('incident_datetime')
and (datetime.now().date() - feed_item.get('incident_datetime'))
<= timedelta(days=60)):
yield feed_item
for tag in self.get_tags(item):
yield self.create_tag (feed_entry_id, tag)
def get_tags (self, item):
tags = []
if (isinstance(item, CogisInspection)):
tags.append ('violation')
if (isinstance(item, CogisSpill)):
tags.append ('spill')
tags.append ('release')
tags.append ('drilling')
tags.append ('COGIS')
return tags
class CogisSpillLocator (CogisLocator):
name = 'CogisSpillLocator'
task_conditions = {'CogisSpillScraper':'NEW'}
local_task_params = {
'task_id':'1002',
'source_task_id':'125',
'feedsource_id':'1001',
'Item':'CogisSpill',
'loc_key_field':'facility_id',
'target_fields':'spill_lat, spill_lng, company_name',
'url_template':
'http://cogcc.state.co.us/cogis/FacilityDetail.asp?facid={0}&type=WELL',
}
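# Illustrative sketch of the lat/long parsing in CogisLocator.extract_lat_lng:
# a facility-page field such as "Lat/Long: 40.123/-104.567" is reduced to its
# numeric parts.  The sample text is made up, and the method is called unbound
# so that no spider/crawler setup is required.
def _example_extract_lat_lng():
    sample_field = "Lat/Long: 40.123/-104.567"
    return CogisLocator.extract_lat_lng(None, sample_field)
    # -> ['40.123', '-104.567']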
|
the-stack_0_20056 | import unittest
import os
import shutil
import zipfile
import sys
# Ensure that Python can find and load gencat.py
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('../')
from gencat import gencat
class MockCat(gencat):
def makeZipDict(self):
self.zip_dict = {}
self.zip_dict['zip1'] = ('concat1', )
def makeConcatDict(self):
self.concat_dict = {}
self.concat_dict['concat1'] = ('./test_data/file1.txt', ) + ('./test_data/file2.txt', )
class test_main(unittest.TestCase):
def setUp(self):
paths = ['./test_data']
for path in paths:
try:
os.makedirs(path)
except:
shutil.rmtree(path, ignore_errors = True)
os.makedirs(path)
count = 1
for FILE in ['./test_data/file1.txt', './test_data/file2.txt']:
with open(FILE, 'wb') as f:
f.write('THIS IS TEST FILE %s.\n' % (count))
count = count + 1
def test_default(self):
'''
Test that the lines in main run in the intended order and produce predictable output
when given simple input.
'''
testcat = MockCat('./test_data', './test_temp', './test_out')
testcat.main()
self.assertFalse(os.path.isdir('./test_temp'))
self.assertTrue(os.path.isfile('./test_out/concatDict.txt'))
self.assertTrue(os.path.isfile('./test_out/zipDict.txt'))
self.assertTrue(os.path.isfile('./test_out/zip1.zip'))
self.assertTrue(zipfile.is_zipfile('./test_out/zip1.zip'))
with zipfile.ZipFile('./test_out/zip1.zip', 'r') as zf:
zf.extractall('./test_out/')
with open('./test_out/zip1/concat1.txt', 'rU') as f:
text = f.read()
test_text = '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS TEST FILE 1.' + \
'\n\nNEWFILE\nFILENAME: file2.txt\n\nTHIS IS TEST FILE 2.\n'
self.assertEqual(text, test_text)
def tearDown(self):
paths = ['./test_data', './test_out']
for path in paths:
shutil.rmtree(path, ignore_errors = True)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_20057 | import logging
from typing import List
from backend.database.wrapper.field_wrapper import QueryFieldWrapper
from backend.database.wrapper.stats.creation.player_stat_creation import PlayerStatCreation
from backend.database.wrapper.stats.creation.replay_group_stat_creation import ReplayGroupStatCreation
from backend.database.wrapper.stats.creation.shared_stat_creation import SharedStatCreation
from backend.database.wrapper.stats.creation.team_stat_creation import TeamStatCreation
logger = logging.getLogger(__name__)
class SharedStatsWrapper:
"""
Contains basic stat queries initialized at creation time
"""
def __init__(self):
self.player_stats = PlayerStatCreation()
self.replay_group_stats = ReplayGroupStatCreation()
self.team_stats = TeamStatCreation()
def compare_to_global(self, stats, global_stats, global_stds):
"""
Converts the stats to being compared the global values.
Mostly creates ratios.
:param stats: The stats being modified
:param global_stats: The global stats
:param global_stds: The global standard deviations
:return: A list of stats that have been modified
"""
stat_list = self.get_player_stat_list()
for i, s in enumerate(stats):
player_stat = s
if player_stat is None:
player_stat = 0
else:
player_stat = float(player_stat)
global_stat = global_stats[i]
global_std = global_stds[i]
if global_stat is None or global_stat == 0:
global_stat = 1
else:
global_stat = float(global_stat)
if global_std is None or global_std == 0:
logger.debug("%s %s", stat_list[i].dynamic_field.field_name, 'std is 0')
global_std = 1
else:
global_std = float(global_std)
if global_std != 1 and global_std > 0:
if str(stat_list[i].dynamic_field.field_name) == 'time_behind_ball':
logger.debug("%s %s %s %s %s", str(stat_list[i].dynamic_field.field_name), str(player_stat),
str(global_stat), str(global_std),
str(float((player_stat - global_stat) / global_std)))
stats[i] = float((player_stat - global_stat) / global_std)
else:
stats[i] = float(player_stat / global_stat)
return stats
def get_player_stat_list(self) -> List[QueryFieldWrapper]:
return self.player_stats.stat_list
def get_player_stat_query(self):
return self.player_stats.stats_query
def get_wrapped_stats(self, stats, creation: SharedStatCreation):
return SharedStatCreation.get_wrapped_stats(stats, creation.get_stat_list())
def get_player_stat_std_query(self):
return self.player_stats.std_query
def float_maybe(self, f):
if f is None:
return None
else:
return float(f)
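# Illustrative sketch of the normalisation applied in compare_to_global: when a
# usable global standard deviation exists the player value becomes a z-score,
# otherwise it falls back to a simple ratio.  The numbers are made up.
def _example_normalisation(player_stat=55.0, global_mean=50.0, global_std=10.0):
    if global_std is not None and global_std > 0 and global_std != 1:
        return (player_stat - global_mean) / global_std  # z-score, 0.5 here
    return player_stat / (global_mean or 1.0)            # ratio fallback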
|
the-stack_0_20058 | import json
import ecdsa
import hashlib
import base64
from substrateinterface import Keypair
from .common import (BaseAccount, get_fallback_private_key,
get_verification_buffer, get_public_key,
PRIVATE_KEY_FILE)
from cosmospy._wallet import privkey_to_address, privkey_to_pubkey
from cosmospy.typing import SyncMode
DEFAULT_HRP = "cosmos"
def get_signable_message(message):
signable = get_verification_buffer(message).decode('utf-8')
content_message = {
"type": "signutil/MsgSignText",
"value": {
"message": signable,
"signer": message['sender'],
},
}
return {
"chain_id": "signed-message-v1",
"account_number": str(0),
"fee": {
"amount": [],
"gas": str(0),
},
"memo": "",
"sequence": str(0),
"msgs": [content_message,],
}
def get_verification_string(message):
value = get_signable_message(message)
return json.dumps(value, separators=(",", ":"), sort_keys=True)
class CSDKAccount(BaseAccount):
CHAIN = "CSDK"
CURVE = "secp256k1"
def __init__(self, private_key=None, hrp=DEFAULT_HRP):
self.private_key = private_key
self.hrp = hrp
def sign_message(self, message):
verif = get_verification_string(message)
privkey = ecdsa.SigningKey.from_string(self.private_key, curve=ecdsa.SECP256k1)
signature_compact = privkey.sign_deterministic(
verif.encode("utf-8"), hashfunc=hashlib.sha256, sigencode=ecdsa.util.sigencode_string_canonize
)
signature_base64_str = base64.b64encode(signature_compact).decode("utf-8")
base64_pubkey = base64.b64encode(self.get_public_key()).decode("utf-8")
sig = {
"signature": signature_base64_str,
"pub_key": {"type": "tendermint/PubKeySecp256k1", "value": base64_pubkey},
"account_number": str(0),
"sequence": str(0),
}
message['signature'] = json.dumps(sig)
return message
def get_address(self):
return privkey_to_address(self.private_key)
def get_public_key(self):
return privkey_to_pubkey(self.private_key)
def get_fallback_account(hrp=DEFAULT_HRP):
return CSDKAccount(private_key=get_fallback_private_key(), hrp=hrp) |
the-stack_0_20059 | #!/usr/bin/python3.6
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email [email protected] or [email protected].
#
# IMP : for preupgrade and postupgrade cmd,
# merge_configs() is imported from the merge_pre_post.py
from s3confstore.cortx_s3_confstore import S3CortxConfStore
import os.path
import sys
import logging
def upgrade_config(configFile:str, oldSampleFile:str, newSampleFile:str, unsafeAttributesFile:str, filetype:str):
"""
Core logic for updating config files during upgrade using conf store.
    The merge algorithm is as follows:
Iterate over all parameters sample.new file
for every parameter, check
- if it is marked as 'unsafe' in attributes file, skip
- if it marked as 'safe' in the attributes file
- diff the value in config and sample.old - if it is changed, skip
- if it is not changed, we will overwrite the value in cfg file from sample.new
- if it does not exist in cfg file add the value from sample.new file to cfg file
- All the arrays in yaml are always overwritten
"""
#If config file is not present then abort merging.
if not os.path.isfile(configFile):
logger.error(f'config file {configFile} does not exist')
raise Exception(f'ERROR: config file {configFile} does not exist')
logger.info(f'config file {str(configFile)} upgrade started.')
# old sample file
conf_old_sample = filetype + oldSampleFile
cs_conf_old_sample = S3CortxConfStore(config=conf_old_sample, index=conf_old_sample)
# new sample file
conf_new_sample = filetype + newSampleFile
cs_conf_new_sample = S3CortxConfStore(config=conf_new_sample, index=conf_new_sample)
conf_new_sample_keys = cs_conf_new_sample.get_all_keys()
# unsafe attribute file
conf_unsafe_file = filetype + unsafeAttributesFile
cs_conf_unsafe_file = S3CortxConfStore(config=conf_unsafe_file, index=conf_unsafe_file)
conf_unsafe_file_keys = cs_conf_unsafe_file.get_all_keys()
# active config file
conf_file = filetype + configFile
cs_conf_file = S3CortxConfStore(config=conf_file, index=conf_file)
conf_file_keys = cs_conf_file.get_all_keys()
#logic to determine which keys to merge.
keys_to_overwrite = []
for key in conf_new_sample_keys:
#If key is marked for unsafe then do not modify/overwrite.
if key in conf_unsafe_file_keys:
continue
        #If the key is not present in the active config file then add it
        # (this will also add, and hence effectively overwrite, keys removed in the
        # above handling, so these keys always take their value from the new sample file).
if key not in conf_file_keys:
keys_to_overwrite.append(key)
#if key is not unsafe and value is not changed by user then overwrite it.
elif cs_conf_file.get_config(key) == cs_conf_old_sample.get_config(key):
keys_to_overwrite.append(key)
#if user has changed the value of the key then skip it.
else:
continue
cs_conf_file.merge_config(source_index=conf_new_sample, keys_to_include=keys_to_overwrite)
cs_conf_file.save_config()
logger.info(f'config file {str(configFile)} upgrade completed')
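# Illustrative sketch of the key-selection rule implemented above, using plain
# dictionaries instead of ConfStore indexes.  All keys and values are made up.
def _example_keys_to_overwrite():
    new_sample = {"version": "2.0", "port": 9443, "log_level": "INFO"}
    old_sample = {"version": "1.0", "port": 9443, "log_level": "INFO"}
    active_cfg = {"version": "1.0", "port": 9443, "log_level": "DEBUG"}
    unsafe_keys = {"version"}
    keys = []
    for key in new_sample:
        if key in unsafe_keys:
            continue                               # never touch unsafe keys
        if key not in active_cfg:
            keys.append(key)                       # new key: take the new value
        elif active_cfg[key] == old_sample.get(key):
            keys.append(key)                       # unchanged by user: overwrite
    return keys  # ["port"]: "log_level" was user-modified, "version" is unsafe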
def merge_configs(config_file_path: str, s3_tmp_dir):
"""
- This function will merge all S3 config files during upgrade
    - It is the entry point to be called from outside this file to run the config upgrade
"""
# Use existing s3-deployment-logger or setup new console logger
setup_logger()
g_upgrade_items = {
's3' : {
'configFile' : os.path.join(config_file_path, "s3/conf/s3config.yaml"),
'oldSampleFile' : os.path.join(s3_tmp_dir, "s3config.yaml.sample.old"),
'newSampleFile' : os.path.join(config_file_path, "s3/conf/s3config.yaml.sample"),
'unsafeAttributesFile' : os.path.join(config_file_path, "s3/conf/s3config_unsafe_attributes.yaml"),
'fileType' : 'yaml://'
},
'auth' : {
'configFile' : os.path.join(config_file_path, "auth/resources/authserver.properties"),
'oldSampleFile' : os.path.join(s3_tmp_dir, "authserver.properties.sample.old"),
'newSampleFile' : os.path.join(config_file_path, "auth/resources/authserver.properties.sample"),
'unsafeAttributesFile' : os.path.join(config_file_path, "auth/resources/authserver_unsafe_attributes.properties"),
'fileType' : 'properties://'
},
'keystore' : {
'configFile' : os.path.join(config_file_path, "auth/resources/keystore.properties"),
'oldSampleFile' : os.path.join(s3_tmp_dir,"keystore.properties.sample.old"),
'newSampleFile' : os.path.join(config_file_path, "auth/resources/keystore.properties.sample"),
'unsafeAttributesFile' : os.path.join(config_file_path, "auth/resources/keystore_unsafe_attributes.properties"),
'fileType' : 'properties://'
},
'bgdelete' : {
'configFile' : os.path.join(config_file_path, "s3/s3backgrounddelete/config.yaml"),
'oldSampleFile' : os.path.join(s3_tmp_dir, "config.yaml.sample.old"),
'newSampleFile' : os.path.join(config_file_path, "s3/s3backgrounddelete/config.yaml.sample"),
'unsafeAttributesFile' : os.path.join(config_file_path, "s3/s3backgrounddelete/s3backgrounddelete_unsafe_attributes.yaml"),
'fileType' : 'yaml://'
},
'cluster' : {
'configFile' : os.path.join(config_file_path, "s3/s3backgrounddelete/s3_cluster.yaml"),
'oldSampleFile' : os.path.join(s3_tmp_dir, "s3_cluster.yaml.sample.old"),
'newSampleFile' : os.path.join(config_file_path, "s3/s3backgrounddelete/s3_cluster.yaml.sample"),
'unsafeAttributesFile' : os.path.join(config_file_path, "s3/s3backgrounddelete/s3_cluster_unsafe_attributes.yaml"),
'fileType' : 'yaml://'
}
}
for upgrade_item in g_upgrade_items:
upgrade_config(g_upgrade_items[upgrade_item]['configFile'],
g_upgrade_items[upgrade_item]['oldSampleFile'],
g_upgrade_items[upgrade_item]['newSampleFile'],
g_upgrade_items[upgrade_item]['unsafeAttributesFile'],
g_upgrade_items[upgrade_item]['fileType'])
def setup_logger():
"""
    - This function will use the existing s3-deployment-logger as-is if it is available
- else it will log to console
"""
global logger
logger = logging.getLogger("s3-deployment-logger")
if logger.hasHandlers():
logger.info("Logger has valid handler")
else:
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
chandler = logging.StreamHandler(sys.stdout)
chandler.setLevel(logging.DEBUG)
s3deployment_log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(s3deployment_log_format)
# create formatter and add it to the handlers
chandler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(chandler)
if __name__ == "__main__":
config_file_path = "/etc/cortx"
s3_tmp_dir = os.path.join(config_file_path, "s3/tmp")
merge_configs(config_file_path, s3_tmp_dir)
|
the-stack_0_20060 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2013-2014, gamesun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of gamesun nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GAMESUN "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GAMESUN BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from distutils.core import setup
import sys
import py2exe
import os
import glob
from py2exe.build_exe import py2exe as build_exe
import appInfo
origIsSystemDLL = py2exe.build_exe.isSystemDLL
def isSystemDLL(pathname):
if os.path.basename(pathname).lower() in ("msvcp71.dll", "dwmapi.dll"):
return 0
return origIsSystemDLL(pathname)
py2exe.build_exe.isSystemDLL = isSystemDLL
if len(sys.argv) == 1:
sys.argv.append("py2exe")
# sys.argv.append("-q")
manifest_template = '''
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<assemblyIdentity
version="0.6.8.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false"
/>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="1fc8b3b9a1e18e3b"
/>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="x86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
CONTENT_DIRS = [ "media" ]
# EXTRA_FILES = [ "./media/icon16.ico", "./media/icon32.ico" ]
EXTRA_FILES = []
class MediaCollector(build_exe):
def addDirectoryToZip(self, folder):
full = os.path.join(self.collect_dir, folder)
if not os.path.exists(full):
self.mkpath(full)
for f in glob.glob("%s/*" % folder):
if os.path.isdir(f):
self.addDirectoryToZip(f)
else:
name = os.path.basename(f)
self.copy_file(f, os.path.join(full, name))
self.compiled_files.append(os.path.join(folder, name))
def copy_extensions(self, extensions):
#super(MediaCollector, self).copy_extensions(extensions)
build_exe.copy_extensions(self, extensions)
for folder in CONTENT_DIRS:
self.addDirectoryToZip(folder)
for fileName in EXTRA_FILES:
name = os.path.basename(fileName)
self.copy_file(fileName, os.path.join(self.collect_dir, name))
self.compiled_files.append(name)
myOptions = {
"py2exe":{
"compressed": 1,
"optimize": 2,
"ascii": 1,
# "includes":,
"dll_excludes": ["MSVCP90.dll","w9xpopen.exe"],
"bundle_files": 2
}
}
RT_MANIFEST = 24
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
MyTerm_windows = Target(
# used for the versioninfo resource
copyright = appInfo.copyright,
name = appInfo.title,
version = appInfo.version,
description = appInfo.file_name,
author = appInfo.author,
url = appInfo.url,
# what to build
script = "main.py",
dest_base = appInfo.file_name,
icon_resources = [(1, "icon\icon.ico")],
other_resources= [(RT_MANIFEST, 1, manifest_template % dict(prog = appInfo.title))]
)
setup(
options = myOptions,
cmdclass= {'py2exe': MediaCollector},
data_files = [("", ["COPYING",]),],
windows = [MyTerm_windows]
) |
the-stack_0_20061 | """Module containing the implementation of the IRIReference class."""
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Rackspace
# Copyright (c) 2015 Ian Stapleton Cordasco
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from . import compat
from . import exceptions
from . import misc
from . import normalizers
from . import uri
try:
import idna
except ImportError: # pragma: no cover
idna = None
class IRIReference(namedtuple('IRIReference', misc.URI_COMPONENTS),
uri.URIMixin):
"""Immutable object representing a parsed IRI Reference.
Can be encoded into an URIReference object via the procedure
specified in RFC 3987 Section 3.1
.. note::
The IRI submodule is a new interface and may possibly change in
the future. Check for changes to the interface when upgrading.
"""
slots = ()
def __new__(cls, scheme, authority, path, query, fragment,
encoding='utf-8'):
"""Create a new IRIReference."""
ref = super(IRIReference, cls).__new__(
cls,
scheme or None,
authority or None,
path or None,
query,
fragment)
ref.encoding = encoding
return ref
def __eq__(self, other):
"""Compare this reference to another."""
other_ref = other
if isinstance(other, tuple):
other_ref = self.__class__(*other)
elif not isinstance(other, IRIReference):
try:
other_ref = self.__class__.from_string(other)
except TypeError:
raise TypeError(
'Unable to compare {0}() to {1}()'.format(
type(self).__name__, type(other).__name__))
# See http://tools.ietf.org/html/rfc3986#section-6.2
return tuple(self) == tuple(other_ref)
def _match_subauthority(self):
return misc.ISUBAUTHORITY_MATCHER.match(self.authority)
@classmethod
def from_string(cls, iri_string, encoding='utf-8'):
"""Parse a IRI reference from the given unicode IRI string.
:param str iri_string: Unicode IRI to be parsed into a reference.
:param str encoding: The encoding of the string provided
:returns: :class:`IRIReference` or subclass thereof
"""
iri_string = compat.to_str(iri_string, encoding)
split_iri = misc.IRI_MATCHER.match(iri_string).groupdict()
return cls(
split_iri['scheme'], split_iri['authority'],
normalizers.encode_component(split_iri['path'], encoding),
normalizers.encode_component(split_iri['query'], encoding),
normalizers.encode_component(split_iri['fragment'], encoding),
encoding,
)
def encode(self, idna_encoder=None):
"""Encode an IRIReference into a URIReference instance.
If the ``idna`` module is installed or the ``rfc3986[idna]``
extra is used then unicode characters in the IRI host
component will be encoded with IDNA2008.
:param idna_encoder:
Function that encodes each part of the host component
If not given will raise an exception if the IRI
contains a host component.
:rtype: uri.URIReference
:returns: A URI reference
"""
authority = self.authority
if authority:
if idna_encoder is None:
if idna is None: # pragma: no cover
raise exceptions.MissingDependencyError(
"Could not import the 'idna' module "
"and the IRI hostname requires encoding"
)
else:
def idna_encoder(x):
try:
return idna.encode(x, strict=True, std3_rules=True)
except idna.IDNAError:
raise exceptions.InvalidAuthority(self.authority)
authority = ""
if self.host:
authority = ".".join([compat.to_str(idna_encoder(part.lower()))
for part in self.host.split(".")])
if self.userinfo is not None:
authority = (normalizers.encode_component(
self.userinfo, self.encoding) + '@' + authority)
if self.port is not None:
authority += ":" + str(self.port)
return uri.URIReference(self.scheme,
authority,
path=self.path,
query=self.query,
fragment=self.fragment,
encoding=self.encoding)
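# Illustrative usage sketch (requires the optional ``idna`` dependency for the
# host-encoding step); the IRI below is a made-up example.
def _example_encode_iri():
    iri = IRIReference.from_string(u"http://B\u00fccher.example/stra\u00dfe?q=caf\u00e9")
    uri_ref = iri.encode()  # host via IDNA2008, other components percent-encoded
    return uri_ref.unsplit()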
|
the-stack_0_20062 | import numpy as np
from scipy import optimize
from numpy.testing import assert_allclose
from scipy.special import factorial, xlogy
from itertools import product
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import max_error
from sklearn.metrics import mean_pinball_loss
from sklearn.metrics import r2_score
from sklearn.metrics import mean_tweedie_deviance
from sklearn.metrics import d2_tweedie_score
from sklearn.metrics import make_scorer
from sklearn.metrics._regression import _check_reg_targets
from sklearn.exceptions import UndefinedMetricWarning
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
y_pred_2 = y_true - 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.0)
assert_almost_equal(
mean_squared_log_error(y_true, y_pred),
mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred)),
)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.0)
assert_almost_equal(mean_pinball_loss(y_true, y_pred), 0.5)
assert_almost_equal(mean_pinball_loss(y_true, y_pred_2), 0.5)
assert_almost_equal(mean_pinball_loss(y_true, y_pred, alpha=0.4), 0.6)
assert_almost_equal(mean_pinball_loss(y_true, y_pred_2, alpha=0.4), 0.4)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.0)
mape = mean_absolute_percentage_error(y_true, y_pred)
assert np.isfinite(mape)
assert mape > 1e6
assert_almost_equal(max_error(y_true, y_pred), 1.0)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.0)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=0),
mean_squared_error(y_true, y_pred),
)
assert_almost_equal(
d2_tweedie_score(y_true, y_pred, power=0), r2_score(y_true, y_pred)
)
# Tweedie deviance needs positive y_pred, except for p=0,
# p>=2 needs positive y_true
# results evaluated by sympy
y_true = np.arange(1, 1 + n_samples)
y_pred = 2 * y_true
n = n_samples
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=-1),
5 / 12 * n * (n ** 2 + 2 * n + 1),
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=1), (n + 1) * (1 - np.log(2))
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=2), 2 * np.log(2) - 1
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=3 / 2),
((6 * np.sqrt(2) - 8) / n) * np.sqrt(y_true).sum(),
)
assert_almost_equal(
mean_tweedie_deviance(y_true, y_pred, power=3), np.sum(1 / y_true) / (4 * n)
)
dev_mean = 2 * np.mean(xlogy(y_true, 2 * y_true / (n + 1)))
assert_almost_equal(
d2_tweedie_score(y_true, y_pred, power=1),
1 - (n + 1) * (1 - np.log(2)) / dev_mean,
)
dev_mean = 2 * np.log((n + 1) / 2) - 2 / n * np.log(factorial(n))
assert_almost_equal(
d2_tweedie_score(y_true, y_pred, power=2), 1 - (2 * np.log(2) - 1) / dev_mean
)
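def _manual_pinball_loss_crosscheck():
    # Illustrative sketch (not collected by pytest): the pinball loss is
    # alpha * max(y - yhat, 0) + (1 - alpha) * max(yhat - y, 0), averaged over
    # samples.  The values mirror the alpha=0.4 case in test_regression_metrics.
    y_true = np.arange(50)
    y_pred = y_true + 1
    alpha = 0.4
    diff = y_true - y_pred
    manual = np.mean(alpha * np.maximum(diff, 0) - (1 - alpha) * np.minimum(diff, 0))
    assert_almost_equal(mean_pinball_loss(y_true, y_pred, alpha=alpha), manual)
    return manual  # 0.6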
def test_mean_squared_error_multioutput_raw_value_squared():
# non-regression test for
# https://github.com/scikit-learn/scikit-learn/pull/16323
mse1 = mean_squared_error([[1]], [[10]], multioutput="raw_values", squared=True)
mse2 = mean_squared_error([[1]], [[10]], multioutput="raw_values", squared=False)
assert np.sqrt(mse1) == pytest.approx(mse2)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1.0 / 3 + 2.0 / 3 + 2.0 / 3) / 4.0)
error = mean_squared_error(y_true, y_pred, squared=False)
assert_almost_equal(error, 0.454, decimal=2)
error = mean_squared_log_error(y_true, y_pred)
assert_almost_equal(error, 0.200, decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1.0 + 2.0 / 3) / 4.0)
error = mean_pinball_loss(y_true, y_pred)
assert_almost_equal(error, (1.0 + 2.0 / 3) / 8.0)
error = np.around(mean_absolute_percentage_error(y_true, y_pred), decimals=2)
assert np.isfinite(error)
assert error > 1e6
error = median_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1.0 + 1.0) / 4.0)
error = r2_score(y_true, y_pred, multioutput="variance_weighted")
assert_almost_equal(error, 1.0 - 5.0 / 2)
error = r2_score(y_true, y_pred, multioutput="uniform_average")
assert_almost_equal(error, -0.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.0], [0.0]), 0.0)
assert_almost_equal(mean_squared_error([0.0], [0.0], squared=False), 0.0)
assert_almost_equal(mean_squared_log_error([0.0], [0.0]), 0.0)
assert_almost_equal(mean_absolute_error([0.0], [0.0]), 0.0)
assert_almost_equal(mean_pinball_loss([0.0], [0.0]), 0.0)
assert_almost_equal(mean_absolute_percentage_error([0.0], [0.0]), 0.0)
assert_almost_equal(median_absolute_error([0.0], [0.0]), 0.0)
assert_almost_equal(max_error([0.0], [0.0]), 0.0)
assert_almost_equal(explained_variance_score([0.0], [0.0]), 1.0)
assert_almost_equal(r2_score([0.0, 1], [0.0, 1]), 1.0)
msg = (
"Mean Squared Logarithmic Error cannot be used when targets "
"contain negative values."
)
with pytest.raises(ValueError, match=msg):
mean_squared_log_error([-1.0], [-1.0])
msg = (
"Mean Squared Logarithmic Error cannot be used when targets "
"contain negative values."
)
with pytest.raises(ValueError, match=msg):
mean_squared_log_error([1.0, 2.0, 3.0], [1.0, -2.0, 3.0])
msg = (
"Mean Squared Logarithmic Error cannot be used when targets "
"contain negative values."
)
with pytest.raises(ValueError, match=msg):
mean_squared_log_error([1.0, -2.0, 3.0], [1.0, 2.0, 3.0])
# Tweedie deviance error
power = -1.2
assert_allclose(
mean_tweedie_deviance([0], [1.0], power=power), 2 / (2 - power), rtol=1e-3
)
msg = "can only be used on strictly positive y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
assert_almost_equal(mean_tweedie_deviance([0.0], [0.0], power=0), 0.0, 2)
power = 1.0
msg = "only be used on non-negative y and strictly positive y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
power = 1.5
assert_allclose(mean_tweedie_deviance([0.0], [1.0], power=power), 2 / (2 - power))
msg = "only be used on non-negative y and strictly positive y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
power = 2.0
assert_allclose(mean_tweedie_deviance([1.0], [1.0], power=power), 0.00, atol=1e-8)
msg = "can only be used on strictly positive y and y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
power = 3.0
assert_allclose(mean_tweedie_deviance([1.0], [1.0], power=power), 0.00, atol=1e-8)
msg = "can only be used on strictly positive y and y_pred."
with pytest.raises(ValueError, match=msg):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match=msg):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
power = 0.5
with pytest.raises(ValueError, match="is only defined for power<=0 and power>=1"):
mean_tweedie_deviance([0.0], [0.0], power=power)
with pytest.raises(ValueError, match="is only defined for power<=0 and power>=1"):
d2_tweedie_score([0.0] * 2, [0.0] * 2, power=power)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES, repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(y1, y2, None)
assert type1 == y_type
if type1 == "continuous":
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
with pytest.raises(ValueError):
_check_reg_targets(y1, y2, None)
def test__check_reg_targets_exception():
invalid_multioutput = "this_value_is_not_valid"
expected_message = (
"Allowed 'multioutput' string values are.+You provided multioutput={!r}".format(
invalid_multioutput
)
)
with pytest.raises(ValueError, match=expected_message):
_check_reg_targets([1, 2, 3], [[1], [2], [3]], invalid_multioutput)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput="raw_values")
mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
err_msg = (
"multioutput is expected to be 'raw_values' "
"or 'uniform_average' but we got 'variance_weighted' instead."
)
with pytest.raises(ValueError, match=err_msg):
mean_pinball_loss(y_true, y_pred, multioutput="variance_weighted")
pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
mape = mean_absolute_percentage_error(y_true, y_pred, multioutput="raw_values")
r = r2_score(y_true, y_pred, multioutput="raw_values")
evs = explained_variance_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(pbl, [0.25 / 2, 0.625 / 2], decimal=2)
assert_array_almost_equal(mape, [0.0778, 0.2262], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]] * 4
y_pred = [[1, 1]] * 4
mse = mean_squared_error(y_true, y_pred, multioutput="raw_values")
mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
r = r2_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(mse, [1.0, 1.0], decimal=2)
assert_array_almost_equal(mae, [1.0, 1.0], decimal=2)
assert_array_almost_equal(pbl, [0.5, 0.5], decimal=2)
assert_array_almost_equal(r, [0.0, 0.0], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values")
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert np.mean(r) == r2_score(
[[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="uniform_average"
)
evs = explained_variance_score(
[[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values"
)
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both the numerator and denominator
    # are zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(r2, [1.0, -3.0], decimal=2)
assert np.mean(r2) == r2_score(y_true, y_pred, multioutput="uniform_average")
evs = explained_variance_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(evs, [1.0, -3.0], decimal=2)
assert np.mean(evs) == explained_variance_score(y_true, y_pred)
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput="raw_values")
msle2 = mean_squared_error(
np.log(1 + y_true), np.log(1 + y_pred), multioutput="raw_values"
)
assert_array_almost_equal(msle, msle2, decimal=2)
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
rmsew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6], squared=False)
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
mapew = mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(rmsew, 0.59, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(mapew, 0.1668, decimal=2)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
msle2 = mean_squared_error(
np.log(1 + y_true), np.log(1 + y_pred), multioutput=[0.3, 0.7]
)
assert_almost_equal(msle, msle2, decimal=2)
@pytest.mark.parametrize("metric", [r2_score, d2_tweedie_score])
def test_regression_single_sample(metric):
y_true = [0]
y_pred = [1]
warning_msg = "not well-defined with less than two samples."
# Trigger the warning
with pytest.warns(UndefinedMetricWarning, match=warning_msg):
score = metric(y_true, y_pred)
assert np.isnan(score)
def test_deprecation_positional_arguments_mape():
y_true = [1, 1, 1]
y_pred = [1, 0, 1]
sample_weights = [0.5, 0.1, 0.2]
multioutput = "raw_values"
warning_msg = "passing these as positional arguments will result in an error"
# Trigger the warning
with pytest.warns(FutureWarning, match=warning_msg):
mean_absolute_percentage_error(y_true, y_pred, sample_weights, multioutput)
def test_tweedie_deviance_continuity():
n_samples = 100
y_true = np.random.RandomState(0).rand(n_samples) + 0.1
y_pred = np.random.RandomState(1).rand(n_samples) + 0.1
assert_allclose(
mean_tweedie_deviance(y_true, y_pred, power=0 - 1e-10),
mean_tweedie_deviance(y_true, y_pred, power=0),
)
# As we get closer to the limit, with a 1e-12 difference the absolute
# tolerance to pass the below check increases. There are likely
# numerical precision issues on the edges of different definition
# regions.
assert_allclose(
mean_tweedie_deviance(y_true, y_pred, power=1 + 1e-10),
mean_tweedie_deviance(y_true, y_pred, power=1),
atol=1e-6,
)
assert_allclose(
mean_tweedie_deviance(y_true, y_pred, power=2 - 1e-10),
mean_tweedie_deviance(y_true, y_pred, power=2),
atol=1e-6,
)
assert_allclose(
mean_tweedie_deviance(y_true, y_pred, power=2 + 1e-10),
mean_tweedie_deviance(y_true, y_pred, power=2),
atol=1e-6,
)
def test_mean_absolute_percentage_error():
random_number_generator = np.random.RandomState(42)
y_true = random_number_generator.exponential(size=100)
y_pred = 1.2 * y_true
assert mean_absolute_percentage_error(y_true, y_pred) == pytest.approx(0.2)
@pytest.mark.parametrize(
"distribution", ["normal", "lognormal", "exponential", "uniform"]
)
@pytest.mark.parametrize("target_quantile", [0.05, 0.5, 0.75])
def test_mean_pinball_loss_on_constant_predictions(distribution, target_quantile):
if not hasattr(np, "quantile"):
pytest.skip(
"This test requires a more recent version of numpy "
"with support for np.quantile."
)
# Check that the pinball loss is minimized by the empirical quantile.
n_samples = 3000
rng = np.random.RandomState(42)
data = getattr(rng, distribution)(size=n_samples)
# Compute the best possible pinball loss for any constant predictor:
best_pred = np.quantile(data, target_quantile)
best_constant_pred = np.full(n_samples, fill_value=best_pred)
best_pbl = mean_pinball_loss(data, best_constant_pred, alpha=target_quantile)
# Evaluate the loss on a grid of quantiles
candidate_predictions = np.quantile(data, np.linspace(0, 1, 100))
for pred in candidate_predictions:
# Compute the pinball loss of a constant predictor:
constant_pred = np.full(n_samples, fill_value=pred)
pbl = mean_pinball_loss(data, constant_pred, alpha=target_quantile)
# Check that the loss of this constant predictor is greater or equal
# than the loss of using the optimal quantile (up to machine
# precision):
assert pbl >= best_pbl - np.finfo(best_pbl.dtype).eps
# Check that the value of the pinball loss matches the analytical
# formula.
expected_pbl = (pred - data[data < pred]).sum() * (1 - target_quantile) + (
data[data >= pred] - pred
).sum() * target_quantile
expected_pbl /= n_samples
assert_almost_equal(expected_pbl, pbl)
# Check that we can actually recover the target_quantile by minimizing the
# pinball loss w.r.t. the constant prediction quantile.
def objective_func(x):
constant_pred = np.full(n_samples, fill_value=x)
return mean_pinball_loss(data, constant_pred, alpha=target_quantile)
result = optimize.minimize(objective_func, data.mean(), method="Nelder-Mead")
assert result.success
# The minimum is not unique with limited data, hence the large tolerance.
assert result.x == pytest.approx(best_pred, rel=1e-2)
assert result.fun == pytest.approx(best_pbl)
def test_dummy_quantile_parameter_tuning():
# Integration test to check that it is possible to use the pinball loss to
# tune the hyperparameter of a quantile regressor. This is conceptually
# similar to the previous test but using the scikit-learn estimator and
# scoring API instead.
n_samples = 1000
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, 5)) # Ignored
y = rng.exponential(size=n_samples)
all_quantiles = [0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95]
for alpha in all_quantiles:
neg_mean_pinball_loss = make_scorer(
mean_pinball_loss,
alpha=alpha,
greater_is_better=False,
)
regressor = DummyRegressor(strategy="quantile", quantile=0.25)
grid_search = GridSearchCV(
regressor,
param_grid=dict(quantile=all_quantiles),
scoring=neg_mean_pinball_loss,
).fit(X, y)
assert grid_search.best_params_["quantile"] == pytest.approx(alpha)
|
the-stack_0_20064 | import codecs
import json
import os
import stat
import sys
import tempfile
import uuid
import traceback
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import ansible_runner
from ansible_runner.exceptions import ConfigurationError
from ansible_runner.loader import ArtifactLoader
import ansible_runner.plugins
from ansible_runner.utils import register_for_cleanup
from ansible_runner.utils.streaming import stream_dir, unstream_dir
class UUIDEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, uuid.UUID):
return obj.hex
return json.JSONEncoder.default(self, obj)
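# Illustrative sketch (not part of the original module): UUIDEncoder lets
# json.dumps serialize uuid.UUID values by emitting their hex form.
def _uuid_encoder_example():
    return json.dumps({'job_id': uuid.uuid4()}, cls=UUIDEncoder)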
class MockConfig(object):
def __init__(self, settings):
self.settings = settings
class Transmitter(object):
def __init__(self, _output=None, **kwargs):
if _output is None:
_output = sys.stdout.buffer
self._output = _output
self.private_data_dir = os.path.abspath(kwargs.pop('private_data_dir'))
self.only_transmit_kwargs = kwargs.pop('only_transmit_kwargs', False)
self.kwargs = kwargs
self.status = "unstarted"
self.rc = None
def run(self):
self._output.write(
json.dumps({'kwargs': self.kwargs}, cls=UUIDEncoder).encode('utf-8')
)
self._output.write(b'\n')
self._output.flush()
if not self.only_transmit_kwargs:
stream_dir(self.private_data_dir, self._output)
self._output.write(json.dumps({'eof': True}).encode('utf-8'))
self._output.write(b'\n')
self._output.flush()
return self.status, self.rc
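# Illustrative sketch (not part of the original module): a Transmitter writes the
# job kwargs plus a zipped copy of private_data_dir to a byte stream that a Worker
# later replays and a Processor consumes. The 'playbook' kwarg below is a
# hypothetical example value; private_data_dir is assumed to exist on disk.
def _transmit_example(private_data_dir):
    import io
    buf = io.BytesIO()
    Transmitter(_output=buf, private_data_dir=private_data_dir,
                playbook='site.yml').run()
    return buf.getvalue()  # bytes a Worker would read from its _input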
class Worker(object):
def __init__(self, _input=None, _output=None, **kwargs):
if _input is None:
_input = sys.stdin.buffer
if _output is None:
_output = sys.stdout.buffer
self._input = _input
self._output = _output
self.kwargs = kwargs
self.job_kwargs = None
private_data_dir = kwargs.get('private_data_dir')
if private_data_dir is None:
private_data_dir = tempfile.mkdtemp()
register_for_cleanup(private_data_dir)
self.private_data_dir = private_data_dir
self.status = "unstarted"
self.rc = None
def update_paths(self, kwargs):
if kwargs.get('envvars'):
if 'ANSIBLE_ROLES_PATH' in kwargs['envvars']:
roles_path = kwargs['envvars']['ANSIBLE_ROLES_PATH']
roles_dir = os.path.join(self.private_data_dir, 'roles')
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.join(roles_dir, roles_path)
if kwargs.get('inventory'):
kwargs['inventory'] = os.path.join(self.private_data_dir, kwargs['inventory'])
return kwargs
def run(self):
while True:
try:
line = self._input.readline()
data = json.loads(line)
except (json.decoder.JSONDecodeError, IOError):
self.status_handler({'status': 'error', 'job_explanation': 'Failed to JSON parse a line from transmit stream.'}, None)
self.finished_callback(None) # send eof line
return self.status, self.rc
if 'kwargs' in data:
self.job_kwargs = self.update_paths(data['kwargs'])
elif 'zipfile' in data:
try:
unstream_dir(self._input, data['zipfile'], self.private_data_dir)
except Exception:
self.status_handler({
'status': 'error',
'job_explanation': 'Failed to extract private data directory on worker.',
'result_traceback': traceback.format_exc()
}, None)
self.finished_callback(None) # send eof line
return self.status, self.rc
elif 'eof' in data:
break
self.kwargs.update(self.job_kwargs)
self.kwargs['quiet'] = True
self.kwargs['suppress_ansible_output'] = True
self.kwargs['private_data_dir'] = self.private_data_dir
self.kwargs['status_handler'] = self.status_handler
self.kwargs['event_handler'] = self.event_handler
self.kwargs['artifacts_handler'] = self.artifacts_handler
self.kwargs['finished_callback'] = self.finished_callback
r = ansible_runner.interface.run(**self.kwargs)
self.status, self.rc = r.status, r.rc
# FIXME: do cleanup on the tempdir
return self.status, self.rc
def status_handler(self, status_data, runner_config):
self.status = status_data['status']
self._output.write(json.dumps(status_data).encode('utf-8'))
self._output.write(b'\n')
self._output.flush()
def event_handler(self, event_data):
self._output.write(json.dumps(event_data).encode('utf-8'))
self._output.write(b'\n')
self._output.flush()
def artifacts_handler(self, artifact_dir):
stream_dir(artifact_dir, self._output)
self._output.flush()
def finished_callback(self, runner_obj):
self._output.write(json.dumps({'eof': True}).encode('utf-8'))
self._output.write(b'\n')
self._output.flush()
class Processor(object):
def __init__(self, _input=None, status_handler=None, event_handler=None,
artifacts_handler=None, cancel_callback=None, finished_callback=None, **kwargs):
if _input is None:
_input = sys.stdin.buffer
self._input = _input
self.quiet = kwargs.get('quiet')
private_data_dir = kwargs.get('private_data_dir')
if private_data_dir is None:
private_data_dir = tempfile.mkdtemp()
self.private_data_dir = private_data_dir
self._loader = ArtifactLoader(self.private_data_dir)
settings = kwargs.get('settings')
if settings is None:
try:
settings = self._loader.load_file('env/settings', Mapping)
except ConfigurationError:
settings = {}
self.config = MockConfig(settings)
if kwargs.get('artifact_dir'):
self.artifact_dir = os.path.abspath(kwargs.get('artifact_dir'))
else:
project_artifacts = os.path.abspath(os.path.join(self.private_data_dir, 'artifacts'))
if kwargs.get('ident'):
self.artifact_dir = os.path.join(project_artifacts, "{}".format(kwargs.get('ident')))
else:
self.artifact_dir = project_artifacts
self.status_handler = status_handler
self.event_handler = event_handler
self.artifacts_handler = artifacts_handler
self.cancel_callback = cancel_callback # FIXME: unused
self.finished_callback = finished_callback
self.status = "unstarted"
self.rc = None
def status_callback(self, status_data):
self.status = status_data['status']
if self.status == 'starting':
self.config.command = status_data.get('command')
self.config.env = status_data.get('env')
self.config.cwd = status_data.get('cwd')
for plugin in ansible_runner.plugins:
ansible_runner.plugins[plugin].status_handler(self.config, status_data)
if self.status_handler is not None:
self.status_handler(status_data, runner_config=self.config)
def event_callback(self, event_data):
full_filename = os.path.join(self.artifact_dir,
'job_events',
'{}-{}.json'.format(event_data['counter'],
event_data['uuid']))
if not self.quiet and 'stdout' in event_data:
print(event_data['stdout'])
if self.event_handler is not None:
should_write = self.event_handler(event_data)
else:
should_write = True
for plugin in ansible_runner.plugins:
ansible_runner.plugins[plugin].event_handler(self.config, event_data)
if should_write:
with codecs.open(full_filename, 'w', encoding='utf-8') as write_file:
os.chmod(full_filename, stat.S_IRUSR | stat.S_IWUSR)
json.dump(event_data, write_file)
def artifacts_callback(self, artifacts_data):
length = artifacts_data['zipfile']
unstream_dir(self._input, length, self.artifact_dir)
if self.artifacts_handler is not None:
self.artifacts_handler(self.artifact_dir)
def run(self):
job_events_path = os.path.join(self.artifact_dir, 'job_events')
if not os.path.exists(job_events_path):
os.makedirs(job_events_path, 0o700, exist_ok=True)
while True:
try:
line = self._input.readline()
data = json.loads(line)
except (json.decoder.JSONDecodeError, IOError):
self.status_callback({'status': 'error', 'job_explanation': 'Failed to JSON parse a line from worker stream.'})
break
if 'status' in data:
self.status_callback(data)
elif 'zipfile' in data:
self.artifacts_callback(data)
elif 'eof' in data:
break
else:
self.event_callback(data)
if self.finished_callback is not None:
self.finished_callback(self)
return self.status, self.rc
|
the-stack_0_20065 | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import dateparser
import requests
import json
import collections
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
MAX_INCIDENTS_TO_FETCH = demisto.params().get('max_fetch')
FIRST_FETCH = demisto.params().get('first_fetch')
TENANT_NAME = demisto.params().get('tenantName')
INSECURE = demisto.params().get('insecure')
PROXY = demisto.params().get('proxy')
API_KEY = demisto.params().get('apikey')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
BASE_URL = f"https://{TENANT_NAME}.armorblox.io/api/v1beta1/organizations/{TENANT_NAME}"
payload: Dict = {}
headers = {
'x-ab-authorization': f'{API_KEY}'
}
class Client(BaseClient):
"""Client class to interact with the service API
This Client implements API calls, and does not contain any Demisto logic.
Should only do requests and return data.
It inherits from BaseClient defined in CommonServer Python.
Most calls use _http_request() that handles proxy, SSL verification, etc.
"""
def get_incidents(self, orderBy="ASC", pageSize=None, pageToken=None, first_fetch=None) -> List[Dict[str, Any]]:
request_params: Dict[str, Any] = {}
request_params['orderBy'] = orderBy
if pageToken == -1 and first_fetch:
request_params['timeFilter'] = first_fetch
elif pageToken and first_fetch:
request_params['timeFilter'] = first_fetch
request_params['pageToken'] = pageToken
if pageSize:
request_params['pageSize'] = pageSize
return self._http_request(
method='GET',
url_suffix='/incidents',
params=request_params
)
def get_incident_details(self, incident_id):
request_params: Dict[str, Any] = {}
return self._http_request(
method='GET',
url_suffix='/incidents/{}'.format(incident_id),
params=request_params
)
def makehash():
return collections.defaultdict(makehash)
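# Illustrative sketch (not part of the original integration): makehash() returns
# an autovivifying dict, so nested keys can be assigned without KeyErrors.
def _makehash_example():
    h = makehash()
    h['incident']['remediation']['action'] = 'quarantine'  # hypothetical keys
    return h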
def test_module(client: Client) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param client: Armorblox client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.get_incidents(pageSize=1)
except DemistoException as e:
if 'Forbidden' in str(e):
return 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return 'ok'
def get_page_token(client, pageToken=None):
response = client.get_incidents(pageSize=MAX_INCIDENTS_TO_FETCH, pageToken=pageToken, first_fetch=FIRST_FETCH)
if 'next_page_token' in response.keys():
return response['next_page_token']
else:
return None
def get_incidents_list(client, pageToken, first_fetch):
"""
Hits the Armorblox API and returns the list of fetched incidents.
"""
response = client.get_incidents(pageSize=MAX_INCIDENTS_TO_FETCH, pageToken=pageToken, first_fetch=first_fetch)
results = []
if 'incidents' in response.keys():
results = response['incidents']
# For each incident, get the details and extract the message_id
for result in results:
result['message_ids'] = get_incident_message_ids(client, result["id"])
return results
def get_incident_message_ids(client, incident_id):
"""
Returns the message ids for all the events for the input incident.
"""
detail_response = client.get_incident_details(incident_id)
message_ids = []
# loop through all the events of this incident and collect the message ids
if 'events' in detail_response.keys():
for event in detail_response['events']:
message_ids.append(event['message_id'])
if 'abuse_events' in detail_response.keys():
for event in detail_response['abuse_events']:
message_ids.append(event['message_id'])
return message_ids
def get_remediation_action(client, incident_id):
"""
Returns the remediation action(s) for the input incident.
"""
detail_response = client.get_incident_details(incident_id)
remediation_actions = None
if 'remediation_actions' in detail_response.keys():
remediation_actions = detail_response['remediation_actions'][0]
else:
remediation_actions = None
contxt = makehash()
human_readable = makehash()
human_readable['incident_id'] = incident_id
human_readable['remediation_actions'] = remediation_actions
contxt['incident_id'] = incident_id
contxt['remediation_actions'] = remediation_actions
return CommandResults(outputs_prefix='Armorblox.Threat', outputs=contxt)
def fetch_incidents_command(client):
last_run = demisto.getLastRun()
start_time: Any
# pageToken fetched from demisto lastRun
pageToken = int()
response = {}
incidents = []
if 'start_time' not in last_run.keys():
pageToken = -1
response = client.get_incidents(pageSize=1, pageToken=pageToken, first_fetch=FIRST_FETCH)
if 'incidents' in response.keys():
start_time = response['incidents'][0]['date']
start_time = dateparser.parse(start_time).timestamp()  # keep as a POSIX timestamp so it compares with dt below
message_ids = get_incident_message_ids(client, response['incidents'][0]['id'])
response['incidents'][0]['message_ids'] = message_ids
curr_incident = {'rawJSON': json.dumps(response['incidents'][0]), 'details': json.dumps(response['incidents'][0])}
incidents.append(curr_incident)
if last_run and 'pageToken' in last_run.keys():
pageToken = last_run.get('pageToken')
if last_run and 'start_time' in last_run.keys():
start_time = dateparser.parse(last_run.get('start_time'))
start_time = start_time.timestamp()
incidents_data = get_incidents_list(client, pageToken=pageToken, first_fetch=FIRST_FETCH)
pageToken = get_page_token(client, pageToken=pageToken)
last_time = start_time
for incident in incidents_data:
dt = incident['date']
dt = dateparser.parse(dt).timestamp()
# Update last run and add incident if the incident is newer than last fetch
if dt > start_time:
curr_incident = {'rawJSON': json.dumps(incident), 'details': json.dumps(incident)}
last_time = dt
incidents.append(curr_incident)
# Save the next_run as a dict with the start_time key to be stored
demisto.setLastRun({'start_time': str(last_time), 'pageToken': pageToken})
return incidents
def main():
''' EXECUTION '''
LOG('command is %s' % (demisto.command(), ))
try:
client = Client(
base_url=BASE_URL,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == "fetch-incidents":
incident_results = fetch_incidents_command(client)
demisto.incidents(incident_results)
return_results("Incidents fetched successfully!!")
# return_results(fetch_incidents_command(client))
if demisto.command() == "armorblox-check-remediation-action":
incident_id = demisto.args().get('incident_id')
return_results(get_remediation_action(client, incident_id))
elif demisto.command() == 'test-module':
result = test_module(client)
return_results(result)
except Exception as e:
return_error(str(e))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
the-stack_0_20066 | # -*- coding: utf-8 -*-
# @createTime : 2019/10/24 11:03
# @author : 王江桥
# @fileName: send_bom.py
# @email: [email protected]
import os
import json
import traceback
import xmltodict
from lxml import etree
from flask import request
from flask import current_app
from mesService import create_conn
from mesService.constants import ACTIONS_ENUM
from mesService.modules.RabbitMQ import logger
class DeviationOrder(object):
db = None
def __init__(self, itype):
self.db = create_conn(itype)
def parse_xml(self, xml_data, xml_body):
"""
Parse the incoming XML data and return dict-type data.
:return: parsed data on success, or a JSON error string on failure
"""
# xmlObj = etree.parse(self.xml_path) # parse a local xml file
# xml_data = request.data
# print(xml_data, "xml_data")
try:
xml_data = str(xml_data, encoding='utf-8')
xmlObj = etree.HTML(xml_data) # 解析本地xml文件
xml_str = etree.tostring(xmlObj) # 将文件内容转换成字符串数据
# xml_body = request.get_data(as_text=True)
# print(xml_body, ">>>")
dict_data = self.xml_to_dict(xml_str, xml_body)
return dict_data
except:
result = {
"status": "error",
"message": "解析失败,报文格式不正确"
}
return json.dumps(result)
def xml_to_dict(self, xml_str, xml_body):
"""
Convert the XML string into a dict, keeping only the required keys.
:param xml_str:
:return:
"""
try:
list_data = xmltodict.parse(xml_str)['html']['body']['data']['erpdev']
need_keys = ['transactionid', 'plantcode', 'work_order_number', 'action', 'workstation', 'child_part_number',
'parent_part_number', 'level_1_part', 'quantity', 'manufacturing_variation_code']
result = []
for p, n in dict(list_data).items():
new_dict = {}
if p in need_keys:
new_dict[p] = n
# if p == "action":
# r_type = self.get_status_type(n)
# new_dict["action"] = r_type
result.append(new_dict)
body_dict = {"request_body": xml_body}
result.append(body_dict)
return result
except:
result = {
"status": "error",
"message": "解析失败,报文格式不正确"
}
return json.dumps(result)
def insertDatabase(self, dict_data):
"""调用存储过程"""
json_data = json.dumps(dict_data)
# print(json_data)
sql = "select wip_deviation_insert('{}');".format(json_data)
print(sql)
try:
# execute returns the stored procedure's result; it returns 1 if the procedure does not raise an error
# query returns the query result, which is used for the decision below
# with self.app.app_context():
ret = self.db.query(sql)
return ret
except Exception:
# with self.app.app_context():
# current_app.logger.error(traceback.format_exc())
logger.writeLog("数据库写入失败:" + sql, os.path.basename(os.path.dirname(os.getcwd())) + ".log")
def get_status_type(self, data):
"""
Return the deviation type.
:param data: add (A) or delete (D)
:return: 1 or 0
"""
return ACTIONS_ENUM[data]
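# Illustrative usage sketch (not part of the original module): 'itype', the XML
# payload bytes and the request body below are hypothetical placeholders; a real
# call also needs the database connection that create_conn(itype) sets up.
def _deviation_example(itype, xml_bytes, xml_body):
    order = DeviationOrder(itype)
    parsed = order.parse_xml(xml_bytes, xml_body)
    return order.insertDatabase(parsed)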
|
the-stack_0_20070 | # Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.protobuf import descriptor_pb2
from google.protobuf import message_factory
from google.protobuf.descriptor_pool import DescriptorPool
from .loader import get_loader
class ProtoFactory:
def __init__(self):
# Declare descriptor pool
self.descriptor_pool = DescriptorPool()
# Load trace processor descriptor and add to descriptor pool
tp_descriptor_bytes = get_loader().read_tp_descriptor()
tp_file_desc_set_pb2 = descriptor_pb2.FileDescriptorSet()
tp_file_desc_set_pb2.MergeFromString(tp_descriptor_bytes)
for f_desc_pb2 in tp_file_desc_set_pb2.file:
self.descriptor_pool.Add(f_desc_pb2)
# Load metrics descriptor and add to descriptor pool
metrics_descriptor_bytes = get_loader().read_metrics_descriptor()
metrics_file_desc_set_pb2 = descriptor_pb2.FileDescriptorSet()
metrics_file_desc_set_pb2.MergeFromString(metrics_descriptor_bytes)
for f_desc_pb2 in metrics_file_desc_set_pb2.file:
self.descriptor_pool.Add(f_desc_pb2)
def create_message_factory(message_type):
message_desc = self.descriptor_pool.FindMessageTypeByName(message_type)
return message_factory.MessageFactory().GetPrototype(message_desc)
# Create proto messages to correctly communicate with the RPC API by sending
# and receiving data as protos
self.StatusResult = create_message_factory('perfetto.protos.StatusResult')
self.ComputeMetricArgs = create_message_factory(
'perfetto.protos.ComputeMetricArgs')
self.ComputeMetricResult = create_message_factory(
'perfetto.protos.ComputeMetricResult')
self.QueryArgs = create_message_factory('perfetto.protos.QueryArgs')
self.QueryResult = create_message_factory('perfetto.protos.QueryResult')
self.TraceMetrics = create_message_factory('perfetto.protos.TraceMetrics')
self.DisableAndReadMetatraceResult = create_message_factory(
'perfetto.protos.DisableAndReadMetatraceResult')
self.CellsBatch = create_message_factory(
'perfetto.protos.QueryResult.CellsBatch')
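# Illustrative sketch (not part of the original module): once constructed, the
# factory exposes generated message classes that can be instantiated directly
# (assuming the descriptor files are available via get_loader()).
def _proto_factory_example():
    factory = ProtoFactory()
    status = factory.StatusResult()  # empty perfetto.protos.StatusResult message
    return status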
|
the-stack_0_20071 | # -*- coding: utf-8 -*-
#
"""
Byte pair encoding utilities
Adapted from https://github.com/openai/gpt-2/blob/master/src/encoder.py
"""
import os
import json
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
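# Illustrative sketch (not part of the original file): the byte<->unicode map is
# a bijection over all 256 byte values, so UTF-8 text round-trips through it.
def _bytes_to_unicode_roundtrip(text="hello"):
    b2u = bytes_to_unicode()
    u2b = {v: k for k, v in b2u.items()}
    mapped = "".join(b2u[b] for b in text.encode("utf-8"))
    return bytes(u2b[c] for c in mapped).decode("utf-8")  # == text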
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
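# Illustrative sketch (not part of the original file): for the word ("l", "o", "w")
# the adjacent symbol pairs are ("l", "o") and ("o", "w").
def _get_pairs_example():
    return get_pairs(("l", "o", "w"))  # -> {("l", "o"), ("o", "w")}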
class Encoder:
def __init__(self, encoder, bpe_merges, errors="replace"):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def get_encoder(gpt2_pretrained_path):
with open(os.path.join(gpt2_pretrained_path, "encoder.json"), "r") as f:
encoder = json.load(f)
with open(os.path.join(gpt2_pretrained_path, "vocab.bpe"), "r", encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
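# Illustrative usage sketch (not part of the original file): assumes a local
# directory containing the GPT-2 'encoder.json' and 'vocab.bpe' files; the path
# below is a hypothetical placeholder.
def _encoder_example(model_dir="models/124M"):
    enc = get_encoder(model_dir)
    tokens = enc.encode("Hello world")
    return enc.decode(tokens)  # round-trips back to "Hello world"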
|
the-stack_0_20074 | import cv2
from os.path import join
import os
from imgaug import augmenters as iaa
augmented_image_dir = "./train/"
species = [
"blasti",
"bonegl",
"brhkyt",
"cbrtsh",
"cmnmyn",
"gretit",
"hilpig",
"himbul",
"himgri",
"hsparo",
"indvul",
"jglowl",
"lbicrw",
"mgprob",
"rebimg",
"wcrsrt",
]
""" Naming conventions can be different. This is
what I've used at my time. I just followed the table
present to generate that much number of images.
Type of Augmentation:
10 - Normal Image
20 - Gaussian Noise - 0.1* 255
30 - Gaussian Blur - sigma - 3.0
40 - Flip - Horizaontal
50 - Contrast Normalization - (0.5, 1.5)
60 - Hue
70 - Crop and Pad
Flipped
11 - Add - 2,3,4,5,6,12,13,14 7, 15, 16
12 - Multiply - 2,3,4,5,6,12,13,14 7, 15, 16
13 - Sharpen
14 - Gaussian Noise - 0.2*255
15 - Gaussian Blur - sigma - 0.0-2.0
16 - Affine Translation 50px x, y
17 - Hue Value
"""
def save_images(
augmentated_image,
destination,
number_of_images,
bird_specie_counter,
types
):
image_number = str(number_of_images)
number_of_images = int(number_of_images)
if bird_specie_counter < 10:
if number_of_images < 10:
cv2.imwrite(
join(
destination,
str(types)
+ str(0)
+ str(bird_specie_counter)
+ image_number
+ ".jpg",
),
augmentated_image
)
elif number_of_images >= 10:
cv2.imwrite(
join(
destination,
str(types)
+ str(0)
+ str(bird_specie_counter)
+ image_number
+ ".jpg",
),
augmentated_image
)
elif bird_specie_counter >= 10:
if number_of_images < 10:
cv2.imwrite(
join(
destination,
str(types)
+ str(bird_specie_counter)
+ image_number
+ ".jpg",
),
augmentated_image
)
elif number_of_images >= 10:
cv2.imwrite(
join(
destination,
str(types)
+ str(bird_specie_counter)
+ image_number
+ ".jpg",
),
augmentated_image
)
# Dataset Augmentation
gauss = iaa.AdditiveGaussianNoise(scale=0.2 * 255)
# blur = iaa.GaussianBlur(sigma=(3.0))
# flip = iaa.Fliplr(1.0)
# contrast = iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)
sharp = iaa.Sharpen(alpha=(0, 0.3), lightness=(0.7, 1.3))
affine = iaa.Affine(translate_px={"x": (-50, 50), "y": (-50, 50)})
# add = iaa.Add((-20, 20), per_channel=0.5)
# multiply = iaa.Multiply((0.8, 1.2), per_channel=0.5)
hue = iaa.Sequential(
[
iaa.ChangeColorspace(from_colorspace="RGB", to_colorspace="HSV"),
iaa.WithChannels(0, iaa.Add((50, 100))),
iaa.ChangeColorspace(from_colorspace="HSV", to_colorspace="RGB"),
]
)
aug = iaa.Sequential(
[
iaa.Fliplr(1.0),
iaa.ChangeColorspace(from_colorspace="RGB", to_colorspace="HSV"),
iaa.WithChannels(0, iaa.Add((50, 100))),
iaa.ChangeColorspace(from_colorspace="HSV", to_colorspace="RGB"),
]
)
def main():
"""Read images, apply augmentation and save images.
Two types of image augmentation are applied: one to normal
images whose file names start with 1, and another to flipped
images whose file names start with 4. The bird classes listed above
indicate which type of augmentation is applied to which type of
image and which species. We check the first character of the image
file name and compare it to 1/4 to apply the augmentation accordingly.
"""
for bird_specie in species:
augmented_image_folder = join(augmented_image_dir, bird_specie)
source_images = os.listdir(augmented_image_folder)
print(source_images)
source_images.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
augmented_images_arr = []
img_number = []
bird_specie_number = source_images[0]
bird_specie_number = int(bird_specie_number[2:4])
for source_image in source_images:
if int(source_image[0]) == 1:
img_number.append(source_image[4:6])
img_path = join(augmented_image_folder, source_image)
img = cv2.imread(img_path)
augmented_images_arr.append(img)
counter = 0
if len(augmented_images_arr) < 9:
# Applying Gaussian image augmentation
for augmented_image in gauss.augment_images(augmented_images_arr):
save_images(
augmented_image,
augmented_image_folder,
img_number[counter],
bird_specie_number,
20,
)
counter += 1
if __name__ == "__main__":
main()
|
the-stack_0_20077 | #!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def get_train_prototxt_deephi(caffe_path, src_prototxt, train_prototxt, image_list, directory_path):
sys.path.insert(0, caffe_path)
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.proto import caffe_pb2
import google.protobuf.text_format as tfmt
net_shape = []
net_parameter = caffe.proto.caffe_pb2.NetParameter()
with open(src_prototxt, "r") as f:
tfmt.Merge(f.read(), net_parameter)
net_shape = net_parameter.layer[0].input_param.shape[0].dim
print(net_shape[2], net_shape[3])
n = caffe.NetSpec()
print(type(n))
n.data = L.ImageData(top='label', include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), transform_param=dict(mirror=False,mean_value=128.0),
image_data_param=dict(source=image_list,batch_size=50, new_height=net_shape[2],new_width=net_shape[3],shuffle=False,root_folder=directory_path))
with open(train_prototxt, 'w') as f:
f.write(str(n.to_proto()))
print(n.to_proto())
net_parameter = caffe.proto.caffe_pb2.NetParameter()
with open(src_prototxt, "r") as f, open(train_prototxt, "a") as g:
tfmt.Merge(f.read(), net_parameter)
print("before\n", (net_parameter))
#L = next(L for L in net_parameter.layer if L.name == 'data')
print(net_parameter.layer[0])
print(net_parameter.layer[0].input_param.shape[0].dim)
del net_parameter.layer[0]
print("after\n", (net_parameter))
g.write(tfmt.MessageToString(net_parameter))
def main():
print(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
get_train_prototxt_deephi(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
if __name__ == '__main__':
main()
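# Illustrative CLI usage sketch (not part of the original script); the paths
# below are hypothetical placeholders matching the five sys.argv arguments
# (caffe python path, source prototxt, output train prototxt, image list, image dir):
#   python get_train_prototxt.py /opt/caffe/python deploy.prototxt train.prototxt \
#       image_list.txt /data/images/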
|
the-stack_0_20078 | # -*- coding: utf-8 -*-
"""setup.py: setuptools control."""
from setuptools import setup, find_packages
files = ["*"]
setup(
name='messaging_abstract',
version='0.1.1',
packages=find_packages(),
package_data={'messaging_abstract': files},
license='Apache 2.0',
description='',
setup_requires=['pytest-runner'],
tests_require=['pytest', 'mock', 'pytest-mock'],
install_requires=[
'',
],
url='https://github.com/rh-messaging-qe/messaging_abstract',
author='Dominik Lenoch',
author_email='[email protected]'
)
|
the-stack_0_20079 | import requests
import json
__version__ = '0.0.4'
class Jsms(object):
BASE_URL = 'https://api.sms.jpush.cn/v1/'
def __init__(self, app_key, master_secret):
self.session = requests.Session()
self.session.auth = (app_key, master_secret)
def send_code(self, mobile, temp_id):
body = {
'mobile': mobile,
'temp_id': temp_id
}
return self._post('codes', body)
def send_voice_code(self, mobile, code=None, lang=None, ttl=None):
body = {
'mobile': mobile
}
if code is not None:
body['code'] = code
if lang is not None:
body['voice_lang'] = lang
if ttl is not None:
body['ttl'] = ttl
return self._post('voice_codes', body)
def verify_code(self, msg_id, code):
end_point = 'codes/' + msg_id + '/valid'
body = {
'code': code
}
return self._post(end_point, body)
def send_teml(self, mobile, temp_id, temp_para=None, time=None):
end_point = 'messages'
body = {
'mobile': mobile,
'temp_id': temp_id
}
if temp_para is not None:
body['temp_para'] = temp_para
if time is not None:
body['send_time'] = time
end_point = 'schedule'
return self._post(end_point, body)
def send_batch_teml(self, mobile, temp_id, recipients=None):
pass
def show_schedule_message(self, schedule_id):
end_point = 'schedule/' + schedule_id
return self._get(end_point)
def update_schedule_message(self, schedule_id, mobile, temp_id, temp_para=None, time=None):
pass
def delete_schedule_message(self, schedule_id):
end_point = 'schedule/' + schedule_id
return self._del(end_point)
def app_balance(self):
end_point = 'accounts/app'
return self._get(end_point)
def show_sign(self, sign_id):
end_point = 'sign/' + sign_id
return self._get(end_point)
def delete_sign(self, sign_id):
end_point = 'sign/' + sign_id
return self._del(end_point)
def create_sign(self, sign, image0=None, type=None, remark=None):
end_point = 'sign'
return self._sign(end_point, sign, image0, type, remark)
def upadte_sign(self, sign_id, sign, image0=None, type=None, remark=None):
end_point = 'sign/' + sign_id
return self._sign(end_point, sign, image0, type, remark)
def _sign(self, end_point, sign=None, image0=None, type=None, remark=None):
uri = self.BASE_URL + end_point
uploads = {}
uploads['sign'] = (None, sign)
if image0 is not None:
uploads['image0'] = image0
if type is not None:
uploads['type'] = (None, str(type))
if remark is not None:
uploads['remark'] = (None, remark)
r = self.session.post(uri, files=uploads)
if 0 == len(r.content):
return r.status_code
else:
return r.json()
def _get(self, end_point):
return self._request('GET', end_point)
def _del(self, end_point):
return self._request('DELETE', end_point)
def _post(self, end_point, body):
return self._request('POST', end_point, body)
def _request(self, method, end_point, body=None):
uri = self.BASE_URL + end_point
if body is not None:
body = json.dumps(body)
r = self.session.request(method, uri, data=body)
if 0 == len(r.content):
return r.status_code
else:
return r.json()
|
the-stack_0_20080 | # Polynomials: Synthetic Division
'''
Synthetic division is a method of polynomial long
division, with less writing and fewer calculations.
It generally applies to division by binomials of
the form x - r.
Use x as the variable (i.e. not y, z, etc.) and make sure to use plus "+" and minus "-" signs, not dashes.
Enter a polynomial with descending powers of x.
E.g. x3+3x2-9x+2
Enter the binomial divisor.
E.g. x-5 (binomials of the form 3x-5, -x+12, etc. may also be used)
'''
import divisors
import terms
import utils
class PolynomialDivision:
def __init__(self, polynomial, divisor, verbose=True):
# santize the input - remove spaces
self.polynomial = utils.sanitize(polynomial)
self.divisor = utils.sanitize(divisor)
self.verbose = verbose
if "x" not in self.polynomial:
self.throw_no_x("polynomial")
elif "x" not in self.divisor:
self.throw_no_x("divisor")
self.log_work("\n-----")
self.log_work("Input:")
self.log_work("\tPolynomial:\t", self.polynomial)
self.log_work("\tDivisor:\t", self.divisor)
self.log_work("Processing:")
# get the coefficients of the polynomial
orderedCoefs = self.process_polynomial()
# divide the polynomial through long division
results_divide = self.process_divisor(orderedCoefs)
self.quotient, self.remainder = self.combine_result(results_divide)
self.log_work("Result:")
self.log_work("\tQuotient:\t", self.quotient)
self.log_work("\tRemainder:\t", self.remainder)
self.log_work("-----")
def log_work(self, *args, **kwargs):
if(self.verbose):
print(*args, **kwargs)
def throw_no_x(self, which):
print('>>>> ERROR')
print('There is no x term in the '+which +
'. Please make sure to use x as the variable')
print('<<<<')
raise ValueError('There is no x term in the '+which + '.')
def process_polynomial(self):
poly_terms = terms.get(self.polynomial)
self.log_work("\tTerms:\t\t", poly_terms)
powers = terms.get_powers(poly_terms)
self.log_work("\tPowers Present:\t", [*powers.keys()])
powers = terms.insert_missing_powers(poly_terms, powers)
power_keys = [*powers.keys()]
power_keys.sort(reverse=True)
self.log_work("\tComplete Powers:", *power_keys, sep='\t')
coefs = terms.get_coefficients(powers.values())
orderedCoefs = terms.order_coefficients(coefs, powers)
self.log_work("\tCoefficients:\t", *orderedCoefs, sep='\t')
return orderedCoefs
def process_divisor(self, orderedCoefs):
r, yield_multiplier = divisors.get_r_and_yeild_multiplier(self.divisor)
self.log_work('\tR:\t\t', r)
yields = divisors.get_yields(orderedCoefs, r)
self.log_work("\tYields:\t\t", *yields, sep='\t')
# divisors.before_dividing(yields)
self.log_work("\tYield mult.:\t", yield_multiplier)
results_divide = divisors.divide_poly(yields, yield_multiplier)
self.log_work("\tQuotient Terms:\t", *results_divide, sep='\t')
return results_divide
def combine_result(self, results_divide):
# combine the new terms into the quotient and the remainder
quotient = results_divide[0]
remainder = ''
count = len(results_divide)
for i in range(1, count):
if i < count-1:
# if i == count-1 and results_divide[i] != "":
# quotient += " + " + \
# results_divide[i]+"/("+self.divisor+")"
# else:
quotient += " + " + str(results_divide[i])
else:
if i == count-1 and results_divide[i] != "":
remainder = str(
results_divide[i])+"/("+self.divisor+")"
else:
remainder = str(results_divide[i])
return quotient, remainder
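# Illustrative usage sketch (not part of the original module), using the example
# polynomial and divisor from the module docstring above.
def _division_example():
    division = PolynomialDivision("x3+3x2-9x+2", "x-5", verbose=False)
    return division.quotient, division.remainder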
|
the-stack_0_20081 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test with:
python setup.py install
"""
DESCRIPTION = ("Create PDB files of DNA")
LONG_DESCRIPTION = """
**na2pdb** is a Python package
Create PDB files of DNA based on sequence and apply spatial manipulations to
them
License is BSD3
RNA is not yet supported
"""
DISTNAME = 'na2pdb'
LICENSE = 'BSD3'
AUTHORS = "Nick Conway"
EMAIL = "[email protected]"
URL = ""
DOWNLOAD_URL = ''
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Cython',
'Topic :: Scientific/Engineering',
]
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
import os
import sys
import shutil
pjoin = os.path.join
rpath = os.path.relpath
PACKAGE_PATH = os.path.abspath(os.path.dirname(__file__))
MODULE_PATH = pjoin(PACKAGE_PATH, 'na2pdb')
DATASETS_PATH = pjoin(MODULE_PATH, 'data')
# PDB dataset files to include in installation
na2pdb_files = [rpath(pjoin(root, f), MODULE_PATH) for root, _, files in
os.walk(DATASETS_PATH) for f in files if '.pdb' in f]
is_py_3 = int(sys.version_info[0] > 2)
setup(
name=DISTNAME,
maintainer=AUTHORS,
packages=['na2pdb'],
package_data={'na2pdb': na2pdb_files},
maintainer_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
zip_safe=False
)
|
the-stack_0_20084 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base test class for running non-stubbed tests (functional tests)
The FunctionalTest class contains helper methods for starting the API
and Registry server, grabbing the logs of each, cleaning up pidfiles,
and spinning down the servers.
"""
import atexit
import datetime
import errno
import logging
import os
import platform
import shutil
import signal
import socket
import sys
import tempfile
import time
import fixtures
from oslo_serialization import jsonutils
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
from glance.common import utils
from glance.db.sqlalchemy import api as db_api
from glance import tests as glance_tests
from glance.tests import utils as test_utils
execute, get_unused_port = test_utils.execute, test_utils.get_unused_port
tracecmd_osmap = {'Linux': 'strace', 'FreeBSD': 'truss'}
class Server(object):
"""
Class used to easily manage starting and stopping
a server during functional test runs.
"""
def __init__(self, test_dir, port, sock=None):
"""
Creates a new Server object.
:param test_dir: The directory where all test stuff is kept. This is
passed from the FunctionalTestCase.
:param port: The port to start a server up on.
"""
self.debug = True
self.no_venv = False
self.test_dir = test_dir
self.bind_port = port
self.conf_file_name = None
self.conf_base = None
self.paste_conf_base = None
self.exec_env = None
self.deployment_flavor = ''
self.show_image_direct_url = False
self.show_multiple_locations = False
self.property_protection_file = ''
self.enable_v1_api = True
self.enable_v2_api = True
self.enable_v1_registry = True
self.enable_v2_registry = True
self.needs_database = False
self.log_file = None
self.sock = sock
self.fork_socket = True
self.process_pid = None
self.server_module = None
self.stop_kill = False
self.use_user_token = True
self.send_identity_credentials = False
def write_conf(self, **kwargs):
"""
Writes the configuration file for the server to its intended
destination. Returns the name of the configuration file and
the over-ridden config content (may be useful for populating
error messages).
"""
if not self.conf_base:
raise RuntimeError("Subclass did not populate config_base!")
conf_override = self.__dict__.copy()
if kwargs:
conf_override.update(**kwargs)
# A config file and paste.ini to use just for this test...we don't want
# to trample on currently-running Glance servers, now do we?
conf_dir = os.path.join(self.test_dir, 'etc')
conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name)
if os.path.exists(conf_filepath):
os.unlink(conf_filepath)
paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini")
if os.path.exists(paste_conf_filepath):
os.unlink(paste_conf_filepath)
utils.safe_mkdirs(conf_dir)
def override_conf(filepath, overridden):
with open(filepath, 'w') as conf_file:
conf_file.write(overridden)
conf_file.flush()
return conf_file.name
overridden_core = self.conf_base % conf_override
self.conf_file_name = override_conf(conf_filepath, overridden_core)
overridden_paste = ''
if self.paste_conf_base:
overridden_paste = self.paste_conf_base % conf_override
override_conf(paste_conf_filepath, overridden_paste)
overridden = ('==Core config==\n%s\n==Paste config==\n%s' %
(overridden_core, overridden_paste))
return self.conf_file_name, overridden
def start(self, expect_exit=True, expected_exitcode=0, **kwargs):
"""
Starts the server.
Any kwargs passed to this method will override the configuration
value in the conf file used in starting the servers.
"""
# Ensure the configuration file is written
self.write_conf(**kwargs)
self.create_database()
cmd = ("%(server_module)s --config-file %(conf_file_name)s"
% {"server_module": self.server_module,
"conf_file_name": self.conf_file_name})
cmd = "%s -m %s" % (sys.executable, cmd)
# close the sock and release the unused port closer to start time
if self.exec_env:
exec_env = self.exec_env.copy()
else:
exec_env = {}
pass_fds = set()
if self.sock:
if not self.fork_socket:
self.sock.close()
self.sock = None
else:
fd = os.dup(self.sock.fileno())
exec_env[utils.GLANCE_TEST_SOCKET_FD_STR] = str(fd)
pass_fds.add(fd)
self.sock.close()
self.process_pid = test_utils.fork_exec(cmd,
logfile=os.devnull,
exec_env=exec_env,
pass_fds=pass_fds)
self.stop_kill = not expect_exit
if self.pid_file:
pf = open(self.pid_file, 'w')
pf.write('%d\n' % self.process_pid)
pf.close()
if not expect_exit:
rc = 0
try:
os.kill(self.process_pid, 0)
except OSError:
raise RuntimeError("The process did not start")
else:
rc = test_utils.wait_for_fork(
self.process_pid,
expected_exitcode=expected_exitcode)
# avoid an FD leak
if self.sock:
os.close(fd)
self.sock = None
return (rc, '', '')
def reload(self, expect_exit=True, expected_exitcode=0, **kwargs):
"""
Start and stop the service to reload
Any kwargs passed to this method will override the configuration
value in the conf file used in starting the servers.
"""
self.stop()
return self.start(expect_exit=expect_exit,
expected_exitcode=expected_exitcode, **kwargs)
def create_database(self):
"""Create database if required for this server"""
if self.needs_database:
conf_dir = os.path.join(self.test_dir, 'etc')
utils.safe_mkdirs(conf_dir)
conf_filepath = os.path.join(conf_dir, 'glance-manage.conf')
with open(conf_filepath, 'w') as conf_file:
conf_file.write('[DEFAULT]\n')
conf_file.write('sql_connection = %s' % self.sql_connection)
conf_file.flush()
glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
if glance_db_env in os.environ:
# use the empty db created and cached as a tempfile
# instead of spending the time creating a new one
db_location = os.environ[glance_db_env]
os.system('cp %s %s/tests.sqlite'
% (db_location, self.test_dir))
else:
cmd = ('%s -m glance.cmd.manage --config-file %s db sync' %
(sys.executable, conf_filepath))
execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env,
expect_exit=True)
# copy the clean db to a temp location so that it
# can be reused for future tests
(osf, db_location) = tempfile.mkstemp()
os.close(osf)
os.system('cp %s/tests.sqlite %s'
% (self.test_dir, db_location))
os.environ[glance_db_env] = db_location
# cleanup the temp file when the test suite is
# complete
def _delete_cached_db():
try:
os.remove(os.environ[glance_db_env])
except Exception:
glance_tests.logger.exception(
"Error cleaning up the file %s" %
os.environ[glance_db_env])
atexit.register(_delete_cached_db)
def stop(self):
"""
Spin down the server.
"""
if not self.process_pid:
raise Exception('why is this being called? %s' % self.server_name)
if self.stop_kill:
os.kill(self.process_pid, signal.SIGTERM)
rc = test_utils.wait_for_fork(self.process_pid, raise_error=False)
return (rc, '', '')
def dump_log(self, name):
log = logging.getLogger(name)
if not self.log_file or not os.path.exists(self.log_file):
return
with open(self.log_file, 'r') as fptr:
for line in fptr:
log.info(line.strip())
class ApiServer(Server):
"""
Server object that starts/stops/manages the API server
"""
def __init__(self, test_dir, port, policy_file, delayed_delete=False,
pid_file=None, sock=None, **kwargs):
super(ApiServer, self).__init__(test_dir, port, sock=sock)
self.server_name = 'api'
self.server_module = 'glance.cmd.%s' % self.server_name
self.default_store = kwargs.get("default_store", "file")
self.bind_host = "127.0.0.1"
self.registry_host = "127.0.0.1"
self.key_file = ""
self.cert_file = ""
self.metadata_encryption_key = "012345678901234567890123456789ab"
self.image_dir = os.path.join(self.test_dir, "images")
self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid")
self.log_file = os.path.join(self.test_dir, "api.log")
self.image_size_cap = 1099511627776
self.delayed_delete = delayed_delete
self.owner_is_tenant = True
self.workers = 0
self.scrub_time = 5
self.image_cache_dir = os.path.join(self.test_dir,
'cache')
self.image_cache_driver = 'sqlite'
self.policy_file = policy_file
self.policy_default_rule = 'default'
self.property_protection_rule_format = 'roles'
self.image_member_quota = 10
self.image_property_quota = 10
self.image_tag_quota = 10
self.image_location_quota = 2
self.disable_path = None
self.needs_database = True
default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
default_sql_connection)
self.data_api = kwargs.get("data_api",
"glance.db.sqlalchemy.api")
self.user_storage_quota = '0'
self.lock_path = self.test_dir
self.location_strategy = 'location_order'
self.store_type_location_strategy_preference = ""
self.send_identity_headers = False
self.conf_base = """[DEFAULT]
debug = %(debug)s
default_log_levels = eventlet.wsgi.server=DEBUG
bind_host = %(bind_host)s
bind_port = %(bind_port)s
key_file = %(key_file)s
cert_file = %(cert_file)s
metadata_encryption_key = %(metadata_encryption_key)s
registry_host = %(registry_host)s
registry_port = %(registry_port)s
use_user_token = %(use_user_token)s
send_identity_credentials = %(send_identity_credentials)s
log_file = %(log_file)s
image_size_cap = %(image_size_cap)d
delayed_delete = %(delayed_delete)s
owner_is_tenant = %(owner_is_tenant)s
workers = %(workers)s
scrub_time = %(scrub_time)s
send_identity_headers = %(send_identity_headers)s
image_cache_dir = %(image_cache_dir)s
image_cache_driver = %(image_cache_driver)s
data_api = %(data_api)s
sql_connection = %(sql_connection)s
show_image_direct_url = %(show_image_direct_url)s
show_multiple_locations = %(show_multiple_locations)s
user_storage_quota = %(user_storage_quota)s
enable_v1_api = %(enable_v1_api)s
enable_v2_api = %(enable_v2_api)s
lock_path = %(lock_path)s
property_protection_file = %(property_protection_file)s
property_protection_rule_format = %(property_protection_rule_format)s
image_member_quota=%(image_member_quota)s
image_property_quota=%(image_property_quota)s
image_tag_quota=%(image_tag_quota)s
image_location_quota=%(image_location_quota)s
location_strategy=%(location_strategy)s
allow_additional_image_properties = True
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
[paste_deploy]
flavor = %(deployment_flavor)s
[store_type_location_strategy]
store_type_preference = %(store_type_location_strategy_preference)s
[glance_store]
filesystem_store_datadir=%(image_dir)s
default_store = %(default_store)s
"""
self.paste_conf_base = """[pipeline:glance-api]
pipeline =
cors
healthcheck
versionnegotiation
gzip
unauthenticated-context
rootapp
[pipeline:glance-api-caching]
pipeline = cors healthcheck versionnegotiation gzip unauthenticated-context
cache rootapp
[pipeline:glance-api-cachemanagement]
pipeline =
cors
healthcheck
versionnegotiation
gzip
unauthenticated-context
cache
cache_manage
rootapp
[pipeline:glance-api-fakeauth]
pipeline = cors healthcheck versionnegotiation gzip fakeauth context rootapp
[pipeline:glance-api-noauth]
pipeline = cors healthcheck versionnegotiation gzip context rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
[app:apiversions]
paste.app_factory = glance.api.versions:create_resource
[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory
[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory
[filter:healthcheck]
paste.filter_factory = oslo_middleware:Healthcheck.factory
backends = disable_by_file
disable_by_file_path = %(disable_path)s
[filter:versionnegotiation]
paste.filter_factory =
glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
[filter:cache_manage]
paste.filter_factory =
glance.api.middleware.cache_manage:CacheManageFilter.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
allowed_origin=http://valid.example.com
"""
class RegistryServer(Server):
"""
Server object that starts/stops/manages the Registry server
"""
def __init__(self, test_dir, port, policy_file, sock=None):
super(RegistryServer, self).__init__(test_dir, port, sock=sock)
self.server_name = 'registry'
self.server_module = 'glance.cmd.%s' % self.server_name
self.needs_database = True
default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
default_sql_connection)
self.bind_host = "127.0.0.1"
self.pid_file = os.path.join(self.test_dir, "registry.pid")
self.log_file = os.path.join(self.test_dir, "registry.log")
self.owner_is_tenant = True
self.workers = 0
self.api_version = 1
self.user_storage_quota = '0'
self.metadata_encryption_key = "012345678901234567890123456789ab"
self.policy_file = policy_file
self.policy_default_rule = 'default'
self.disable_path = None
self.conf_base = """[DEFAULT]
debug = %(debug)s
bind_host = %(bind_host)s
bind_port = %(bind_port)s
log_file = %(log_file)s
sql_connection = %(sql_connection)s
sql_idle_timeout = 3600
api_limit_max = 1000
limit_param_default = 25
owner_is_tenant = %(owner_is_tenant)s
enable_v2_registry = %(enable_v2_registry)s
workers = %(workers)s
user_storage_quota = %(user_storage_quota)s
metadata_encryption_key = %(metadata_encryption_key)s
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
[paste_deploy]
flavor = %(deployment_flavor)s
"""
self.paste_conf_base = """[pipeline:glance-registry]
pipeline = healthcheck unauthenticated-context registryapp
[pipeline:glance-registry-fakeauth]
pipeline = healthcheck fakeauth context registryapp
[pipeline:glance-registry-trusted-auth]
pipeline = healthcheck context registryapp
[app:registryapp]
paste.app_factory = glance.registry.api:API.factory
[filter:healthcheck]
paste.filter_factory = oslo_middleware:Healthcheck.factory
backends = disable_by_file
disable_by_file_path = %(disable_path)s
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
class ScrubberDaemon(Server):
"""
Server object that starts/stops/manages the Scrubber server
"""
def __init__(self, test_dir, policy_file, daemon=False, **kwargs):
# NOTE(jkoelker): Set the port to 0 since we actually don't listen
super(ScrubberDaemon, self).__init__(test_dir, 0)
self.server_name = 'scrubber'
self.server_module = 'glance.cmd.%s' % self.server_name
self.daemon = daemon
self.registry_host = "127.0.0.1"
self.image_dir = os.path.join(self.test_dir, "images")
self.scrub_time = 5
self.pid_file = os.path.join(self.test_dir, "scrubber.pid")
self.log_file = os.path.join(self.test_dir, "scrubber.log")
self.metadata_encryption_key = "012345678901234567890123456789ab"
self.lock_path = self.test_dir
default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir
self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION',
default_sql_connection)
self.policy_file = policy_file
self.policy_default_rule = 'default'
self.send_identity_headers = False
self.admin_role = 'admin'
self.conf_base = """[DEFAULT]
debug = %(debug)s
log_file = %(log_file)s
daemon = %(daemon)s
wakeup_time = 2
scrub_time = %(scrub_time)s
registry_host = %(registry_host)s
registry_port = %(registry_port)s
metadata_encryption_key = %(metadata_encryption_key)s
lock_path = %(lock_path)s
sql_connection = %(sql_connection)s
sql_idle_timeout = 3600
send_identity_headers = %(send_identity_headers)s
admin_role = %(admin_role)s
[glance_store]
filesystem_store_datadir=%(image_dir)s
[oslo_policy]
policy_file = %(policy_file)s
policy_default_rule = %(policy_default_rule)s
"""
def start(self, expect_exit=True, expected_exitcode=0, **kwargs):
if 'daemon' in kwargs:
expect_exit = False
return super(ScrubberDaemon, self).start(
expect_exit=expect_exit,
expected_exitcode=expected_exitcode,
**kwargs)
class FunctionalTest(test_utils.BaseTestCase):
"""
Base test class for any test that wants to test the actual
servers and clients and not just the stubbed out interfaces
"""
inited = False
disabled = False
launched_servers = []
def setUp(self):
super(FunctionalTest, self).setUp()
self.test_dir = self.useFixture(fixtures.TempDir()).path
self.api_protocol = 'http'
self.api_port, api_sock = test_utils.get_unused_port_and_socket()
self.registry_port, reg_sock = test_utils.get_unused_port_and_socket()
# NOTE: Scrubber is enabled by default for the functional tests.
        # Please disable it by explicitly setting 'self.include_scrubber' to
# False in the test SetUps that do not require Scrubber to run.
self.include_scrubber = True
self.tracecmd = tracecmd_osmap.get(platform.system())
conf_dir = os.path.join(self.test_dir, 'etc')
utils.safe_mkdirs(conf_dir)
self.copy_data_file('schema-image.json', conf_dir)
self.copy_data_file('policy.json', conf_dir)
self.copy_data_file('property-protections.conf', conf_dir)
self.copy_data_file('property-protections-policies.conf', conf_dir)
self.property_file_roles = os.path.join(conf_dir,
'property-protections.conf')
property_policies = 'property-protections-policies.conf'
self.property_file_policies = os.path.join(conf_dir,
property_policies)
self.policy_file = os.path.join(conf_dir, 'policy.json')
self.api_server = ApiServer(self.test_dir,
self.api_port,
self.policy_file,
sock=api_sock)
self.registry_server = RegistryServer(self.test_dir,
self.registry_port,
self.policy_file,
sock=reg_sock)
self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file)
self.pid_files = [self.api_server.pid_file,
self.registry_server.pid_file,
self.scrubber_daemon.pid_file]
self.files_to_destroy = []
self.launched_servers = []
def tearDown(self):
if not self.disabled:
self.cleanup()
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_database(self.registry_server.sql_connection)
self._reset_database(self.api_server.sql_connection)
super(FunctionalTest, self).tearDown()
self.api_server.dump_log('api_server')
self.registry_server.dump_log('registry_server')
self.scrubber_daemon.dump_log('scrubber_daemon')
def set_policy_rules(self, rules):
        with open(self.policy_file, 'w') as fap:
            fap.write(jsonutils.dumps(rules))
def _reset_database(self, conn_string):
conn_pieces = urlparse.urlparse(conn_string)
if conn_string.startswith('sqlite'):
# We leave behind the sqlite DB for failing tests to aid
# in diagnosis, as the file size is relatively small and
# won't interfere with subsequent tests as it's in a per-
# test directory (which is blown-away if the test is green)
pass
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
if auth_pieces[1].strip():
password = "-p%s" % auth_pieces[1]
sql = ("drop database if exists %(database)s; "
"create database %(database)s;") % {'database': database}
cmd = ("mysql -u%(user)s %(password)s -h%(host)s "
"-e\"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
def cleanup(self):
"""
Makes sure anything we created or started up in the
tests are destroyed or spun down
"""
# NOTE(jbresnah) call stop on each of the servers instead of
# checking the pid file. stop() will wait until the child
# server is dead. This eliminates the possibility of a race
# between a child process listening on a port actually dying
# and a new process being started
servers = [self.api_server,
self.registry_server,
self.scrubber_daemon]
for s in servers:
try:
s.stop()
except Exception:
pass
for f in self.files_to_destroy:
if os.path.exists(f):
os.unlink(f)
def start_server(self,
server,
expect_launch,
expect_exit=True,
expected_exitcode=0,
**kwargs):
"""
Starts a server on an unused port.
Any kwargs passed to this method will override the configuration
value in the conf file used in starting the server.
:param server: the server to launch
:param expect_launch: true iff the server is expected to
successfully start
:param expect_exit: true iff the launched process is expected
to exit in a timely fashion
:param expected_exitcode: expected exitcode from the launcher
"""
self.cleanup()
# Start up the requested server
exitcode, out, err = server.start(expect_exit=expect_exit,
expected_exitcode=expected_exitcode,
**kwargs)
if expect_exit:
self.assertEqual(expected_exitcode, exitcode,
"Failed to spin up the requested server. "
"Got: %s" % err)
self.launched_servers.append(server)
launch_msg = self.wait_for_servers([server], expect_launch)
self.assertTrue(launch_msg is None, launch_msg)
def start_with_retry(self, server, port_name, max_retries,
expect_launch=True,
**kwargs):
"""
Starts a server, with retries if the server launches but
fails to start listening on the expected port.
:param server: the server to launch
:param port_name: the name of the port attribute
:param max_retries: the maximum number of attempts
:param expect_launch: true iff the server is expected to
successfully start
:param expect_exit: true iff the launched process is expected
to exit in a timely fashion
"""
launch_msg = None
for i in range(max_retries):
exitcode, out, err = server.start(expect_exit=not expect_launch,
**kwargs)
name = server.server_name
self.assertEqual(0, exitcode,
"Failed to spin up the %s server. "
"Got: %s" % (name, err))
launch_msg = self.wait_for_servers([server], expect_launch)
if launch_msg:
server.stop()
server.bind_port = get_unused_port()
setattr(self, port_name, server.bind_port)
else:
self.launched_servers.append(server)
break
self.assertTrue(launch_msg is None, launch_msg)
def start_servers(self, **kwargs):
"""
Starts the API and Registry servers (glance-control api start
& glance-control registry start) on unused ports. glance-control
should be installed into the python path
Any kwargs passed to this method will override the configuration
value in the conf file used in starting the servers.
"""
self.cleanup()
# Start up the API and default registry server
# We start the registry server first, as the API server config
# depends on the registry port - this ordering allows for
# retrying the launch on a port clash
self.start_with_retry(self.registry_server, 'registry_port', 3,
**kwargs)
kwargs['registry_port'] = self.registry_server.bind_port
self.start_with_retry(self.api_server, 'api_port', 3, **kwargs)
if self.include_scrubber:
exitcode, out, err = self.scrubber_daemon.start(**kwargs)
self.assertEqual(0, exitcode,
"Failed to spin up the Scrubber daemon. "
"Got: %s" % err)
def ping_server(self, port):
"""
Simple ping on the port. If responsive, return True, else
return False.
:note We use raw sockets, not ping here, since ping uses ICMP and
has no concept of ports...
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(("127.0.0.1", port))
return True
except socket.error:
return False
finally:
s.close()
def ping_server_ipv6(self, port):
"""
Simple ping on the port. If responsive, return True, else
return False.
:note We use raw sockets, not ping here, since ping uses ICMP and
has no concept of ports...
The function uses IPv6 (therefore AF_INET6 and ::1).
"""
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
try:
s.connect(("::1", port))
return True
except socket.error:
return False
finally:
s.close()
def wait_for_servers(self, servers, expect_launch=True, timeout=30):
"""
Tight loop, waiting for the given server port(s) to be available.
Returns when all are pingable. There is a timeout on waiting
for the servers to come up.
:param servers: Glance server ports to ping
:param expect_launch: Optional, true iff the server(s) are
expected to successfully start
:param timeout: Optional, defaults to 30 seconds
:returns: None if launch expectation is met, otherwise an
assertion message
"""
now = datetime.datetime.now()
timeout_time = now + datetime.timedelta(seconds=timeout)
replied = []
while (timeout_time > now):
pinged = 0
for server in servers:
if self.ping_server(server.bind_port):
pinged += 1
if server not in replied:
replied.append(server)
if pinged == len(servers):
msg = 'Unexpected server launch status'
return None if expect_launch else msg
now = datetime.datetime.now()
time.sleep(0.05)
failed = list(set(servers) - set(replied))
msg = 'Unexpected server launch status for: '
for f in failed:
msg += ('%s, ' % f.server_name)
if os.path.exists(f.pid_file):
pid = f.process_pid
trace = f.pid_file.replace('.pid', '.trace')
if self.tracecmd:
cmd = '%s -p %d -o %s' % (self.tracecmd, pid, trace)
try:
execute(cmd, raise_error=False, expect_exit=False)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError('No executable found for "%s" '
'command.' % self.tracecmd)
else:
raise
time.sleep(0.5)
if os.path.exists(trace):
msg += ('\n%s:\n%s\n' % (self.tracecmd,
open(trace).read()))
self.add_log_details(failed)
return msg if expect_launch else None
def stop_server(self, server, name):
"""
Called to stop a single server in a normal fashion using the
glance-control stop method to gracefully shut the server down.
:param server: the server to stop
"""
# Spin down the requested server
server.stop()
def stop_servers(self):
"""
Called to stop the started servers in a normal fashion. Note
that cleanup() will stop the servers using a fairly draconian
method of sending a SIGTERM signal to the servers. Here, we use
the glance-control stop method to gracefully shut the server down.
This method also asserts that the shutdown was clean, and so it
is meant to be called during a normal test case sequence.
"""
# Spin down the API and default registry server
self.stop_server(self.api_server, 'API server')
self.stop_server(self.registry_server, 'Registry server')
if self.include_scrubber:
self.stop_server(self.scrubber_daemon, 'Scrubber daemon')
self._reset_database(self.registry_server.sql_connection)
def run_sql_cmd(self, sql):
"""
Provides a crude mechanism to run manual SQL commands for backend
DB verification within the functional tests.
The raw result set is returned.
"""
engine = db_api.get_engine()
return engine.execute(sql)
def copy_data_file(self, file_name, dst_dir):
src_file_name = os.path.join('glance/tests/etc', file_name)
shutil.copy(src_file_name, dst_dir)
dst_file_name = os.path.join(dst_dir, file_name)
return dst_file_name
def add_log_details(self, servers=None):
logs = [s.log_file for s in (servers or self.launched_servers)]
for log in logs:
if os.path.exists(log):
testtools.content.attach_file(self, log)
|
the-stack_0_20085 | from typing import Collection, List, Tuple
from kopf._cogs.clients import api
from kopf._cogs.configs import configuration
from kopf._cogs.helpers import typedefs
from kopf._cogs.structs import bodies, references
async def list_objs(
*,
settings: configuration.OperatorSettings,
resource: references.Resource,
namespace: references.Namespace,
logger: typedefs.Logger,
) -> Tuple[Collection[bodies.RawBody], str]:
"""
List the objects of specific resource type.
The cluster-scoped call is used in two cases:
* The resource itself is cluster-scoped, and namespacing makes not sense.
* The operator serves all namespaces for the namespaced custom resource.
Otherwise, the namespace-scoped call is used:
* The resource is namespace-scoped AND operator is namespaced-restricted.
"""
rsp = await api.get(
url=resource.get_url(namespace=namespace),
logger=logger,
settings=settings,
)
items: List[bodies.RawBody] = []
resource_version = rsp.get('metadata', {}).get('resourceVersion', None)
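    # A *List response usually carries 'kind'/'apiVersion' only at the top level
    # (e.g. kind='KopfExampleList'); backfill them onto each item below so the
    # returned bodies look like individually fetched objects.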
for item in rsp.get('items', []):
if 'kind' in rsp:
item.setdefault('kind', rsp['kind'][:-4] if rsp['kind'][-4:] == 'List' else rsp['kind'])
if 'apiVersion' in rsp:
item.setdefault('apiVersion', rsp['apiVersion'])
items.append(item)
return items, resource_version
|
the-stack_0_20088 | # -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t; python-indent: 4 -*-
"""
Role
====
Defines the basic interface for a plugin manager that also keeps track
of versions of plugins
API
===
"""
from distutils.version import StrictVersion
from lib.yapsy.PluginInfo import PluginInfo
from lib.yapsy.IPlugin import IPlugin
from lib.yapsy.PluginManagerDecorator import PluginManagerDecorator
class VersionedPluginInfo(PluginInfo):
"""
Gather some info about a plugin such as its name, author,
description...
"""
def __init__(self, plugin_name, plugin_path):
PluginInfo.__init__(self, plugin_name, plugin_path)
# version number is now required to be a StrictVersion object
self.version = StrictVersion("0.0")
def setVersion(self, vstring):
self.version = StrictVersion(vstring)
class VersionedPluginManager(PluginManagerDecorator):
"""
Handle plugin versioning by making sure that when several
versions are present for a same plugin, only the latest version is
manipulated via the standard methods (eg for activation and
deactivation)
More precisely, for operations that must be applied on a single
named plugin at a time (``getPluginByName``,
``activatePluginByName``, ``deactivatePluginByName`` etc) the
    targeted plugin will always be the one with the latest version.
.. note:: The older versions of a given plugin are still reachable
via the ``getPluginsOfCategoryFromAttic`` method.
"""
def __init__(self,
decorated_manager=None,
categories_filter={"Default":IPlugin},
directories_list=None,
plugin_info_ext="yapsy-plugin"):
# Create the base decorator class
PluginManagerDecorator.__init__(self,decorated_manager,
categories_filter,
directories_list,
plugin_info_ext)
self.setPluginInfoClass(VersionedPluginInfo)
# prepare the storage for the early version of the plugins,
# for which only the latest version is the one that will be
# kept in the "core" plugin storage.
self._prepareAttic()
def _prepareAttic(self):
"""
Create and correctly initialize the storage where the wrong
version of the plugins will be stored.
"""
self._attic = {}
for categ in self.getCategories():
self._attic[categ] = []
def setCategoriesFilter(self, categories_filter):
"""
Set the categories of plugins to be looked for as well as the
way to recognise them.
        Note: will also reset the attic to avoid inconsistencies.
"""
self._component.setCategoriesFilter(categories_filter)
self._prepareAttic()
def getLatestPluginsOfCategory(self,category_name):
"""
DEPRECATED(>1.8): Please consider using getPluginsOfCategory
instead.
Return the list of all plugins belonging to a category.
"""
return self.getPluginsOfCategory(category_name)
def loadPlugins(self, callback=None, callback_after=None):
"""
Load the candidate plugins that have been identified through a
previous call to locatePlugins.
In addition to the baseclass functionality, this subclass also
needs to find the latest version of each plugin.
"""
self._prepareAttic()
self._component.loadPlugins(callback, callback_after)
for categ in self.getCategories():
latest_plugins = {}
allPlugins = self.getPluginsOfCategory(categ)
# identify the latest version of each plugin
for plugin in allPlugins:
name = plugin.name
version = plugin.version
if name in latest_plugins:
if version > latest_plugins[name].version:
older_plugin = latest_plugins[name]
latest_plugins[name] = plugin
self.removePluginFromCategory(older_plugin,categ)
self._attic[categ].append(older_plugin)
else:
self.removePluginFromCategory(plugin,categ)
self._attic[categ].append(plugin)
else:
latest_plugins[name] = plugin
def getPluginsOfCategoryFromAttic(self,categ):
"""
Access the older version of plugins for which only the latest
version is available through standard methods.
"""
return self._attic[categ]
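# Illustrative usage sketch (not part of the library; the plugin directory and
# category names below are made up). The locate call is assumed to be forwarded
# to the decorated PluginManager:
#
#   manager = VersionedPluginManager(directories_list=["/path/to/plugins"])
#   manager.locatePlugins()
#   manager.loadPlugins()   # keeps only the latest version of each plugin
#   for info in manager.getPluginsOfCategory("Default"):
#       print(info.name, info.version)
#   older = manager.getPluginsOfCategoryFromAttic("Default")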
|
the-stack_0_20091 | from PyQt5 import QtGui, QtCore
from PyQt5 import QtWidgets as qt_widgets
class CalendarDialog(qt_widgets.QDialog):
"""Creates a calendar widget allowing the user to select a date."""
def __init__(self, title="Calendar"):
super(CalendarDialog, self).__init__()
self.setWindowTitle(title)
layout = qt_widgets.QGridLayout()
layout.setColumnStretch(1, 1)
self.cal = qt_widgets.QCalendarWidget(self)
self.cal.setGridVisible(True)
self.cal.clicked[QtCore.QDate].connect(self.show_date)
layout.addWidget(self.cal, 0, 0, 1, 2)
self.date_label = qt_widgets.QLabel()
self.date = self.cal.selectedDate()
self.date_label.setText(self.date.toString())
layout.addWidget(self.date_label, 1, 0)
button_box = qt_widgets.QDialogButtonBox()
confirm_button = button_box.addButton(qt_widgets.QDialogButtonBox.Ok)
confirm_button.clicked.connect(self.confirm)
layout.addWidget(button_box, 1, 1)
self.setLayout(layout)
def show_date(self, date):
self.date = self.cal.selectedDate()
self.date_label.setText(self.date.toString())
def confirm(self):
self.date = self.cal.selectedDate()
self.close()
if __name__ == '__main__':
app = qt_widgets.QApplication([])
cal = CalendarDialog(title="title")
app.exec_()
date = cal.date.toString()
print(date)
|
the-stack_0_20092 | from __future__ import print_function
import os
import shutil
import sys
import time
import glob
import importlib
import logging
import subprocess
# as we need to load the shared lib from here, make sure it's in our path:
if os.path.join( os.environ['CMSSW_BASE'], 'src') not in sys.path:
sys.path.append( os.path.join( os.environ['CMSSW_BASE'], 'src') )
# -------------------------------------------------------------------------------------------------------
payload2xmlCodeTemplate = """
#include "CondCore/Utilities/interface/Payload2XMLModule.h"
#include "CondCore/Utilities/src/CondFormats.h"
PAYLOAD_2XML_MODULE( %s ){
PAYLOAD_2XML_CLASS( %s );
}
"""
buildFileTemplate = """
<flags CXXFLAGS="-Wno-sign-compare -Wno-unused-variable -Os"/>
<library file="%s" name="%s">
<use name="CondCore/Utilities"/>
<use name="py3-pybind11"/>
<use name="python3"/>
</library>
<export>
<lib name="1"/>
</export>
"""
# helper function
def sanitize(typeName):
return typeName.replace(' ','').replace('<','_').replace('>','')
def localLibName( payloadType ):
# required to avoid ( unlikely ) clashes between lib names from templates and lib names from classes
prefix = ''
if '<' in payloadType and '>' in payloadType:
prefix = 't'
ptype = payloadType
if '::' in payloadType:
ptype = payloadType.replace('::','_')
return "%s_%spayload2xml" %(sanitize(ptype),prefix)
def boost_version_for_this_release():
import pluginUtilities_payload2xml as mod2XML
return mod2XML.boost_version_label()
class CondXmlProcessor(object):
def __init__(self, condDBIn):
self.conddb = condDBIn
if not os.path.exists( os.path.join( os.environ['CMSSW_BASE'], 'src') ):
raise Exception("Looks like you are not running in a CMSSW developer area, $CMSSW_BASE/src/ does not exist")
self.fakePkgName = "fakeSubSys4pl/fakePkg4pl"
self._pl2xml_tmpDir = os.path.join( os.environ['CMSSW_BASE'], 'src', self.fakePkgName )
self.doCleanup = False
def __del__(self):
if self.doCleanup:
shutil.rmtree( '/'.join( self._pl2xml_tmpDir.split('/')[:-1] ) )
return
def discover(self, payloadType):
libName = 'pluginUtilities_payload2xml.so'
# first search: developer area or main release
libDir = os.path.join( os.environ["CMSSW_BASE"], 'lib', os.environ["SCRAM_ARCH"] )
devLibDir = libDir
libPath = os.path.join( devLibDir, libName )
releaseBase = os.environ["CMSSW_RELEASE_BASE"]
devCheckout = (releaseBase != '')
if not devCheckout:
logging.debug('Looks like the current working environment is a read-only release')
if not os.path.exists( libPath ) and devCheckout:
# main release ( for dev checkouts )
libDir = os.path.join( releaseBase, 'lib', os.environ["SCRAM_ARCH"] )
libPath = os.path.join( libDir, libName )
if not os.path.exists( libPath ):
if "CMSSW_FULL_RELEASE_BASE" in os.environ:
libDir = os.path.join( os.environ["CMSSW_FULL_RELEASE_BASE"], 'lib', os.environ["SCRAM_ARCH"] )
libPath = os.path.join( libDir, libName )
if not os.path.exists( libPath ):
# it should never happen!
raise Exception('No built-in library %s found with XML converters.' %libPath)
logging.debug("Importing built-in library %s" %libPath)
module = importlib.import_module( libName.replace('.so', '') )
functors = dir(module)
funcName = payloadType+'2xml'
if funcName in functors:
logging.info('XML converter for payload class %s found in the built-in library.' %payloadType)
return getattr( module, funcName)
if not devCheckout:
# give-up if it is a read-only release...
raise Exception('No XML converter suitable for payload class %s has been found in the built-in library.')
libName = 'plugin%s.so' %localLibName( payloadType )
libPath = os.path.join( devLibDir, libName )
if os.path.exists( libPath ):
logging.info('Found local library with XML converter for class %s' %payloadType )
module = importlib.import_module( libName.replace('.so', '') )
return getattr( module, funcName)
logging.warning('No XML converter for payload class %s found in the built-in library.' %payloadType)
return None
def prepPayload2xml(self, payloadType):
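        # Strategy: first look for a converter shipped with the release
        # (built-in library); only if none matches, generate a small plugin in
        # a temporary package and build it locally with scram.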
converter = self.discover(payloadType)
if converter: return converter
#otherwise, go for the code generation in the local checkout area.
startTime = time.time()
libName = localLibName( payloadType )
pluginName = 'plugin%s' % libName
tmpLibName = "Tmp_payload2xml"
tmpPluginName = 'plugin%s' %tmpLibName
libDir = os.path.join( os.environ["CMSSW_BASE"], 'lib', os.environ["SCRAM_ARCH"] )
tmpLibFile = os.path.join( libDir,tmpPluginName+'.so' )
code = payload2xmlCodeTemplate %(pluginName,payloadType)
tmpSrcFileName = 'Local_2XML.cpp'
tmpDir = self._pl2xml_tmpDir
if ( os.path.exists( tmpDir ) ) :
msg = '\nERROR: %s already exists, please remove if you did not create that manually !!' % tmpDir
raise Exception(msg)
logging.debug('Creating temporary package %s' %self._pl2xml_tmpDir)
os.makedirs( tmpDir+'/plugins' )
buildFileName = "%s/plugins/BuildFile.xml" % (tmpDir,)
with open(buildFileName, 'w') as buildFile:
buildFile.write( buildFileTemplate %(tmpSrcFileName,tmpLibName) )
buildFile.close()
tmpSrcFilePath = "%s/plugins/%s" % (tmpDir, tmpSrcFileName,)
with open(tmpSrcFilePath, 'w') as codeFile:
codeFile.write(code)
codeFile.close()
cmd = "source $CMS_PATH/cmsset_default.sh;"
cmd += "(cd %s ; scram b 2>&1 >build.log)" %tmpDir
pipe = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
out, err = pipe.communicate()
ret = pipe.returncode
buildTime = time.time()-startTime
logging.info("Building done in %s sec., return code from build: %s" %(buildTime,ret) )
if (ret != 0):
logging.error("Local build for xml dump failed.")
return None
libFile = os.path.join(libDir,pluginName + '.so')
shutil.copyfile(tmpLibFile,libFile)
module = importlib.import_module( pluginName )
funcName = payloadType+'2xml'
functor = getattr( module, funcName )
self.doCleanup = True
return functor
def payload2xml(self, session, payloadHash, destFile):
Payload = session.get_dbtype(self.conddb.Payload)
# get payload from DB:
result = session.query(Payload.data, Payload.object_type).filter(Payload.hash == payloadHash).one()
data, plType = result
logging.info('Found payload of type %s' %plType)
convFuncName = sanitize(plType)+'2xml'
xmlConverter = self.prepPayload2xml(plType)
if xmlConverter is not None:
obj = xmlConverter()
resultXML = obj.write( data )
if destFile is None:
print(resultXML)
else:
with open(destFile, 'w') as outFile:
outFile.write(resultXML)
outFile.close()
|
the-stack_0_20093 | #!/usr/bin/env python
# Shared functions
# https://github.com/rcook/swissarmyknife
import hashlib
import os
GITHUB_URL = "https://github.com/rcook/swissarmyknife"
GIB_THRESHOLD = 1024 * 1024 * 1024
MIB_THRESHOLD = 1024 * 1024
KIB_THRESHOLD = 1024
def compute_sha1(path, partial=False, include_file_size=True, block_size=1024):
"""
>>> import tempfile
>>> with tempfile.NamedTemporaryFile(delete=True) as f:
... f.write("hello world\\n")
... f.flush()
... r0 = compute_sha1(f.name)
... r1 = compute_sha1(f.name, block_size=4)
... r2 = compute_sha1(f.name, partial=True)
... r3 = compute_sha1(f.name, partial=True, block_size=4)
>>> r0
'12:22596363b3de40b06f981fb85d82312e8c0ed511'
>>> r1
'12:22596363b3de40b06f981fb85d82312e8c0ed511'
>>> r2
'12:22596363b3de40b06f981fb85d82312e8c0ed511'
>>> r3
'12:a5cec7af5f7aab769cf0d4aa440e01c7bfc371b2'
"""
file_size = os.stat(path).st_size
if partial:
block_count = 1
else:
block_count = (file_size / block_size) + (1 if (file_size % block_size) > 0 else 0)
m = hashlib.sha1()
with open(path, "rb") as f:
for _ in range(0, block_count):
m.update(f.read(block_size))
d = m.hexdigest()
if include_file_size:
return "{}:{}".format(file_size, d)
else:
return d
def add_switch_with_inverse(parser, name, default, help=None, inverse_help=None):
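    """
    Add a --<name>/--no-<name> switch pair to an argparse parser.

    Illustrative example (added for clarity; the flag name is made up):

    >>> import argparse
    >>> parser = argparse.ArgumentParser()
    >>> add_switch_with_inverse(parser, "dry-run", default=False,
    ...     help="simulate only", inverse_help="actually run")
    >>> parser.parse_args([]).dry_run
    False
    >>> parser.parse_args(["--dry-run"]).dry_run
    True
    """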
group = parser.add_mutually_exclusive_group()
dest = name.replace("-", "_")
# Include "(default)" indicator in help message
full_help = "{} (default)".format(help) if default else help
full_inverse_help = inverse_help if default else "{} (default)".format(inverse_help)
arg0 = lambda g: g.add_argument(
"--{}".format(name),
dest=dest,
action="store_true",
default=default,
help=full_help)
arg1 = lambda g: g.add_argument(
"--no-{}".format(name),
dest=dest,
action="store_false",
default=default,
help=full_inverse_help)
# List default option out of group first
if default:
arg0(group)
arg1(group)
else:
arg1(group)
arg0(group)
def pretty_byte_count(n):
"""
>>> pretty_byte_count(186129123987123)
'173,346.3 GiB'
>>> pretty_byte_count(186129123987)
'173.3 GiB'
>>> pretty_byte_count(186129123)
'177.5 MiB'
>>> pretty_byte_count(186129)
'181.8 KiB'
>>> pretty_byte_count(5000)
'4.9 KiB'
>>> pretty_byte_count(1024)
'1.0 KiB'
>>> pretty_byte_count(1000)
'1,000 bytes'
>>> pretty_byte_count(512)
'512 bytes'
"""
if n >= GIB_THRESHOLD:
return "{:,.1f} GiB".format(float(n) / GIB_THRESHOLD)
elif n >= MIB_THRESHOLD:
return "{:,.1f} MiB".format(float(n) / MIB_THRESHOLD)
elif n >= KIB_THRESHOLD:
return "{:,.1f} KiB".format(float(n) / KIB_THRESHOLD)
else:
return "{:,} bytes".format(n)
|
the-stack_0_20096 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='PCK', save_best='PCK')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
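# 8 output channels: one heatmap per fashion landmark of the DeepFashion (FLD)
# 'full' clothing subset used below.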
channel_cfg = dict(
num_output_channels=8,
dataset_joints=8,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7])
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w48-8ef0771d.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384))),
),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=48,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/fld'
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='DeepFashionDataset',
ann_file=f'{data_root}/annotations/fld_full_train.json',
img_prefix=f'{data_root}/img/',
subset='full',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='DeepFashionDataset',
ann_file=f'{data_root}/annotations/fld_full_val.json',
img_prefix=f'{data_root}/img/',
subset='full',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='DeepFashionDataset',
ann_file=f'{data_root}/annotations/fld_full_test.json',
img_prefix=f'{data_root}/img/',
subset='full',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
|
the-stack_0_20097 | #!/home/mobigen/anaconda3/bin/python
# -*- coding: utf-8 -*-
import configparser
from konlpy.tag import Kkma
from konlpy.utils import pprint
import os
import sys
import json
class KonLpyModule() :
def __init__(self, inputFile) :
cfg = configparser.ConfigParser()
cfg.read('/home/mobigen/user/KimJW/TextMining/conf/module.conf')
self.path = inputFile
self.FILE_PATH = cfg.get('KONLPY', 'FILE_PATH')
self.KKMA_TAG_DICT = json.loads(cfg.get('KONLPY', 'KKMA_TAG'))
def run(self) :
print ('KonLpy Start')
fileName = os.path.basename(self.path)
compFilePath = os.path.join(self.FILE_PATH, fileName)
fileText = ''
fileTextList = ''
kkma = Kkma()
with open(self.path, 'r', encoding='utf-8') as readFile :
fileTextList = readFile.readlines()
with open(compFilePath, 'w', encoding='utf-8') as comFile :
result = None
for fileText in fileTextList :
if not(fileText == None or fileText.strip() == '') :
try :
result = kkma.pos(fileText)
except Exception as e :
                        print (e)
                        continue
for resultTuple in result :
if resultTuple[1] in self.KKMA_TAG_DICT :
comFile.write('%s : %s [%s]' % (resultTuple[0], resultTuple[1], self.KKMA_TAG_DICT[resultTuple[1]]) + '\n')
else :
comFile.write('%s : %s [%s]' % (resultTuple[0], resultTuple[1], 'UnKonwn') + '\n')
else :
continue
# inputStr = input()
# inputStr = sys.stdin.readline()
def main() :
module = os.path.basename(sys.argv[0])
# section = sys.argv[1]
# cfgFile = sys.argv[2]
inputFile = sys.argv[1]
# cfg = ConfigParser.ConfigParser()
# cfg.read(cfgFile)
# logPath = cfg.get('GENERAL', 'LOG_PATH')
# logFile = os.path.join( logPath, '%s_%s.log' % (module, section) )
# if '-d' in sys.argv :
# Log.Init()
# else :
# Log.Init( Log.CRotatingLog(logFile, 100000, 9) )
# km = KonlpyModule(section, cfg)
km = KonLpyModule(inputFile)
km.run()
if __name__ == '__main__' :
try:
main()
    except ValueError as e :
        print(e)
|
the-stack_0_20098 | #!/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import codecs
import re
import configparser
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
class VersionChecker:
def __init__(self, utility):
# Read config.ini.
self.utility = utility
config = configparser.ConfigParser()
self.file_name = os.path.basename(__file__)
self.full_path = os.path.dirname(os.path.abspath(__file__))
self.root_path = os.path.join(self.full_path, '../')
config.read(os.path.join(self.root_path, 'config.ini'))
try:
self.signatures_dir = os.path.join(self.root_path, config['Common']['signature_path'])
self.signature_file = os.path.join(self.signatures_dir, config['VersionChecker']['signature_file'])
self.action_name = 'Product Explorer'
except Exception as e:
self.utility.print_message(FAIL, 'Reading config.ini is failure : {}'.format(e))
self.utility.write_log(40, 'Reading config.ini is failure : {}'.format(e))
sys.exit(1)
# Identify product name using signature.
def identify_product(self, response):
msg = self.utility.make_log_msg(self.utility.log_in,
self.utility.log_dis,
self.file_name,
action=self.action_name,
note='Identify product',
dest=self.utility.target_host)
self.utility.write_log(20, msg)
product_list = []
try:
# Identify product name and version.
with codecs.open(self.signature_file, 'r', 'utf-8') as fin:
matching_patterns = fin.readlines()
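                # Each signature line is '@'-separated:
                #   category@vendor@product@default_version@regex
                # where the regex's first capture group is the matching trigger
                # and an optional second group is the detected version.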
for idx, pattern in enumerate(matching_patterns):
items = pattern.replace('\r', '').replace('\n', '').split('@')
category = items[0]
vendor = items[1].lower()
product = items[2].lower()
default_ver = items[3]
signature = items[4]
self.utility.print_message(OK, '{}/{} Check {} using [{}]'.format(idx+1,
len(matching_patterns),
product,
signature))
obj_match = re.search(signature, response, flags=re.IGNORECASE)
if obj_match is not None:
trigger = obj_match.group(1)
# Check version.
version = default_ver
if obj_match.re.groups > 1:
version = obj_match.group(2)
# Add product name and version.
product_list.append([category, vendor, product, version, trigger])
msg = 'Find product={}/{}, verson={}, trigger={}'.format(vendor, product, version, trigger)
self.utility.print_message(WARNING, msg)
msg = self.utility.make_log_msg(self.utility.log_mid,
self.utility.log_dis,
self.file_name,
action=self.action_name,
note=msg,
dest=self.utility.target_host)
self.utility.write_log(20, msg)
except Exception as e:
msg = 'Identifying product is failure : {}'.format(e)
self.utility.print_exception(e, msg)
self.utility.write_log(30, msg)
msg = self.utility.make_log_msg(self.utility.log_out,
self.utility.log_dis,
self.file_name,
action=self.action_name,
note='Identify product',
dest=self.utility.target_host)
self.utility.write_log(20, msg)
return list(map(list, set(map(tuple, product_list))))
# Classifier product name using signatures.
def get_product_name(self, response):
self.utility.print_message(NOTE, 'Analyzing gathered HTTP response.')
self.utility.write_log(20, '[In] Analyzing gathered HTTP response [{}].'.format(self.file_name))
# Execute classifier.
product_list = self.identify_product(response)
if len(product_list) == 0:
self.utility.print_message(WARNING, 'Product Not Found.')
self.utility.write_log(30, 'Product Not Found.')
self.utility.write_log(20, '[Out] Analyzing gathered HTTP response [{}].'.format(self.file_name))
return product_list
|
the-stack_0_20101 | import sys
from datetime import timedelta
from django.db.models import Q
from django.dispatch import receiver
from django.utils.timezone import now
from django_scopes import scopes_disabled
from pretix.base.models import Event, User, WaitingListEntry
from pretix.base.models.waitinglist import WaitingListException
from pretix.base.services.tasks import EventTask
from pretix.base.signals import periodic_task
from pretix.celery_app import app
@app.task(base=EventTask)
def assign_automatically(event: Event, user_id: int=None, subevent_id: int=None):
if user_id:
user = User.objects.get(id=user_id)
else:
user = None
quota_cache = {}
gone = set()
qs = WaitingListEntry.objects.filter(
event=event, voucher__isnull=True
).select_related('item', 'variation', 'subevent').prefetch_related(
'item__quotas', 'variation__quotas'
).order_by('-priority', 'created')
if subevent_id and event.has_subevents:
subevent = event.subevents.get(id=subevent_id)
qs = qs.filter(subevent=subevent)
sent = 0
with event.lock():
for wle in qs:
if (wle.item, wle.variation, wle.subevent) in gone:
continue
ev = (wle.subevent or event)
if not ev.presale_is_running or (wle.subevent and not wle.subevent.active):
continue
if wle.subevent and not wle.subevent.presale_is_running:
continue
if not wle.item.active or (wle.variation and not wle.variation.active):
continue
quotas = (wle.variation.quotas.filter(subevent=wle.subevent)
if wle.variation
else wle.item.quotas.filter(subevent=wle.subevent))
availability = (
wle.variation.check_quotas(count_waitinglist=False, _cache=quota_cache, subevent=wle.subevent)
if wle.variation
else wle.item.check_quotas(count_waitinglist=False, _cache=quota_cache, subevent=wle.subevent)
)
if availability[1] is None or availability[1] > 0:
try:
wle.send_voucher(quota_cache, user=user)
sent += 1
except WaitingListException: # noqa
continue
# Reduce affected quotas in cache
for q in quotas:
quota_cache[q.pk] = (
quota_cache[q.pk][0] if quota_cache[q.pk][0] > 1 else 0,
quota_cache[q.pk][1] - 1 if quota_cache[q.pk][1] is not None else sys.maxsize
)
else:
gone.add((wle.item, wle.variation, wle.subevent))
return sent
@receiver(signal=periodic_task)
@scopes_disabled()
def process_waitinglist(sender, **kwargs):
qs = Event.objects.filter(
live=True
).exclude(
Q(date_to__isnull=True) | Q(date_to__lt=now() - timedelta(days=14)),
Q(presale_end__isnull=True) | Q(presale_end__lt=now() - timedelta(days=14)),
has_subevents=False,
date_from__lt=now() - timedelta(days=14),
).prefetch_related('_settings_objects', 'organizer___settings_objects').select_related('organizer')
for e in qs:
if e.settings.waiting_list_auto and (e.presale_is_running or e.has_subevents):
assign_automatically.apply_async(args=(e.pk,))
|
the-stack_0_20105 | import click
import os
import errno
import json
from anime_downloader import util
APP_NAME = 'anime downloader'
APP_DIR = click.get_app_dir(APP_NAME)
DEFAULT_CONFIG = {
'dl': {
'url': False,
'player': None,
'skip_download': False,
'download_dir': '.',
'quality': '720p',
'fallback_qualities': ['720p', '480p', '360p'],
'force_download': False,
'file_format': '{anime_title}/{anime_title}_{ep_no}',
'provider': 'twist.moe',
'external_downloader': '',
},
'watch': {
'quality': '720p',
'log_level': 'INFO',
'provider': '9anime',
},
"siteconfig": {
"nineanime": {
"server": "mp4upload",
},
'anistream.xyz': {
"version": "subbed",
},
'animeflv': {
"version": "subbed",
"server": "streamango",
}
}
}
class _Config:
CONFIG_FILE = os.path.join(APP_DIR, 'config.json')
def __init__(self):
try:
os.makedirs(APP_DIR)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.exists(self.CONFIG_FILE):
self._write_default_config()
self._CONFIG = DEFAULT_CONFIG
else:
self._CONFIG = self._read_config()
def update(gkey, to_be, from_dict):
if gkey not in to_be:
to_be[gkey] = {}
for key, val in from_dict[gkey].items():
if key not in to_be[gkey].keys():
to_be[gkey][key] = val
elif isinstance(from_dict[gkey][key], dict):
update(key, to_be[gkey], from_dict[gkey])
for key in DEFAULT_CONFIG.keys():
update(key, self._CONFIG, DEFAULT_CONFIG)
self.write()
# Expand environment variables in download_dir (#222)
download_dir = self._CONFIG['dl']['download_dir']
download_dir = os.path.expandvars(download_dir)
self._CONFIG['dl']['download_dir'] = download_dir
@property
def CONTEXT_SETTINGS(self):
return dict(
default_map=self._CONFIG
)
def __getitem__(self, attr):
return self._CONFIG[attr]
def write(self):
self._write_config(self._CONFIG)
def _write_config(self, config_dict):
with open(self.CONFIG_FILE, 'w') as configfile:
json.dump(config_dict, configfile, indent=4, sort_keys=True)
def _read_config(self):
with open(self.CONFIG_FILE, 'r') as configfile:
conf = json.load(configfile)
return conf
def _write_default_config(self):
if util.check_in_path('aria2c'):
DEFAULT_CONFIG['dl']['external_downloader'] = '{aria2}'
self._write_config(DEFAULT_CONFIG)
Config = _Config()
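# Illustrative usage (keys mirror DEFAULT_CONFIG; the import path below assumes
# this module is anime_downloader/config.py):
#
#   from anime_downloader.config import Config
#   Config['dl']['quality']            # '720p' unless overridden in config.json
#   Config['dl']['quality'] = '480p'   # mutates the in-memory config dict
#   Config.write()                     # persists changes back to config.json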
|
the-stack_0_20106 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import time
from tempest import config
from tempest.lib.cli import base
from tempest import test
from tempest.lib import exceptions as exc
from tempest.lib.auth import IDENTITY_VERSION
from tempest.lib.common.utils import data_utils
from sahara_tempest_plugin.common import plugin_utils
TEMPEST_CONF = config.CONF
DEL_RESULT = '''\
{} "{}" has been removed successfully.
'''
TEMPEST_ERROR_MESSAGE = 'No matches found.'
class ClientTestBase(base.ClientTestBase):
"""Base class for saharaclient tests.
Establishes the sahara client and retrieves the essential environment
information.
"""
def _get_clients(self):
cli_dir = os.environ.get('OS_SAHARA_TESTS_DIR', '')
if not cli_dir:
# if this is executed in a virtualenv, the command installed there
# will be the first one.
paths = os.environ.get('PATH').split(':')
for path in paths:
client_candidate = os.path.join(path, 'openstack')
if os.path.isfile(client_candidate) and os.access(
client_candidate, os.X_OK):
cli_dir = path
break
self.client_manager_admin = \
test.BaseTestCase.get_client_manager('admin')
auth_provider = self.client_manager_admin.auth_provider
self.project_network = test.BaseTestCase.get_tenant_network('admin')
project_name = auth_provider.credentials.get('project_name')
if project_name is None:
project_name = auth_provider.credentials.get('tenant_name')
# complicated but probably the only way to get the exact type
# of Identity API version
if isinstance(auth_provider, IDENTITY_VERSION['v2'][1]):
identity_api_version = 2
else:
identity_api_version = 3
return base.CLIClient(
username=auth_provider.credentials.get('username'),
password=auth_provider.credentials.get('password'),
tenant_name=project_name,
uri=auth_provider.base_url({'service': 'identity'}),
cli_dir=cli_dir,
user_domain=auth_provider.credentials.get('user_domain_name'),
project_domain=auth_provider.credentials.get(
'project_domain_name'),
identity_api_version=identity_api_version)
def openstack(self, action, flags='', params='', fail_ok=False,
merge_stderr=False):
if '--os-data-processing-api-version' not in flags:
flags = flags + '--os-data-processing-api-version %s' % \
(TEMPEST_CONF.data_processing.api_version_saharaclient)
return self.clients.openstack(action, flags=flags, params=params,
fail_ok=fail_ok, merge_stderr=False)
def listing_result(self, command):
command_for_item = self.openstack('dataprocessing', params=command)
result = self.parser.listing(command_for_item)
return result
def find_in_listing(self, result, value, field='name'):
for line in result:
if line['Field'].lower() == field.lower():
self.assertEqual(line['Value'].lower(), value.lower())
return
raise self.skipException('No table to show information')
def check_if_delete(self, command, name):
delete_cmd = self.openstack('dataprocessing %s delete' % command,
params=name)
result = DEL_RESULT.format(command, name)
# lower() is required because "command" in the result string could
# have the first letter capitalized.
self.assertEqual(delete_cmd.lower(), result.lower())
def update_resource_value(self, command, value, params):
new_value = data_utils.rand_name(value)
command = '%s update %s' % (command, value)
params = '%s %s' % (params, new_value)
update_result = self.listing_result('%s %s' % (command, params))
self.find_in_listing(update_result, new_value)
return new_value
def delete_resource(self, command, name):
list_of_resources = self.listing_result('%s list' % command)
list_of_resource_names = [r['Name'] for r in list_of_resources]
if name in list_of_resource_names:
self.openstack('dataprocessing %s delete' % command, params=name)
def get_default_plugin(self):
plugins = self.listing_result('plugin list')
default_plugin_name = plugin_utils.get_default_plugin()
for plugin in plugins:
if plugin['Name'] == default_plugin_name:
return plugin
raise self.skipException('No available plugins for testing')
def find_id_of_pool(self):
floating_pool_list = self.openstack('network list --external')
floating_pool = self.parser.listing(floating_pool_list)
if not floating_pool:
raise self.skipException('Floating pool ip list is empty')
# if not empty, there should be at least one element
return floating_pool[0]['ID']
def _get_cluster_status(self, cluster_name):
status = None
show_cluster = self.listing_result('cluster show %s' % cluster_name)
for line in show_cluster:
if line['Field'] == 'Status':
status = line['Value']
if status is None:
raise self.skipException('Can not find the cluster to get its '
'status')
return status
def _get_resource_id(self, resource, resource_name):
resource_id = None
show_resource = self.listing_result('%s show %s'
% (resource, resource_name))
for line in show_resource:
if line['Field'] == 'Id':
resource_id = line['Value']
if resource_id is None:
raise self.skipException('No such %s exists' % resource)
return resource_id
def _poll_cluster_status(self, cluster_name):
with fixtures.Timeout(TEMPEST_CONF.data_processing.cluster_timeout,
gentle=True):
while True:
status = self._get_cluster_status(cluster_name)
if status == 'Active':
break
if status == 'Error':
raise exc.TempestException("Cluster in %s state" % status)
time.sleep(3)
def wait_for_resource_deletion(self, name, type):
# type can be cluster, cluster template or node group template string
name_exist = False
# if name exists in the command "type list" than tests should fail
with fixtures.Timeout(300, gentle=True):
while True:
list_of_types = self.listing_result('%s list' % type)
list_names = [p['Name'] for p in list_of_types]
if name in list_names:
name_exist = True
if not name_exist:
break
def check_negative_scenarios(self, error_message, cmd, name):
msg_exist = None
try:
self.openstack('dataprocessing %s' % cmd, params=name)
except exc.CommandFailed as e:
# lower() is required because "result" string could
# have the first letter capitalized.
if error_message.lower() in str(e).lower():
msg_exist = True
if not msg_exist:
raise exc.TempestException('"%s" is not a part of output of '
'executed command "%s" (%s)'
                                           % (error_message, cmd, str(e)))
else:
raise exc.TempestException('"%s %s" in negative scenarios has '
'been executed without any errors'
% (cmd, name))
@classmethod
def tearDownClass(cls):
if hasattr(super(ClientTestBase, cls), 'tearDownClass'):
super(ClientTestBase, cls).tearDownClass()
# this'll be needed as long as BaseTestCase functions
# are used in this class, otherwise projects, users,
# networks and routers created won't be deleted
test.BaseTestCase.clear_credentials()
|
the-stack_0_20107 | # -----------------------------------------------------------------------------
# WWW 2019 Debiasing Vandalism Detection Models at Wikidata
#
# Copyright (c) 2019 Stefan Heindorf, Yan Scholten, Gregor Engels, Martin Potthast
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import abc
import logging
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.base import clone
_logger = logging.getLogger()
########################################################################
# Multiple Instance Learning
########################################################################
class BaseMultipleInstanceClassifier(BaseEstimator):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def fit(self, g, X, y):
pass
@abc.abstractmethod
def predict_proba(self, g, X):
pass
class SingleInstanceClassifier(BaseMultipleInstanceClassifier):
def __init__(self, base_estimator, agg_func='mean', window=None):
self.agg_func = agg_func # name of aggregation function
self.base_estimator = base_estimator
self.proba = None
self.window = window
def fit(self, g, X, y, sample_weight=None):
self.base_estimator = clone(self.base_estimator)
self.base_estimator.fit(X, y, sample_weight=sample_weight)
self.proba = None
def set_proba(self, proba):
self.proba = proba
# g contains the group ids
def predict_proba(self, g, X):
# Determines the aggregation function (e.g., mean, max, min, ...)
if len(g) != len(X):
raise Exception(
                'g and X should have same length')
# Has user explicitly specified proba?
# (to save some computational time)
if self.proba is not None:
proba = self.proba # use stored proba and ignore X
else:
# if proba has not been explicitly set,
# use base_estimator to compute it
proba = self.base_estimator.predict_proba(X)[:, 1]
if self.agg_func == 'cummean':
agg_proba = self._cummean_proba(g, proba, self.window)
else:
raise Exception('Unknown function name: ' + str(self.agg_func))
return agg_proba
@staticmethod
def _cummean_proba(group, proba, window):
sum_result = group_reduce_lookahead(group, proba, np.add, window)
count_result = group_reduce_lookahead(
group, np.asarray([1] * len(proba)), np.add, window)
result = sum_result / count_result
_logger.debug(
'Average group length per revision: ' +
str(np.sum(count_result) / len(proba)))
return result
class SimpleMultipleInstanceClassifier(BaseMultipleInstanceClassifier):
def __init__(self, base_estimator, trans_func='min_max', window=None):
self.trans_func = trans_func # name of aggregation function
self.base_estimator = base_estimator
self.window = window
def fit(self, g, X, y, sample_weight=None):
self.base_estimator = clone(self.base_estimator)
_logger.debug('transforming...')
_, trans_X, trans_y = self._cummin_cummax_trans_func(
g, X, y, self.window)
_logger.debug('transforming...done.')
_logger.debug('fitting...')
self.base_estimator.fit(trans_X, trans_y, sample_weight=sample_weight)
_logger.debug('fitting...done.')
def predict_proba(self, g, X):
# transformation into 'group space'
trans_g, trans_X, _ = self._cummin_cummax_trans_func(
g, X, None, self.window)
# prediction in 'group space'
trans_proba = self.base_estimator.predict_proba(trans_X)
        if self.trans_func == 'cummin_cummax':
            proba = trans_proba[:, 1]  # result already in 'instance space'
        else:
            raise Exception('Unknown function name: ' + str(self.trans_func))
        return proba
@classmethod
def _cummin_cummax_trans_func(cls, g, X, y, window):
_logger.debug('lookahead maximum...')
max_X = group_reduce_lookahead(g, X, np.maximum, window)
_logger.debug('lookahead minimum...')
min_X = group_reduce_lookahead(g, X, np.minimum, window)
_logger.debug('concatenate...')
result_X = np.concatenate([max_X, min_X], axis=1)
del(max_X) # free memory
del(min_X) # free memory
_logger.debug('ascontiguous...')
result_X = np.ascontiguousarray(result_X)
return g, result_X, y
class CombinedMultipleInstanceClassifier(BaseMultipleInstanceClassifier):
def __init__(self, base_estimator1, base_estimator2):
self.base_estimator1 = base_estimator1
self.base_estimator2 = base_estimator2
def fit(self, g, X, y, sample_weight=None):
self.base_estimator1 = clone(self.base_estimator1)
self.base_estimator2 = clone(self.base_estimator2)
self.base_estimator1.fit(g, X, y, sample_weight)
self.base_estimator2.fit(g, X, y, sample_weight)
self.base_estimator1_proba = None
self.base_estimator2_proba = None
def set_proba(self, base_estimatro1_proba, base_estimator2_proba):
self.base_estimator1_proba = base_estimatro1_proba
self.base_estimator2_proba = base_estimator2_proba
def predict_proba(self, g, X):
if self.base_estimator1_proba is None:
base_estimator1_proba = self.base_estimator1.predict_proba(g, X)
else:
base_estimator1_proba = self.base_estimator1_proba
if self.base_estimator2_proba is None:
base_estimator2_proba = self.base_estimator2.predict_proba(g, X)
else:
base_estimator2_proba = self.base_estimator2_proba
proba = self.average_proba(
base_estimator1_proba, base_estimator2_proba)
return proba
# Averages the scores of two classifiers
@staticmethod
def average_proba(prob1, prob2):
tmp = pd.DataFrame()
tmp['prob1'] = prob1
tmp['prob2'] = prob2
avg_proba = np.ascontiguousarray(tmp.mean(axis=1).values)
return avg_proba
########################################################################
# Online Transformers
########################################################################

class StreamGroupReduceTransformer:
    """Operates on streams of (g,v) pairs where g denotes a group and v a value.

    Reduces the stream within every group by applying the two-parameter
    function func.
    """

    def __init__(self, func):
        self.func = func
        self.d = {}

    def partial_fit(self, g, v):
        if g in self.d:
            self.d[g] = self.func(self.d[g], v)
        else:
            self.d[g] = v
        return self.d[g]

    def transform(self, g):
        return self.d[g]
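

# Minimal usage sketch for StreamGroupReduceTransformer: running per-group
# maxima over a stream of (group, value) pairs. The stream below is made up.
def _example_stream_group_reduce():
    transformer = StreamGroupReduceTransformer(max)
    stream = [('a', 1), ('b', 5), ('a', 3), ('a', 2)]
    running = [transformer.partial_fit(g, v) for g, v in stream]
    # running == [1, 5, 3, 3]; transformer.transform('a') == 3
    return running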


def group_reduce_lookahead(g, X, func, lookahead):
    """Apply function func cumulatively while looking ahead."""
    if lookahead is None or lookahead > len(g):
        lookahead = len(g)  # None or an oversized window means unlimited lookahead
    result = [np.nan] * len(g)
    transformer = StreamGroupReduceTransformer(func)
    for i in range(len(g) + lookahead - 1):
        if i < len(g):
            # add current element to lookahead data structure
            cur_g = g[i]
            cur_v = X[i]
            transformer.partial_fit(cur_g, cur_v)
        prev_i = i - lookahead + 1
        if prev_i >= 0:
            # compute result
            prev_g = g[prev_i]
            result[prev_i] = transformer.transform(prev_g)
    result = np.asarray(result)
    return result
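

# Worked sketch of group_reduce_lookahead with a window of 2; the groups and
# values are made up. Each output entry is the running reduce (here: maximum)
# of its own group, including up to one element of look-ahead.
def _example_group_reduce_lookahead():
    g = np.asarray([0, 0, 0, 1, 1])
    X = np.asarray([1, 3, 2, 4, 5])
    # -> array([3, 3, 3, 5, 5]); e.g. position 0 already "sees" the 3 at
    # position 1 because of the look-ahead window of 2.
    return group_reduce_lookahead(g, X, np.maximum, lookahead=2)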
the-stack_0_20108
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from dotmap import DotMap
import gym
from src.args import get_args
from src.modeling.models.BNN import BNN
from src.misc.DotmapUtils import get_required_argument
from src.modeling.layers import FC
import src.envs
from src.modeling.trainers.registry import register


class Cartpole:
    # ENV_NAME = "MBRLCartpole-v0"
    # TASK_HORIZON = 200
    # NTRAIN_ITERS = 50
    # NROLLOUTS_PER_ITER = 1
    # PLAN_HOR = 25

    MODEL_IN, MODEL_OUT = 6, 4
    GP_NINDUCING_POINTS = 200

    def __init__(self, args):
        self.args = args
        self.env = gym.make("MBRLCartpole-v0")
        # self.NN_TRAIN_CFG = {"epochs": 5}
        # self.OPT_CFG = {
        #     "Random": {"popsize": 2000},
        #     "CEM": {"popsize": 400, "num_elites": 40, "max_iters": 5, "alpha": 0.1},
        # }

    @staticmethod
    def obs_preproc(obs):
        if isinstance(obs, np.ndarray):
            return np.concatenate(
                [np.sin(obs[:, 1:2]), np.cos(obs[:, 1:2]), obs[:, :1], obs[:, 2:]],
                axis=1,
            )
        else:
            return tf.concat(
                [
                    tf.math.sin(obs[:, 1:2]),
                    tf.math.cos(obs[:, 1:2]),
                    obs[:, :1],
                    obs[:, 2:],
                ],
                axis=1,
            )
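
    # Illustrative sketch of the observation preprocessing: a raw cartpole
    # observation [x, theta, x_dot, theta_dot] (values made up) becomes
    # [sin(theta), cos(theta), x, x_dot, theta_dot]. MODEL_IN is 6 because,
    # presumably, the 1-D action is appended to these 5 features elsewhere in
    # the training pipeline.
    @staticmethod
    def _example_obs_preproc():
        obs = np.array([[0.1, np.pi / 2, 0.0, 0.3]])
        # -> approximately [[1.0, 0.0, 0.1, 0.0, 0.3]]
        return Cartpole.obs_preproc(obs)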

    @staticmethod
    def obs_postproc(obs, pred):
        return obs + pred

    @staticmethod
    def obs_postproc2(next_obs):
        return next_obs

    @staticmethod
    def targ_proc(obs, next_obs):
        return next_obs - obs

    @staticmethod
    def obs_cost_fn(obs):
        if isinstance(obs, np.ndarray):
            return -np.exp(
                -np.sum(
                    np.square(
                        Cartpole._get_ee_pos(obs, are_tensors=False)
                        - np.array([0.0, 0.6])
                    ),
                    axis=1,
                )
                / (0.6 ** 2)
            )
        else:
            return -tf.math.exp(
                -tf.math.reduce_sum(
                    tf.math.square(
                        Cartpole._get_ee_pos(obs, are_tensors=True)
                        - np.array([0.0, 0.6])
                    ),
                    axis=1,
                )
                / (0.6 ** 2)
            )

    @staticmethod
    def ac_cost_fn(acs):
        if isinstance(acs, np.ndarray):
            return 0.01 * np.sum(np.square(acs), axis=1)
        else:
            return 0.01 * tf.math.reduce_sum(tf.square(acs), axis=1)
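
    # Sketch of how the two cost terms might be combined when scoring one
    # step of a candidate action sequence: state cost plus the small action
    # penalty. The observation and action values are made up, and summing the
    # two terms is an assumption about the downstream planner rather than
    # something defined in this class.
    @staticmethod
    def _example_step_cost():
        obs = np.array([[0.0, np.pi, 0.0, 0.0]])  # goal orientation (theta = pi)
        acs = np.array([[0.5]])
        # -> approximately array([-0.9975])
        return Cartpole.obs_cost_fn(obs) + Cartpole.ac_cost_fn(acs)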

    def nn_constructor(self):
        # model = get_required_argument(
        #     model_init_cfg, "model_class", "Must provide model class"
        # )(
        #     DotMap(
        #         name="model",
        #         num_networks=get_required_argument(
        #             model_init_cfg, "num_nets", "Must provide ensemble size"
        #         ),
        #         load_model=model_init_cfg.get("load_model", False),
        #         model_dir=model_init_cfg.get("model_dir", None),
        #     )
        # )
        if not self.args.load_model:
            model_config = [
                DotMap(
                    {
                        "layer_name": "FC",
                        "input_dim": self.MODEL_IN,
                        "output_dim": 500,
                        "activation": "swish",
                        "weight_decay": 0.0001,
                        "ensemble_size": 1,
                    }
                ),
                DotMap(
                    {
                        "layer_name": "FC",
                        "input_dim": 500,
                        "output_dim": 500,
                        "activation": "swish",
                        "weight_decay": 0.00025,
                        "ensemble_size": 1,
                    }
                ),
                DotMap(
                    {
                        "layer_name": "FC",
                        "input_dim": 500,
                        "output_dim": 500,
                        "activation": "swish",
                        "weight_decay": 0.00025,
                        "ensemble_size": 1,
                    }
                ),
                DotMap(
                    {
                        "layer_name": "FC",
                        "input_dim": 500,
                        "output_dim": self.MODEL_OUT,
                        "activation": "swish",
                        "weight_decay": 0.0005,
                        "ensemble_size": 1,
                    }
                ),
            ]
        model = BNN(DotMap(name="test", num_networks=1), model_config)
        # model.add(
        #     FC(
        #         500,
        #         input_dim=self.MODEL_IN,
        #         activation="swish",
        #         weight_decay=0.0001,
        #     )
        # )
        # model.add(FC(500, activation="swish", weight_decay=0.00025))
        # model.add(FC(500, activation="swish", weight_decay=0.00025))
        # model.add(FC(self.MODEL_OUT, weight_decay=0.0005))
        return model

    def gp_constructor(self, model_init_cfg):
        model = get_required_argument(
            model_init_cfg, "model_class", "Must provide model class"
        )(
            DotMap(
                name="model",
                kernel_class=get_required_argument(
                    model_init_cfg, "kernel_class", "Must provide kernel class"
                ),
                kernel_args=model_init_cfg.get("kernel_args", {}),
                num_inducing_points=get_required_argument(
                    model_init_cfg,
                    "num_inducing_points",
                    "Must provide number of inducing points.",
                ),
            )
        )
        return model

    @staticmethod
    def _get_ee_pos(obs, are_tensors=False):
        x0, theta = obs[:, :1], obs[:, 1:2]
        if are_tensors:
            return tf.concat(
                [x0 - 0.6 * tf.math.sin(theta), -0.6 * tf.math.cos(theta)], axis=1
            )
        else:
            return np.concatenate(
                [x0 - 0.6 * np.sin(theta), -0.6 * np.cos(theta)], axis=1
            )
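
    # Small worked example of the end-effector position used by obs_cost_fn
    # (a pole length of 0.6 is implied by the formula): at theta = 0 the tip
    # sits at [x, -0.6], while at theta = pi it sits at [x, 0.6], which
    # obs_cost_fn compares against the fixed target [0.0, 0.6]. The
    # observation values are made up.
    @staticmethod
    def _example_ee_pos():
        obs = np.array([[0.0, 0.0, 0.0, 0.0], [0.0, np.pi, 0.0, 0.0]])
        # -> approximately [[0.0, -0.6], [0.0, 0.6]]
        return Cartpole._get_ee_pos(obs)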


@register
def cartpole(args):
    return Cartpole(args)