filename (stringlengths 13–19) | text (stringlengths 134–1.04M)
---|---
the-stack_0_3162 | from ..action import Action
from db.models.user import User
from db.models.account import Account
from db.models.artist import Artist
from db.serializers.user_serializer import UserSerializer
from db.serializers.account_serializer import AccountSerializer
class ArtistAccount(Action):
arguments = ['user']
def perform(self):
user = self.user
user_info = User.objects.get(id=user.id)
artist_info = Artist.objects.get(user_id=user.id)
        try:
            account_info = Account.objects.get(artist_id=artist_info.id)
        except Account.DoesNotExist:
            self.fail(dict(account_error='Please add your account information'))
serialize_user = UserSerializer(user_info)
serialize_account = AccountSerializer(account_info)
account_information = {
'email': serialize_user.data.get('email', ''),
'account_number': serialize_account.data.get('account_number', ''),
'account_name': serialize_account.data.get('account_name', ''),
'bank_name': serialize_account.data.get('bank_name', ''),
'bank_code': serialize_account.data.get('bank_code', '')
}
return account_information
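# Hypothetical usage sketch (the exact entry point depends on the Action base
# class, which is not shown here):
#   account_information = ArtistAccount(user=request.user).perform()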
|
the-stack_0_3163 | # coding:utf-8
"""
author:Qiu Yurui
"""
import tensorflow as tf
import numpy as np
import os
import sys
import cv2
import argparse
import glob
import tensorflow.contrib.slim as slim
# inception_v3 is used below but was never imported; one possible source under
# TF 1.x is slim's bundled nets (this path is an assumption about the author's setup):
from tensorflow.contrib.slim.python.slim.nets import inception_v3
import matplotlib.pyplot as plt
from tensorflow.python.framework import graph_util
from tensorflow.python import pywrap_tensorflow
def getweightpath(wdir):
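    # The first line of TensorFlow's `checkpoint` file looks like
    #   model_checkpoint_path: "model.ckpt-123456"
    # so splitting on '"' yields the checkpoint prefix to restore from.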
ckpath = os.path.join(wdir, 'checkpoint')
fr = open(ckpath, 'rt')
fline = fr.readline()
fr.close()
ckname = fline.split('"')[1]
return os.path.join(wdir, ckname)
def exportpb_fromckpt(input_checkpoint, output_graph, output_node_names):
"""
:param input_checkpoint: ckpt model path
:param output_graph: save path of pb model
:return:
"""
with tf.Graph().as_default():
with tf.Session() as sess:
gbs = tf.Variable(0, trainable=False)
input_image = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='images')
label_target = tf.placeholder(tf.int32, shape=[None, ], name='labels')
logits, end_points = inception_v3.inception_v3(input_image,
num_classes=2,
is_training=False,
dropout_keep_prob=0.0,
depth_multiplier=0.5)
# output = tf.identity(logits, name=output_node_names)
saver = tf.train.Saver()
saver.restore(sess, input_checkpoint)
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=output_node_names.split(','))
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
def freeze_graph(input_checkpoint, output_graph):
    '''
    :param input_checkpoint: ckpt model path
    :param output_graph: save path of the frozen pb model
    :return:
    '''
    # checkpoint = tf.train.get_checkpoint_state(model_folder)  # check whether the ckpt files in the directory are usable
    # input_checkpoint = checkpoint.model_checkpoint_path  # get the ckpt file path
    # Specify the output node name; it must be a node that exists in the original model
output_node_names = "InceptionV3/Logits/SpatialSqueeze"
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)  # restore the graph and load the checkpoint data
        output_graph_def = graph_util.convert_variables_to_constants(  # freeze the model: fix variable values as constants
            sess=sess,
            input_graph_def=sess.graph_def,  # equivalent to sess.graph_def
            output_node_names=output_node_names.split(","))  # separate multiple output nodes with commas
        with tf.gfile.GFile(output_graph, "wb") as f:  # save the model
            f.write(output_graph_def.SerializeToString())  # serialize and write the output
        print("%d ops in the final graph." % len(output_graph_def.node))  # number of op nodes in the final graph
if __name__ == '__main__':
root_path_model = ''
root_path_pb = ''
output_node_names = ''
checkpoint_path = os.path.join('/Users/qiuyurui/Downloads/model.ckpt-1758393')
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
print('tensor_name: ', key)
freeze_graph('/Users/qiuyurui/Downloads/model.ckpt-1758393', 'test.pb')
if not os.path.exists(root_path_pb):
os.makedirs(root_path_pb)
dirs = glob.glob(root_path_model + '/*')
for dir in dirs:
if dir.startswith('.'):
continue
if not os.path.isdir(dir):
continue
number = dir.split('/')[-1].split('_')[-1]
ckpath = getweightpath(dir)
pbpath = os.path.join(root_path_pb, '{0}.pb'.format(number))
exportpb_fromckpt(ckpath, pbpath, output_node_names)
|
the-stack_0_3165 | # data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
# ==========================dataset load==========================
class RescaleT(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, image):
new_size = (self.output_size, self.output_size)
print(image.shape)
print(new_size)
img = transform.resize(image, new_size, mode='constant')
print('rescale T')
return img
class Rescale(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, image):
if random.random() >= 0.5:
image = image[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
        # resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
img = transform.resize(image, (new_h, new_w), mode='constant')
return img
class RandomCrop(object):
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, image):
if random.random() >= 0.5:
image = image[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
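        # assumes h > new_h and w > new_w; np.random.randint(0, 0) raises ValueError otherwise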
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
return image
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, image):
tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
image = image / np.max(image)
if image.shape[2] == 1:
tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 1] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 2] = (image[:, :, 0] - 0.485) / 0.229
else:
tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 1] = (image[:, :, 1] - 0.456) / 0.224
tmpImg[:, :, 2] = (image[:, :, 2] - 0.406) / 0.225
        # transpose from HWC to the CHW layout expected by PyTorch
tmpImg = tmpImg.transpose((2, 0, 1))
image = torch.from_numpy(tmpImg)
return image
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self, flag=0):
self.flag = flag
def __call__(self, image):
# change the color space
if self.flag == 2: # with rgb and Lab colors
tmpImg = np.zeros((image.shape[0], image.shape[1], 6))
tmpImgt = np.zeros((image.shape[0], image.shape[1], 3))
if image.shape[2] == 1:
tmpImgt[:, :, 0] = image[:, :, 0]
tmpImgt[:, :, 1] = image[:, :, 0]
tmpImgt[:, :, 2] = image[:, :, 0]
else:
tmpImgt = image
tmpImgtl = color.rgb2lab(tmpImgt)
            # normalize image to range [0,1]
tmpImg[:, :, 0] = (tmpImgt[:, :, 0] - np.min(tmpImgt[:, :, 0])) / (
np.max(tmpImgt[:, :, 0]) - np.min(tmpImgt[:, :, 0]))
tmpImg[:, :, 1] = (tmpImgt[:, :, 1] - np.min(tmpImgt[:, :, 1])) / (
np.max(tmpImgt[:, :, 1]) - np.min(tmpImgt[:, :, 1]))
tmpImg[:, :, 2] = (tmpImgt[:, :, 2] - np.min(tmpImgt[:, :, 2])) / (
np.max(tmpImgt[:, :, 2]) - np.min(tmpImgt[:, :, 2]))
tmpImg[:, :, 3] = (tmpImgtl[:, :, 0] - np.min(
tmpImgtl[:, :, 0])) / (np.max(tmpImgtl[:, :, 0]) - np.min(
tmpImgtl[:, :, 0]))
tmpImg[:, :, 4] = (tmpImgtl[:, :, 1] - np.min(
tmpImgtl[:, :, 1])) / (np.max(tmpImgtl[:, :, 1]) - np.min(
tmpImgtl[:, :, 1]))
tmpImg[:, :, 5] = (tmpImgtl[:, :, 2] - np.min(
tmpImgtl[:, :, 2])) / (np.max(tmpImgtl[:, :, 2]) - np.min(
tmpImgtl[:, :, 2]))
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.mean(
tmpImg[:, :, 0])) / np.std(tmpImg[:, :, 0])
tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.mean(
tmpImg[:, :, 1])) / np.std(tmpImg[:, :, 1])
tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.mean(
tmpImg[:, :, 2])) / np.std(tmpImg[:, :, 2])
tmpImg[:, :, 3] = (tmpImg[:, :, 3] - np.mean(
tmpImg[:, :, 3])) / np.std(tmpImg[:, :, 3])
tmpImg[:, :, 4] = (tmpImg[:, :, 4] - np.mean(
tmpImg[:, :, 4])) / np.std(tmpImg[:, :, 4])
tmpImg[:, :, 5] = (tmpImg[:, :, 5] - np.mean(
tmpImg[:, :, 5])) / np.std(tmpImg[:, :, 5])
elif self.flag == 1: # with Lab color
tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
if image.shape[2] == 1:
tmpImg[:, :, 0] = image[:, :, 0]
tmpImg[:, :, 1] = image[:, :, 0]
tmpImg[:, :, 2] = image[:, :, 0]
else:
tmpImg = image
tmpImg = color.rgb2lab(tmpImg)
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.min(tmpImg[:, :, 0])) / (
np.max(tmpImg[:, :, 0]) - np.min(tmpImg[:, :, 0]))
tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.min(tmpImg[:, :, 1])) / (
np.max(tmpImg[:, :, 1]) - np.min(tmpImg[:, :, 1]))
tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.min(tmpImg[:, :, 2])) / (
np.max(tmpImg[:, :, 2]) - np.min(tmpImg[:, :, 2]))
tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.mean(
tmpImg[:, :, 0])) / np.std(tmpImg[:, :, 0])
tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.mean(
tmpImg[:, :, 1])) / np.std(tmpImg[:, :, 1])
tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.mean(
tmpImg[:, :, 2])) / np.std(tmpImg[:, :, 2])
else: # with rgb color
tmpImg = np.zeros((image.shape[0], image.shape[1], 3))
print(f"tmpimg shape: {tmpImg.shape}")
image = image / np.max(image)
if image.shape[2] == 1:
tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 1] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 2] = (image[:, :, 0] - 0.485) / 0.229
else:
tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
tmpImg[:, :, 1] = (image[:, :, 1] - 0.456) / 0.224
tmpImg[:, :, 2] = (image[:, :, 2] - 0.406) / 0.225
        # transpose from HWC to the CHW layout expected by PyTorch
# transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
image = torch.from_numpy(tmpImg)
print('totensorlab')
print(f"final image shape: {image.shape}")
return image
class SalObjDataset(Dataset):
def __init__(self, img_name_list, lbl_name_list, transform=None):
# self.root_dir = root_dir
# self.image_name_list = glob.glob(image_dir+'*.png')
# self.label_name_list = glob.glob(label_dir+'*.png')
self.image_name_list = img_name_list
self.label_name_list = lbl_name_list
self.transform = transform
def __len__(self):
return len(self.image_name_list)
def __getitem__(self, idx):
# image = Image.open(self.image_name_list[idx])#io.imread(self.image_name_list[idx])
# label = Image.open(self.label_name_list[idx])#io.imread(self.label_name_list[idx])
image = io.imread(self.image_name_list[idx])
imname = self.image_name_list[idx]
imidx = np.array([idx])
if (0 == len(self.label_name_list)):
label_3 = np.zeros(image.shape)
else:
label_3 = io.imread(self.label_name_list[idx])
label = np.zeros(label_3.shape[0:2])
if (3 == len(label_3.shape)):
label = label_3[:, :, 0]
elif (2 == len(label_3.shape)):
label = label_3
if (3 == len(image.shape) and 2 == len(label.shape)):
label = label[:, :, np.newaxis]
elif (2 == len(image.shape) and 2 == len(label.shape)):
image = image[:, :, np.newaxis]
label = label[:, :, np.newaxis]
sample = {'imidx': imidx, 'image': image, 'label': label}
if self.transform:
sample = self.transform(sample)
return sample
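# Minimal usage sketch (hypothetical paths). Note that the transforms above operate on
# a bare image array, while __getitem__ passes the whole sample dict to self.transform,
# so in practice the transforms need to be adapted (or wrapped) to accept the dict:
# dataset = SalObjDataset(img_name_list=glob.glob('train/images/*.png'),
#                         lbl_name_list=glob.glob('train/labels/*.png'),
#                         transform=transforms.Compose([RescaleT(320), ToTensorLab(flag=0)]))
# loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=1)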
|
the-stack_0_3167 | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import os
import tct
import sys
params = tct.readjson(sys.argv[1])
binabspath = sys.argv[2]
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
resultfile = params['resultfile']
result = tct.readjson(resultfile)
loglist = result['loglist'] = result.get('loglist', [])
toolname = params['toolname']
toolname_pure = params['toolname_pure']
workdir = params['workdir']
exitcode = CONTINUE = 0
reason = ''  # passed to tct.save_the_result below
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Get and check required milestone(s)
# --------------------------------------------------
def milestones_get(name, default=None):
result = milestones.get(name, default)
loglist.append((name, result))
return result
def facts_get(name, default=None):
result = facts.get(name, default)
loglist.append((name, result))
return result
def params_get(name, default=None):
result = params.get(name, default)
loglist.append((name, result))
return result
# ==================================================
# define
# --------------------------------------------------
xeq_name_cnt = 0
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
loglist.append('CHECK PARAMS')
latex_file = milestones_get('latex_file')
if not (latex_file):
exitcode = 22
if exitcode == CONTINUE:
loglist.append('PARAMS are ok')
else:
loglist.append('PROBLEMS with params')
if CONTINUE != 0:
loglist.append({'CONTINUE': CONTINUE})
loglist.append('NOTHING to do')
# ==================================================
# work
# --------------------------------------------------
if exitcode == CONTINUE:
latex_make_file = os.path.join(os.path.split(latex_file)[0], 'Makefile')
import subprocess
def cmdline(cmd, cwd=None):
if cwd is None:
cwd = os.getcwd()
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=cwd)
out, err = process.communicate()
exitcode = process.returncode
return exitcode, cmd, out, err
destfile = latex_make_file
# a list of pairs for textreplacements to be done in latex
# sed -i"" 's/pdflatex /pdflatex -interaction=nonstopmode -halt-on-error /' $BUILDDIR/latex/Makefile
#-interaction=STRING set interaction mode (STRING=batchmode/nonstopmode/scrollmode/errorstopmode)
sed_replacements = [(r'PDFLATEX = pdflatex', r'PDFLATEX = pdflatex -interaction=nonstopmode -halt-on-error ')]
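    # After the replacement the Makefile line should read:
    #   PDFLATEX = pdflatex -interaction=nonstopmode -halt-on-error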
for searchstring, replacement in sed_replacements:
if exitcode != CONTINUE:
break
x = searchstring
x = searchstring.replace(r'~', r'\~')
y = replacement
y = replacement.replace(r'~', r'\~')
cmdlist = [
'sed',
'--in-place',
"'s~%s~%s~'" % (x, y),
destfile
]
exitcode, cmd, out, err = cmdline(' '.join(cmdlist))
loglist.append([exitcode, cmd, out, err])
# ==================================================
# Set MILESTONE
# --------------------------------------------------
if exitcode == CONTINUE:
builds_successful = milestones.get('builds_successful', [])
builds_successful.append('latex')
result['MILESTONES'].append({
'latex_make_file': latex_make_file,
'latex_make_file_tweaked': True,
'builds_successful': builds_successful,
})
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
|
the-stack_0_3168 | import datetime
from json.decoder import JSONDecodeError
import python_http_client.exceptions
from twilio.base.exceptions import TwilioException
import tornado
from tornado.ioloop import IOLoop
import io
import math
from dateutil.parser import isoparse
from sqlalchemy.orm import joinedload
from sqlalchemy import func, or_, tuple_
import arrow
from marshmallow import Schema, fields
from marshmallow.exceptions import ValidationError
import functools
import healpix_alchemy as ha
from baselayer.app.access import permissions, auth_or_token
from baselayer.app.env import load_env
from baselayer.app.model_util import recursive_to_dict
from ..base import BaseHandler
from ...models import (
DBSession,
Allocation,
Annotation,
Comment,
Instrument,
Obj,
Source,
Token,
Photometry,
Group,
FollowupRequest,
ClassicalAssignment,
ObservingRun,
SourceNotification,
Classification,
Taxonomy,
Listing,
Spectrum,
SourceView,
)
from ...utils.offset import (
get_nearby_offset_stars,
facility_parameters,
source_image_parameters,
get_finding_chart,
_calculate_best_position_for_offset_stars,
)
from .candidate import grab_query_results, update_redshift_history_if_relevant
from .photometry import serialize
from .color_mag import get_color_mag
SOURCES_PER_PAGE = 100
_, cfg = load_env()
def apply_active_or_requested_filtering(query, include_requested, requested_only):
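    # Narrow a Source query by save state:
    #   include_requested -> keep sources that are active OR requested
    #   requested_only    -> keep sources that are requested but not yet active
    #   otherwise         -> keep only active sources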
if include_requested:
query = query.filter(or_(Source.requested.is_(True), Source.active.is_(True)))
elif not requested_only:
query = query.filter(Source.active.is_(True))
if requested_only:
query = query.filter(Source.active.is_(False)).filter(
Source.requested.is_(True)
)
return query
def add_ps1_thumbnail_and_push_ws_msg(obj_id, request_handler):
try:
obj = Obj.get_if_accessible_by(obj_id, request_handler.current_user)
obj.add_ps1_thumbnail()
request_handler.push_all(
action="skyportal/REFRESH_SOURCE", payload={"obj_key": obj.internal_key}
)
request_handler.push_all(
action="skyportal/REFRESH_CANDIDATE", payload={"id": obj.internal_key}
)
except Exception as e:
return request_handler.error(f"Unable to generate PS1 thumbnail URL: {e}")
finally:
DBSession.remove()
class SourceHandler(BaseHandler):
@auth_or_token
def head(self, obj_id=None):
"""
---
single:
description: Check if a Source exists
tags:
- sources
parameters:
- in: path
name: obj_id
required: true
schema:
type: string
responses:
200:
content:
application/json:
schema: Success
404:
content:
application/json:
schema: Error
"""
user_group_ids = [g.id for g in self.associated_user_object.accessible_groups]
num_s = (
DBSession()
.query(Source)
.filter(Source.obj_id == obj_id)
.filter(Source.group_id.in_(user_group_ids))
.count()
)
self.verify_and_commit()
if num_s > 0:
return self.success()
else:
self.set_status(404)
self.finish()
@auth_or_token
def get(self, obj_id=None):
"""
---
single:
description: Retrieve a source
tags:
- sources
parameters:
- in: path
name: obj_id
required: false
schema:
type: string
- in: query
name: includePhotometry
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include associated photometry. Defaults to
false.
- in: query
name: includeComments
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include comment metadata in response.
Defaults to false.
- in: query
name: includePhotometryExists
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to return if a source has any photometry points. Defaults to false.
- in: query
name: includeSpectrumExists
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to return if a source has a spectra. Defaults to false.
responses:
200:
content:
application/json:
schema: SingleObj
400:
content:
application/json:
schema: Error
multiple:
description: Retrieve all sources
tags:
- sources
parameters:
- in: query
name: ra
nullable: true
schema:
type: number
description: RA for spatial filtering (in decimal degrees)
- in: query
name: dec
nullable: true
schema:
type: number
description: Declination for spatial filtering (in decimal degrees)
- in: query
name: radius
nullable: true
schema:
type: number
description: Radius for spatial filtering if ra & dec are provided (in decimal degrees)
- in: query
name: sourceID
nullable: true
schema:
type: string
description: Portion of ID to filter on
- in: query
name: simbadClass
nullable: true
schema:
type: string
description: Simbad class to filter on
- in: query
name: hasTNSname
nullable: true
schema:
type: boolean
description: If true, return only those matches with TNS names
- in: query
name: numPerPage
nullable: true
schema:
type: integer
description: |
Number of sources to return per paginated request. Defaults to 100. Max 1000.
- in: query
name: pageNumber
nullable: true
schema:
type: integer
description: Page number for paginated query results. Defaults to 1
- in: query
name: totalMatches
nullable: true
schema:
type: integer
description: |
Used only in the case of paginating query results - if provided, this
allows for avoiding a potentially expensive query.count() call.
- in: query
name: startDate
nullable: true
schema:
type: string
description: |
Arrow-parseable date string (e.g. 2020-01-01). If provided, filter by
last_detected_at >= startDate
- in: query
name: endDate
nullable: true
schema:
type: string
description: |
Arrow-parseable date string (e.g. 2020-01-01). If provided, filter by
last_detected_at <= endDate
- in: query
name: listName
nullable: true
schema:
type: string
description: |
Get only sources saved to the querying user's list, e.g., "favorites".
- in: query
name: group_ids
nullable: true
schema:
type: list
items:
type: integer
description: |
If provided, filter only sources saved to one of these group IDs.
- in: query
name: includePhotometry
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include associated photometry. Defaults to
false.
- in: query
name: includeColorMagnitude
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include the color-magnitude data from Gaia.
This will only include data for objects that have an annotation
with the appropriate format: a key named Gaia that contains a dictionary
with keys named Mag_G, Mag_Bp, Mag_Rp, and Plx
(underscores and case are ignored when matching all the above keys).
The result is saved in a field named 'color_magnitude'.
If no data is available, returns an empty array.
Defaults to false (do not search for nor include this info).
- in: query
name: includeRequested
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include requested saves. Defaults to
false.
- in: query
name: pendingOnly
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to only include requested/pending saves.
Defaults to false.
- in: query
name: savedBefore
nullable: true
schema:
type: string
description: |
Only return sources that were saved before this UTC datetime.
- in: query
name: savedAfter
nullable: true
schema:
type: string
description: |
Only return sources that were saved after this UTC datetime.
- in: query
name: saveSummary
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to only return the source save
information in the response (defaults to false). If true,
the response will contain a list of dicts with the following
schema under `response['data']['sources']`:
```
{
"group_id": 2,
"created_at": "2020-11-13T22:11:25.910271",
"saved_by_id": 1,
"saved_at": "2020-11-13T22:11:25.910271",
"requested": false,
"unsaved_at": null,
"modified": "2020-11-13T22:11:25.910271",
"obj_id": "16fil",
"active": true,
"unsaved_by_id": null
}
```
- in: query
name: sortBy
nullable: true
schema:
type: string
description: |
The field to sort by. Currently allowed options are ["id", "ra", "dec", "redshift", "saved_at"]
- in: query
name: sortOrder
nullable: true
schema:
type: string
description: |
The sort order - either "asc" or "desc". Defaults to "asc"
- in: query
name: includeComments
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to include comment metadata in response.
Defaults to false.
- in: query
name: includePhotometryExists
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to return if a source has any photometry points. Defaults to false.
- in: query
name: includeSpectrumExists
nullable: true
schema:
type: boolean
description: |
Boolean indicating whether to return if a source has a spectra. Defaults to false.
- in: query
name: classifications
nullable: true
schema:
type: array
items:
type: string
explode: false
style: simple
description: |
Comma-separated string of "taxonomy: classification" pair(s) to filter for sources matching
that/those classification(s), i.e. "Sitewide Taxonomy: Type II, Sitewide Taxonomy: AGN"
- in: query
name: minRedshift
nullable: true
schema:
type: number
description: |
If provided, return only sources with a redshift of at least this value
- in: query
name: maxRedshift
nullable: true
schema:
type: number
description: |
If provided, return only sources with a redshift of at most this value
- in: query
name: minPeakMagnitude
nullable: true
schema:
type: number
description: |
If provided, return only sources with a peak photometry magnitude of at least this value
- in: query
name: maxPeakMagnitude
nullable: true
schema:
type: number
description: |
If provided, return only sources with a peak photometry magnitude of at most this value
- in: query
name: minLatestMagnitude
nullable: true
schema:
type: number
description: |
If provided, return only sources whose latest photometry magnitude is at least this value
- in: query
name: maxLatestMagnitude
nullable: true
schema:
type: number
description: |
If provided, return only sources whose latest photometry magnitude is at most this value
- in: query
name: hasSpectrum
nullable: true
schema:
type: boolean
description: If true, return only those matches with at least one associated spectrum
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
sources:
type: array
items:
$ref: '#/components/schemas/Obj'
totalMatches:
type: integer
pageNumber:
type: integer
numPerPage:
type: integer
400:
content:
application/json:
schema: Error
"""
page_number = self.get_query_argument('pageNumber', None)
num_per_page = min(
int(self.get_query_argument("numPerPage", SOURCES_PER_PAGE)), 100
)
ra = self.get_query_argument('ra', None)
dec = self.get_query_argument('dec', None)
radius = self.get_query_argument('radius', None)
start_date = self.get_query_argument('startDate', None)
end_date = self.get_query_argument('endDate', None)
list_name = self.get_query_argument('listName', None)
sourceID = self.get_query_argument('sourceID', None) # Partial ID to match
include_photometry = self.get_query_argument("includePhotometry", False)
include_color_mag = self.get_query_argument("includeColorMagnitude", False)
include_requested = self.get_query_argument("includeRequested", False)
requested_only = self.get_query_argument("pendingOnly", False)
saved_after = self.get_query_argument('savedAfter', None)
saved_before = self.get_query_argument('savedBefore', None)
save_summary = self.get_query_argument('saveSummary', False)
sort_by = self.get_query_argument("sortBy", None)
sort_order = self.get_query_argument("sortOrder", "asc")
include_comments = self.get_query_argument("includeComments", False)
include_photometry_exists = self.get_query_argument(
"includePhotometryExists", False
)
include_spectrum_exists = self.get_query_argument(
"includeSpectrumExists", False
)
classifications = self.get_query_argument("classifications", None)
min_redshift = self.get_query_argument("minRedshift", None)
max_redshift = self.get_query_argument("maxRedshift", None)
min_peak_magnitude = self.get_query_argument("minPeakMagnitude", None)
max_peak_magnitude = self.get_query_argument("maxPeakMagnitude", None)
min_latest_magnitude = self.get_query_argument("minLatestMagnitude", None)
max_latest_magnitude = self.get_query_argument("maxLatestMagnitude", None)
has_spectrum = self.get_query_argument("hasSpectrum", False)
# These are just throwaway helper classes to help with deserialization
class UTCTZnaiveDateTime(fields.DateTime):
"""
DateTime object that deserializes both timezone aware iso8601
strings and naive iso8601 strings into naive datetime objects
in utc
See discussion in https://github.com/Scille/umongo/issues/44#issuecomment-244407236
"""
def _deserialize(self, value, attr, data, **kwargs):
value = super()._deserialize(value, attr, data, **kwargs)
if value and value.tzinfo:
value = (value - value.utcoffset()).replace(tzinfo=None)
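                    # e.g. "2020-11-13T22:11:25+02:00" -> naive datetime 2020-11-13 20:11:25 (UTC)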
return value
class Validator(Schema):
saved_after = UTCTZnaiveDateTime(required=False, missing=None)
saved_before = UTCTZnaiveDateTime(required=False, missing=None)
save_summary = fields.Boolean()
validator_instance = Validator()
params_to_be_validated = {}
if saved_after is not None:
params_to_be_validated['saved_after'] = saved_after
if saved_before is not None:
params_to_be_validated['saved_before'] = saved_before
if save_summary is not None:
params_to_be_validated['save_summary'] = save_summary
try:
validated = validator_instance.load(params_to_be_validated)
except ValidationError as e:
return self.error(f'Error parsing query params: {e.args[0]}.')
saved_after = validated['saved_after']
saved_before = validated['saved_before']
save_summary = validated['save_summary']
# parse the group ids:
group_ids = self.get_query_argument('group_ids', None)
if group_ids is not None:
try:
group_ids = [int(gid) for gid in group_ids.split(',')]
except ValueError:
return self.error(
f'Invalid group ids field ({group_ids}; Could not parse all elements to integers'
)
user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
simbad_class = self.get_query_argument('simbadClass', None)
has_tns_name = self.get_query_argument('hasTNSname', None)
total_matches = self.get_query_argument('totalMatches', None)
is_token_request = isinstance(self.current_user, Token)
if obj_id is not None:
s = Obj.get_if_accessible_by(
obj_id, self.current_user, options=[joinedload(Obj.thumbnails)]
)
if s is None:
return self.error("Source not found", status=404)
source_info = s.to_dict()
source_info["followup_requests"] = (
FollowupRequest.query_records_accessible_by(
self.current_user,
options=[
joinedload(FollowupRequest.allocation).joinedload(
Allocation.instrument
),
joinedload(FollowupRequest.allocation).joinedload(
Allocation.group
),
joinedload(FollowupRequest.requester),
],
)
.filter(FollowupRequest.obj_id == obj_id)
.filter(FollowupRequest.status != "deleted")
.all()
)
source_info["assignments"] = (
ClassicalAssignment.query_records_accessible_by(
self.current_user,
options=[
joinedload(ClassicalAssignment.run)
.joinedload(ObservingRun.instrument)
.joinedload(Instrument.telescope)
],
)
.filter(ClassicalAssignment.obj_id == obj_id)
.all()
)
if is_token_request:
# Logic determining whether to register front-end request as view lives in front-end
sv = SourceView(
obj_id=obj_id,
username_or_token_id=self.current_user.id,
is_token=True,
)
DBSession.add(sv)
# To keep loaded relationships from being cleared in verify_and_commit:
source_info = recursive_to_dict(source_info)
self.verify_and_commit()
if "ps1" not in [thumb.type for thumb in s.thumbnails]:
IOLoop.current().add_callback(
lambda: add_ps1_thumbnail_and_push_ws_msg(obj_id, self)
)
if include_comments:
comments = (
Comment.query_records_accessible_by(
self.current_user,
options=[
joinedload(Comment.author),
joinedload(Comment.groups),
],
)
.filter(Comment.obj_id == obj_id)
.all()
)
source_info["comments"] = sorted(
[
{
**{
k: v
for k, v in c.to_dict().items()
if k != "attachment_bytes"
},
"author": {
**c.author.to_dict(),
"gravatar_url": c.author.gravatar_url,
},
}
for c in comments
],
key=lambda x: x["created_at"],
reverse=True,
)
source_info["annotations"] = sorted(
Annotation.query_records_accessible_by(
self.current_user, options=[joinedload(Annotation.author)]
)
.filter(Annotation.obj_id == obj_id)
.all(),
key=lambda x: x.origin,
)
readable_classifications = (
Classification.query_records_accessible_by(self.current_user)
.filter(Classification.obj_id == obj_id)
.all()
)
readable_classifications_json = []
for classification in readable_classifications:
classification_dict = classification.to_dict()
classification_dict['groups'] = [
g.to_dict() for g in classification.groups
]
readable_classifications_json.append(classification_dict)
source_info["classifications"] = readable_classifications_json
source_info["last_detected_at"] = s.last_detected_at(self.current_user)
source_info["last_detected_mag"] = s.last_detected_mag(self.current_user)
source_info["peak_detected_at"] = s.peak_detected_at(self.current_user)
source_info["peak_detected_mag"] = s.peak_detected_mag(self.current_user)
source_info["gal_lat"] = s.gal_lat_deg
source_info["gal_lon"] = s.gal_lon_deg
source_info["luminosity_distance"] = s.luminosity_distance
source_info["dm"] = s.dm
source_info["angular_diameter_distance"] = s.angular_diameter_distance
if include_photometry:
photometry = (
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == obj_id)
.all()
)
source_info["photometry"] = [
serialize(phot, 'ab', 'flux') for phot in photometry
]
if include_photometry_exists:
source_info["photometry_exists"] = (
len(
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == obj_id)
.all()
)
> 0
)
if include_spectrum_exists:
source_info["spectrum_exists"] = (
len(
Spectrum.query_records_accessible_by(self.current_user)
.filter(Spectrum.obj_id == obj_id)
.all()
)
> 0
)
source_query = Source.query_records_accessible_by(self.current_user).filter(
Source.obj_id == source_info["id"]
)
source_query = apply_active_or_requested_filtering(
source_query, include_requested, requested_only
)
source_subquery = source_query.subquery()
groups = (
Group.query_records_accessible_by(self.current_user)
.join(source_subquery, Group.id == source_subquery.c.group_id)
.all()
)
source_info["groups"] = [g.to_dict() for g in groups]
for group in source_info["groups"]:
source_table_row = (
Source.query_records_accessible_by(self.current_user)
.filter(Source.obj_id == s.id, Source.group_id == group["id"])
.first()
)
if source_table_row is not None:
group["active"] = source_table_row.active
group["requested"] = source_table_row.requested
group["saved_at"] = source_table_row.saved_at
group["saved_by"] = (
source_table_row.saved_by.to_dict()
if source_table_row.saved_by is not None
else None
)
if include_color_mag:
source_info["color_magnitude"] = get_color_mag(
source_info["annotations"]
)
source_info = recursive_to_dict(source_info)
self.verify_and_commit()
return self.success(data=source_info)
# Fetch multiple sources
obj_query_options = [joinedload(Obj.thumbnails)]
obj_query = Obj.query_records_accessible_by(
self.current_user, options=obj_query_options
)
source_query = Source.query_records_accessible_by(self.current_user)
if list_name:
listing_subquery = Listing.query_records_accessible_by(
self.current_user
).subquery()
obj_query = obj_query.join(
listing_subquery, Obj.id == listing_subquery.c.obj_id
)
if classifications is not None or sort_by == "classification":
classification_subquery = Classification.query_records_accessible_by(
self.current_user
)
if classifications is not None:
taxonomy_subquery = Taxonomy.query_records_accessible_by(
self.current_user
).subquery()
classification_subquery = classification_subquery.join(
taxonomy_subquery,
Classification.taxonomy_id == taxonomy_subquery.c.id,
)
classification_subquery = classification_subquery.subquery()
obj_query = obj_query.join(
classification_subquery,
Obj.id == classification_subquery.c.obj_id,
isouter=True,
)
if sourceID:
obj_query = obj_query.filter(Obj.id.contains(sourceID.strip()))
if any([ra, dec, radius]):
if not all([ra, dec, radius]):
return self.error(
"If any of 'ra', 'dec' or 'radius' are "
"provided, all three are required."
)
try:
ra = float(ra)
dec = float(dec)
radius = float(radius)
except ValueError:
return self.error(
"Invalid values for ra, dec or radius - could not convert to float"
)
other = ha.Point(ra=ra, dec=dec)
obj_query = obj_query.filter(Obj.within(other, radius))
if start_date:
start_date = arrow.get(start_date.strip()).datetime
obj_query = obj_query.filter(
Obj.last_detected_at(self.current_user) >= start_date
)
if end_date:
end_date = arrow.get(end_date.strip()).datetime
obj_query = obj_query.filter(
Obj.last_detected_at(self.current_user) <= end_date
)
if saved_before:
source_query = source_query.filter(Source.saved_at <= saved_before)
if saved_after:
source_query = source_query.filter(Source.saved_at >= saved_after)
if list_name:
obj_query = obj_query.filter(
listing_subquery.c.list_name == list_name,
listing_subquery.c.user_id == self.associated_user_object.id,
)
if simbad_class:
obj_query = obj_query.filter(
func.lower(Obj.altdata['simbad']['class'].astext)
== simbad_class.lower()
)
if has_tns_name in ['true', True]:
obj_query = obj_query.filter(Obj.altdata['tns']['name'].isnot(None))
if has_spectrum in ["true", True]:
spectrum_subquery = Spectrum.query_records_accessible_by(
self.current_user
).subquery()
obj_query = obj_query.join(
spectrum_subquery, Obj.id == spectrum_subquery.c.obj_id
)
if min_redshift is not None:
try:
min_redshift = float(min_redshift)
except ValueError:
return self.error(
"Invalid values for minRedshift - could not convert to float"
)
obj_query = obj_query.filter(Obj.redshift >= min_redshift)
if max_redshift is not None:
try:
max_redshift = float(max_redshift)
except ValueError:
return self.error(
"Invalid values for maxRedshift - could not convert to float"
)
obj_query = obj_query.filter(Obj.redshift <= max_redshift)
if min_peak_magnitude is not None:
try:
min_peak_magnitude = float(min_peak_magnitude)
except ValueError:
return self.error(
"Invalid values for minPeakMagnitude - could not convert to float"
)
obj_query = obj_query.filter(
Obj.peak_detected_mag(self.current_user) >= min_peak_magnitude
)
if max_peak_magnitude is not None:
try:
max_peak_magnitude = float(max_peak_magnitude)
except ValueError:
return self.error(
"Invalid values for maxPeakMagnitude - could not convert to float"
)
obj_query = obj_query.filter(
Obj.peak_detected_mag(self.current_user) <= max_peak_magnitude
)
if min_latest_magnitude is not None:
try:
min_latest_magnitude = float(min_latest_magnitude)
except ValueError:
return self.error(
"Invalid values for minLatestMagnitude - could not convert to float"
)
obj_query = obj_query.filter(
Obj.last_detected_mag(self.current_user) >= min_latest_magnitude
)
if max_latest_magnitude is not None:
try:
max_latest_magnitude = float(max_latest_magnitude)
except ValueError:
return self.error(
"Invalid values for maxLatestMagnitude - could not convert to float"
)
obj_query = obj_query.filter(
Obj.last_detected_mag(self.current_user) <= max_latest_magnitude
)
if classifications is not None:
if isinstance(classifications, str) and "," in classifications:
classifications = [c.strip() for c in classifications.split(",")]
elif isinstance(classifications, str):
classifications = [classifications]
else:
return self.error(
"Invalid classifications value -- must provide at least one string value"
)
# Parse into tuples of taxonomy: classification
classifications = list(
map(
lambda c: (c.split(":")[0].strip(), c.split(":")[1].strip()),
classifications,
)
)
obj_query = obj_query.filter(
tuple_(
taxonomy_subquery.c.name, classification_subquery.c.classification
).in_(classifications)
)
source_query = apply_active_or_requested_filtering(
source_query, include_requested, requested_only
)
if group_ids is not None:
if not all(gid in user_accessible_group_ids for gid in group_ids):
return self.error(
f"One of the requested groups in '{group_ids}' is inaccessible to user."
)
source_query = source_query.filter(Source.group_id.in_(group_ids))
source_subquery = source_query.subquery()
query = obj_query.join(source_subquery, Obj.id == source_subquery.c.obj_id)
order_by = None
if sort_by is not None:
if sort_by == "id":
order_by = [Obj.id] if sort_order == "asc" else [Obj.id.desc()]
elif sort_by == "ra":
order_by = (
[Obj.ra.nullslast()]
if sort_order == "asc"
else [Obj.ra.desc().nullslast()]
)
elif sort_by == "dec":
order_by = (
[Obj.dec.nullslast()]
if sort_order == "asc"
else [Obj.dec.desc().nullslast()]
)
elif sort_by == "redshift":
order_by = (
[Obj.redshift.nullslast()]
if sort_order == "asc"
else [Obj.redshift.desc().nullslast()]
)
elif sort_by == "saved_at":
order_by = (
[source_subquery.c.saved_at]
if sort_order == "asc"
else [source_subquery.c.saved_at.desc()]
)
elif sort_by == "classification":
order_by = (
[classification_subquery.c.classification.nullslast()]
if sort_order == "asc"
else [classification_subquery.c.classification.desc().nullslast()]
)
if page_number:
try:
page_number = int(page_number)
except ValueError:
return self.error("Invalid page number value.")
try:
query_results = grab_query_results(
query,
total_matches,
page_number,
num_per_page,
"sources",
order_by=order_by,
)
except ValueError as e:
if "Page number out of range" in str(e):
return self.error("Page number out of range.")
raise
elif save_summary:
query_results = {"sources": source_query.all()}
else:
query_results = grab_query_results(
query,
total_matches,
None,
None,
"sources",
order_by=order_by,
)
if not save_summary:
# Records are Objs, not Sources
obj_list = []
for obj in query_results["sources"]:
obj_list.append(obj.to_dict())
if include_comments:
obj_list[-1]["comments"] = sorted(
[
{
k: v
for k, v in c.to_dict().items()
if k != "attachment_bytes"
}
for c in Comment.query_records_accessible_by(
self.current_user
)
.filter(Comment.obj_id == obj.id)
.all()
],
key=lambda x: x["created_at"],
reverse=True,
)
readable_classifications = (
Classification.query_records_accessible_by(self.current_user)
.filter(Classification.obj_id == obj.id)
.all()
)
readable_classifications_json = []
for classification in readable_classifications:
classification_dict = classification.to_dict()
classification_dict['groups'] = [
g.to_dict() for g in classification.groups
]
readable_classifications_json.append(classification_dict)
obj_list[-1]["classifications"] = readable_classifications_json
obj_list[-1]["annotations"] = sorted(
Annotation.query_records_accessible_by(self.current_user).filter(
Annotation.obj_id == obj.id
),
key=lambda x: x.origin,
)
obj_list[-1]["last_detected_at"] = obj.last_detected_at(
self.current_user
)
obj_list[-1]["last_detected_mag"] = obj.last_detected_mag(
self.current_user
)
obj_list[-1]["peak_detected_at"] = obj.peak_detected_at(
self.current_user
)
obj_list[-1]["peak_detected_mag"] = obj.peak_detected_mag(
self.current_user
)
obj_list[-1]["gal_lon"] = obj.gal_lon_deg
obj_list[-1]["gal_lat"] = obj.gal_lat_deg
obj_list[-1]["luminosity_distance"] = obj.luminosity_distance
obj_list[-1]["dm"] = obj.dm
obj_list[-1][
"angular_diameter_distance"
] = obj.angular_diameter_distance
if include_photometry:
photometry = Photometry.query_records_accessible_by(
self.current_user
).filter(Photometry.obj_id == obj.id)
obj_list[-1]["photometry"] = [
serialize(phot, 'ab', 'flux') for phot in photometry
]
if include_photometry_exists:
obj_list[-1]["photometry_exists"] = (
len(
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == obj.id)
.all()
)
> 0
)
if include_spectrum_exists:
obj_list[-1]["spectrum_exists"] = (
len(
Spectrum.query_records_accessible_by(self.current_user)
.filter(Spectrum.obj_id == obj.id)
.all()
)
> 0
)
source_query = Source.query_records_accessible_by(
self.current_user
).filter(Source.obj_id == obj_list[-1]["id"])
source_query = apply_active_or_requested_filtering(
source_query, include_requested, requested_only
)
source_subquery = source_query.subquery()
groups = (
Group.query_records_accessible_by(self.current_user)
.join(source_subquery, Group.id == source_subquery.c.group_id)
.all()
)
obj_list[-1]["groups"] = [g.to_dict() for g in groups]
for group in obj_list[-1]["groups"]:
source_table_row = (
Source.query_records_accessible_by(self.current_user)
.filter(
Source.obj_id == obj_list[-1]["id"],
Source.group_id == group["id"],
)
.first()
)
if source_table_row is not None:
group["active"] = source_table_row.active
group["requested"] = source_table_row.requested
group["saved_at"] = source_table_row.saved_at
group["saved_by"] = (
source_table_row.saved_by.to_dict()
if source_table_row.saved_by is not None
else None
)
if include_color_mag:
obj_list[-1]["color_magnitude"] = get_color_mag(
obj_list[-1]["annotations"]
)
query_results["sources"] = obj_list
query_results = recursive_to_dict(query_results)
self.verify_and_commit()
return self.success(data=query_results)
@permissions(['Upload data'])
def post(self):
"""
---
description: Add a new source
tags:
- sources
requestBody:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/ObjPost'
- type: object
properties:
group_ids:
type: array
items:
type: integer
description: |
List of associated group IDs. If not specified, all of the
user or token's groups will be used.
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
id:
type: string
description: New source ID
"""
data = self.get_json()
obj_already_exists = (
Obj.get_if_accessible_by(data["id"], self.current_user) is not None
)
schema = Obj.__schema__()
ra = data.get('ra', None)
dec = data.get('dec', None)
if ra is None and not obj_already_exists:
return self.error("RA must not be null for a new Obj")
if dec is None and not obj_already_exists:
return self.error("Dec must not be null for a new Obj")
user_group_ids = [g.id for g in self.current_user.groups]
user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
if not user_group_ids:
return self.error(
"You must belong to one or more groups before " "you can add sources."
)
try:
group_ids = [
int(id)
for id in data.pop('group_ids')
if int(id) in user_accessible_group_ids
]
except KeyError:
group_ids = user_group_ids
if not group_ids:
return self.error(
"Invalid group_ids field. Please specify at least "
"one valid group ID that you belong to."
)
try:
obj = schema.load(data)
except ValidationError as e:
return self.error(
'Invalid/missing parameters: ' f'{e.normalized_messages()}'
)
groups = (
Group.query_records_accessible_by(self.current_user)
.filter(Group.id.in_(group_ids))
.all()
)
if not groups:
return self.error(
"Invalid group_ids field. Please specify at least "
"one valid group ID that you belong to."
)
update_redshift_history_if_relevant(data, obj, self.associated_user_object)
DBSession().add(obj)
for group in groups:
source = (
Source.query_records_accessible_by(self.current_user)
.filter(Source.obj_id == obj.id)
.filter(Source.group_id == group.id)
.first()
)
if source is not None:
source.active = True
source.saved_by = self.associated_user_object
else:
DBSession().add(
Source(
obj=obj, group=group, saved_by_id=self.associated_user_object.id
)
)
self.verify_and_commit()
if not obj_already_exists:
obj.add_linked_thumbnails()
self.push_all(
action="skyportal/REFRESH_SOURCE", payload={"obj_key": obj.internal_key}
)
self.push_all(
action="skyportal/REFRESH_CANDIDATE", payload={"id": obj.internal_key}
)
return self.success(data={"id": obj.id})
@permissions(['Upload data'])
def patch(self, obj_id):
"""
---
description: Update a source
tags:
- sources
parameters:
- in: path
name: obj_id
required: True
schema:
type: string
requestBody:
content:
application/json:
schema: ObjNoID
responses:
200:
content:
application/json:
schema: Success
400:
content:
application/json:
schema: Error
"""
data = self.get_json()
data['id'] = obj_id
schema = Obj.__schema__()
try:
obj = schema.load(data)
except ValidationError as e:
return self.error(
'Invalid/missing parameters: ' f'{e.normalized_messages()}'
)
update_redshift_history_if_relevant(data, obj, self.associated_user_object)
self.verify_and_commit()
self.push_all(
action="skyportal/REFRESH_SOURCE",
payload={"obj_key": obj.internal_key},
)
return self.success(action='skyportal/FETCH_SOURCES')
@permissions(['Manage sources'])
def delete(self, obj_id, group_id):
"""
---
description: Delete a source
tags:
- sources
parameters:
- in: path
name: obj_id
required: true
schema:
type: string
- in: path
name: group_id
required: true
schema:
type: string
responses:
200:
content:
application/json:
schema: Success
"""
if group_id not in [g.id for g in self.current_user.accessible_groups]:
return self.error("Inadequate permissions.")
s = (
Source.query_records_accessible_by(self.current_user, mode="update")
.filter(Source.obj_id == obj_id)
.filter(Source.group_id == group_id)
.first()
)
s.active = False
s.unsaved_by = self.current_user
self.verify_and_commit()
return self.success(action='skyportal/FETCH_SOURCES')
class SourceOffsetsHandler(BaseHandler):
@auth_or_token
async def get(self, obj_id):
"""
---
description: Retrieve offset stars to aid in spectroscopy
tags:
- sources
parameters:
- in: path
name: obj_id
required: true
schema:
type: string
- in: query
name: facility
nullable: true
schema:
type: string
enum: [Keck, Shane, P200]
description: Which facility to generate the starlist for
- in: query
name: num_offset_stars
nullable: true
schema:
type: integer
minimum: 0
maximum: 10
description: |
Requested number of offset stars (set to zero to get starlist
of just the source itself)
- in: query
name: obstime
nullable: True
schema:
type: string
description: |
datetime of observation in isoformat (e.g. 2020-12-30T12:34:10)
- in: query
name: use_ztfref
required: false
schema:
type: boolean
description: |
Use ZTFref catalog for offset star positions, otherwise Gaia DR2
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
facility:
type: string
enum: [Keck, Shane, P200]
description: Facility queried for starlist
starlist_str:
type: string
description: formatted starlist in facility format
starlist_info:
type: array
description: |
list of source and offset star information
items:
type: object
properties:
str:
type: string
description: single-line starlist format per object
ra:
type: number
format: float
description: object RA in degrees (J2000)
dec:
type: number
format: float
description: object DEC in degrees (J2000)
name:
type: string
description: object name
dras:
type: string
description: offset from object to source in RA
ddecs:
type: string
description: offset from object to source in DEC
mag:
type: number
format: float
description: |
magnitude of object (from
Gaia phot_rp_mean_mag)
ra:
type: number
format: float
description: source RA in degrees (J2000)
dec:
type: number
format: float
description: source DEC in degrees (J2000)
queries_issued:
type: integer
description: |
Number of times the catalog was queried to find
noffsets
noffsets:
type: integer
description: |
Number of suitable offset stars found (may be less)
than requested
query:
type: string
description: SQL query submitted to Gaia
400:
content:
application/json:
schema: Error
"""
source = Obj.get_if_accessible_by(obj_id, self.current_user)
if source is None:
return self.error('Source not found', status=404)
initial_pos = (source.ra, source.dec)
try:
best_ra, best_dec = _calculate_best_position_for_offset_stars(
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == source.id)
.all(),
fallback=(initial_pos[0], initial_pos[1]),
how="snr2",
)
except JSONDecodeError:
self.push_notification(
'Source position using photometry points failed.'
' Reverting to discovery position.'
)
best_ra, best_dec = initial_pos[0], initial_pos[1]
facility = self.get_query_argument('facility', 'Keck')
num_offset_stars = self.get_query_argument('num_offset_stars', '3')
use_ztfref = self.get_query_argument('use_ztfref', True)
if isinstance(use_ztfref, str):
use_ztfref = use_ztfref in ['t', 'True', 'true', 'yes', 'y']
obstime = self.get_query_argument(
'obstime', datetime.datetime.utcnow().isoformat()
)
if not isinstance(isoparse(obstime), datetime.datetime):
return self.error('obstime is not valid isoformat')
if facility not in facility_parameters:
return self.error('Invalid facility')
radius_degrees = facility_parameters[facility]["radius_degrees"]
mag_limit = facility_parameters[facility]["mag_limit"]
min_sep_arcsec = facility_parameters[facility]["min_sep_arcsec"]
mag_min = facility_parameters[facility]["mag_min"]
try:
num_offset_stars = int(num_offset_stars)
except ValueError:
# could not handle inputs
return self.error('Invalid argument for `num_offset_stars`')
offset_func = functools.partial(
get_nearby_offset_stars,
best_ra,
best_dec,
obj_id,
how_many=num_offset_stars,
radius_degrees=radius_degrees,
mag_limit=mag_limit,
min_sep_arcsec=min_sep_arcsec,
starlist_type=facility,
mag_min=mag_min,
obstime=obstime,
allowed_queries=2,
use_ztfref=use_ztfref,
)
try:
(
starlist_info,
query_string,
queries_issued,
noffsets,
used_ztfref,
) = await IOLoop.current().run_in_executor(None, offset_func)
except ValueError:
return self.error("Error querying for nearby offset stars")
starlist_str = "\n".join(
[x["str"].replace(" ", " ") for x in starlist_info]
)
self.verify_and_commit()
return self.success(
data={
'facility': facility,
'starlist_str': starlist_str,
'starlist_info': starlist_info,
'ra': source.ra,
'dec': source.dec,
'noffsets': noffsets,
'queries_issued': queries_issued,
'query': query_string,
}
)
class SourceFinderHandler(BaseHandler):
@auth_or_token
async def get(self, obj_id):
"""
---
description: Generate a PDF/PNG finding chart to aid in spectroscopy
tags:
- sources
parameters:
- in: path
name: obj_id
required: true
schema:
type: string
- in: query
name: imsize
schema:
type: float
minimum: 2
maximum: 15
description: Image size in arcmin (square)
- in: query
name: facility
nullable: true
schema:
type: string
enum: [Keck, Shane, P200]
- in: query
name: image_source
nullable: true
schema:
type: string
enum: [desi, dss, ztfref]
description: Source of the image used in the finding chart
- in: query
name: use_ztfref
required: false
schema:
type: boolean
description: |
Use ZTFref catalog for offset star positions, otherwise DR2
- in: query
name: obstime
nullable: True
schema:
type: string
description: |
datetime of observation in isoformat (e.g. 2020-12-30T12:34:10)
- in: query
name: type
nullable: true
schema:
type: string
enum: [png, pdf]
description: |
output type
- in: query
name: num_offset_stars
schema:
type: integer
minimum: 0
maximum: 4
description: |
output desired number of offset stars [0,5] (default: 3)
responses:
200:
description: A PDF/PNG finding chart file
content:
application/pdf:
schema:
type: string
format: binary
image/png:
schema:
type: string
format: binary
400:
content:
application/json:
schema: Error
"""
source = Obj.get_if_accessible_by(obj_id, self.current_user)
if source is None:
return self.error('Source not found', status=404)
output_type = self.get_query_argument('type', 'pdf')
if output_type not in ["png", "pdf"]:
return self.error(f'Invalid argument for `type`: {output_type}')
imsize = self.get_query_argument('imsize', '4.0')
try:
imsize = float(imsize)
except ValueError:
# could not handle inputs
return self.error('Invalid argument for `imsize`')
if imsize < 2.0 or imsize > 15.0:
return self.error('The value for `imsize` is outside the allowed range')
initial_pos = (source.ra, source.dec)
try:
best_ra, best_dec = _calculate_best_position_for_offset_stars(
Photometry.query_records_accessible_by(self.current_user)
.filter(Photometry.obj_id == source.id)
.all(),
fallback=(initial_pos[0], initial_pos[1]),
how="snr2",
)
except JSONDecodeError:
self.push_notification(
'Source position using photometry points failed.'
' Reverting to discovery position.'
)
best_ra, best_dec = initial_pos[0], initial_pos[1]
facility = self.get_query_argument('facility', 'Keck')
image_source = self.get_query_argument('image_source', 'ztfref')
use_ztfref = self.get_query_argument('use_ztfref', True)
if isinstance(use_ztfref, str):
use_ztfref = use_ztfref in ['t', 'True', 'true', 'yes', 'y']
num_offset_stars = self.get_query_argument('num_offset_stars', '3')
try:
num_offset_stars = int(num_offset_stars)
except ValueError:
# could not handle inputs
return self.error('Invalid argument for `num_offset_stars`')
obstime = self.get_query_argument(
'obstime', datetime.datetime.utcnow().isoformat()
)
if not isinstance(isoparse(obstime), datetime.datetime):
return self.error('obstime is not valid isoformat')
if facility not in facility_parameters:
return self.error('Invalid facility')
if image_source not in source_image_parameters:
return self.error('Invalid source image')
radius_degrees = facility_parameters[facility]["radius_degrees"]
mag_limit = facility_parameters[facility]["mag_limit"]
min_sep_arcsec = facility_parameters[facility]["min_sep_arcsec"]
mag_min = facility_parameters[facility]["mag_min"]
finder = functools.partial(
get_finding_chart,
best_ra,
best_dec,
obj_id,
image_source=image_source,
output_format=output_type,
imsize=imsize,
how_many=num_offset_stars,
radius_degrees=radius_degrees,
mag_limit=mag_limit,
mag_min=mag_min,
min_sep_arcsec=min_sep_arcsec,
starlist_type=facility,
obstime=obstime,
use_source_pos_in_starlist=True,
allowed_queries=2,
queries_issued=0,
use_ztfref=use_ztfref,
)
self.push_notification(
'Finding chart generation in progress. Download will start soon.'
)
rez = await IOLoop.current().run_in_executor(None, finder)
filename = rez["name"]
image = io.BytesIO(rez["data"])
# Adapted from
# https://bhch.github.io/posts/2017/12/serving-large-files-with-tornado-safely-without-blocking/
mb = 1024 * 1024 * 1
chunk_size = 1 * mb
max_file_size = 15 * mb
if not (image.getbuffer().nbytes < max_file_size):
return self.error(
f"Refusing to send files larger than {max_file_size / mb:.2f} MB"
)
# do not send result via `.success`, since that creates a JSON
self.set_status(200)
if output_type == "pdf":
self.set_header("Content-Type", "application/pdf; charset='utf-8'")
self.set_header("Content-Disposition", f"attachment; filename={filename}")
else:
self.set_header("Content-type", f"image/{output_type}")
self.set_header(
'Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0'
)
self.verify_and_commit()
for i in range(math.ceil(max_file_size / chunk_size)):
chunk = image.read(chunk_size)
if not chunk:
break
try:
self.write(chunk) # write the chunk to response
await self.flush() # send the chunk to client
except tornado.iostream.StreamClosedError:
# this means the client has closed the connection
# so break the loop
break
finally:
# deleting the chunk is very important because
# if many clients are downloading files at the
# same time, the chunks in memory will keep
# increasing and will eat up the RAM
del chunk
# pause the coroutine so other handlers can run
await tornado.gen.sleep(1e-9) # 1 ns
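# Illustrative client-side sketch (not part of the original handler module):
# one way the streamed finding chart produced above might be consumed.  The
# base URL, endpoint path and token header below are assumptions for
# illustration only and may not match the actual route registration.
def _example_download_finding_chart():
    import requests  # assumed to be available in the client environment

    resp = requests.get(
        "http://localhost:5000/api/sources/ZTF21example/finder",  # hypothetical route
        params={"type": "png", "imsize": 4.0, "num_offset_stars": 3},
        headers={"Authorization": "token <api-token>"},  # hypothetical auth scheme
        stream=True,
    )
    resp.raise_for_status()
    with open("finder.png", "wb") as f:
        for chunk in resp.iter_content(chunk_size=1024 * 1024):
            f.write(chunk)  # mirror the server's 1 MB chunked streaming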
class SourceNotificationHandler(BaseHandler):
@auth_or_token
def post(self):
"""
---
description: Send out a new source notification
tags:
- notifications
requestBody:
content:
application/json:
schema:
type: object
properties:
additionalNotes:
type: string
description: |
Notes to append to the message sent out
groupIds:
type: array
items:
type: integer
description: |
List of IDs of groups whose members should get the notification (if they've opted in)
sourceId:
type: string
description: |
The ID of the Source's Obj the notification is being sent about
level:
type: string
description: |
Either 'soft' or 'hard', determines whether to send an email or email+SMS notification
required:
- groupIds
- sourceId
- level
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
id:
type: string
description: New SourceNotification ID
"""
if not cfg["notifications.enabled"]:
return self.error("Notifications are not enabled in current deployment.")
data = self.get_json()
additional_notes = data.get("additionalNotes")
if isinstance(additional_notes, str):
additional_notes = data["additionalNotes"].strip()
else:
if additional_notes is not None:
return self.error(
"Invalid parameter `additionalNotes`: should be a string"
)
if data.get("groupIds") is None:
return self.error("Missing required parameter `groupIds`")
try:
group_ids = [int(gid) for gid in data["groupIds"]]
except ValueError:
return self.error(
"Invalid value provided for `groupIDs`; unable to parse "
"all list items to integers."
)
groups = (
Group.query_records_accessible_by(self.current_user)
.filter(Group.id.in_(group_ids))
.all()
)
if data.get("sourceId") is None:
return self.error("Missing required parameter `sourceId`")
source = Obj.get_if_accessible_by(data["sourceId"], self.current_user)
if source is None:
return self.error('Source not found', status=404)
source_id = data["sourceId"]
source_group_ids = [
row[0]
for row in Source.query_records_accessible_by(
self.current_user, columns=[Source.group_id]
)
.filter(Source.obj_id == source_id)
.all()
]
if bool(set(group_ids).difference(set(source_group_ids))):
forbidden_groups = list(set(group_ids) - set(source_group_ids))
return self.error(
"Insufficient recipient group access permissions. Not a member of "
f"group IDs: {forbidden_groups}."
)
if data.get("level") is None:
return self.error("Missing required parameter `level`")
if data["level"] not in ["soft", "hard"]:
return self.error(
"Invalid value provided for `level`: should be either 'soft' or 'hard'"
)
level = data["level"]
new_notification = SourceNotification(
source_id=source_id,
groups=groups,
additional_notes=additional_notes,
sent_by=self.associated_user_object,
level=level,
)
DBSession().add(new_notification)
try:
self.verify_and_commit()
except python_http_client.exceptions.UnauthorizedError:
return self.error(
"Twilio Sendgrid authorization error. Please ensure "
"valid Sendgrid API key is set in server environment as "
"per their setup docs."
)
except TwilioException:
return self.error(
"Twilio Communication SMS API authorization error. Please ensure "
"valid Twilio API key is set in server environment as "
"per their setup docs."
)
return self.success(data={'id': new_notification.id})
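# Illustrative client-side sketch (not part of the original handler module):
# the request body fields below come straight from the docstring schema above
# (sourceId, groupIds, level, additionalNotes); the URL and auth header are
# assumptions for illustration only.
def _example_post_source_notification():
    import requests  # assumed to be available in the client environment

    payload = {
        "sourceId": "ZTF21example",  # Obj ID the notification is about
        "groupIds": [1, 2],  # recipient groups (members must have opted in)
        "level": "soft",  # 'soft' -> email only, 'hard' -> email + SMS
        "additionalNotes": "High-priority target for tonight.",
    }
    resp = requests.post(
        "http://localhost:5000/api/source_notifications",  # hypothetical route
        json=payload,
        headers={"Authorization": "token <api-token>"},  # hypothetical auth scheme
    )
    resp.raise_for_status()
    return resp.json()["data"]["id"]  # new SourceNotification ID per the docstring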
class PS1ThumbnailHandler(BaseHandler):
@auth_or_token
def post(self):
data = self.get_json()
obj_id = data.get("objID")
if obj_id is None:
return self.error("Missing required paramter objID")
IOLoop.current().add_callback(
lambda: add_ps1_thumbnail_and_push_ws_msg(obj_id, self)
)
return self.success()
|
the-stack_0_3169 | from __future__ import annotations
from functools import wraps
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Hashable,
Iterable,
List,
Sequence,
Tuple,
cast,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import (
algos as libalgos,
index as libindex,
lib,
)
from pandas._libs.hashtable import duplicated
from pandas._typing import (
AnyArrayLike,
DtypeObj,
Scalar,
Shape,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
InvalidIndexError,
PerformanceWarning,
UnsortedIndexError,
)
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import (
array_equivalent,
isna,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
from pandas.core.indexers import is_empty_indexer
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
ensure_index,
get_unanimous_names,
)
from pandas.core.indexes.frozen import FrozenList
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import (
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import (
CategoricalIndex,
DataFrame,
Series,
)
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
{"klass": "MultiIndex", "target_klass": "MultiIndex or list of tuples"}
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
        Transform combination(s) of uint64 into one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
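# Illustrative sketch (not part of the original module): the shift-and-OR
# packing described in _codes_to_ints above, on made-up data.  The offsets are
# hypothetical per-level shifts; in real use they are computed from the level
# sizes in MultiIndex._engine.
def _demo_uint_code_packing():
    import numpy as np

    offsets = np.array([3, 2, 0], dtype="uint64")  # hypothetical bit offsets
    codes = np.array([[1, 0, 2], [1, 1, 1]], dtype="uint64")  # one key per row
    shifted = codes << offsets  # move each level into its own bit range
    # With disjoint bit ranges, OR-ing the shifted codes equals summing them,
    # and the packed integers preserve the lexicographic order of the keys.
    return np.bitwise_or.reduce(shifted, axis=1)  # array([10, 13], dtype=uint64)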
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
        Transform combination(s) of uint64 into one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype("object") << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
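# Illustrative sketch (not part of the original module): the same packing when
# the combined bit width no longer fits in 64 bits.  Casting the codes to
# object dtype makes the shifts operate on arbitrary-precision Python ints, so
# nothing overflows.  The offsets are made-up values chosen only to force a
# result wider than 64 bits.
def _demo_pyint_code_packing():
    import numpy as np

    offsets = np.array([70, 0], dtype="object")  # hypothetical, > 64 bits total
    codes = np.array([3, 5], dtype="uint64").astype("object")  # one 2-level key
    shifted = codes << offsets  # Python ints: no uint64 overflow
    return np.bitwise_or.reduce(shifted)  # equals (3 << 70) | 5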
def names_compat(meth):
"""
A decorator to allow either `name` or `names` keyword but not both.
This makes it easier to share code with base class.
"""
@wraps(meth)
def new_meth(self_or_cls, *args, **kwargs):
if "name" in kwargs and "names" in kwargs:
raise TypeError("Can only provide one of `names` and `name`")
elif "name" in kwargs:
kwargs["names"] = kwargs.pop("name")
return meth(self_or_cls, *args, **kwargs)
return new_meth
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_locs
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_hidden_attrs = Index._hidden_attrs | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
rename = Index.set_names
sortorder: int | None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(cls)
result._cache = {}
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
result._reset_identity()
return result
def _validate_codes(self, level: list, code: list):
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : list
Code to reassign.
level : list
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(self, codes: list | None = None, levels: list | None = None):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
# NOTE: Currently does not check, among other things, that cached
        # nlevels matches nor that sortorder matches the actual sortorder.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > _lexsort_depth(self.codes, self.nlevels):
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
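    # Illustrative example (not in the original source): the uniqueness check in
    # _verify_integrity means that, for instance,
    #     pd.MultiIndex(levels=[["a", "a"]], codes=[[0, 1]])
    # raises ValueError("Level values must be unique: ['a', 'a'] on level 0").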
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> MultiIndex:
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return cls(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
@names_compat
def from_tuples(
cls,
tuples: Iterable[tuple[Hashable, ...]],
sortorder: int | None = None,
names: Sequence[Hashable] | None = None,
) -> MultiIndex:
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
tuples = cast(Collection[Tuple[Hashable, ...]], tuples)
arrays: list[Sequence[Hashable]]
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = np.asarray(tuples._values)
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrs = zip(*tuples)
arrays = cast(List[Sequence[Hashable]], arrs)
return cls.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(
cls, iterables, sortorder=None, names=lib.no_default
) -> MultiIndex:
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
.. versionchanged:: 1.0.0
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
# codes are all ndarrays, so cartesian_product is lossless
codes = cartesian_product(codes)
return cls(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df: DataFrame, sortorder=None, names=None) -> MultiIndex:
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@cache_readonly
def _values(self) -> np.ndarray:
# We override here, since our parent uses _data, which we don't use.
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals.dtype):
vals = cast("CategoricalIndex", vals)
vals = vals._data._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or isinstance(
vals, (ABCDatetimeIndex, ABCTimedeltaIndex)
):
vals = vals.astype(object)
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "Index")
vals = np.array(vals, copy=False) # type: ignore[assignment]
values.append(vals)
arr = lib.fast_zip(values)
return arr
@property
def values(self) -> np.ndarray:
return self._values
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@cache_readonly
def dtypes(self) -> Series:
"""
Return the dtypes as a Series for the underlying MultiIndex
"""
from pandas import Series
return Series(
{
f"level_{idx}" if level.name is None else level.name: level.dtype
for idx, level in enumerate(self.levels)
}
)
def __len__(self) -> int:
return len(self.codes[0])
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self) -> FrozenList:
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [x._rename(name=name) for x, name in zip(self._levels, self._names)]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self,
levels,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
        # This is NOT part of the levels property because it should not be
        # possible to set levels from outside this class. User beware if you
        # change _levels directly
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._view() for lev in levels
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels_list = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view()
new_levels = FrozenList(new_levels_list)
if verify_integrity:
new_codes = self._verify_integrity(levels=new_levels)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._reset_cache()
def set_levels(
self, levels, level=None, inplace=None, verify_integrity: bool = True
):
"""
Set new levels on MultiIndex. Defaults to returning new index.
Parameters
----------
levels : sequence or list of sequence
New level(s) to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
.. deprecated:: 1.2.0
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [
... (1, "one"),
... (1, "two"),
... (2, "one"),
... (2, "two"),
... (3, "one"),
... (3, "two")
... ],
... names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two'),
(3, 'one'),
(3, 'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b', 'c'], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
('b', 'two'),
('c', 'one'),
('c', 'two')],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b'], level='bar')
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
(2, 'b'),
(3, 'a'),
(3, 'b')],
names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
if inplace is not None:
warnings.warn(
"inplace is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
else:
inplace = False
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
level, levels = _require_listlike(level, levels, "Levels")
if inplace:
idx = self
else:
idx = self._view()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
if not inplace:
return idx
@property
def nlevels(self) -> int:
"""
Integer number of levels in this MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.nlevels
3
"""
return len(self._levels)
@property
def levshape(self) -> Shape:
"""
A tuple with the length of each level.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.levshape
(1, 1, 1)
"""
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self):
return self._codes
def _set_codes(
self,
codes,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes_list = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes):
lev = self.levels[lev_num]
new_codes_list[lev_num] = _coerce_indexer_frozen(
level_codes, lev, copy=copy
)
new_codes = FrozenList(new_codes_list)
if verify_integrity:
new_codes = self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._reset_cache()
def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = True):
"""
Set new codes on MultiIndex. Defaults to returning new index.
.. versionadded:: 0.24.0
New name for deprecated method `set_labels`.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
.. deprecated:: 1.2.0
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level='bar')
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
"""
if inplace is not None:
warnings.warn(
"inplace is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
else:
inplace = False
level, codes = _require_listlike(level, codes, "Codes")
if inplace:
idx = self
else:
idx = self._view()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
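    # Illustrative note (not in the original source): for a two-level index with
    # level sizes (3, 2), the per-level bit widths are ceil(log2(3 + 1)) == 2 and
    # ceil(log2(2 + 1)) == 2, so lev_bits == [4, 2] and offsets == [2, 0]; the
    # 4 bits required fit easily in uint64, so MultiIndexUIntEngine is chosen.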
@property
def _constructor(self) -> Callable[..., MultiIndex]:
return type(self).from_tuples
@doc(Index._shallow_copy)
def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex:
names = name if name is not lib.no_default else self.names
return type(self).from_tuples(values, sortorder=None, names=names)
def _view(self) -> MultiIndex:
result = type(self)(
levels=self.levels,
codes=self.codes,
sortorder=self.sortorder,
names=self.names,
verify_integrity=False,
)
result._cache = self._cache.copy()
result._cache.pop("levels", None) # GH32669
return result
# --------------------------------------------------------------------
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
.. deprecated:: 1.2.0
levels : sequence, optional
.. deprecated:: 1.2.0
codes : sequence, optional
.. deprecated:: 1.2.0
deep : bool, default False
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
names = self._validate_names(name=name, names=names, deep=deep)
if levels is not None:
warnings.warn(
"parameter levels is deprecated and will be removed in a future "
"version. Use the set_levels method instead.",
FutureWarning,
stacklevel=2,
)
if codes is not None:
warnings.warn(
"parameter codes is deprecated and will be removed in a future "
"version. Use the set_codes method instead.",
FutureWarning,
stacklevel=2,
)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
levels = levels if levels is not None else self.levels
codes = codes if codes is not None else self.codes
new_index = type(self)(
levels=levels,
codes=codes,
sortorder=self.sortorder,
names=names,
verify_integrity=False,
)
new_index._cache = self._cache.copy()
new_index._cache.pop("levels", None) # GH32669
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
""" return a boolean if we need a qualified .info display """
def f(level):
return "mixed" in level or "string" in level or "unicode" in level
return any(f(level) for level in self._inferred_type_levels)
@doc(Index.memory_usage)
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
def _format_data(self, name=None) -> str:
"""
Return the formatted data as a unicode string
"""
return format_object_summary(
self, self._formatter_func, name=name, line_break_each_value=True
)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
return format_object_attrs(self, include_dtype=False)
def _format_native_types(self, na_rep="nan", **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level_strs = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level_strs)
# numpy 1.21 deprecated implicit string casting
level_strs = level_strs.astype(str)
level_strs = np.append(level_strs, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level_strs)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi._values
def format(
self,
name: bool | None = None,
formatter: Callable | None = None,
na_rep: str | None = None,
names: bool = False,
space: int = 2,
sparsify=None,
adjoin: bool = True,
) -> list:
if name is not None:
names = name
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_nd(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, lev_name in zip(stringified_levels, self.names):
level = []
if names:
level.append(
pprint_thing(lev_name, escape_chars=("\t", "\r", "\n"))
if lev_name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ""
# GH3547 use value of sparsify as sentinel if it's "Falsey"
assert isinstance(sparsify, bool) or sparsify is lib.no_default
if sparsify in [False, lib.no_default]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = sparsify_labels(
result_levels, start=int(names), sentinel=sentinel
)
if adjoin:
from pandas.io.formats.format import get_adjustment
adj = get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
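    # Illustrative note (not in the original source): with sparsify left at its
    # default, repeated labels in the outer levels are blanked after their first
    # occurrence, so MultiIndex.from_product([["a", "b"], [1, 2]]).format() shows
    # "a" and "b" only on the first row of each group rather than on every row.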
# --------------------------------------------------------------------
# Names Methods
def _get_names(self) -> FrozenList:
return FrozenList(self._names)
def _set_names(self, names, level=None, validate: bool = True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : bool, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if validate:
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(lev) for lev in level]
# set the name
for lev, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
# error: Cannot determine type of '__setitem__'
self._names[lev] = name # type: ignore[has-type]
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
names = property(
fset=_set_names,
fget=_get_names,
doc="""
Names of levels in MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.names
FrozenList(['x', 'y', 'z'])
""",
)
# --------------------------------------------------------------------
@doc(Index._get_grouper_for_level)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
else:
# break references back to us so that setting the name
# on the output of a groupby doesn't reflect back here.
level_index = level_index.copy()
if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
else:
grouper = level_index.take(codes)
return grouper, codes, level_index
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
if any(-1 in code for code in self.codes):
return False
if all(level.is_monotonic for level in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i)._values for i in reversed(range(len(self.levels)))
]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self._values).is_monotonic
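    # Illustrative example (not in the original source): tuples compare
    # lexicographically, so
    # MultiIndex.from_arrays([[1, 1, 2], ["a", "b", "a"]]).is_monotonic_increasing
    # is True (the drop back to "a" is fine because the first level increased),
    # while MultiIndex.from_arrays([[1, 1, 2], ["b", "a", "a"]]) is not monotonic
    # because (1, "a") sorts before (1, "b").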
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self) -> list[str]:
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@doc(Index.duplicated)
def duplicated(self, keep="first") -> np.ndarray:
shape = tuple(len(lev) for lev in self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated(ids, keep)
# error: Cannot override final attribute "_duplicated"
# (previously declared in base class "IndexOpsMixin")
_duplicated = duplicated # type: ignore[misc]
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError("isna is not defined for MultiIndex")
@doc(Index.dropna)
def dropna(self, how: str = "any") -> MultiIndex:
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.set_codes(codes=new_codes)
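    # Illustrative example (not in the original source):
    # MultiIndex.from_arrays([[1, None], ["a", "b"]]).dropna() keeps only the
    # (1, 'a') entry: the None in the first level is encoded as code -1, and
    # how="any" drops every row that contains such a code.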
def _get_level_values(self, level: int, unique: bool = False) -> Index:
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int
unique : bool, default False
if True, drop duplicated values
Returns
-------
Index
"""
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
"""
Return vector of label values for requested level.
Length of returned vector is equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@doc(Index.unique)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def to_frame(self, index: bool = True, name=None) -> DataFrame:
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
.. versionadded:: 0.24.0
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
)
>>> df = mi.to_frame()
>>> df
0 1
a c a c
b d b d
>>> df = mi.to_frame(index=False)
>>> df
0 1
0 a c
1 b d
>>> df = mi.to_frame(name=['x', 'y'])
>>> df
x y
a c a c
b d b d
"""
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self.names
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{
(level if lvlname is None else lvlname): self._get_level_values(level)
for lvlname, level in zip(idx_names, range(len(self.levels)))
},
copy=False,
)
if index:
result.index = self
return result
def to_flat_index(self) -> Index:
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
.. versionadded:: 0.24.0
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
See Also
--------
MultiIndex.from_tuples : Convert flat index back to MultiIndex.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [['foo', 'bar'], ['baz', 'qux']],
... names=['a', 'b'])
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self._values, tupleize_cols=False)
@property
def _is_all_dates(self) -> bool:
return False
def is_lexsorted(self) -> bool:
warnings.warn(
"MultiIndex.is_lexsorted is deprecated as a public function, "
"users should use MultiIndex.is_monotonic_increasing instead.",
FutureWarning,
stacklevel=2,
)
return self._is_lexsorted()
def _is_lexsorted(self) -> bool:
"""
Return True if the codes are lexicographically sorted.
Returns
-------
bool
Examples
--------
In the below examples, the first level of the MultiIndex is sorted because
a<b<c, so there is no need to look at the next level.
>>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'e', 'f']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'f', 'e']]).is_lexsorted()
True
In case there is a tie, the lexicographical sorting looks
at the next level of the MultiIndex.
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']]).is_lexsorted()
False
>>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
... ['aa', 'bb', 'aa', 'bb']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
... ['bb', 'aa', 'aa', 'bb']]).is_lexsorted()
False
"""
return self._lexsort_depth == self.nlevels
@property
def lexsort_depth(self):
warnings.warn(
"MultiIndex.is_lexsorted is deprecated as a public function, "
"users should use MultiIndex.is_monotonic_increasing instead.",
FutureWarning,
stacklevel=2,
)
return self._lexsort_depth
@cache_readonly
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
"""
if self.sortorder is not None:
return self.sortorder
return _lexsort_depth(self.codes, self.nlevels)
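    # Illustrative example (not in the original source):
    # MultiIndex.from_arrays([["a", "a", "b"], [2, 1, 1]]) has a _lexsort_depth
    # of 1: the first level's codes are sorted but the second level's are not,
    # so only one leading level counts toward the lexsort depth.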
def _sort_levels_monotonic(self) -> MultiIndex:
"""
This is an *internal* function.
        Create a new MultiIndex from the current one, with the items in each
        level sorted monotonically. This does not actually make the entire
        MultiIndex monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.sort_values()
MultiIndex([('a', 'aa'),
('a', 'bb'),
('b', 'aa'),
('b', 'bb')],
)
"""
if self._is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_platform_int(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_nd(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self) -> MultiIndex:
"""
Create new MultiIndex from current that removes unused levels.
Unused level(s) means levels that are not expressed in the
labels. The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will
also be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
(1, 'a'),
(1, 'b')],
)
>>> mi[2:]
MultiIndex([(1, 'a'),
(1, 'b')],
)
The 0 from the first level is not represented
and can be removed
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
if lev.isna().any() and len(uniques) == len(lev):
break
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = {
"levels": list(self.levels),
"codes": list(self.codes),
"sortorder": self.sortorder,
"names": list(self.names),
}
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key, warn_float=True)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
# in general cannot be sure whether the result will be sorted
sortorder = None
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
elif isinstance(key, slice):
if key.step is None or key.step > 0:
sortorder = self.sortorder
elif isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
sortorder = None
if slobj.step is None or slobj.step > 0:
sortorder = self.sortorder
new_codes = [level_codes[slobj] for level_codes in self.codes]
return type(self)(
levels=self.levels,
codes=new_codes,
names=self._names,
sortorder=sortorder,
verify_integrity=False,
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(
self: MultiIndex,
indices,
axis: int = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
) -> MultiIndex:
nv.validate_take((), kwargs)
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
na_value = -1
taken = [lab.take(indices) for lab in self.codes]
if allow_fill:
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
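# Rough usage sketch (assumed, not part of the original source):
# mi = pd.MultiIndex.from_product([range(2), list("ab")])
# mi.take([1, 0]) -> MultiIndex([(0, 'b'), (0, 'a')])
# mi.take([0, -1], allow_fill=True, fill_value=(None, None)) puts an all-NA
# entry in the second position, because -1 is then treated as the fill marker.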
def append(self, other):
"""
Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self._values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
def argsort(self, *args, **kwargs) -> np.ndarray:
return self._values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats: int, axis=None) -> MultiIndex:
nv.validate_repeat((), {"axis": axis})
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "int")
repeats = ensure_platform_int(repeats) # type: ignore[assignment]
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples when level is not specified
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=np.dtype("object"))
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
step = loc.step if loc.step is not None else 1
inds.extend(range(loc.start, loc.stop, step))
elif com.is_bool_indexer(loc):
if self._lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
def _drop_from_level(self, codes, level, errors="raise") -> MultiIndex:
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
# If nan should be dropped it will equal -1 here. We have to check which values
# are not nan and equal -1; those are the ones missing from the index.
nan_codes = isna(codes)
values[(np.equal(nan_codes, False)) & (values == -1)] = -2
if index.shape[0] == self.shape[0]:
values[np.equal(nan_codes, True)] = -2
not_found = codes[values == -2]
if len(not_found) != 0 and errors != "ignore":
raise KeyError(f"labels {not_found} not found in level")
mask = ~algos.isin(self.codes[i], values)
return self[mask]
def swaplevel(self, i=-2, j=-1) -> MultiIndex:
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order) -> MultiIndex:
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y'])
>>> mi
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.reorder_levels(order=[1, 0])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
>>> mi.reorder_levels(order=['y', 'x'])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _get_codes_for_sorting(self) -> list[Categorical]:
"""
We are categorizing our codes by using the
available categories (all, not just observed),
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
a valid value.
"""
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
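# Illustrative example (assumed values): level codes [1, -1, 0] yield
# Categorical.from_codes([1, -1, 0], categories=[0, 1], ordered=True),
# so -1 stays missing while 0 and 1 keep the full (not just observed) ordering.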
def sortlevel(
self, level=0, ascending: bool = True, sort_remaining: bool = True
) -> tuple[MultiIndex, np.ndarray]:
"""
Sort MultiIndex at the requested level.
The result will respect the original ordering of the associated
factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level.
If list-like must be names or ints of levels.
ascending : bool, default True
False to sort in descending order.
Can also be a list to specify a directed ordering.
sort_remaining : bool, default True
    Sort by the remaining levels after sorting by ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]])
>>> mi
MultiIndex([(0, 2),
(0, 1)],
)
>>> mi.sortlevel()
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(sort_remaining=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
>>> mi.sortlevel(1)
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(1, ascending=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
"""
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
# Reverse sorted to retain the order of
# smaller indices that needs to be removed
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp, compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> tuple[MultiIndex, np.ndarray | None]:
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "names")
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase.ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop("freq", None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(
target, level, how="right", keep_order=False
)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def _check_indexing_error(self, key):
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
def _should_fallback_to_positional(self) -> bool:
"""
Should integer key(s) be treated as positional?
"""
# GH#33355
return self.levels[0]._should_fallback_to_positional()
def _get_values_for_loc(self, series: Series, loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
"""
new_values = series._values[loc]
if is_scalar(loc):
return new_values
if len(new_values) == 1 and not self.nlevels > 1:
# If more than one level left, we can not return a scalar
return new_values[0]
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
return new_ser.__finalize__(series)
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError(f"{keyarr[mask]} not in index")
elif is_empty_indexer(indexer, keyarr):
# We get here when levels still contain values which are not
# actually in Index anymore
raise KeyError(f"{keyarr} not in index")
return indexer, keyarr
def _get_partial_string_timestamp_match_key(self, key):
"""
Translate any partial string timestamp matches in key, returning the
new key.
Only relevant for MultiIndex.
"""
# GH#10331
if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = (key,) + (slice(None),) * (len(self.levels) - 1)
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (
isinstance(component, str)
and self.levels[i]._supports_partial_string_indexing
):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# returned ndarray is np.intp
# empty indexer
if not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self._values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
# TODO: explicitly raise here? we only have one test that
# gets here, and it is checking that we raise with method="nearest"
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
# TODO: get_indexer_with_fill docstring says values must be _sorted_
# but that doesn't appear to be enforced
indexer = self._engine.get_indexer_with_fill(
target=target._values, values=self._values, method=method, limit=limit
)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
else:
indexer = self._engine.get_indexer(target._values)
# Note: we only get here (in extant tests at least) with
# target.nlevels == self.nlevels
return ensure_platform_int(indexer)
def get_slice_bound(
self, label: Hashable | Sequence[Hashable], side: str, kind: str | None = None
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
kind : {'loc', 'getitem', None}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound('b', side="left")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(('b','f'), side="right")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : str, optional, defaults to None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step)
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self._lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self._lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if isinstance(idx, slice) and k < n - 1:
# Get start and end value from slice, necessary when a non-integer
# interval is given as input GH#37707
start = idx.start
end = idx.stop
elif k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
elif isinstance(idx, slice):
idx = idx.start
return start + section.searchsorted(idx, side=side)
else:
return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
If key is NA value, location of index unify as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels.
The location is returned as an integer/slice or boolean
mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
"""
if method is not None:
raise NotImplementedError(
"only the default get_loc method is "
"currently supported for MultiIndex"
)
hash(key)
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self._lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (
self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))
)
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=10,
)
loc = np.arange(start, stop, dtype=np.intp)
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level: bool = True):
"""
Get location and sliced index for requested label(s)/level(s).
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False]), Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
"""
if not isinstance(level, (list, tuple)):
level = self._get_level_number(level)
else:
level = [self._get_level_number(lev) for lev in level]
return self._get_loc_level(key, level=level, drop_level=drop_level)
def _get_loc_level(self, key, level: int | list[int] = 0, drop_level: bool = True):
"""
get_loc_level but with `level` known to be positional, not name-based.
"""
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
# kludge around
orig_index = new_index = self[indexer]
for i in sorted(levels, reverse=True):
try:
new_index = new_index._drop_level_numbers([i])
except ValueError:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
loc, new_index = self._get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_mi_droplevels(result, level, drop_level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
try:
return (self._engine.get_loc(key), None)
except KeyError as e:
raise KeyError(key) from e
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level: int = 0, indexer=None):
# `level` kwarg is _always_ positional, never name
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
if step is not None and step < 0:
# Switch elements for negative step size
start, stop = stop - 1, start - 1
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
# we have an indexer which maps the locations in the labels
# that we have already selected (and which is not an indexer for the
# entire set); doing otherwise would be wasteful, so we only need to
# examine locations that are in this set. The only magic here is
# that the results are the mappings to the set that we have
# selected.
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Series")
m = np.asarray(m) # type: ignore[assignment]
else:
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Series")
m = np.zeros(len(codes), dtype=bool) # type: ignore[assignment]
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
elif isinstance(start, slice):
stop = len(level_index)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(key.start, key.stop, key.step)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self._lexsort_depth == 0 or step is not None:
# need right-inclusive semantics here, like when we are
# searching with a slice, so use stop + 1 (so that stop itself
# is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
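# e.g. (illustrative values) with level_codes == [0, 0, 1, 1, 2] and
# start == stop == 1, the two searchsorted calls below give slice(2, 4, None)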
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
idx = self._get_loc_single_level_index(level_index, key)
if level > 0 or self._lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == idx, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
if isinstance(idx, slice):
start = idx.start
end = idx.stop
else:
start = level_codes.searchsorted(idx, side="left")
end = level_codes.searchsorted(idx, side="right")
if start == end:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(start, end)
def get_locs(self, seq):
"""
Get location for a sequence of labels.
Parameters
----------
seq : label, slice, list, mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
numpy.ndarray
NumPy array of integers suitable for passing to iloc.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b') # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP
array([2], dtype=int64)
"""
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self._lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self._lexsort_depth}"
)
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r) -> Int64Index:
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError(
"cannot index with a boolean indexer "
"that is not the same length as the "
"index"
)
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr: Index | None, indexer: Index | None, key) -> Index:
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
indexer_intersection = indexer.intersection(idxr)
if indexer_intersection.empty and not idxr.empty and not indexer.empty:
raise KeyError(key)
return indexer_intersection
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(
_convert_to_indexer(k), indexer=indexer, key=seq
)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers: Int64Index | None = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i, indexer=indexer)
)
indexers = (idxrs if indexers is None else indexers).union(
idxrs, sort=False
)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer, key=seq)
else:
# no matches we are done
return np.array([], dtype=np.int64)
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer, key=seq)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(
_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)
),
indexer=indexer,
key=seq,
)
else:
# a single label
indexer = _update_indexer(
_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]
),
indexer=indexer,
key=seq,
)
# empty indexer
if indexer is None:
return np.array([], dtype=np.int64)
assert isinstance(indexer, Int64Index), type(indexer)
indexer = self._reorder_indexer(seq, indexer)
return indexer._values
# --------------------------------------------------------------------
def _reorder_indexer(
self,
seq: tuple[Scalar | Iterable | AnyArrayLike, ...],
indexer: Int64Index,
) -> Int64Index:
"""
Reorder an indexer of a MultiIndex (self) so that the labels are in the
same order as given in seq
Parameters
----------
seq : label/slice/list/mask or a sequence of such
indexer: an Int64Index indexer of self
Returns
-------
indexer : a sorted Int64Index indexer of self ordered as seq
"""
# If the index is lexsorted and the list-like labels in seq are sorted
# then we do not need to sort
if self._is_lexsorted():
need_sort = False
for i, k in enumerate(seq):
if is_list_like(k):
if not need_sort:
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
elif isinstance(k, slice) and k.step is not None and k.step < 0:
need_sort = True
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
n = len(self)
keys: tuple[np.ndarray, ...] = ()
# For each level of the sequence in seq, map the level codes with the
# order they appears in a list-like sequence
# This mapping is then use to reorder the indexer
for i, k in enumerate(seq):
if is_scalar(k):
# GH#34603 we want to treat a scalar the same as an all equal list
k = [k]
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
elif isinstance(k, slice) and k.step is not None and k.step < 0:
new_order = np.arange(n)[k][indexer]
elif isinstance(k, slice) and k.start is None and k.stop is None:
# slice(None) should not determine order GH#31330
new_order = np.ones((n,))[indexer]
else:
# For all other case, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
def truncate(self, before=None, after=None) -> MultiIndex:
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(
levels=new_levels,
codes=new_codes,
names=self._names,
verify_integrity=False,
)
def equals(self, other: object) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if len(self) != len(other):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not self._should_compare(other):
# object Index or Categorical[object] may contain tuples
return False
return array_equivalent(self._values, other._values)
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
other_codes = other.codes[i]
self_mask = self_codes == -1
other_mask = other_codes == -1
if not np.array_equal(self_mask, other_mask):
return False
self_codes = self_codes[~self_mask]
self_values = self.levels[i]._values.take(self_codes)
other_codes = other_codes[~other_mask]
other_values = other.levels[i]._values.take(other_codes)
# since NaT is used for both datetime64 and timedelta64, we can have a
# situation where a level is typed, say, timedelta64 in self (i.e. it
# has values other than NaT) but datetime64 in other (where it is
# all NaT); these are still considered equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other: MultiIndex) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
# --------------------------------------------------------------------
# Set Methods
def _union(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
# We could get here with CategoricalIndex other
rvals = other._values.astype(object, copy=False)
uniq_tuples = lib.fast_unique_multiple([self._values, rvals], sort=sort)
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return is_object_dtype(dtype)
def _get_reconciled_name_object(self, other) -> MultiIndex:
"""
If the result of a set operation will be self,
return self, unless the names change, in which
case make a shallow copy of self.
"""
names = self._maybe_match_names(other)
if self.names != names:
return self.rename(names)
return self
def _maybe_match_names(self, other):
"""
Try to find common names to attach to the result of an operation between
a and b. Return a consensus list of names if they match at least partly
or list of None if they have completely different names.
"""
if len(self.names) != len(other.names):
return [None] * len(self.names)
names = []
for a_name, b_name in zip(self.names, other.names):
if a_name == b_name:
names.append(a_name)
else:
# TODO: what if they both have np.nan for their names?
names.append(None)
return names
def _intersection(self, other, sort=False) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
other = other.astype(object, copy=False)
uniq_tuples = None # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
inner_tuples = self._inner_indexer(other)[0]
sort = False # inner_tuples is already sorted
except TypeError:
pass
else:
uniq_tuples = algos.unique(inner_tuples)
if uniq_tuples is None:
left_unique = self.drop_duplicates()
indexer = left_unique.get_indexer(other.drop_duplicates())
uniq_tuples = left_unique.take(np.sort(indexer[indexer != -1]))
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def _difference(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
difference = this._values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
def _convert_can_do_setop(self, other):
result_names = self.names
if not isinstance(other, Index):
if len(other) == 0:
return self[:0], self.names
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other, names=self.names)
except (ValueError, TypeError) as err:
# ValueError raised by tuples_to_object_array if we
# have non-object dtype
raise TypeError(msg) from err
else:
result_names = get_unanimous_names(self, other)
return other, result_names
def symmetric_difference(self, other, result_name=None, sort=None):
# On equal MultiIndexes the symmetric difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
tups = Index.symmetric_difference(self, other, result_name, sort)
if len(tups) == 0:
return type(self)(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
names=tups.name,
)
return type(self).from_tuples(tups, names=tups.name)
# --------------------------------------------------------------------
@doc(Index.astype)
def astype(self, dtype, copy: bool = True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
"Setting a MultiIndex dtype to anything other than object "
"is not supported"
)
elif copy is True:
return self._view()
return self
def _validate_fill_value(self, item):
if not isinstance(item, tuple):
# Pad the key with empty strings if lower levels of the key
# aren't specified:
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
return item
def insert(self, loc: int, item) -> MultiIndex:
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
item = self._validate_fill_value(item)
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def delete(self, loc) -> MultiIndex:
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
@doc(Index.isin)
def isin(self, values, level=None) -> np.ndarray:
if level is None:
values = MultiIndex.from_tuples(values, names=self.names)._values
return algos.isin(self._values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
# ---------------------------------------------------------------
# Arithmetic/Numeric Methods - Disabled
__add__ = make_invalid_op("__add__")
__radd__ = make_invalid_op("__radd__")
__iadd__ = make_invalid_op("__iadd__")
__sub__ = make_invalid_op("__sub__")
__rsub__ = make_invalid_op("__rsub__")
__isub__ = make_invalid_op("__isub__")
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
# Unary methods disabled
__neg__ = make_invalid_op("__neg__")
__pos__ = make_invalid_op("__pos__")
__abs__ = make_invalid_op("__abs__")
__inv__ = make_invalid_op("__inv__")
def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:
"""Count depth (up to a maximum of `nlevels`) with which codes are lexsorted."""
int64_codes = [ensure_int64(level_codes) for level_codes in codes]
for k in range(nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
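# Illustrative example (assumed values): codes [[0, 0, 1], [0, 2, 1]] are lexsorted
# to depth 2, whereas [[0, 1, 0], [0, 0, 1]] is lexsorted only to depth 0.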
def sparsify_labels(label_list, start: int = 0, sentinel=""):
pivoted = list(zip(*label_list))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return list(zip(*result))
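# For example (illustrative): sparsify_labels([("a", "a", "b"), ("x", "y", "y")])
# returns [("a", "", "b"), ("x", "y", "y")] -- repeated leading labels are blanked
# out, which is how MultiIndex rows are sparsified for display.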
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index: Index, key) -> Index:
"""
Attempt to drop level or levels from the given index.
Parameters
----------
index: Index
key : scalar or tuple
Returns
-------
Index
"""
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index._drop_level_numbers([0])
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index._drop_level_numbers([0])
except ValueError:
pass
return index
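# Rough behaviour sketch (assumed): for a 3-level index and key ("a", "b"), two
# levels are dropped, leaving a single-level Index; if dropping would consume
# every level, the ValueError is caught and the original index is returned.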
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array_like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
"""
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
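# Illustrative (assumed): with fewer than ~128 categories the codes typically come
# back as a read-only int8 array; larger category sets get wider integer dtypes.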
def _require_listlike(level, arr, arrname: str):
"""
Ensure that level is either None or listlike, and arr is list-of-listlike.
"""
if level is not None and not is_list_like(level):
if not is_list_like(arr):
raise TypeError(f"{arrname} must be list-like")
if is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list-like")
level = [level]
arr = [arr]
elif level is None or is_list_like(level):
if not is_list_like(arr) or not is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list of lists-like")
return level, arr
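# Example of the normalisation (illustrative call): _require_listlike(0, ["a", "b"], "Levels")
# returns ([0], [["a", "b"]]), while passing a flat list together with level=None
# raises a TypeError asking for a list of lists.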
|
the-stack_0_3171 | # -*- coding: utf-8 -*-
"""Code for maintaining the background process and for running
user programs
Commands get executed via shell, this way the command line in the
shell becomes kind of title for the execution.
"""
import collections
import logging
import os.path
import re
import shlex
import signal
import subprocess
import sys
import time
import tkinter as tk
import warnings
from logging import debug
from threading import Thread
from time import sleep
from tkinter import messagebox, ttk
from typing import Any, List, Optional, Set, Union, Callable # @UnusedImport; @UnusedImport
import thonny
from thonny import THONNY_USER_DIR, common, get_runner, get_shell, get_workbench
from thonny.common import (
BackendEvent,
CommandToBackend,
DebuggerCommand,
DebuggerResponse,
EOFCommand,
InlineCommand,
InputSubmission,
ToplevelCommand,
ToplevelResponse,
UserError,
is_same_path,
normpath_with_actual_case,
parse_message,
path_startswith,
serialize_message,
update_system_path,
MessageFromBackend,
universal_relpath,
)
from thonny.editors import (
get_current_breakpoints,
get_saved_current_script_filename,
is_remote_path,
is_local_path,
get_target_dirname_from_editor_filename,
extract_target_path,
)
from thonny.languages import tr
from thonny.misc_utils import construct_cmd_line, running_on_mac_os, running_on_windows
from thonny.ui_utils import CommonDialogEx, select_sequence, show_dialog
from thonny.workdlg import WorkDialog
logger = logging.getLogger(__name__)
WINDOWS_EXE = "python.exe"
OUTPUT_MERGE_THRESHOLD = 1000
RUN_COMMAND_LABEL = "" # init later when gettext is ready
RUN_COMMAND_CAPTION = ""
EDITOR_CONTENT_TOKEN = "$EDITOR_CONTENT"
EXPECTED_TERMINATION_CODE = 123
INTERRUPT_SEQUENCE = "<Control-c>"
ANSI_CODE_TERMINATOR = re.compile("[@-~]")
# other components may turn it on in order to avoid grouping output lines into one event
io_animation_required = False
_console_allocated = False
class Runner:
def __init__(self) -> None:
get_workbench().set_default("run.auto_cd", True)
self._init_commands()
self._state = "starting"
self._proxy = None # type: BackendProxy
self._publishing_events = False
self._polling_after_id = None
self._postponed_commands = [] # type: List[CommandToBackend]
def _remove_obsolete_jedi_copies(self) -> None:
# Thonny 2.1 used to copy jedi in order to make it available
# for the backend. Get rid of it now
for item in os.listdir(THONNY_USER_DIR):
if item.startswith("jedi_0."):
import shutil
shutil.rmtree(os.path.join(THONNY_USER_DIR, item), True)
def start(self) -> None:
global _console_allocated
try:
self._check_alloc_console()
_console_allocated = True
except Exception:
logger.exception("Problem allocating console")
_console_allocated = False
self.restart_backend(False, True)
# temporary
self._remove_obsolete_jedi_copies()
def _init_commands(self) -> None:
global RUN_COMMAND_CAPTION, RUN_COMMAND_LABEL
RUN_COMMAND_LABEL = tr("Run current script")
RUN_COMMAND_CAPTION = tr("Run")
get_workbench().set_default("run.run_in_terminal_python_repl", False)
get_workbench().set_default("run.run_in_terminal_keep_open", True)
try:
import thonny.plugins.debugger # @UnusedImport
debugger_available = True
except ImportError:
debugger_available = False
get_workbench().add_command(
"run_current_script",
"run",
RUN_COMMAND_LABEL,
caption=RUN_COMMAND_CAPTION,
handler=self.cmd_run_current_script,
default_sequence="<F5>",
extra_sequences=[select_sequence("<Control-r>", "<Command-r>")],
tester=self.cmd_run_current_script_enabled,
group=10,
image="run-current-script",
include_in_toolbar=not (get_workbench().in_simple_mode() and debugger_available),
show_extra_sequences=True,
)
get_workbench().add_command(
"run_current_script_in_terminal",
"run",
tr("Run current script in terminal"),
caption="RunT",
handler=self._cmd_run_current_script_in_terminal,
default_sequence="<Control-t>",
extra_sequences=["<<CtrlTInText>>"],
tester=self._cmd_run_current_script_in_terminal_enabled,
group=35,
image="terminal",
)
get_workbench().add_command(
"restart",
"run",
tr("Stop/Restart backend"),
caption=tr("Stop"),
handler=self.cmd_stop_restart,
default_sequence="<Control-F2>",
group=100,
image="stop",
include_in_toolbar=True,
)
get_workbench().add_command(
"interrupt",
"run",
tr("Interrupt execution"),
handler=self._cmd_interrupt,
tester=self._cmd_interrupt_enabled,
default_sequence=INTERRUPT_SEQUENCE,
skip_sequence_binding=True, # Sequence will be bound differently
group=100,
bell_when_denied=False,
)
get_workbench().bind(INTERRUPT_SEQUENCE, self._cmd_interrupt_with_shortcut, True)
get_workbench().add_command(
"ctrld",
"run",
tr("Send EOF / Soft reboot"),
self.ctrld,
self.ctrld_enabled,
group=100,
default_sequence="<Control-d>",
extra_sequences=["<<CtrlDInText>>"],
)
get_workbench().add_command(
"disconnect",
"run",
tr("Disconnect"),
self.disconnect,
self.disconnect_enabled,
group=100,
)
def get_state(self) -> str:
"""State is one of "running", "waiting_debugger_command", "waiting_toplevel_command" """
return self._state
def _set_state(self, state: str) -> None:
if self._state != state:
logging.debug("Runner state changed: %s ==> %s" % (self._state, state))
self._state = state
def is_running(self):
return self._state == "running"
def is_waiting(self):
return self._state.startswith("waiting")
def is_waiting_toplevel_command(self):
return self._state == "waiting_toplevel_command"
def is_waiting_debugger_command(self):
return self._state == "waiting_debugger_command"
def get_sys_path(self) -> List[str]:
return self._proxy.get_sys_path()
def send_command(self, cmd: CommandToBackend) -> None:
if self._proxy is None:
return
if self._publishing_events:
# allow all event handlers to complete before sending the commands
# issued by first event handlers
self._postpone_command(cmd)
return
# First sanity check
if (
isinstance(cmd, ToplevelCommand)
and not self.is_waiting_toplevel_command()
and cmd.name not in ["Reset", "Run", "Debug"]
or isinstance(cmd, DebuggerCommand)
and not self.is_waiting_debugger_command()
):
get_workbench().bell()
logging.warning(
"RUNNER: Command %s was attempted at state %s" % (cmd, self.get_state())
)
return
# Attach extra info
if "debug" in cmd.name.lower():
cmd["breakpoints"] = get_current_breakpoints()
if "id" not in cmd:
cmd["id"] = generate_command_id()
cmd["local_cwd"] = get_workbench().get_local_cwd()
# Offer the command
logging.debug("RUNNER Sending: %s, %s", cmd.name, cmd)
response = self._proxy.send_command(cmd)
if response == "discard":
return None
elif response == "postpone":
self._postpone_command(cmd)
return
else:
assert response is None
get_workbench().event_generate("CommandAccepted", command=cmd)
if isinstance(cmd, (ToplevelCommand, DebuggerCommand)):
self._set_state("running")
if cmd.name[0].isupper():
# This may be only a logical restart, which does not look like a restart to the runner
get_workbench().event_generate("BackendRestart", full=False)
def send_command_and_wait(self, cmd: CommandToBackend, dialog_title: str) -> MessageFromBackend:
dlg = InlineCommandDialog(get_workbench(), cmd, title=dialog_title + " ...")
show_dialog(dlg)
return dlg.response
def _postpone_command(self, cmd: CommandToBackend) -> None:
# in case of InlineCommands, discard older same type command
if isinstance(cmd, InlineCommand):
for older_cmd in self._postponed_commands:
if older_cmd.name == cmd.name:
self._postponed_commands.remove(older_cmd)
if len(self._postponed_commands) > 10:
logging.warning("Can't pile up too many commands. This command will be just ignored")
else:
self._postponed_commands.append(cmd)
def _send_postponed_commands(self) -> None:
todo = self._postponed_commands
self._postponed_commands = []
for cmd in todo:
logging.debug("Sending postponed command: %s", cmd)
self.send_command(cmd)
def send_program_input(self, data: str) -> None:
assert self.is_running()
self._proxy.send_program_input(data)
def execute_script(
self,
script_path: str,
args: List[str],
working_directory: Optional[str] = None,
command_name: str = "Run",
) -> None:
if self._proxy.get_cwd() != working_directory:
# create compound command
# start with %cd
cd_cmd_line = construct_cd_command(working_directory) + "\n"
else:
# create simple command
cd_cmd_line = ""
rel_filename = universal_relpath(script_path, working_directory)
cmd_parts = ["%" + command_name, rel_filename] + args
exe_cmd_line = construct_cmd_line(cmd_parts, [EDITOR_CONTENT_TOKEN]) + "\n"
# submit to shell (shell will execute it)
get_shell().submit_magic_command(cd_cmd_line + exe_cmd_line)
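# Illustrative result (paths assumed): running /home/user/foo.py with args ["--x"]
# from a different working directory submits roughly
#   %cd /home/user
#   %Run foo.py --x
# to the shell, which then echoes and executes it.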
def execute_editor_content(self, command_name, args):
get_shell().submit_magic_command(
construct_cmd_line(
["%" + command_name, "-c", EDITOR_CONTENT_TOKEN] + args, [EDITOR_CONTENT_TOKEN]
)
)
def execute_current(self, command_name: str) -> None:
"""
This method's job is to create a command for running/debugging the
current file/script and submit it to the shell.
"""
if not self.is_waiting_toplevel_command():
self.restart_backend(True, False, 2)
filename = get_saved_current_script_filename()
if not filename:
# user has cancelled file saving
return
if (
is_remote_path(filename)
and not self._proxy.can_run_remote_files()
or is_local_path(filename)
and not self._proxy.can_run_local_files()
):
self.execute_editor_content(command_name, self._get_active_arguments())
else:
if get_workbench().get_option("run.auto_cd") and command_name[0].isupper():
working_directory = get_target_dirname_from_editor_filename(filename)
else:
working_directory = self._proxy.get_cwd()
if is_local_path(filename):
target_path = filename
else:
target_path = extract_target_path(filename)
self.execute_script(
target_path, self._get_active_arguments(), working_directory, command_name
)
def _get_active_arguments(self):
if get_workbench().get_option("view.show_program_arguments"):
args_str = get_workbench().get_option("run.program_arguments")
get_workbench().log_program_arguments_string(args_str)
return shlex.split(args_str)
else:
return []
def cmd_run_current_script_enabled(self) -> bool:
return (
get_workbench().get_editor_notebook().get_current_editor() is not None
and "run" in get_runner().get_supported_features()
)
def _cmd_run_current_script_in_terminal_enabled(self) -> bool:
return (
self._proxy
and "run_in_terminal" in self._proxy.get_supported_features()
and self.cmd_run_current_script_enabled()
)
def cmd_run_current_script(self) -> None:
if get_workbench().in_simple_mode():
get_workbench().hide_view("VariablesView")
self.execute_current("Run")
def _cmd_run_current_script_in_terminal(self) -> None:
filename = get_saved_current_script_filename()
if not filename:
return
self._proxy.run_script_in_terminal(
filename,
self._get_active_arguments(),
get_workbench().get_option("run.run_in_terminal_python_repl"),
get_workbench().get_option("run.run_in_terminal_keep_open"),
)
def _cmd_interrupt(self) -> None:
if self._proxy is not None:
if _console_allocated:
self._proxy.interrupt()
else:
messagebox.showerror(
"No console",
"Can't interrupt as console was not allocated.\n\nUse Stop/Restart instead.",
master=self,
)
else:
logging.warning("User tried interrupting without proxy")
def _cmd_interrupt_with_shortcut(self, event=None):
if not self._cmd_interrupt_enabled():
return None
if not running_on_mac_os(): # on Mac Ctrl+C is not used for Copy.
# Disable Ctrl+C interrupt in editor and shell, when some text is selected
# (assuming user intended to copy instead of interrupting)
widget = get_workbench().focus_get()
if isinstance(widget, tk.Text):
if len(widget.tag_ranges("sel")) > 0:
# this test is reliable, unlike selection_get below
return None
elif isinstance(widget, (tk.Listbox, ttk.Entry, tk.Entry, tk.Spinbox)):
try:
selection = widget.selection_get()
if isinstance(selection, str) and len(selection) > 0:
# Assuming user meant to copy, not interrupt
# (IDLE seems to follow same logic)
# NB! This is not perfect, as in Linux the selection can be in another app
# ie. there may be no selection in Thonny actually.
# In other words, Ctrl+C interrupt may be dropped without reason
# when given inside the widgets listed above.
return None
except Exception:
# widget either doesn't have selection_get or it
# gave error (can happen without selection on Ubuntu)
pass
self._cmd_interrupt()
return "break"
def _cmd_interrupt_enabled(self) -> bool:
return self._proxy and self._proxy.is_connected()
def cmd_stop_restart(self) -> None:
if get_workbench().in_simple_mode():
get_workbench().hide_view("VariablesView")
self.restart_backend(True)
def disconnect(self):
proxy = self.get_backend_proxy()
assert hasattr(proxy, "disconnect")
proxy.disconnect()
def disconnect_enabled(self):
return hasattr(self.get_backend_proxy(), "disconnect")
def ctrld(self):
proxy = self.get_backend_proxy()
if not proxy:
return
if get_shell().has_pending_input():
messagebox.showerror(
"Can't perform this action",
"Ctrl+D only has effect on an empty line / prompt.\n"
+ "Submit current input (press ENTER) and try again",
master=self,
)
return
proxy.send_command(EOFCommand())
self._set_state("running")
def ctrld_enabled(self):
proxy = self.get_backend_proxy()
return proxy and proxy.is_connected()
def _poll_backend_messages(self) -> None:
"""I chose polling instead of event_generate in listener thread,
because event_generate across threads is not reliable
http://www.thecodingforums.com/threads/more-on-tk-event_generate-and-threads.359615/
"""
self._polling_after_id = None
if self._pull_backend_messages() is False:
return
self._polling_after_id = get_workbench().after(20, self._poll_backend_messages)
def _pull_backend_messages(self):
while self._proxy is not None:
try:
msg = self._proxy.fetch_next_message()
if not msg:
break
logging.debug(
"RUNNER GOT: %s, %s in state: %s", msg.event_type, msg, self.get_state()
)
except BackendTerminatedError as exc:
self._report_backend_crash(exc)
self.destroy_backend()
return False
if msg.get("SystemExit", False):
self.restart_backend(True)
return False
# change state
if isinstance(msg, ToplevelResponse):
self._set_state("waiting_toplevel_command")
elif isinstance(msg, DebuggerResponse):
self._set_state("waiting_debugger_command")
else:
"other messages don't affect the state"
# Publish the event
# NB! This may cause another command to be sent before we get to postponed commands.
try:
self._publishing_events = True
class_event_type = type(msg).__name__
get_workbench().event_generate(class_event_type, event=msg) # more general event
if msg.event_type != class_event_type:
# more specific event
get_workbench().event_generate(msg.event_type, event=msg)
finally:
self._publishing_events = False
# TODO: is it necessary???
# https://stackoverflow.com/a/13520271/261181
# get_workbench().update()
self._send_postponed_commands()
def _report_backend_crash(self, exc: Exception) -> None:
returncode = getattr(exc, "returncode", "?")
err = "Backend terminated or disconnected."
try:
faults_file = os.path.join(THONNY_USER_DIR, "backend_faults.log")
if os.path.exists(faults_file):
with open(faults_file, encoding="ASCII") as fp:
err += fp.read()
except Exception:
logging.exception("Failed retrieving backend faults")
err = err.strip() + " Use 'Stop/Restart' to restart.\n"
if returncode != EXPECTED_TERMINATION_CODE:
get_workbench().event_generate("ProgramOutput", stream_name="stderr", data="\n" + err)
get_workbench().become_active_window(False)
def restart_backend(self, clean: bool, first: bool = False, wait: float = 0) -> None:
"""Recreate (or replace) backend proxy / backend process."""
if not first:
get_shell().restart()
get_shell().update_idletasks()
self.destroy_backend()
backend_name = get_workbench().get_option("run.backend_name")
if backend_name not in get_workbench().get_backends():
raise UserError(
"Can't find backend '{}'. Please select another backend from options".format(
backend_name
)
)
backend_class = get_workbench().get_backends()[backend_name].proxy_class
self._set_state("running")
self._proxy = None
self._proxy = backend_class(clean)
self._poll_backend_messages()
if wait:
start_time = time.time()
while not self.is_waiting_toplevel_command() and time.time() - start_time <= wait:
# self._pull_backend_messages()
get_workbench().update()
sleep(0.01)
get_workbench().event_generate("BackendRestart", full=True)
def destroy_backend(self) -> None:
if self._polling_after_id is not None:
get_workbench().after_cancel(self._polling_after_id)
self._polling_after_id = None
self._postponed_commands = []
if self._proxy:
self._proxy.destroy()
self._proxy = None
get_workbench().event_generate("BackendTerminated")
def get_local_executable(self) -> Optional[str]:
if self._proxy is None:
return None
else:
return self._proxy.get_local_executable()
def get_backend_proxy(self) -> "BackendProxy":
return self._proxy
def _check_alloc_console(self) -> None:
if sys.executable.endswith("pythonw.exe"):
# These don't have console allocated.
# Console is required for sending interrupts.
# AllocConsole would be easier but flashes console window
import ctypes
kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
exe = sys.executable.replace("pythonw.exe", "python.exe")
cmd = [exe, "-c", "print('Hi!'); input()"]
child = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
child.stdout.readline()
result = kernel32.AttachConsole(child.pid)
if not result:
err = ctypes.get_last_error()
logging.info("Could not allocate console. Error code: " + str(err))
child.stdin.write(b"\n")
try:
child.stdin.flush()
except Exception:
                # May happen e.g. when installation path has "&" in it
                # See https://bitbucket.org/plas/thonny/issues/508/cant-allocate-windows-console-when
                # Without flush the console window becomes visible, but Thonny can still be used
logger.exception("Problem with finalizing console allocation")
def ready_for_remote_file_operations(self, show_message=False):
if not self._proxy or not self.supports_remote_files():
return False
ready = self._proxy.ready_for_remote_file_operations()
if not ready and show_message:
            if not self._proxy.is_connected():
                msg = "Device is not connected"
            else:
                msg = (
                    "Device is busy -- can't perform this action now."
                    + "\nPlease wait or cancel current work and try again!"
                )
messagebox.showerror("Can't complete", msg, master=self)
return ready
def get_supported_features(self) -> Set[str]:
if self._proxy is None:
return set()
else:
return self._proxy.get_supported_features()
def supports_remote_files(self):
if self._proxy is None:
return False
else:
return self._proxy.supports_remote_files()
def supports_remote_directories(self):
if self._proxy is None:
return False
else:
return self._proxy.supports_remote_directories()
def get_node_label(self):
if self._proxy is None:
return "Back-end"
else:
return self._proxy.get_node_label()
def using_venv(self) -> bool:
from thonny.plugins.cpython import CPythonProxy
return isinstance(self._proxy, CPythonProxy) and self._proxy._in_venv
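# Minimal sketch of how other components typically drive the Runner defined
# above, via the get_runner() accessor already used elsewhere in this module.
# The helper below is purely illustrative and is never called by Thonny itself.
def _example_run_current_script_via_runner():
    runner = get_runner()
    if runner.cmd_run_current_script_enabled():
        runner.cmd_run_current_script()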
class BackendProxy:
"""Communicates with backend process.
All communication methods must be non-blocking,
ie. suitable for calling from GUI thread."""
# backend_name will be overwritten on Workbench.add_backend
# Subclasses don't need to worry about it.
backend_name = None
backend_description = None
def __init__(self, clean: bool) -> None:
"""Initializes (or starts the initialization of) the backend process.
Backend is considered ready when the runner gets a ToplevelResponse
with attribute "welcome_text" from fetch_next_message.
"""
def send_command(self, cmd: CommandToBackend) -> Optional[str]:
"""Send the command to backend. Return None, 'discard' or 'postpone'"""
raise NotImplementedError()
def send_program_input(self, data: str) -> None:
"""Send input data to backend"""
raise NotImplementedError()
def fetch_next_message(self):
"""Read next message from the queue or None if queue is empty"""
raise NotImplementedError()
def run_script_in_terminal(self, script_path, args, interactive, keep_open):
raise NotImplementedError()
def get_sys_path(self):
"backend's sys.path"
return []
def get_backend_name(self):
return type(self).backend_name
def get_pip_gui_class(self):
return None
    def interrupt(self):
        """Tries to interrupt current command without resetting the backend"""
        pass
def destroy(self):
"""Called when Thonny no longer needs this instance
(Thonny gets closed or new backend gets selected)
"""
pass
def is_connected(self):
return True
def get_local_executable(self):
"""Return system command for invoking current interpreter"""
return None
def get_supported_features(self):
return {"run"}
def get_node_label(self):
"""Used as files caption if back-end has separate files"""
return "Back-end"
def get_full_label(self):
"""Used in pip GUI title"""
return self.get_node_label()
def supports_remote_files(self):
"""Whether remote file browser should be presented with this back-end"""
return False
def uses_local_filesystem(self):
"""Whether it runs code from local files"""
return True
def supports_remote_directories(self):
return False
def supports_trash(self):
return True
def can_run_remote_files(self):
raise NotImplementedError()
def can_run_local_files(self):
raise NotImplementedError()
def ready_for_remote_file_operations(self):
return False
def get_cwd(self):
return None
def get_clean_description(self):
return self.backend_description
@classmethod
def get_current_switcher_configuration(cls):
"""returns the dict of configuration entries that distinguish current backend conf from other
items in the backend switcher"""
return {"run.backend_name": cls.backend_name}
@classmethod
def get_switcher_entries(cls):
"""
Each returned entry creates one item in the backend switcher menu.
"""
return [(cls.get_current_switcher_configuration(), cls.backend_description)]
def has_custom_system_shell(self):
return False
def open_custom_system_shell(self):
raise NotImplementedError()
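# Minimal sketch of a BackendProxy subclass, illustrating the contract spelled
# out in the docstrings above (send_command / send_program_input /
# fetch_next_message). The echo behaviour and the class itself are assumptions
# for illustration only; real proxies, like SubprocessProxy below, talk to a
# separate backend process.
class _ExampleEchoBackendProxy(BackendProxy):
    def __init__(self, clean: bool) -> None:
        super().__init__(clean)
        self._queue = collections.deque(
            [ToplevelResponse(welcome_text="echo backend ready")]
        )
    def send_command(self, cmd: CommandToBackend) -> Optional[str]:
        # Answer toplevel commands immediately instead of forwarding them.
        if isinstance(cmd, ToplevelCommand):
            self._queue.append(ToplevelResponse(command_name=cmd.name))
        return None
    def send_program_input(self, data: str) -> None:
        self._queue.append(BackendEvent("ProgramOutput", data=data, stream_name="stdout"))
    def fetch_next_message(self):
        return self._queue.popleft() if self._queue else None
    def run_script_in_terminal(self, script_path, args, interactive, keep_open):
        raise NotImplementedError()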
class SubprocessProxy(BackendProxy):
def __init__(self, clean: bool, executable: Optional[str] = None) -> None:
super().__init__(clean)
if executable:
self._executable = executable
else:
self._executable = get_interpreter_for_subprocess()
if not os.path.isfile(self._executable):
raise UserError(
"Interpreter '%s' does not exist. Please check the configuration!"
% self._executable
)
self._welcome_text = ""
self._proc = None
self._response_queue = None
self._sys_path = []
self._usersitepackages = None
self._gui_update_loop_id = None
self._in_venv = None
self._cwd = self._get_initial_cwd() # pylint: disable=assignment-from-none
self._start_background_process(clean=clean)
def _get_initial_cwd(self):
return None
def _start_background_process(self, clean=None, extra_args=[]):
        # deque, because on one occasion I need to put messages back
self._response_queue = collections.deque()
# prepare environment
env = get_environment_for_python_subprocess(self._executable)
# variables controlling communication with the back-end process
env["PYTHONIOENCODING"] = "utf-8"
# because cmd line option -u won't reach child processes
# see https://github.com/thonny/thonny/issues/808
env["PYTHONUNBUFFERED"] = "1"
# Let back-end know about plug-ins
env["THONNY_USER_DIR"] = THONNY_USER_DIR
env["THONNY_FRONTEND_SYS_PATH"] = repr(sys.path)
env["THONNY_LANGUAGE"] = get_workbench().get_option("general.language")
env["FRIENDLY_TRACEBACK_LEVEL"] = str(
get_workbench().get_option("assistance.friendly_traceback_level")
)
if thonny.in_debug_mode():
env["THONNY_DEBUG"] = "1"
elif "THONNY_DEBUG" in env:
del env["THONNY_DEBUG"]
if not os.path.exists(self._executable):
raise UserError(
"Interpreter (%s) not found. Please recheck corresponding option!"
% self._executable
)
cmd_line = (
[
self._executable,
"-u", # unbuffered IO
"-B", # don't write pyo/pyc files
# (to avoid problems when using different Python versions without write permissions)
]
+ self._get_launcher_with_args()
+ extra_args
)
creationflags = 0
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
debug("Starting the backend: %s %s", cmd_line, get_workbench().get_local_cwd())
extra_params = {}
if sys.version_info >= (3, 6):
extra_params["encoding"] = "utf-8"
self._proc = subprocess.Popen(
cmd_line,
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self._get_launch_cwd(),
env=env,
universal_newlines=True,
creationflags=creationflags,
**extra_params
)
# setup asynchronous output listeners
Thread(target=self._listen_stdout, args=(self._proc.stdout,), daemon=True).start()
Thread(target=self._listen_stderr, args=(self._proc.stderr,), daemon=True).start()
def _get_launch_cwd(self):
return self.get_cwd() if self.uses_local_filesystem() else None
def _get_launcher_with_args(self):
raise NotImplementedError()
def send_command(self, cmd: CommandToBackend) -> Optional[str]:
"""Send the command to backend. Return None, 'discard' or 'postpone'"""
if isinstance(cmd, ToplevelCommand) and cmd.name[0].isupper():
self._clear_environment()
if isinstance(cmd, ToplevelCommand):
# required by SshCPythonBackend for creating fresh target process
cmd["expected_cwd"] = self._cwd
method_name = "_cmd_" + cmd.name
if hasattr(self, method_name):
getattr(self, method_name)(cmd)
else:
self._send_msg(cmd)
def _send_msg(self, msg):
self._proc.stdin.write(serialize_message(msg) + "\n")
self._proc.stdin.flush()
def _clear_environment(self):
pass
def send_program_input(self, data):
self._send_msg(InputSubmission(data))
def process_is_alive(self):
return self._proc is not None and self._proc.poll() is None
def is_terminated(self):
return not self.process_is_alive()
def is_connected(self):
return self.process_is_alive()
def get_sys_path(self):
return self._sys_path
def destroy(self):
self._close_backend()
def _close_backend(self):
if self._proc is not None and self._proc.poll() is None:
self._proc.kill()
self._proc = None
self._response_queue = None
def _listen_stdout(self, stdout):
# debug("... started listening to stdout")
# will be called from separate thread
message_queue = self._response_queue
def publish_as_msg(data):
msg = parse_message(data)
if "cwd" in msg:
self.cwd = msg["cwd"]
message_queue.append(msg)
if len(message_queue) > 50:
# Probably backend runs an infinite/long print loop.
                # Throttle message throughput in order to keep GUI thread responsive.
while len(message_queue) > 0:
sleep(0.1)
while self.process_is_alive():
try:
data = stdout.readline()
except IOError:
sleep(0.1)
continue
# debug("... read some stdout data", repr(data))
if data == "":
break
else:
try:
publish_as_msg(data)
except Exception:
# Can mean the line was from subprocess,
# which can't be captured by stream faking.
# NB! If subprocess printed it without linebreak,
# then the suffix can be thonny message
parts = data.rsplit(common.MESSAGE_MARKER, maxsplit=1)
# print first part as it is
message_queue.append(
BackendEvent("ProgramOutput", data=parts[0], stream_name="stdout")
)
if len(parts) == 2:
second_part = common.MESSAGE_MARKER + parts[1]
try:
publish_as_msg(second_part)
except Exception:
# just print ...
message_queue.append(
BackendEvent(
"ProgramOutput", data=second_part, stream_name="stdout"
)
)
def _listen_stderr(self, stderr):
# stderr is used only for debugger debugging
while self.process_is_alive():
data = stderr.readline()
if data == "":
break
else:
self._response_queue.append(
BackendEvent("ProgramOutput", stream_name="stderr", data=data)
)
def _store_state_info(self, msg):
if "cwd" in msg:
self._cwd = msg["cwd"]
self._publish_cwd(msg["cwd"])
if msg.get("welcome_text"):
self._welcome_text = msg["welcome_text"]
if "in_venv" in msg:
self._in_venv = msg["in_venv"]
if "sys_path" in msg:
self._sys_path = msg["sys_path"]
if "usersitepackages" in msg:
self._usersitepackages = msg["usersitepackages"]
if "prefix" in msg:
self._sys_prefix = msg["prefix"]
if "exe_dirs" in msg:
self._exe_dirs = msg["exe_dirs"]
if msg.get("executable"):
self._reported_executable = msg["executable"]
def _publish_cwd(self, cwd):
if self.uses_local_filesystem():
get_workbench().set_local_cwd(cwd)
def get_supported_features(self):
return {"run"}
def get_site_packages(self):
# NB! site.sitepackages may not be present in virtualenv
for d in self._sys_path:
if ("site-packages" in d or "dist-packages" in d) and path_startswith(
d, self._sys_prefix
):
return d
return None
def get_user_site_packages(self):
return self._usersitepackages
def get_cwd(self):
return self._cwd
def get_exe_dirs(self):
return self._exe_dirs
def fetch_next_message(self):
if not self._response_queue or len(self._response_queue) == 0:
if self.is_terminated():
raise BackendTerminatedError(self._proc.returncode if self._proc else None)
else:
return None
msg = self._response_queue.popleft()
self._store_state_info(msg)
if not hasattr(msg, "event_type"):
print("gotww", msg)
if msg.event_type == "ProgramOutput":
# combine available small output messages to one single message,
# in order to put less pressure on UI code
wait_time = 0.01
total_wait_time = 0
while True:
if len(self._response_queue) == 0:
if _ends_with_incomplete_ansi_code(msg["data"]) and total_wait_time < 0.1:
# Allow reader to send the remaining part
sleep(wait_time)
total_wait_time += wait_time
continue
else:
return msg
else:
next_msg = self._response_queue.popleft()
if (
next_msg.event_type == "ProgramOutput"
and next_msg["stream_name"] == msg["stream_name"]
and (
len(msg["data"]) + len(next_msg["data"]) <= OUTPUT_MERGE_THRESHOLD
and ("\n" not in msg["data"] or not io_animation_required)
or _ends_with_incomplete_ansi_code(msg["data"])
)
):
msg["data"] += next_msg["data"]
else:
# not to be sent in the same block, put it back
self._response_queue.appendleft(next_msg)
return msg
else:
return msg
def _ends_with_incomplete_ansi_code(data):
pos = data.rfind("\033")
if pos == -1:
return False
# note ANSI_CODE_TERMINATOR also includes [
params_and_terminator = data[pos + 2 :]
return not ANSI_CODE_TERMINATOR.search(params_and_terminator)
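# Behaviour sketch for the helper above (comments only, nothing is executed):
# an escape introducer without its terminator counts as incomplete.
#
#     _ends_with_incomplete_ansi_code("plain text")   # False -- no ESC at all
#     _ends_with_incomplete_ansi_code("abc\x1b[31")    # True  -- terminator missing
#     _ends_with_incomplete_ansi_code("abc\x1b[31m")   # False -- terminated by "m"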
def is_bundled_python(executable):
return os.path.exists(os.path.join(os.path.dirname(executable), "thonny_python.ini"))
def create_backend_python_process(
args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
"""Used for running helper commands (eg. pip) on CPython backend.
Assumes current backend is CPython."""
# TODO: if backend == frontend, then delegate to create_frontend_python_process
python_exe = get_runner().get_local_executable()
env = get_environment_for_python_subprocess(python_exe)
env["PYTHONIOENCODING"] = "utf-8"
env["PYTHONUNBUFFERED"] = "1"
# TODO: remove frontend python from path and add backend python to it
return _create_python_process(python_exe, args, stdin, stdout, stderr, env=env)
def create_frontend_python_process(
args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
"""Used for running helper commands (eg. for installing plug-ins on by the plug-ins)"""
if _console_allocated:
python_exe = get_interpreter_for_subprocess().replace("pythonw.exe", "python.exe")
else:
python_exe = get_interpreter_for_subprocess().replace("python.exe", "pythonw.exe")
env = get_environment_for_python_subprocess(python_exe)
env["PYTHONIOENCODING"] = "utf-8"
env["PYTHONUNBUFFERED"] = "1"
return _create_python_process(python_exe, args, stdin, stdout, stderr)
def _create_python_process(
python_exe,
args,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=False,
env=None,
universal_newlines=True,
):
cmd = [python_exe] + args
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
creationflags = 0
proc = subprocess.Popen(
cmd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell,
env=env,
universal_newlines=universal_newlines,
startupinfo=startupinfo,
creationflags=creationflags,
)
proc.cmd = cmd
return proc
class BackendTerminatedError(Exception):
def __init__(self, returncode=None):
Exception.__init__(self)
self.returncode = returncode
def is_venv_interpreter_of_current_interpreter(executable):
for location in [".", ".."]:
cfg_path = os.path.join(location, "pyvenv.cfg")
if os.path.isfile(cfg_path):
with open(cfg_path) as fp:
content = fp.read()
for line in content.splitlines():
if line.replace(" ", "").startswith("home="):
_, home = line.split("=", maxsplit=1)
home = home.strip()
if os.path.isdir(home) and os.path.samefile(home, sys.prefix):
return True
return False
def get_environment_for_python_subprocess(target_executable):
overrides = get_environment_overrides_for_python_subprocess(target_executable)
return get_environment_with_overrides(overrides)
def get_environment_with_overrides(overrides):
env = os.environ.copy()
for key in overrides:
if overrides[key] is None and key in env:
del env[key]
else:
assert isinstance(overrides[key], str)
if key.upper() == "PATH":
update_system_path(env, overrides[key])
else:
env[key] = overrides[key]
return env
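# Small illustration of the override semantics above (a sketch, not called
# anywhere): a value of None removes the variable, "PATH" is merged via
# update_system_path(), and every other value is copied verbatim.
#
#     env = get_environment_with_overrides({"THONNY_DEBUG": "1", "PYTHONPATH": None})
#     # env now contains THONNY_DEBUG == "1" and no PYTHONPATH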
def get_environment_overrides_for_python_subprocess(target_executable):
    """Take care not to confuse a different interpreter
    with variables meant for the bundled interpreter"""
# At the moment I'm tweaking the environment only if current
# exe is bundled for Thonny.
# In remaining cases it is user's responsibility to avoid
# calling Thonny with environment which may be confusing for
# different Pythons called in a subprocess.
this_executable = sys.executable.replace("pythonw.exe", "python.exe")
target_executable = target_executable.replace("pythonw.exe", "python.exe")
interpreter_specific_keys = [
"TCL_LIBRARY",
"TK_LIBRARY",
"LD_LIBRARY_PATH",
"DYLD_LIBRARY_PATH",
"SSL_CERT_DIR",
"SSL_CERT_FILE",
"PYTHONHOME",
"PYTHONPATH",
"PYTHONNOUSERSITE",
"PYTHONUSERBASE",
]
result = {}
if os.path.samefile(
target_executable, this_executable
) or is_venv_interpreter_of_current_interpreter(target_executable):
# bring out some important variables so that they can
# be explicitly set in macOS Terminal
# (If they are set then it's most likely because current exe is in Thonny bundle)
for key in interpreter_specific_keys:
if key in os.environ:
result[key] = os.environ[key]
# never pass some variables to different interpreter
# (even if it's venv or symlink to current one)
if not is_same_path(target_executable, this_executable):
for key in ["PYTHONPATH", "PYTHONHOME", "PYTHONNOUSERSITE", "PYTHONUSERBASE"]:
if key in os.environ:
result[key] = None
else:
# interpreters are not related
# interpreter specific keys most likely would confuse other interpreter
for key in interpreter_specific_keys:
if key in os.environ:
result[key] = None
# some keys should be never passed
for key in [
"PYTHONSTARTUP",
"PYTHONBREAKPOINT",
"PYTHONDEBUG",
"PYTHONNOUSERSITE",
"PYTHONASYNCIODEBUG",
]:
if key in os.environ:
result[key] = None
# venv may not find (correct) Tk without assistance (eg. in Ubuntu)
if is_venv_interpreter_of_current_interpreter(target_executable):
try:
if "TCL_LIBRARY" not in os.environ or "TK_LIBRARY" not in os.environ:
result["TCL_LIBRARY"] = get_workbench().tk.exprstring("$tcl_library")
result["TK_LIBRARY"] = get_workbench().tk.exprstring("$tk_library")
except Exception:
logging.exception("Can't compute Tcl/Tk library location")
return result
def construct_cd_command(path) -> str:
return construct_cmd_line(["%cd", path])
_command_id_counter = 0
def generate_command_id():
global _command_id_counter
_command_id_counter += 1
return "cmd_" + str(_command_id_counter)
class InlineCommandDialog(WorkDialog):
def __init__(
self,
master,
cmd: Union[InlineCommand, Callable],
title,
instructions=None,
output_prelude=None,
autostart=True,
):
self.response = None
self._title = title
self._instructions = instructions
self._output_prelude = output_prelude
self._cmd = cmd
self.returncode = None
get_shell().set_ignore_program_output(True)
get_workbench().bind("InlineResponse", self._on_response, True)
get_workbench().bind("InlineProgress", self._on_progress, True)
get_workbench().bind("ProgramOutput", self._on_output, True)
super().__init__(master, autostart=autostart)
def get_title(self):
return self._title
def get_instructions(self) -> Optional[str]:
return self._instructions or self._cmd.get("description", "Working...")
def _on_response(self, response):
if response.get("command_id") == getattr(self._cmd, "id"):
logger.debug("Dialog got response: %s", response)
self.response = response
self.returncode = response.get("returncode", None)
success = (
not self.returncode and not response.get("error") and not response.get("errors")
)
if success:
self.set_action_text("Done!")
else:
self.set_action_text("Error")
if response.get("error"):
self.append_text("Error %s\n" % response["error"], stream_name="stderr")
if response.get("errors"):
self.append_text("Errors %s\n" % response["errors"], stream_name="stderr")
if self.returncode:
self.append_text(
"Process returned with code %s\n" % self.returncode, stream_name="stderr"
)
self.report_done(success)
def _on_progress(self, msg):
if msg.get("command_id") != getattr(self._cmd, "id"):
return
if msg.get("value", None) is not None and msg.get("maximum", None) is not None:
self.report_progress(msg["value"], msg["maximum"])
if msg.get("description"):
self.set_action_text(msg["description"])
def _on_output(self, msg):
stream_name = msg.get("stream_name", "stdout")
self.append_text(msg["data"], stream_name)
self.set_action_text_smart(msg["data"])
def start_work(self):
self.send_command_to_backend()
def send_command_to_backend(self):
if not isinstance(self._cmd, CommandToBackend):
# it was a lazy definition
self._cmd = self._cmd()
logger.debug("Starting command in dialog: %s", self._cmd)
get_runner().send_command(self._cmd)
def cancel_work(self):
super(InlineCommandDialog, self).cancel_work()
get_runner()._cmd_interrupt()
def close(self):
get_workbench().unbind("InlineResponse", self._on_response)
get_workbench().unbind("InlineProgress", self._on_progress)
super(InlineCommandDialog, self).close()
get_shell().set_ignore_program_output(False)
def get_frontend_python():
# TODO: deprecated (name can be misleading)
warnings.warn("get_frontend_python is deprecated")
return get_interpreter_for_subprocess(sys.executable)
def get_interpreter_for_subprocess(candidate=None):
if candidate is None:
candidate = sys.executable
pythonw = candidate.replace("python.exe", "pythonw.exe")
if not _console_allocated and os.path.exists(pythonw):
return pythonw
else:
return candidate.replace("pythonw.exe", "python.exe")
|
the-stack_0_3172 | # Copyright (c) 2017 Intel Corporation. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from collections import defaultdict
from tastypie.exceptions import NotFound
from django.core.exceptions import ObjectDoesNotExist
import tastypie.http as http
from tastypie import fields
from tastypie.authorization import DjangoAuthorization
from tastypie.constants import ALL_WITH_RELATIONS
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from chroma_core.services import log_register
from chroma_api.utils import dehydrate_command
from chroma_api.utils import custom_response
from chroma_api.network_interface import NetworkInterfaceResource
from chroma_api.lnet_configuration import LNetConfigurationResource
from chroma_api.authentication import AnonymousAuthentication
from chroma_core.models import Command
from chroma_core.models import Nid
from chroma_api.validation_utils import ChromaValidation, validate
from chroma_api.chroma_model_resource import ChromaModelResource
log = log_register(__name__)
class NidValidation(ChromaValidation):
mandatory_message = "This field is mandatory"
def is_valid(self, bundle, request=None, **kwargs):
errors = defaultdict(list)
if request.method != 'POST':
return errors
for nids_data in bundle.data.get('objects', [bundle.data]):
if 'lnd_network' not in nids_data:
errors['lnd_network'] = ["Field lnd_network not present in data"]
if not errors:
self.validate_object(nids_data,
errors,
{"lnd_network": self.Expectation(True),
"network_interface": self.Expectation(True),
"lnd_type": self.Expectation(int(nids_data['lnd_network'] != -1)),
"resource_uri": self.Expectation(False),
"lnet_configuration": self.Expectation(False)})
if not errors:
self.validate_resources([self.URIInfo(nids_data.get('lnet_configuration', None), LNetConfigurationResource),
self.URIInfo(nids_data['network_interface'], NetworkInterfaceResource)],
errors, request)
if not errors:
# Check the lnd_type passed is valid for the network_interface
if ('lnd_type' in nids_data) and (nids_data['lnd_type'] not in NetworkInterfaceResource().get_via_uri(nids_data['network_interface'], request).lnd_types):
errors['lnd_type'].append("lnd_type %s not valid for interface %s" % (nids_data['lnd_type'], NetworkInterfaceResource().get_via_uri(nids_data['network_interface'], request)))
return errors
###
# Allows read and update of Nid
#
# Responds to
#
# Get
# https://localhost:8000/api/nid/1/
# https://localhost:8000/api/nid/
#
# Put
# https://localhost:8000/api/nid/1
#
# Post
# https://localhost:8000/api/nid/
#
# Delete
# https://localhost:8000/api/nid/1/
# https://localhost:8000/api/nid/
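#
# An illustrative POST body accepted by NidValidation above (the concrete
# values are placeholders; resource URIs follow the /api/<resource>/<id>/
# pattern shown above):
#
# {
#     "network_interface": "/api/network_interface/1/",
#     "lnd_network": 1,
#     "lnd_type": 2
# }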
class NidResource(ChromaModelResource):
"""
Nid information.
"""
network_interface = fields.ToOneField('chroma_api.network_interface.NetworkInterfaceResource', 'network_interface')
lnet_configuration = fields.ToOneField('chroma_api.lnet_configuration.LNetConfigurationResource', 'lnet_configuration')
class Meta:
queryset = Nid.objects.select_related('network_interface', 'lnet_configuration').all()
authorization = DjangoAuthorization()
authentication = AnonymousAuthentication()
validation = NidValidation()
resource_name = 'nid'
list_allowed_methods = ['get', 'post', 'delete']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
filtering = {'network_interface': ALL_WITH_RELATIONS,
'lnet_configuration': ALL_WITH_RELATIONS,
'id': ['exact']}
@validate
def obj_create(self, bundle, **kwargs):
request = bundle.request
if 'objects' in bundle.data:
nids_data = bundle.data['objects']
else:
nids_data = [bundle.data]
for nid_data in nids_data:
nid_data['network_interface'] = NetworkInterfaceResource().get_via_uri(nid_data['network_interface'], bundle.request).id
command_id = JobSchedulerClient.update_nids(nids_data)
try:
command = Command.objects.get(pk = command_id)
except ObjectDoesNotExist:
command = None
raise custom_response(self, request, http.HttpAccepted,
{
'command': dehydrate_command(command)
})
@validate
def obj_update(self, bundle, **kwargs):
self.obj_create(bundle, **kwargs)
def obj_delete_list(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_delete_list``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
obj_list = self.obj_get_list(bundle, **kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
self._nids_delete(obj_list)
def obj_delete(self, bundle, **kwargs):
"""
        An ORM-specific implementation of ``obj_delete``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
obj = self.obj_get(bundle, **kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
self._nids_delete([obj])
def _nids_delete(self, obj_list):
delete_list = []
for nid in obj_list:
delete_list.append({'network_interface': nid.network_interface_id, 'lnd_network': -1})
if (len(delete_list) > 0):
JobSchedulerClient.update_nids(delete_list)
|
the-stack_0_3175 | import win32gui
from errors import IdenticalWindowsError, WindowNotFound
from typing import Any
class WindowsHandler:
"""
Extract a handler for a specific active window.
Parameters
----------
screen_name : str
A ``string`` of the title to match from the list of enumerated windows.
"""
    def __init__(self, screen_name: str) -> None:
self._windows = self._enumerate_screens()
_hwdl = self._extract_window(self._windows, screen_name)
self._set_windows_foreground(_hwdl)
self.handler = _hwdl
    def _enumerate_callback(self, hwdl: Any, windows: list) -> None:
"""
Enumerate all running windows.
Create a list of tuples representing the window handle plus the text
corresponding to the window.
Parameters
----------
hwdl : Any
A handler pointing to a single active window.
windows : list
A ``list`` of ``tuples`` where each item represents the handler to
the window and the corresponding text for the window.
Returns
-------
        None
            The ``(handle, title)`` tuple for each enumerated window is
            appended to ``windows`` in place; nothing is returned.
"""
windows.append((hwdl, win32gui.GetWindowText(hwdl)))
def _enumerate_screens(self) -> list:
"""
Enumerate all active screens.
Get a list of all active screens running on the PC including the window
handler and the corresponding text.
Returns
-------
list
Returns a ``list`` of ``tuples`` where each item represents the
handler to a window and the corresponding text for the window.
"""
windows = []
win32gui.GetDesktopWindow()
win32gui.EnumWindows(self._enumerate_callback, windows)
return windows
def _extract_window(self, windows: list, screen_name: str) -> Any:
"""
Retrieve the handle for a specific window.
Iterate through a list of enumerated active windows on the system and
attempt to find a match for a specific window with a given title. If
multiple windows exist with the same name, throw an error that the specific
window can't be identified. If no matching windows can be found, throw an
error that it can't be found.
Parameters
----------
windows : list
A ``list`` of ``tuples`` where each item represents the handler to a
window and the corresponding text for the window.
screen_name : str
A ``string`` of the title to match from the list of enumerated windows.
Returns
-------
Any
Returns a handler to the requested window if found.
Raises
------
WindowNotFound
Raises a ``WindowNotFound`` error if no windows match the requested
title.
IdenticalWindowsError
Raises an ``IdenticalWindowsError`` when there are multiple running
windows with the same name and a unique instance cannot be found.
"""
window = [(hwdl, title) for hwdl, title in windows
if screen_name.lower() in title.lower()]
if not len(window):
raise WindowNotFound(f'Screen "{screen_name}" not found. Ensure a '
f'window with name "{screen_name}" is '
'running.')
elif len(window) > 1:
# Multiple windows have the screen name included in at least part
# of the title. Check for an exact copy of the name excluding case.
window = [(hwdl, title) for hwdl, title in window
if screen_name.lower() == title.lower()]
if len(window) != 1:
raise IdenticalWindowsError('Multiple windows contain the '
f'name {screen_name}. Unable to '
'identify unique window.')
# The first and only element is the requested window at this point.
hwdl, _ = window[0]
return hwdl
def _set_windows_foreground(self, hwdl: Any) -> None:
"""
Set the requested window to the foreground.
In order to capture screenshots, the window needs to be placed in the
foreground as the screen grabber captures the specified dimensions for
the top-most windows.
        Parameters
        ----------
        hwdl : Any
A handler to the requested window.
"""
win32gui.SetForegroundWindow(hwdl)
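if __name__ == "__main__":
    # Minimal usage sketch: the window title below is a placeholder -- use any
    # title that is currently visible on the desktop.
    handler = WindowsHandler("Untitled - Notepad")
    print("Window handle:", handler.handler)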
|
the-stack_0_3176 | # -*- coding: utf-8 -*-
"""
CSV related help functions
"""
import csv
import codecs
from setup import eol, encoding, delimiter
def dict2csv(csv_path, a_dict, sort=None):
"""
Writes a dictionary to a csv file, optinally sorted by key (sort=0) or
value (sort=1)
"""
with codecs.open(csv_path, 'w', encoding) as csv_file:
        dictitems = list(a_dict.items())
        if sort in [0, 1]:
            dictitems.sort(key=lambda x: x[sort])
for (k, v) in dictitems:
csv_file.write(u'%s%s%s%s' % (k, delimiter, v, eol))
def csv2dict(csv_path, a_dict, encoding=encoding):
"""Read a dictionary from a csv file"""
with open(csv_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delimiter)
for row in csv_reader:
if len(row) < 2:
raise IOError(_("Failed to load CSV file '%s'") % csv_file.name)
else:
a_dict[row[0].decode(encoding)] = row[1].decode(encoding)
return a_dict
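if __name__ == '__main__':
    # Minimal round-trip sketch; 'example.csv' is an assumed scratch path and
    # the key/value pairs are placeholders.
    dict2csv('example.csv', {u'alpha': u'1', u'beta': u'2'}, sort=0)
    print(csv2dict('example.csv', {}))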
|
the-stack_0_3177 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Why our own memcache client?
By Michael Barton
python-memcached doesn't use consistent hashing, so adding or
removing a memcache server from the pool invalidates a huge
percentage of cached items.
If you keep a pool of python-memcached client objects, each client
object has its own connection to every memcached server, only one of
which is ever in use. So you wind up with n * m open sockets and
almost all of them idle. This client effectively has a pool for each
server, so the number of backend connections is hopefully greatly
reduced.
python-memcache uses pickle to store things, and there was already a
huge stink about Swift using pickles in memcache
(http://osvdb.org/show/osvdb/86581). That seemed sort of unfair,
since nova and keystone and everyone else use pickles for memcache
too, but it's hidden behind a "standard" library. But changing would
be a security regression at this point.
Also, pylibmc wouldn't work for us because it needs to use python
sockets in order to play nice with eventlet.
Lucid comes with memcached: v1.4.2. Protocol documentation for that
version is at:
http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
"""
import six
import six.moves.cPickle as pickle
import json
import logging
import time
from bisect import bisect
from eventlet.green import socket
from eventlet.pools import Pool
from eventlet import Timeout
from six.moves import range
from swift.common import utils
from swift.common.utils import md5, human_readable
DEFAULT_MEMCACHED_PORT = 11211
CONN_TIMEOUT = 0.3
POOL_TIMEOUT = 1.0 # WAG
IO_TIMEOUT = 2.0
PICKLE_FLAG = 1
JSON_FLAG = 2
NODE_WEIGHT = 50
PICKLE_PROTOCOL = 2
TRY_COUNT = 3
# if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server
# will be considered failed for ERROR_LIMIT_DURATION seconds.
ERROR_LIMIT_COUNT = 10
ERROR_LIMIT_TIME = ERROR_LIMIT_DURATION = 60
DEFAULT_ITEM_SIZE_WARNING_THRESHOLD = -1
def md5hash(key):
if not isinstance(key, bytes):
if six.PY2:
key = key.encode('utf-8')
else:
key = key.encode('utf-8', errors='surrogateescape')
return md5(key, usedforsecurity=False).hexdigest().encode('ascii')
def sanitize_timeout(timeout):
"""
Sanitize a timeout value to use an absolute expiration time if the delta
is greater than 30 days (in seconds). Note that the memcached server
translates negative values to mean a delta of 30 days in seconds (and 1
additional second), client beware.
"""
if timeout > (30 * 24 * 60 * 60):
timeout += time.time()
return int(timeout)
def set_msg(key, flags, timeout, value):
if not isinstance(key, bytes):
raise TypeError('key must be bytes')
if not isinstance(value, bytes):
raise TypeError('value must be bytes')
return b' '.join([
b'set',
key,
str(flags).encode('ascii'),
str(timeout).encode('ascii'),
str(len(value)).encode('ascii'),
]) + (b'\r\n' + value + b'\r\n')
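# Wire-format sketch for set_msg() above (comments only): flags=2 marks a
# JSON-serialized value and timeout=0 means "no expiry".
#
#     >>> set_msg(b'k', 2, 0, b'"v"')
#     b'set k 2 0 3\r\n"v"\r\n'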
class MemcacheConnectionError(Exception):
pass
class MemcachePoolTimeout(Timeout):
pass
class MemcacheConnPool(Pool):
"""
Connection pool for Memcache Connections
The *server* parameter can be a hostname, an IPv4 address, or an IPv6
address with an optional port. See
:func:`swift.common.utils.parse_socket_string` for details.
"""
def __init__(self, server, size, connect_timeout, tls_context=None):
Pool.__init__(self, max_size=size)
self.host, self.port = utils.parse_socket_string(
server, DEFAULT_MEMCACHED_PORT)
self._connect_timeout = connect_timeout
self._tls_context = tls_context
def create(self):
addrs = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
family, socktype, proto, canonname, sockaddr = addrs[0]
sock = socket.socket(family, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
with Timeout(self._connect_timeout):
sock.connect(sockaddr)
if self._tls_context:
sock = self._tls_context.wrap_socket(sock,
server_hostname=self.host)
except (Exception, Timeout):
sock.close()
raise
return (sock.makefile('rwb'), sock)
def get(self):
fp, sock = super(MemcacheConnPool, self).get()
try:
if fp is None:
# An error happened previously, so we need a new connection
fp, sock = self.create()
return fp, sock
except MemcachePoolTimeout:
# This is the only place that knows an item was successfully taken
# from the pool, so it has to be responsible for repopulating it.
# Any other errors should get handled in _get_conns(); see the
# comment about timeouts during create() there.
self.put((None, None))
raise
class MemcacheRing(object):
"""
Simple, consistent-hashed memcache client.
"""
def __init__(
self, servers, connect_timeout=CONN_TIMEOUT,
io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT,
tries=TRY_COUNT, allow_pickle=False, allow_unpickle=False,
max_conns=2, tls_context=None, logger=None,
error_limit_count=ERROR_LIMIT_COUNT,
error_limit_time=ERROR_LIMIT_TIME,
error_limit_duration=ERROR_LIMIT_DURATION,
item_size_warning_threshold=DEFAULT_ITEM_SIZE_WARNING_THRESHOLD):
self._ring = {}
self._errors = dict(((serv, []) for serv in servers))
self._error_limited = dict(((serv, 0) for serv in servers))
self._error_limit_count = error_limit_count
self._error_limit_time = error_limit_time
self._error_limit_duration = error_limit_duration
for server in sorted(servers):
for i in range(NODE_WEIGHT):
self._ring[md5hash('%s-%s' % (server, i))] = server
self._tries = tries if tries <= len(servers) else len(servers)
self._sorted = sorted(self._ring)
self._client_cache = dict((
(server, MemcacheConnPool(server, max_conns, connect_timeout,
tls_context=tls_context))
for server in servers))
self._connect_timeout = connect_timeout
self._io_timeout = io_timeout
self._pool_timeout = pool_timeout
self._allow_pickle = allow_pickle
self._allow_unpickle = allow_unpickle or allow_pickle
if logger is None:
self.logger = logging.getLogger()
else:
self.logger = logger
self.item_size_warning_threshold = item_size_warning_threshold
def _exception_occurred(self, server, e, action='talking',
sock=None, fp=None, got_connection=True):
if isinstance(e, Timeout):
self.logger.error("Timeout %(action)s to memcached: %(server)s",
{'action': action, 'server': server})
elif isinstance(e, (socket.error, MemcacheConnectionError)):
self.logger.error(
"Error %(action)s to memcached: %(server)s: %(err)s",
{'action': action, 'server': server, 'err': e})
else:
self.logger.exception("Error %(action)s to memcached: %(server)s",
{'action': action, 'server': server})
try:
if fp:
fp.close()
del fp
except Exception:
pass
try:
if sock:
sock.close()
del sock
except Exception:
pass
if got_connection:
# We need to return something to the pool
# A new connection will be created the next time it is retrieved
self._return_conn(server, None, None)
if self._error_limit_time <= 0 or self._error_limit_duration <= 0:
return
now = time.time()
self._errors[server].append(now)
if len(self._errors[server]) > self._error_limit_count:
self._errors[server] = [err for err in self._errors[server]
if err > now - self._error_limit_time]
if len(self._errors[server]) > self._error_limit_count:
self._error_limited[server] = now + self._error_limit_duration
self.logger.error('Error limiting server %s', server)
def _get_conns(self, key):
"""
Retrieves a server conn from the pool, or connects a new one.
Chooses the server based on a consistent hash of "key".
:return: generator to serve memcached connection
"""
pos = bisect(self._sorted, key)
served = []
while len(served) < self._tries:
pos = (pos + 1) % len(self._sorted)
server = self._ring[self._sorted[pos]]
if server in served:
continue
served.append(server)
if self._error_limited[server] > time.time():
continue
sock = None
try:
with MemcachePoolTimeout(self._pool_timeout):
fp, sock = self._client_cache[server].get()
yield server, fp, sock
except MemcachePoolTimeout as e:
self._exception_occurred(
server, e, action='getting a connection',
got_connection=False)
except (Exception, Timeout) as e:
# Typically a Timeout exception caught here is the one raised
# by the create() method of this server's MemcacheConnPool
# object.
self._exception_occurred(
server, e, action='connecting', sock=sock)
def _return_conn(self, server, fp, sock):
"""Returns a server connection to the pool."""
self._client_cache[server].put((fp, sock))
def set(self, key, value, serialize=True, time=0,
min_compress_len=0):
"""
Set a key/value pair in memcache
:param key: key
:param value: value
:param serialize: if True, value is serialized with JSON before sending
to memcache, or with pickle if configured to use
pickle instead of JSON (to avoid cache poisoning)
:param time: the time to live
:param min_compress_len: minimum compress length, this parameter was
added to keep the signature compatible with
python-memcached interface. This
implementation ignores it.
"""
key = md5hash(key)
timeout = sanitize_timeout(time)
flags = 0
if serialize and self._allow_pickle:
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
elif serialize:
if isinstance(value, bytes):
value = value.decode('utf8')
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
elif not isinstance(value, bytes):
value = str(value).encode('utf-8')
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall(set_msg(key, flags, timeout, value))
# Wait for the set to complete
msg = fp.readline().strip()
if msg != b'STORED':
if not six.PY2:
msg = msg.decode('ascii')
self.logger.error(
"Error setting value in memcached: "
"%(server)s: %(msg)s",
{'server': server, 'msg': msg})
if 0 <= self.item_size_warning_threshold <= len(value):
self.logger.warning(
"Item size larger than warning threshold: "
"%d (%s) >= %d (%s)", len(value),
human_readable(len(value)),
self.item_size_warning_threshold,
human_readable(self.item_size_warning_threshold))
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def get(self, key):
"""
Gets the object specified by key. It will also unserialize the object
before returning if it is serialized in memcache with JSON, or if it
is pickled and unpickling is allowed.
:param key: key
:returns: value of the key in memcache
"""
key = md5hash(key)
value = None
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall(b'get ' + key + b'\r\n')
line = fp.readline().strip().split()
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'END':
break
if line[0].upper() == b'VALUE' and line[1] == key:
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
if self._allow_unpickle:
value = pickle.loads(value)
else:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
fp.readline()
line = fp.readline().strip().split()
self._return_conn(server, fp, sock)
return value
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def incr(self, key, delta=1, time=0):
"""
Increments a key which has a numeric value by delta.
If the key can't be found, it's added as delta or 0 if delta < 0.
If passed a negative number, will use memcached's decr. Returns
the int stored in memcached
Note: The data memcached stores as the result of incr/decr is
an unsigned int. decr's that result in a number below 0 are
stored as 0.
:param key: key
:param delta: amount to add to the value of key (or set as the value
if the key is not found) will be cast to an int
:param time: the time to live
:returns: result of incrementing
:raises MemcacheConnectionError:
"""
key = md5hash(key)
command = b'incr'
if delta < 0:
command = b'decr'
delta = str(abs(int(delta))).encode('ascii')
timeout = sanitize_timeout(time)
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
sock.sendall(b' '.join([
command, key, delta]) + b'\r\n')
line = fp.readline().strip().split()
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'NOT_FOUND':
add_val = delta
if command == b'decr':
add_val = b'0'
sock.sendall(b' '.join([
b'add', key, b'0', str(timeout).encode('ascii'),
str(len(add_val)).encode('ascii')
]) + b'\r\n' + add_val + b'\r\n')
line = fp.readline().strip().split()
if line[0].upper() == b'NOT_STORED':
sock.sendall(b' '.join([
command, key, delta]) + b'\r\n')
line = fp.readline().strip().split()
ret = int(line[0].strip())
else:
ret = int(add_val)
else:
ret = int(line[0].strip())
self._return_conn(server, fp, sock)
return ret
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
raise MemcacheConnectionError("No Memcached connections succeeded.")
def decr(self, key, delta=1, time=0):
"""
Decrements a key which has a numeric value by delta. Calls incr with
-delta.
:param key: key
:param delta: amount to subtract to the value of key (or set the
value to 0 if the key is not found) will be cast to
an int
:param time: the time to live
:returns: result of decrementing
:raises MemcacheConnectionError:
"""
return self.incr(key, delta=-delta, time=time)
def delete(self, key, server_key=None):
"""
Deletes a key/value pair from memcache.
:param key: key to be deleted
:param server_key: key to use in determining which server in the ring
is used
"""
key = md5hash(key)
server_key = md5hash(server_key) if server_key else key
for (server, fp, sock) in self._get_conns(server_key):
try:
with Timeout(self._io_timeout):
sock.sendall(b'delete ' + key + b'\r\n')
# Wait for the delete to complete
fp.readline()
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def set_multi(self, mapping, server_key, serialize=True, time=0,
min_compress_len=0):
"""
Sets multiple key/value pairs in memcache.
:param mapping: dictionary of keys and values to be set in memcache
:param server_key: key to use in determining which server in the ring
is used
:param serialize: if True, value is serialized with JSON before sending
to memcache, or with pickle if configured to use
pickle instead of JSON (to avoid cache poisoning)
:param time: the time to live
        :param min_compress_len: minimum compress length, this parameter was
                                 added to keep the signature compatible with
                                 python-memcached interface. This
                                 implementation ignores it.
"""
server_key = md5hash(server_key)
timeout = sanitize_timeout(time)
msg = []
for key, value in mapping.items():
key = md5hash(key)
flags = 0
if serialize and self._allow_pickle:
value = pickle.dumps(value, PICKLE_PROTOCOL)
flags |= PICKLE_FLAG
elif serialize:
if isinstance(value, bytes):
value = value.decode('utf8')
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
msg.append(set_msg(key, flags, timeout, value))
for (server, fp, sock) in self._get_conns(server_key):
try:
with Timeout(self._io_timeout):
sock.sendall(b''.join(msg))
# Wait for the set to complete
for line in range(len(mapping)):
fp.readline()
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def get_multi(self, keys, server_key):
"""
Gets multiple values from memcache for the given keys.
:param keys: keys for values to be retrieved from memcache
:param server_key: key to use in determining which server in the ring
is used
:returns: list of values
"""
server_key = md5hash(server_key)
keys = [md5hash(key) for key in keys]
for (server, fp, sock) in self._get_conns(server_key):
try:
with Timeout(self._io_timeout):
sock.sendall(b'get ' + b' '.join(keys) + b'\r\n')
line = fp.readline().strip().split()
responses = {}
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'END':
break
if line[0].upper() == b'VALUE':
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
if self._allow_unpickle:
value = pickle.loads(value)
else:
value = None
elif int(line[2]) & JSON_FLAG:
value = json.loads(value)
responses[line[1]] = value
fp.readline()
line = fp.readline().strip().split()
values = []
for key in keys:
if key in responses:
values.append(responses[key])
else:
values.append(None)
self._return_conn(server, fp, sock)
return values
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
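if __name__ == '__main__':
    # Minimal usage sketch, assuming a memcached server is listening on
    # 127.0.0.1:11211; the address and keys below are placeholders.
    cache = MemcacheRing(['127.0.0.1:11211'])
    cache.set('answer', {'value': 42}, time=60)
    print(cache.get('answer'))
    print(cache.incr('hits'))
    cache.delete('answer')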
|
the-stack_0_3178 | """
Module to provide for a simple summarization of relevant output files from a build.
"""
import argparse
import os
import runpy
import sys
from shutil import copyfile
from project_summarizer.cobertura_plugin import CoberturaPlugin
from project_summarizer.junit_plugin import JUnitPlugin
from project_summarizer.project_summarizer_plugin import ProjectSummarizerPlugin
class ProjectSummarizer:
"""
Class to provide for a simple summarization of relevant output files from a build.
"""
def __init__(self):
self.__version_number = ProjectSummarizer.__get_semantic_version()
self.test_summary_publish_path = ProjectSummarizerPlugin.SUMMARY_PUBLISH_PATH
self.debug = False
self.__available_plugins = None
self.__plugin_argument_names = {}
self.__plugin_variable_names = {}
@staticmethod
def __get_semantic_version():
file_path = __file__
assert os.path.isabs(file_path)
file_path = file_path.replace(os.sep, "/")
last_index = file_path.rindex("/")
file_path = file_path[: last_index + 1] + "version.py"
version_meta = runpy.run_path(file_path)
return version_meta["__version__"]
def __parse_arguments(self):
"""
Handle any arguments for the program.
"""
parser = argparse.ArgumentParser(
description="Summarize Python files.", allow_abbrev=False
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s " + self.__version_number,
)
for next_plugin_instance in self.__available_plugins:
(
plugin_argument_name,
plugin_variable_name,
) = next_plugin_instance.add_command_line_arguments(parser)
self.__plugin_argument_names[plugin_argument_name] = next_plugin_instance
self.__plugin_variable_names[plugin_argument_name] = plugin_variable_name
parser.add_argument(
"--only-changes",
dest="only_changes",
action="store_true",
default=False,
help="only_changes",
)
parser.add_argument(
"--publish",
dest="publish_summaries",
action="store_true",
default=False,
help="publish",
)
args = parser.parse_args()
if not args.publish_summaries and not args.test_report_file:
are_plugin_arguments_present = False
arguments_as_dictionary = vars(args)
for next_plugin_argument in self.__plugin_argument_names:
plugin_variable_name = self.__plugin_variable_names[
next_plugin_argument
]
assert plugin_variable_name in arguments_as_dictionary
argument_value = arguments_as_dictionary[plugin_variable_name]
are_plugin_arguments_present = bool(argument_value.strip())
if are_plugin_arguments_present:
break
if not are_plugin_arguments_present:
parser.print_help()
sys.exit(2)
return args
def __publish_file(self, file_to_publish):
"""
Publish the specified file to the set publish directory.
"""
if not os.path.exists(self.test_summary_publish_path):
print(
f"Publish directory '{self.test_summary_publish_path}' does not exist. Creating."
)
os.makedirs(self.test_summary_publish_path)
elif not os.path.isdir(self.test_summary_publish_path):
print(
f"Publish directory '{self.test_summary_publish_path}' already exists, but as a file."
)
sys.exit(1)
if os.path.exists(file_to_publish):
try:
copyfile(
file_to_publish,
ProjectSummarizerPlugin.compute_published_path_to_file(
file_to_publish
),
)
except IOError as ex:
print(f"Publishing file '{file_to_publish}' failed ({ex}).")
sys.exit(1)
def publish_summaries(self):
"""
Respond to a request to publish any existing summaries.
"""
valid_paths = []
for plugin_instance in self.__available_plugins:
plugin_output_path = plugin_instance.get_output_path()
if os.path.exists(plugin_output_path) and not os.path.isfile(
plugin_output_path
):
print(f"Summary path '{plugin_output_path}' is not a file.")
sys.exit(1)
valid_paths.append(plugin_output_path)
for plugin_output_path in valid_paths:
self.__publish_file(plugin_output_path)
def main(self):
"""
Main entrance point.
"""
self.__available_plugins = [CoberturaPlugin(), JUnitPlugin()]
args = self.__parse_arguments()
if args.publish_summaries:
self.publish_summaries()
sys.exit(0)
arguments_as_dictionary = vars(args)
for next_command_line_argument in sys.argv:
if next_command_line_argument in self.__plugin_argument_names:
plugin_instance = self.__plugin_argument_names[
next_command_line_argument
]
plugin_variable_name = self.__plugin_variable_names[
next_command_line_argument
]
plugin_instance.generate_report(
args.only_changes, arguments_as_dictionary[plugin_variable_name]
)
if __name__ == "__main__":
ProjectSummarizer().main()
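# Example invocations (hedged sketch; the per-report flags are registered by the
# plugins' add_command_line_arguments, so their exact names are not shown here,
# and the module entry point below is assumed):
#   python -m project_summarizer --publish
#   python -m project_summarizer --only-changes <plugin report flag> <report path>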
|
the-stack_0_3180 | import numpy as np
from utils import *
from images import *
np.random.seed(2)
class Imager(object):
def __init__(self, input_size, labels):
if type(input_size) is int:
self.input_size = (input_size, input_size)
else:
self.input_size = input_size
self.labels = labels
self.palette = np.random.randint(0, 256, (len(self.labels), 3)).tolist()
def imset_from_path(self, path):
ims = np.array(imread_from_path(path))
if len(ims.shape) == 3:
ims = [ims]
self.ims = ims
def imset(self, ims):
ims = np.array(ims)
if len(ims.shape) == 3:
ims = [ims]
self.ims = ims
def preprocess(self):
return improcess(self.ims, self.input_size)
def ncs_preprocess(self):
ims = improcess(self.ims, self.input_size, to_rgb=False, normalise=False) # ims are normalised by the ncs.
ims = np.transpose(np.array(ims), [0, 3, 1, 2])
return np.expand_dims(ims, 1)
def visualise_preds(self, pred_list):
self.ims = visualise(self.ims, pred_list, self.input_size, self.labels, self.palette)
return self.ims
def ncs_visualise_preds(self, objects_list):
imlist = list()
for im, objects in zip(self.ims, objects_list):
if not objects:
imlist.append(im)
continue
for obj in objects:
add_overlays_v2(obj, im, self.labels, self.palette)
imlist.append(im)
self.ims = imlist
return self.ims
def imsave(self, ims):
imwrite(ims)
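if __name__ == '__main__':
    # Minimal usage sketch (hedged): the image path and label names are
    # illustrative, and the helpers come from the star-imports above.
    imager = Imager(input_size=300, labels=['person', 'car'])
    imager.imset_from_path('example.jpg')   # hypothetical image path
    batch = imager.preprocess()             # resized batch ready for a model
    # After inference you would typically do:
    #   drawn = imager.visualise_preds(pred_list)
    #   imager.imsave(drawn)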
|
the-stack_0_3181 | """
License: Apache-2.0
Author: Huadong Liao
E-mail: [email protected]
"""
import numpy as np
import tensorflow as tf
from core.utils import *
epsilon = 1e-9
class CapsLayer(object):
''' Capsule layer.
Args:
input: A 4-D tensor.
num_outputs: the number of capsule in this layer.
vec_len: integer, the length of the output vector of a capsule.
layer_type: string, one of 'FC' or "CONV", the type of this layer,
fully connected or convolution, for the future expansion capability
with_routing: boolean, this capsule is routing with the
lower-level layer capsule.
Returns:
A 4-D tensor.
'''
def __init__(self, num_outputs, vec_len, batch_size, stddev, iter_routing, with_routing=True, layer_type='FC'):
self.num_outputs = num_outputs
self.vec_len = vec_len
self.with_routing = with_routing
self.layer_type = layer_type
self.batch_size = batch_size
self.stddev = stddev
self.iter_routing = iter_routing
def __call__(self, input, kernel_size=None, stride=None):
'''
The parameters 'kernel_size' and 'stride' will be used while 'layer_type' equal 'CONV'
'''
if self.layer_type == 'CONV':
self.kernel_size = kernel_size
self.stride = stride
if not self.with_routing:
# the PrimaryCaps layer, a convolutional layer
# input: [batch_size, 20, 20, 256]
# assert input.get_shape() == [cfg.batch_size, 20, 20, 256]
# NOTE: I can't find out any words from the paper whether the
# PrimaryCap convolution does a ReLU activation or not before
                # squashing function, but experiments show that using ReLU gives a
                # higher test accuracy. So, which one to use is your choice.
capsules = tf.contrib.layers.conv1d(input, self.num_outputs * self.vec_len,
self.kernel_size, self.stride, padding="VALID",
activation_fn=tf.nn.relu)
# capsules = tf.contrib.layers.conv2d(input, self.num_outputs * self.vec_len,
# self.kernel_size, self.stride,padding="VALID",
# activation_fn=None)
capsules = tf.reshape(capsules, (-1,capsules.shape[1]*capsules.shape[2]//self.vec_len, self.vec_len, 1))
# return tensor with shape [batch_size, 1152, 8, 1]
capsules = squash(capsules)
return(capsules)
if self.layer_type == 'FC':
if self.with_routing:
# the DigitCaps layer, a fully connected layer
# Reshape the input into [batch_size, 1152, 1, 8, 1]
self.input = tf.reshape(input, shape=(-1, input.shape[1], 1, input.shape[2], 1))
with tf.variable_scope('routing'):
# b_IJ: [batch_size, num_caps_l, num_caps_l_plus_1, 1, 1],
# about the reason of using 'batch_size', see issue #21
b_IJ = tf.constant(np.zeros([self.batch_size, input.shape[1].value, self.num_outputs, 1, 1], dtype=np.float32))
capsules = routing(self.input, b_IJ, self.stddev, self.iter_routing, num_outputs=self.num_outputs, num_dims=self.vec_len)
capsules = tf.squeeze(capsules, axis=1)
return(capsules)
def routing(input, b_IJ, stddev, iter_routing, num_outputs=10, num_dims=16):
''' The routing algorithm.
Args:
input: A Tensor with [batch_size, num_caps_l=1152, 1, length(u_i)=8, 1]
shape, num_caps_l meaning the number of capsule in the layer l.
num_outputs: the number of output capsules.
num_dims: the number of dimensions for output capsule.
Returns:
A Tensor of shape [batch_size, num_caps_l_plus_1, length(v_j)=16, 1]
representing the vector output `v_j` in the layer l+1
Notes:
u_i represents the vector output of capsule i in the layer l, and
v_j the vector output of capsule j in the layer l+1.
'''
# W: [1, num_caps_i, num_caps_j * len_v_j, len_u_j, 1]
input_shape = get_shape(input)
W = tf.get_variable('Weight', shape=[1, input_shape[1], num_dims * num_outputs] + input_shape[-2:],
dtype=tf.float32, initializer=tf.random_normal_initializer(stddev=stddev))
biases = tf.get_variable('bias', shape=(1, 1, num_outputs, num_dims, 1))
# Eq.2, calc u_hat
# Since tf.matmul is a time-consuming op,
# A better solution is using element-wise multiply, reduce_sum and reshape
# ops instead. Matmul [a, b] x [b, c] is equal to a series ops as
# element-wise multiply [a*c, b] * [a*c, b], reduce_sum at axis=1 and
# reshape to [a, c]
input = tf.tile(input, [1, 1, num_dims * num_outputs, 1, 1])
# assert input.get_shape() == [cfg.batch_size, 1152, 160, 8, 1]
u_hat = reduce_sum(W * input, axis=3, keepdims=True)
u_hat = tf.reshape(u_hat, shape=[-1, input_shape[1], num_outputs, num_dims, 1])
# assert u_hat.get_shape() == [cfg.batch_size, 1152, 10, 16, 1]
# In forward, u_hat_stopped = u_hat; in backward, no gradient passed back from u_hat_stopped to u_hat
u_hat_stopped = tf.stop_gradient(u_hat, name='stop_gradient')
# line 3,for r iterations do
for r_iter in range(iter_routing):
with tf.variable_scope('iter_' + str(r_iter)):
# line 4:
# => [batch_size, 1152, 10, 1, 1]
c_IJ = softmax(b_IJ, axis=2)
# At last iteration, use `u_hat` in order to receive gradients from the following graph
if r_iter == iter_routing - 1:
# line 5:
# weighting u_hat with c_IJ, element-wise in the last two dims
# => [batch_size, 1152, 10, 16, 1]
s_J = tf.multiply(c_IJ, u_hat)
# then sum in the second dim, resulting in [batch_size, 1, 10, 16, 1]
s_J = reduce_sum(s_J, axis=1, keepdims=True) + biases
# assert s_J.get_shape() == [cfg.batch_size, 1, num_outputs, num_dims, 1]
# line 6:
# squash using Eq.1,
v_J = squash(s_J)
# assert v_J.get_shape() == [cfg.batch_size, 1, 10, 16, 1]
elif r_iter < iter_routing - 1: # Inner iterations, do not apply backpropagation
s_J = tf.multiply(c_IJ, u_hat_stopped)
s_J = reduce_sum(s_J, axis=1, keepdims=True) + biases
v_J = squash(s_J)
# line 7:
# reshape & tile v_j from [batch_size ,1, 10, 16, 1] to [batch_size, 1152, 10, 16, 1]
            # then take the inner product over the last two dims: [16, 1].T x [16, 1] => [1, 1],
            # giving agreement scores of shape [batch_size, 1152, 10, 1, 1]
v_J_tiled = tf.tile(v_J, [1, input_shape[1], 1, 1, 1])
u_produce_v = reduce_sum(u_hat_stopped * v_J_tiled, axis=3, keepdims=True)
# assert u_produce_v.get_shape() == [cfg.batch_size, 1152, 10, 1, 1]
# b_IJ += tf.reduce_sum(u_produce_v, axis=0, keep_dims=True)
b_IJ += u_produce_v
return(v_J)
def squash(vector):
'''Squashing function corresponding to Eq. 1
Args:
vector: A tensor with shape [batch_size, 1, num_caps, vec_len, 1] or [batch_size, num_caps, vec_len, 1].
Returns:
A tensor with the same shape as vector but squashed in 'vec_len' dimension.
'''
vec_squared_norm = reduce_sum(tf.square(vector), -2, keepdims=True)
scalar_factor = vec_squared_norm / (1 + vec_squared_norm) / tf.sqrt(vec_squared_norm + epsilon)
vec_squashed = scalar_factor * vector # element-wise
return(vec_squashed)
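if __name__ == '__main__':
    # Construction sketch (hedged): assumes TensorFlow 1.x and the helpers
    # imported from core.utils above; input length/channels are illustrative.
    batch_size = 8
    inputs = tf.placeholder(tf.float32, shape=[batch_size, 100, 256])
    primary_caps = CapsLayer(num_outputs=32, vec_len=8, batch_size=batch_size,
                             stddev=0.01, iter_routing=3,
                             with_routing=False, layer_type='CONV')
    caps1 = primary_caps(inputs, kernel_size=9, stride=2)
    with tf.variable_scope('DigitCaps'):
        digit_caps = CapsLayer(num_outputs=10, vec_len=16, batch_size=batch_size,
                               stddev=0.01, iter_routing=3,
                               with_routing=True, layer_type='FC')
        caps2 = digit_caps(caps1)  # shape: [batch_size, 10, 16, 1]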
|
the-stack_0_3182 | import wx
from .icons import icons8_keyboard_50
from .mwindow import MWindow
_ = wx.GetTranslation
class KeymapPanel(wx.Panel):
def __init__(self, *args, context=None, **kwds):
kwds["style"] = kwds.get("style", 0) | wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
self.context = context
self.list_keymap = wx.ListCtrl(
self, wx.ID_ANY, style=wx.LC_HRULES | wx.LC_REPORT | wx.LC_VRULES
)
self.button_add = wx.Button(self, wx.ID_ANY, _("Add Hotkey"))
self.text_key_name = wx.TextCtrl(self, wx.ID_ANY, "")
self.text_command_name = wx.TextCtrl(self, wx.ID_ANY, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.on_button_add_hotkey, self.button_add)
# end wxGlade
self.Bind(
wx.EVT_LIST_ITEM_RIGHT_CLICK, self.on_item_rightclick, self.list_keymap
)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_item_activated, self.list_keymap)
self.text_key_name.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
def initialize(self):
self.reload_keymap()
self.Children[0].SetFocus()
def finalize(self):
pass
def __set_properties(self):
self.list_keymap.SetToolTip(_("What keys are bound to which actions?"))
self.list_keymap.AppendColumn(_("Key"), format=wx.LIST_FORMAT_LEFT, width=114)
self.list_keymap.AppendColumn(
_("Command"), format=wx.LIST_FORMAT_LEFT, width=348
)
self.button_add.SetToolTip(_("Add a new hotkey"))
# end wxGlade
def __do_layout(self):
# begin wxGlade: Keymap.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(self.list_keymap, 1, wx.EXPAND, 0)
sizer_2.Add(self.button_add, 0, 0, 0)
sizer_2.Add(self.text_key_name, 1, 0, 0)
sizer_2.Add(self.text_command_name, 2, 0, 0)
sizer_1.Add(sizer_2, 0, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def on_item_activated(self, event):
element = event.Text
self.text_key_name.SetValue(element)
self.text_command_name.SetValue(self.context.keymap[element])
def on_item_rightclick(self, event):
element = event.Text
menu = wx.Menu()
convert = menu.Append(
wx.ID_ANY, _("Remove %s") % str(element)[:16], "", wx.ITEM_NORMAL
)
self.Bind(wx.EVT_MENU, self.on_tree_popup_delete(element), convert)
convert = menu.Append(wx.ID_ANY, _("Reset Default"), "", wx.ITEM_NORMAL)
self.Bind(wx.EVT_MENU, self.on_tree_popup_clear(element), convert)
self.PopupMenu(menu)
menu.Destroy()
def on_tree_popup_clear(self, element):
def delete(event=None):
self.context.default_keymap()
self.list_keymap.DeleteAllItems()
self.reload_keymap()
return delete
def on_tree_popup_delete(self, element):
def delete(event=None):
try:
del self.context.keymap[element]
self.list_keymap.DeleteAllItems()
self.reload_keymap()
except KeyError:
pass
return delete
def reload_keymap(self):
i = 0
for key in self.context.keymap:
value = self.context.keymap[key]
m = self.list_keymap.InsertItem(i, str(key))
i += 1
if m != -1:
self.list_keymap.SetItem(m, 1, str(value))
def on_button_add_hotkey(self, event=None): # wxGlade: Keymap.<event_handler>
keystroke = self.text_key_name.GetValue()
if len(keystroke) == 0:
dlg = wx.MessageDialog(
None,
_("Missing Keystroke"),
_("No Keystroke for binding."),
wx.OK | wx.ICON_WARNING,
)
dlg.ShowModal()
dlg.Destroy()
self.text_key_name.SetFocus()
return
if len(self.text_command_name.GetValue()) == 0:
dlg = wx.MessageDialog(
None,
_("Missing Command"),
_("No Command for binding."),
wx.OK | wx.ICON_WARNING,
)
dlg.ShowModal()
dlg.Destroy()
self.text_command_name.SetFocus()
return
self.context.keymap[
self.text_key_name.GetValue()
] = self.text_command_name.GetValue()
self.text_key_name.SetValue("")
self.text_command_name.SetValue("")
self.list_keymap.DeleteAllItems()
self.reload_keymap()
def on_key_press(self, event):
from meerk40t.gui.wxutils import get_key_name
keyvalue = get_key_name(event)
self.text_command_name.SetValue("")
if keyvalue is None:
self.text_key_name.SetValue("")
else:
self.text_key_name.SetValue(keyvalue)
for i, key in enumerate(self.context.keymap):
if key == keyvalue:
self.list_keymap.Select(i, True)
self.list_keymap.Focus(i)
self.text_command_name.SetValue(self.context.keymap[key])
else:
self.list_keymap.Select(i, False)
class Keymap(MWindow):
def __init__(self, *args, **kwds):
super().__init__(500, 530, *args, **kwds)
self.panel = KeymapPanel(self, wx.ID_ANY, context=self.context)
_icon = wx.NullIcon
_icon.CopyFromBitmap(icons8_keyboard_50.GetBitmap())
self.SetIcon(_icon)
# begin wxGlade: Keymap.__set_properties
self.SetTitle(_("Keymap Settings"))
def window_open(self):
self.panel.initialize()
def window_close(self):
self.panel.finalize()
|
the-stack_0_3183 | import logging
from threading import Thread
from time import sleep, time
from test.cl_node.errors import NonZeroExitCodeError
from test.cl_node.wait import wait_for_block_hashes_propagated_to_all_nodes
from test.cl_node.casperlabsnode import extract_block_hash_from_propose_output
CONTRACT_1 = 'old_wasm/helloname_invalid_just_1.wasm'
CONTRACT_2 = 'old_wasm/helloname_invalid_just_2.wasm'
class TimedThread(Thread):
def __init__(self,
docker_node: 'DockerNode',
command_kwargs: dict,
start_time: float) -> None:
Thread.__init__(self)
self.name = docker_node.name
self.node = docker_node
self.kwargs = command_kwargs
self.start_time = start_time
def run(self) -> None:
if self.start_time <= time():
raise Exception(f'start_time: {self.start_time} is past current time: {time()}')
while self.start_time > time():
sleep(0.001)
self.my_call(self.kwargs)
def my_call(self, kwargs):
raise NotImplementedError()
class DeployTimedThread(TimedThread):
def my_call(self, kwargs):
self.node.client.deploy(**kwargs)
class ProposeTimedThread(TimedThread):
def my_call(self, kwargs):
self.block_hash = None
try:
self.block_hash = extract_block_hash_from_propose_output(self.node.client.propose())
except NonZeroExitCodeError:
# Ignore error for no new deploys
pass
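# The TimedThread subclasses above implement a simple synchronized start: every
# thread busy-waits until the shared wall-clock `start_time`, so the deploys
# (and later the proposes) from all three nodes are issued almost simultaneously.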
def test_neglected_invalid_block(three_node_network):
"""
Feature file: neglected_invalid_justification.feature
Scenario: 3 Nodes doing simultaneous deploys and proposes do not have neglected invalid blocks
"""
bootstrap, node1, node2 = three_node_network.docker_nodes
for cycle_count in range(4):
logging.info(f'DEPLOY_PROPOSE CYCLE COUNT: {cycle_count + 1}')
start_time = time() + 1
        boot_deploy = DeployTimedThread(bootstrap,
{'session_contract': CONTRACT_1,
'payment_contract': CONTRACT_1},
start_time)
        node1_deploy = DeployTimedThread(node1,
{'session_contract': CONTRACT_2,
'payment_contract': CONTRACT_2},
start_time)
        node2_deploy = DeployTimedThread(node2,
{'session_contract': CONTRACT_2,
'payment_contract': CONTRACT_2},
start_time)
# Simultaneous Deploy
node1_deploy.start()
boot_deploy.start()
node2_deploy.start()
boot_deploy.join()
node1_deploy.join()
node2_deploy.join()
start_time = time() + 1
boot_deploy = ProposeTimedThread(bootstrap, {}, start_time)
node1_deploy = ProposeTimedThread(node1, {}, start_time)
node2_deploy = ProposeTimedThread(node2, {}, start_time)
# Simultaneous Propose
node1_deploy.start()
boot_deploy.start()
node2_deploy.start()
boot_deploy.join()
node1_deploy.join()
node2_deploy.join()
# Assure deploy and proposes occurred
block_hashes = [h for h in [boot_deploy.block_hash, node1_deploy.block_hash, node2_deploy.block_hash] if h]
wait_for_block_hashes_propagated_to_all_nodes(three_node_network.docker_nodes, block_hashes)
assert ' for NeglectedInvalidBlock.' not in bootstrap.logs()
assert ' for NeglectedInvalidBlock.' not in node1.logs()
assert ' for NeglectedInvalidBlock.' not in node2.logs()
|
the-stack_0_3184 | """Support for deCONZ binary sensors."""
from __future__ import annotations
from collections.abc import Callable, ValuesView
from dataclasses import dataclass
from pydeconz.sensor import (
Alarm,
CarbonMonoxide,
DeconzSensor as PydeconzSensor,
Fire,
GenericFlag,
OpenClose,
Presence,
Vibration,
Water,
)
from homeassistant.components.binary_sensor import (
DOMAIN,
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import ATTR_DARK, ATTR_ON
from .deconz_device import DeconzDevice
from .gateway import DeconzGateway, get_gateway_from_config_entry
ATTR_ORIENTATION = "orientation"
ATTR_TILTANGLE = "tiltangle"
ATTR_VIBRATIONSTRENGTH = "vibrationstrength"
PROVIDES_EXTRA_ATTRIBUTES = (
"alarm",
"carbon_monoxide",
"fire",
"flag",
"open",
"presence",
"vibration",
"water",
)
@dataclass
class DeconzBinarySensorDescriptionMixin:
"""Required values when describing secondary sensor attributes."""
suffix: str
update_key: str
value_fn: Callable[[PydeconzSensor], bool | None]
@dataclass
class DeconzBinarySensorDescription(
BinarySensorEntityDescription,
DeconzBinarySensorDescriptionMixin,
):
"""Class describing deCONZ binary sensor entities."""
ENTITY_DESCRIPTIONS = {
Alarm: [
DeconzBinarySensorDescription(
key="alarm",
value_fn=lambda device: device.alarm, # type: ignore[no-any-return]
suffix="",
update_key="alarm",
device_class=BinarySensorDeviceClass.SAFETY,
)
],
CarbonMonoxide: [
DeconzBinarySensorDescription(
key="carbon_monoxide",
value_fn=lambda device: device.carbon_monoxide, # type: ignore[no-any-return]
suffix="",
update_key="carbonmonoxide",
device_class=BinarySensorDeviceClass.CO,
)
],
Fire: [
DeconzBinarySensorDescription(
key="fire",
value_fn=lambda device: device.fire, # type: ignore[no-any-return]
suffix="",
update_key="fire",
device_class=BinarySensorDeviceClass.SMOKE,
),
DeconzBinarySensorDescription(
key="in_test_mode",
value_fn=lambda device: device.in_test_mode, # type: ignore[no-any-return]
suffix="Test Mode",
update_key="test",
device_class=BinarySensorDeviceClass.SMOKE,
entity_category=EntityCategory.DIAGNOSTIC,
),
],
GenericFlag: [
DeconzBinarySensorDescription(
key="flag",
value_fn=lambda device: device.flag, # type: ignore[no-any-return]
suffix="",
update_key="flag",
)
],
OpenClose: [
DeconzBinarySensorDescription(
key="open",
value_fn=lambda device: device.open, # type: ignore[no-any-return]
suffix="",
update_key="open",
device_class=BinarySensorDeviceClass.OPENING,
)
],
Presence: [
DeconzBinarySensorDescription(
key="presence",
value_fn=lambda device: device.presence, # type: ignore[no-any-return]
suffix="",
update_key="presence",
device_class=BinarySensorDeviceClass.MOTION,
)
],
Vibration: [
DeconzBinarySensorDescription(
key="vibration",
value_fn=lambda device: device.vibration, # type: ignore[no-any-return]
suffix="",
update_key="vibration",
device_class=BinarySensorDeviceClass.VIBRATION,
)
],
Water: [
DeconzBinarySensorDescription(
key="water",
value_fn=lambda device: device.water, # type: ignore[no-any-return]
suffix="",
update_key="water",
device_class=BinarySensorDeviceClass.MOISTURE,
)
],
}
BINARY_SENSOR_DESCRIPTIONS = [
DeconzBinarySensorDescription(
key="tampered",
value_fn=lambda device: device.tampered, # type: ignore[no-any-return]
suffix="Tampered",
update_key="tampered",
device_class=BinarySensorDeviceClass.TAMPER,
entity_category=EntityCategory.DIAGNOSTIC,
),
DeconzBinarySensorDescription(
key="low_battery",
value_fn=lambda device: device.low_battery, # type: ignore[no-any-return]
suffix="Low Battery",
update_key="lowbattery",
device_class=BinarySensorDeviceClass.BATTERY,
entity_category=EntityCategory.DIAGNOSTIC,
),
]
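# Illustrative sketch (kept as a comment, not wired into the lists above): a
# further description for the `dark` attribute that Presence sensors expose
# (see extra_state_attributes below) would follow the same pattern.
# DeconzBinarySensorDescription(
#     key="dark",
#     value_fn=lambda device: device.dark,
#     suffix="Dark",
#     update_key="dark",
#     entity_category=EntityCategory.DIAGNOSTIC,
# )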
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the deCONZ binary sensor."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_sensor(
sensors: list[PydeconzSensor]
| ValuesView[PydeconzSensor] = gateway.api.sensors.values(),
) -> None:
"""Add binary sensor from deCONZ."""
entities: list[DeconzBinarySensor] = []
for sensor in sensors:
if not gateway.option_allow_clip_sensor and sensor.type.startswith("CLIP"):
continue
known_entities = set(gateway.entities[DOMAIN])
for description in (
ENTITY_DESCRIPTIONS.get(type(sensor), []) + BINARY_SENSOR_DESCRIPTIONS
):
if (
not hasattr(sensor, description.key)
or description.value_fn(sensor) is None
):
continue
new_sensor = DeconzBinarySensor(sensor, gateway, description)
if new_sensor.unique_id not in known_entities:
entities.append(new_sensor)
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
gateway.signal_new_sensor,
async_add_sensor,
)
)
async_add_sensor(
[gateway.api.sensors[key] for key in sorted(gateway.api.sensors, key=int)]
)
class DeconzBinarySensor(DeconzDevice, BinarySensorEntity):
"""Representation of a deCONZ binary sensor."""
TYPE = DOMAIN
_device: PydeconzSensor
entity_description: DeconzBinarySensorDescription
def __init__(
self,
device: PydeconzSensor,
gateway: DeconzGateway,
description: DeconzBinarySensorDescription,
) -> None:
"""Initialize deCONZ binary sensor."""
self.entity_description: DeconzBinarySensorDescription = description
super().__init__(device, gateway)
if description.suffix:
self._attr_name = f"{self._device.name} {description.suffix}"
self._update_keys = {description.update_key, "reachable"}
if self.entity_description.key in PROVIDES_EXTRA_ATTRIBUTES:
self._update_keys.update({"on", "state"})
@property
def unique_id(self) -> str:
"""Return a unique identifier for this device."""
if self.entity_description.suffix:
return f"{self.serial}-{self.entity_description.suffix.lower()}"
return super().unique_id
@callback
def async_update_callback(self) -> None:
"""Update the sensor's state."""
if self._device.changed_keys.intersection(self._update_keys):
super().async_update_callback()
@property
def is_on(self) -> bool | None:
"""Return the state of the sensor."""
return self.entity_description.value_fn(self._device)
@property
def extra_state_attributes(self) -> dict[str, bool | float | int | list | None]:
"""Return the state attributes of the sensor."""
attr: dict[str, bool | float | int | list | None] = {}
if self.entity_description.key not in PROVIDES_EXTRA_ATTRIBUTES:
return attr
if self._device.on is not None:
attr[ATTR_ON] = self._device.on
if self._device.secondary_temperature is not None:
attr[ATTR_TEMPERATURE] = self._device.secondary_temperature
if isinstance(self._device, Presence):
if self._device.dark is not None:
attr[ATTR_DARK] = self._device.dark
elif isinstance(self._device, Vibration):
attr[ATTR_ORIENTATION] = self._device.orientation
attr[ATTR_TILTANGLE] = self._device.tilt_angle
attr[ATTR_VIBRATIONSTRENGTH] = self._device.vibration_strength
return attr
|
the-stack_0_3185 | """
This file offers the methods to automatically retrieve the graph Chloroflexi bacterium RBG_16_70_13.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ChloroflexiBacteriumRbg167013(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Chloroflexi bacterium RBG_16_70_13 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Chloroflexi bacterium RBG_16_70_13 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ChloroflexiBacteriumRbg167013",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
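# Example (hedged): retrieve the undirected, preprocessed graph with node names.
# graph = ChloroflexiBacteriumRbg167013(directed=False, load_nodes=True, verbose=2)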
|
the-stack_0_3186 | # Common utility functions used by various script execution tests
# e.g. test_cmd_line, test_cmd_line_script and test_runpy
import sys
import os
import re
import os.path
import tempfile
import subprocess
import py_compile
import contextlib
import shutil
try:
import zipfile
except ImportError:
    # If Python is built without Unicode support, importing _io will
# fail, which, in turn, means that zipfile cannot be imported
# Most of this module can then still be used.
pass
from test.test_support import strip_python_stderr
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
cmd_line = [sys.executable]
if not env_vars:
cmd_line.append('-E')
cmd_line.extend(args)
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
env.update(env_vars)
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
try:
out, err = p.communicate()
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
rc = p.returncode
err = strip_python_stderr(err)
if (rc and expected_success) or (not rc and not expected_success):
raise AssertionError(
"Process return code is %d, "
"stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
return rc, out, err
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(True, *args, **env_vars)
def assert_python_failure(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` fails and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(False, *args, **env_vars)
def python_exit_code(*args):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
with open(os.devnull, 'w') as devnull:
return subprocess.call(cmd_line, stdout=devnull,
stderr=subprocess.STDOUT)
def spawn_python(*args, **kwargs):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
**kwargs)
def kill_python(p):
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
# try to cleanup the child so we don't appear to leak when running
# with regrtest -R.
p.wait()
subprocess._cleanup()
return data
def run_python(*args, **kwargs):
if __debug__:
p = spawn_python(*args, **kwargs)
else:
p = spawn_python('-O', *args, **kwargs)
stdout_data = kill_python(p)
return p.wait(), stdout_data
# Script creation utilities
@contextlib.contextmanager
def temp_dir():
dirname = tempfile.mkdtemp()
dirname = os.path.realpath(dirname)
try:
yield dirname
finally:
shutil.rmtree(dirname)
def make_script(script_dir, script_basename, source):
script_filename = script_basename+os.extsep+'py'
script_name = os.path.join(script_dir, script_filename)
script_file = open(script_name, 'w')
script_file.write(source)
script_file.close()
return script_name
def compile_script(script_name):
py_compile.compile(script_name, doraise=True)
if __debug__:
compiled_name = script_name + 'c'
else:
compiled_name = script_name + 'o'
return compiled_name
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
if name_in_zip is None:
name_in_zip = os.path.basename(script_name)
zip_file.write(script_name, name_in_zip)
zip_file.close()
#if test.test_support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, name_in_zip)
def make_pkg(pkg_dir):
os.mkdir(pkg_dir)
make_script(pkg_dir, '__init__', '')
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source, depth=1, compiled=False):
unlink = []
init_name = make_script(zip_dir, '__init__', '')
unlink.append(init_name)
init_basename = os.path.basename(init_name)
script_name = make_script(zip_dir, script_basename, source)
unlink.append(script_name)
if compiled:
init_name = compile_script(init_name)
script_name = compile_script(script_name)
unlink.extend((init_name, script_name))
pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
for name in pkg_names:
init_name_in_zip = os.path.join(name, init_basename)
zip_file.write(init_name, init_name_in_zip)
zip_file.write(script_name, script_name_in_zip)
zip_file.close()
for name in unlink:
os.unlink(name)
#if test.test_support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, script_name_in_zip)
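if __name__ == '__main__':
    # Self-check sketch using the helpers above: write a throwaway script into a
    # temporary directory and wrap it into a zip archive.
    with temp_dir() as script_dir:
        name = make_script(script_dir, 'example', "print('hello')\n")
        zip_name, run_name = make_zip_script(script_dir, 'example_zip', name)
        assert os.path.exists(zip_name)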
|
the-stack_0_3189 | # Copyright (c) 2019 Eric Steinberger
import pdb
import time
from os.path import dirname, abspath
import numpy as np
import sys
from DeepCFR.EvalAgentDeepCFR import EvalAgentDeepCFR
# These two eval agents HAVE TO come from the same training run and iteration for this analysis to make sense.
if len(sys.argv) < 2:
path_to_dcfr_eval_agent = dirname(abspath(__file__)) + "/trained_agents/Example_FHP_SINGLE.pkl"
else:
path_to_dcfr_eval_agent = sys.argv[1]
if len(sys.argv) == 3:
img_name = sys.argv[2]
else:
img_name = ''
N_DECK = 52
N_HOLE = 169 # 13 * 12 + 13
def hand2rep(hand):
card1_rank = hand[0][0]
card1_suit = hand[0][1]
card2_rank = hand[1][0]
card2_suit = hand[1][1]
suited = (card2_suit == card1_suit)
high_rank = max(card1_rank, card2_rank)
low_rank = min(card1_rank, card2_rank)
return (high_rank, low_rank, suited)
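# Worked examples (illustrative card tuples of the form (rank, suit)):
#   hand2rep([(12, 3), (10, 3)]) -> (12, 10, True)    # same suit -> suited
#   hand2rep([(5, 0), (9, 2)])   -> (9, 5, False)     # higher rank listed first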
#--------------- Generate p0 strat -------------------------
#Loading EvalAgents and checking if they have the same experiment name
eval_agent_dcfr = EvalAgentDeepCFR.load_from_disk(path_to_eval_agent=path_to_dcfr_eval_agent)
#get an env bldr from the agent and create an env
env_bldr = eval_agent_dcfr.env_bldr
env = env_bldr.get_new_env(is_evaluating=False)
start_time = time.time()
hands = {}
while len(hands) < N_HOLE:
obs, rew, done, info = env.reset()
eval_agent_dcfr.reset(deck_state_dict=env.cards_state_dict())
hole_hand = hand2rep(env.seats[0].hand)
if hole_hand not in hands:
hands[hole_hand] = eval_agent_dcfr.get_a_probs()
'''
print(f"Computed {N_HOLE} possible hands in {time.time()-start_time} sec")
for hand in hands.keys():
print(f"for hand: {hand}, the probabilities are {hands[hand]}")
'''
#----------------------------store data for p0
import pickle
f = open(img_name + 'p0_strat.pkl', 'ab')
pickle.dump(hands, f)
f.close()
#----------------------- Generate and Store Image for p0
import plot_strat
plot_strat.np2img(hands,img_name + 'p0_strat_img.png')
#----------------------- Generate Data for p1
eval_agent_dcfr = EvalAgentDeepCFR.load_from_disk(path_to_eval_agent=path_to_dcfr_eval_agent)
env_bldr = eval_agent_dcfr.env_bldr
env = env_bldr.get_new_env(is_evaluating=False)
start_time = time.time()
hands = {}
while len(hands) < N_HOLE:
obs, rew, done, info = env.reset()
eval_agent_dcfr.reset(deck_state_dict=env.cards_state_dict())
obs, rew, done, info = env.step(2)
eval_agent_dcfr.notify_of_action(p_id_acted=0, action_he_did=2)
hole_hand = hand2rep(env.seats[1].hand)
if hole_hand not in hands:
hands[hole_hand] = eval_agent_dcfr.get_a_probs()
#----------------------------store data for p1
import pickle
f = open(img_name + 'p1_strat.pkl', 'ab')
pickle.dump(hands, f)
f.close()
#----------------------- Generate and Store Image for p1
import plot_strat
plot_strat.np2img(hands, img_name + 'p1_strat_img.png')
pdb.set_trace()
|
the-stack_0_3192 | from PySide2 import QtCore, QtWidgets
from keychain.ui import constants
class SettingsMenu(QtWidgets.QWidget):
def __init__(self, settings, parent=None):
super(SettingsMenu, self).__init__(parent)
self.settings = settings
# self.setWindowFlags(QtCore.Qt.Dialog)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Popup)
self.build_ui()
def build_ui(self):
main_layout = QtWidgets.QVBoxLayout()
self.setLayout(main_layout)
        for setting, attrs in self.settings.items():
            widget_cls = constants.MAPPING[attrs.get("type")]
            attr_widget = widget_cls(**attrs)
            main_layout.addWidget(attr_widget)
            # Connect the signal on the widget instance; bind the loop variable as
            # a default argument so each connection keeps its own setting name.
            if hasattr(attr_widget, "value_signal"):
                attr_widget.value_signal.value_changed_signal.connect(
                    lambda value, setting=setting: self._on_value_changed(setting, value)
                )
def _on_value_changed(self, item, value):
self.settings.as_dict()[item] = value
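# `settings` is expected to be a mapping-like object exposing items() and
# as_dict(); a hypothetical entry shape (the "type" values must be keys of
# constants.MAPPING, names here are assumptions):
#   {"threshold": {"type": "float", "value": 0.5}}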
|
the-stack_0_3194 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os, os.path as op
import warnings
import numpy as np
from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec
from nipype.interfaces.base import (TraitedSpec, File, InputMultiPath,
OutputMultiPath, Undefined, traits,
isdefined, OutputMultiPath)
from nipype.utils.filemanip import split_filename
from nibabel import load
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class BETInputSpec(FSLCommandInputSpec):
# We use position args here as list indices - so a negative number
# will put something on the end
in_file = File(exists=True,
desc='input file to skull strip',
argstr='%s', position=0, mandatory=True)
out_file = File(desc='name of output skull stripped image',
argstr='%s', position=1, genfile=True, hash_files=False)
outline = traits.Bool(desc='create surface outline image',
argstr='-o')
mask = traits.Bool(desc='create binary mask image',
argstr='-m')
skull = traits.Bool(desc='create skull image',
argstr='-s')
no_output = traits.Bool(argstr='-n',
desc="Don't generate segmented output")
frac = traits.Float(desc='fractional intensity threshold',
argstr='-f %.2f')
vertical_gradient = traits.Float(argstr='-g %.2f',
desc='vertical gradient in fractional intensity ' \
'threshold (-1, 1)')
radius = traits.Int(argstr='-r %d', units='mm',
desc="head radius")
center = traits.List(traits.Int, desc='center of gravity in voxels',
argstr='-c %s', minlen=0, maxlen=3,
units='voxels')
threshold = traits.Bool(argstr='-t',
desc="apply thresholding to segmented brain image and mask")
mesh = traits.Bool(argstr='-e',
desc="generate a vtk mesh brain surface")
# the remaining 'options' are more like modes (mutually exclusive) that
# FSL actually implements in a shell script wrapper around the bet binary.
# for some combinations of them in specific order a call would not fail,
# but in general using more than one of the following is clearly not
# supported
_xor_inputs = ('functional', 'reduce_bias', 'robust', 'padding',
'remove_eyes', 'surfaces', 't2_guided')
robust = traits.Bool(desc='robust brain centre estimation ' \
'(iterates BET several times)',
argstr='-R', xor=_xor_inputs)
padding = traits.Bool(desc='improve BET if FOV is very small in Z ' \
'(by temporarily padding end slices)',
argstr='-Z', xor=_xor_inputs)
remove_eyes = traits.Bool(desc='eye & optic nerve cleanup (can be ' \
'useful in SIENA)',
argstr='-S', xor=_xor_inputs)
surfaces = traits.Bool(desc='run bet2 and then betsurf to get additional ' \
'skull and scalp surfaces (includes ' \
'registrations)',
argstr='-A', xor=_xor_inputs)
t2_guided = File(desc='as with creating surfaces, when also feeding in ' \
'non-brain-extracted T2 (includes registrations)',
argstr='-A2 %s', xor=_xor_inputs)
functional = traits.Bool(argstr='-F', xor=_xor_inputs,
desc="apply to 4D fMRI data")
reduce_bias = traits.Bool(argstr='-B', xor=_xor_inputs,
desc="bias field and neck cleanup")
class BETOutputSpec(TraitedSpec):
out_file = File(desc="path/name of skullstripped file")
mask_file = File(
desc="path/name of binary brain mask (if generated)")
outline_file = File(
desc="path/name of outline file (if generated)")
meshfile = File(
desc="path/name of vtk mesh file (if generated)")
class BET(FSLCommand):
"""Use FSL BET command for skull stripping.
For complete details, see the `BET Documentation.
<http://www.fmrib.ox.ac.uk/fsl/bet2/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> btr = fsl.BET()
>>> btr.inputs.in_file = example_data('structural.nii')
>>> btr.inputs.frac = 0.7
>>> res = btr.run() # doctest: +SKIP
"""
_cmd = 'bet'
input_spec = BETInputSpec
output_spec = BETOutputSpec
def _run_interface(self, runtime):
# The returncode is meaningless in BET. So check the output
# in stderr and if it's set, then update the returncode
# accordingly.
runtime = super(BET, self)._run_interface(runtime)
if runtime.stderr:
self.raise_exception(runtime)
return runtime
def _gen_outfilename(self):
out_file = self.inputs.out_file
if not isdefined(out_file) and isdefined(self.inputs.in_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_brain')
return os.path.abspath(out_file)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self._gen_outfilename()
if isdefined(self.inputs.mesh) and self.inputs.mesh:
outputs['meshfile'] = self._gen_fname(outputs['out_file'],
suffix='_mesh.vtk',
change_ext=False)
if (isdefined(self.inputs.mask) and self.inputs.mask) or \
(isdefined(self.inputs.reduce_bias) and \
self.inputs.reduce_bias):
outputs['mask_file'] = self._gen_fname(outputs['out_file'],
suffix='_mask')
if isdefined(self.inputs.outline) and self.inputs.outline:
outputs['outline_file'] = self._gen_fname(outputs['out_file'],
suffix='_overlay')
if isdefined(self.inputs.no_output) and self.inputs.no_output:
outputs['out_file'] = Undefined
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
return None
class FASTInputSpec(FSLCommandInputSpec):
""" Defines inputs (trait classes) for FAST """
in_files = InputMultiPath(File(exists=True), copyfile=False,
desc='image, or multi-channel set of images, ' \
'to be segmented',
argstr='%s', position=-1, mandatory=True)
out_basename = File(desc='base name of output files',
argstr='-o %s') # uses in_file name as basename if none given
number_classes = traits.Range(low=1, high=10, argstr='-n %d',
desc='number of tissue-type classes')
output_biasfield = traits.Bool(desc='output estimated bias field',
argstr='-b')
output_biascorrected = traits.Bool(desc='output restored image ' \
'(bias-corrected image)',
argstr='-B')
img_type = traits.Enum((1, 2, 3), desc='int specifying type of image: ' \
'(1 = T1, 2 = T2, 3 = PD)',
argstr='-t %d')
bias_iters = traits.Range(low=1, high=10, argstr='-I %d',
desc='number of main-loop iterations during ' \
'bias-field removal')
bias_lowpass = traits.Range(low=4, high=40,
desc='bias field smoothing extent (FWHM) ' \
'in mm',
argstr='-l %d', units='mm')
init_seg_smooth = traits.Range(low=0.0001, high=0.1,
desc='initial segmentation spatial ' \
'smoothness (during bias field ' \
'estimation)',
argstr='-f %.3f')
segments = traits.Bool(desc='outputs a separate binary image for each ' \
'tissue type',
argstr='-g')
init_transform = File(exists=True, desc='<standard2input.mat> initialise'\
' using priors',
argstr='-a %s')
    other_priors = InputMultiPath(File(exists=True), desc='alternative prior images',
argstr='-A %s', minlen=3, maxlen=3)
no_pve = traits.Bool(desc='turn off PVE (partial volume estimation)',
argstr='--nopve')
no_bias = traits.Bool(desc='do not remove bias field',
argstr='-N')
use_priors = traits.Bool(desc='use priors throughout',
argstr='-P') # must also set -a!,
# mutually inclusive??
# No, conditional
# mandatory... need to
# figure out how to
# handle with traits.
segment_iters = traits.Range(low=1, high=50,
desc='number of segmentation-initialisation'\
' iterations',
argstr='-W %d')
mixel_smooth = traits.Range(low=0.0, high=1.0,
desc='spatial smoothness for mixeltype',
argstr='-R %.2f')
    iters_afterbias = traits.Range(low=1, high=20,
desc='number of main-loop iterations ' \
'after bias-field removal',
argstr='-O %d')
hyper = traits.Range(low=0.0, high=1.0,
desc='segmentation spatial smoothness',
argstr='-H %.2f')
verbose = traits.Bool(desc='switch on diagnostic messages',
argstr='-v')
manual_seg = File(exists=True, desc='Filename containing intensities',
argstr='-s %s')
probability_maps = traits.Bool(desc='outputs individual probability maps',
argstr='-p')
class FASTOutputSpec(TraitedSpec):
"""Specify possible outputs from FAST"""
tissue_class_map = File(exists=True,
desc='path/name of binary segmented volume file' \
' one val for each class _seg')
tissue_class_files = OutputMultiPath(File(desc='path/name of binary segmented volumes ' \
'one file for each class _seg_x'))
restored_image = OutputMultiPath(File(desc='restored images (one for each input image) ' \
'named according to the input images _restore'))
mixeltype = File(desc="path/name of mixeltype volume file _mixeltype")
partial_volume_map = File(desc="path/name of partial volume file _pveseg")
partial_volume_files = OutputMultiPath(File(desc='path/name of partial volumes files ' \
'one for each class, _pve_x'))
bias_field = OutputMultiPath(File(desc='Estimated bias field _bias'))
probability_maps = OutputMultiPath(File(desc='filenames, one for each class, for each ' \
'input, prob_x'))
class FAST(FSLCommand):
""" Use FSL FAST for segmenting and bias correction.
For complete details, see the `FAST Documentation.
<http://www.fmrib.ox.ac.uk/fsl/fast4/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
Assign options through the ``inputs`` attribute:
>>> fastr = fsl.FAST()
>>> fastr.inputs.in_files = example_data('structural.nii')
>>> out = fastr.run() #doctest: +SKIP
"""
_cmd = 'fast'
input_spec = FASTInputSpec
output_spec = FASTOutputSpec
def _format_arg(self, name, spec, value):
# first do what should be done in general
formated = super(FAST, self)._format_arg(name, spec, value)
if name == 'in_files':
# FAST needs the -S parameter value to correspond to the number
# of input images, otherwise it will ignore all but the first
formated = "-S %d %s" % (len(value), formated)
return formated
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.number_classes):
nclasses = 3
else:
nclasses = self.inputs.number_classes
# when using multichannel, results basename is based on last
# input filename
if isdefined(self.inputs.out_basename):
basefile = self.inputs.out_basename
else:
basefile = self.inputs.in_files[-1]
outputs['tissue_class_map'] = self._gen_fname(basefile,
suffix='_seg')
if self.inputs.segments:
outputs['tissue_class_files'] = []
for i in range(nclasses):
outputs['tissue_class_files'].append(
self._gen_fname(basefile, suffix='_seg_%d' % i))
if isdefined(self.inputs.output_biascorrected):
outputs['restored_image'] = []
if len(self.inputs.in_files) > 1:
# for multi-image segmentation there is one corrected image
# per input
for val, f in enumerate(self.inputs.in_files):
# image numbering is 1-based
outputs['restored_image'].append(
self._gen_fname(basefile, suffix='_restore_%d' % (val + 1)))
else:
# single image segmentation has unnumbered output image
outputs['restored_image'].append(
self._gen_fname(basefile, suffix='_restore'))
outputs['mixeltype'] = self._gen_fname(basefile, suffix='_mixeltype')
if not self.inputs.no_pve:
outputs['partial_volume_map'] = self._gen_fname(basefile, suffix='_pveseg')
outputs['partial_volume_files'] = []
for i in range(nclasses):
outputs['partial_volume_files'].append(self._gen_fname(basefile,
suffix='_pve_%d' % i))
if self.inputs.output_biasfield:
outputs['bias_field'] = []
if len(self.inputs.in_files) > 1:
# for multi-image segmentation there is one bias field image
# per input
for val, f in enumerate(self.inputs.in_files):
# image numbering is 1-based
outputs['bias_field'].append(
self._gen_fname(basefile, suffix='_bias_%d' % (val + 1)))
else:
# single image segmentation has unnumbered output image
outputs['bias_field'].append(
self._gen_fname(basefile, suffix='_bias'))
if self.inputs.probability_maps:
outputs['probability_maps'] = []
for i in range(nclasses):
outputs['probability_maps'].append(
self._gen_fname(basefile, suffix='_prob_%d' % i))
return outputs
class FLIRTInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='-in %s', mandatory=True,
position=0, desc='input file')
# XXX Not clear if position is required for mandatory flirt inputs
# since they are prefixed with argstrs. But doing it to follow
# our previous convention and so we can test the generated command
# line.
reference = File(exists=True, argstr='-ref %s', mandatory=True,
position=1, desc='reference file')
out_file = File(argstr='-out %s', desc='registered output file',
genfile=True, position=2, hash_files=False)
out_matrix_file = File(argstr='-omat %s',
desc='output affine matrix in 4x4 asciii format',
genfile=True, position=3, hash_files=False)
in_matrix_file = File(argstr='-init %s', desc='input 4x4 affine matrix')
apply_xfm = traits.Bool(argstr='-applyxfm', requires=['in_matrix_file'],
desc='apply transformation supplied by in_matrix_file')
datatype = traits.Enum('char', 'short', 'int', 'float', 'double',
argstr='-datatype %s',
desc='force output data type')
cost = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi',
'leastsq', 'labeldiff',
argstr='-cost %s',
desc='cost function')
# XXX What is the difference between 'cost' and 'searchcost'? Are
# these both necessary or do they map to the same variable.
cost_func = traits.Enum('mutualinfo', 'corratio', 'normcorr', 'normmi',
'leastsq', 'labeldiff',
argstr='-searchcost %s',
desc='cost function')
uses_qform = traits.Bool(argstr='-usesqform',
desc='initialize using sform or qform')
display_init = traits.Bool(argstr='-displayinit',
desc='display initial matrix')
angle_rep = traits.Enum('quaternion', 'euler',
argstr='-anglerep %s',
desc='representation of rotation angles')
interp = traits.Enum('trilinear', 'nearestneighbour', 'sinc','spline',
argstr='-interp %s',
desc='final interpolation method used in reslicing')
sinc_width = traits.Int(argstr='-sincwidth %d', units='voxels',
desc='full-width in voxels')
sinc_window = traits.Enum('rectangular', 'hanning', 'blackman',
argstr='-sincwindow %s',
desc='sinc window') # XXX better doc
bins = traits.Int(argstr='-bins %d', desc='number of histogram bins')
dof = traits.Int(argstr='-dof %d',
desc='number of transform degrees of freedom')
no_resample = traits.Bool(argstr='-noresample',
desc='do not change input sampling')
force_scaling = traits.Bool(argstr='-forcescaling',
desc='force rescaling even for low-res images')
min_sampling = traits.Float(argstr='-minsampling %f', units='mm',
desc='set minimum voxel dimension for sampling')
padding_size = traits.Int(argstr='-paddingsize %d', units='voxels',
desc='for applyxfm: interpolates outside image '\
'by size')
searchr_x = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchrx %s',
desc='search angles along x-axis, in degrees')
searchr_y = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchry %s',
desc='search angles along y-axis, in degrees')
searchr_z = traits.List(traits.Int, minlen=2, maxlen=2, units='degrees',
argstr='-searchrz %s',
desc='search angles along z-axis, in degrees')
no_search = traits.Bool(argstr='-nosearch',
desc='set all angular searches to ranges 0 to 0')
coarse_search = traits.Int(argstr='-coarsesearch %d', units='degrees',
desc='coarse search delta angle')
fine_search = traits.Int(argstr='-finesearch %d', units='degrees',
desc='fine search delta angle')
schedule = File(exists=True, argstr='-schedule %s',
desc='replaces default schedule')
ref_weight = File(exists=True, argstr='-refweight %s',
desc='File for reference weighting volume')
in_weight = File(exists=True, argstr='-inweight %s',
desc='File for input weighting volume')
no_clamp = traits.Bool(argstr='-noclamp',
desc='do not use intensity clamping')
no_resample_blur = traits.Bool(argstr='-noresampblur',
desc='do not use blurring on downsampling')
rigid2D = traits.Bool(argstr='-2D',
desc='use 2D rigid body mode - ignores dof')
verbose = traits.Int(argstr='-verbose %d',
desc='verbose mode, 0 is least')
class FLIRTOutputSpec(TraitedSpec):
out_file = File(exists=True,
desc='path/name of registered file (if generated)')
out_matrix_file = File(exists=True,
desc='path/name of calculated affine transform ' \
'(if generated)')
class FLIRT(FSLCommand):
"""Use FSL FLIRT for coregistration.
For complete details, see the `FLIRT Documentation.
<http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_
To print out the command line help, use:
fsl.FLIRT().inputs_help()
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> flt = fsl.FLIRT(bins=640, cost_func='mutualinfo')
>>> flt.inputs.in_file = example_data('structural.nii')
>>> flt.inputs.reference = example_data('mni.nii')
>>> flt.inputs.out_file = 'moved_subject.nii'
>>> flt.inputs.out_matrix_file = 'subject_to_template.mat'
>>> res = flt.run() #doctest: +SKIP
"""
_cmd = 'flirt'
input_spec = FLIRTInputSpec
output_spec = FLIRTOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_file
# Generate an out_file if one is not provided
if not isdefined(outputs['out_file']):
outputs['out_file'] = self._gen_fname(self.inputs.in_file,
suffix='_flirt')
outputs['out_file'] = os.path.abspath(outputs['out_file'])
outputs['out_matrix_file'] = self.inputs.out_matrix_file
# Generate an out_matrix file if one is not provided
if not isdefined(outputs['out_matrix_file']):
outputs['out_matrix_file'] = self._gen_fname(self.inputs.in_file,
suffix='_flirt.mat',
change_ext=False)
outputs['out_matrix_file'] = os.path.abspath(outputs['out_matrix_file'])
return outputs
def _gen_filename(self, name):
if name in ('out_file', 'out_matrix_file'):
return self._list_outputs()[name]
else:
return None
class ApplyXfm(FLIRT):
"""Currently just a light wrapper around FLIRT,
with no modifications
ApplyXfm is used to apply an existing tranform to an image
Examples
--------
>>> import nipype.interfaces.fsl as fsl
>>> from nipype.testing import example_data
>>> applyxfm = fsl.ApplyXfm()
>>> applyxfm.inputs.in_file = example_data('structural.nii')
>>> applyxfm.inputs.in_matrix_file = example_data('trans.mat')
>>> applyxfm.inputs.out_file = 'newfile.nii'
>>> applyxfm.inputs.reference = example_data('mni.nii')
>>> applyxfm.inputs.apply_xfm = True
>>> result = applyxfm.run() # doctest: +SKIP
"""
pass
class MCFLIRTInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, position=0, argstr="-in %s", mandatory=True,
desc="timeseries to motion-correct")
out_file = File(argstr='-out %s', genfile=True,
desc="file to write", hash_files=False)
cost = traits.Enum('mutualinfo', 'woods', 'corratio', 'normcorr', 'normmi', 'leastsquares',
argstr='-cost %s', desc="cost function to optimize")
bins = traits.Int(argstr='-bins %d', desc="number of histogram bins")
dof = traits.Int(argstr='-dof %d', desc="degrees of freedom for the transformation")
ref_vol = traits.Int(argstr='-refvol %d', desc="volume to align frames to")
scaling = traits.Float(argstr='-scaling %.2f', desc="scaling factor to use")
smooth = traits.Float(argstr='-smooth %.2f', desc="smoothing factor for the cost function")
rotation = traits.Int(argstr='-rotation %d', desc="scaling factor for rotation tolerances")
stages = traits.Int(argstr='-stages %d',
desc="stages (if 4, perform final search with sinc interpolation")
init = File(exists=True, argstr='-init %s', desc="inital transformation matrix")
interpolation = traits.Enum("spline", "nn", "sinc", argstr="-%s_final",
desc="interpolation method for transformation")
use_gradient = traits.Bool(argstr='-gdt', desc="run search on gradient images")
use_contour = traits.Bool(argstr='-edge', desc="run search on contour images")
mean_vol = traits.Bool(argstr='-meanvol', desc="register to mean volume")
stats_imgs = traits.Bool(argstr='-stats', desc="produce variance and std. dev. images")
save_mats = traits.Bool(argstr='-mats', desc="save transformation matrices")
save_plots = traits.Bool(argstr='-plots', desc="save transformation parameters")
save_rms = traits.Bool(argstr='-rmsabs -rmsrel', desc="save rms displacement parameters")
ref_file = File(exists=True, argstr='-reffile %s', desc="target image for motion correction")
class MCFLIRTOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="motion-corrected timeseries")
variance_img = File(exists=True, desc="variance image")
std_img = File(exists=True, desc="standard deviation image")
mean_img = File(exists=True, desc="mean timeseries image")
par_file = File(exists=True, desc="text-file with motion parameters")
mat_file = OutputMultiPath(File(exists=True), desc="transformation matrices")
rms_files = OutputMultiPath(File(exists=True),
desc="absolute and relative displacement parameters")
class MCFLIRT(FSLCommand):
"""Use FSL MCFLIRT to do within-modality motion correction.
For complete details, see the `MCFLIRT Documentation.
<http://www.fmrib.ox.ac.uk/fsl/mcflirt/index.html>`_
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> mcflt = fsl.MCFLIRT(in_file=example_data('functional.nii'), cost='mutualinfo')
>>> res = mcflt.run() # doctest: +SKIP
"""
_cmd = 'mcflirt'
input_spec = MCFLIRTInputSpec
output_spec = MCFLIRTOutputSpec
def _format_arg(self, name, spec, value):
if name == "interpolation":
if value == "trilinear":
return ""
else:
return spec.argstr % value
return super(MCFLIRT, self)._format_arg(name, spec, value)
def _list_outputs(self):
cwd = os.getcwd()
outputs = self._outputs().get()
outputs['out_file'] = self._gen_outfilename()
if isdefined(self.inputs.stats_imgs) and self.inputs.stats_imgs:
outputs['variance_img'] = self._gen_fname(outputs['out_file'] + \
'_variance.ext', cwd=cwd)
outputs['std_img'] = self._gen_fname(outputs['out_file'] + \
'_sigma.ext', cwd=cwd)
outputs['mean_img'] = self._gen_fname(outputs['out_file'] + \
'_meanvol.ext', cwd=cwd)
if isdefined(self.inputs.save_mats) and self.inputs.save_mats:
_, filename = os.path.split(outputs['out_file'])
matpathname = os.path.join(cwd, filename + '.mat')
_, _, _, timepoints = load(self.inputs.in_file).get_shape()
outputs['mat_file'] = []
for t in range(timepoints):
outputs['mat_file'].append(os.path.join(matpathname,
'MAT_%04d' % t))
if isdefined(self.inputs.save_plots) and self.inputs.save_plots:
# Note - if e.g. out_file has .nii.gz, you get .nii.gz.par,
# which is what mcflirt does!
outputs['par_file'] = outputs['out_file'] + '.par'
if isdefined(self.inputs.save_rms) and self.inputs.save_rms:
outfile = outputs['out_file']
outputs['rms_files'] = [outfile + '_abs.rms', outfile + '_rel.rms']
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
return None
def _gen_outfilename(self):
out_file = self.inputs.out_file
if isdefined(out_file):
out_file = os.path.realpath(out_file)
if not isdefined(out_file) and isdefined(self.inputs.in_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_mcf')
return os.path.abspath(out_file)
class FNIRTInputSpec(FSLCommandInputSpec):
ref_file = File(exists=True, argstr='--ref=%s', mandatory=True,
desc='name of reference image')
in_file = File(exists=True, argstr='--in=%s', mandatory=True,
desc='name of input image')
affine_file = File(exists=True, argstr='--aff=%s',
desc='name of file containing affine transform')
inwarp_file = File(exists=True, argstr='--inwarp=%s',
desc='name of file containing initial non-linear warps')
in_intensitymap_file = File(exists=True, argstr='--intin=%s',
                                desc='name of file/files containing initial intensity mapping '\
                                'usually generated by previous fnirt run')
fieldcoeff_file = traits.Either(traits.Bool, File, argstr='--cout=%s',
desc='name of output file with field coefficients or true')
warped_file = File(argstr='--iout=%s',
desc='name of output image', genfile=True, hash_files=False)
field_file = traits.Either(traits.Bool, File,
argstr='--fout=%s',
desc='name of output file with field or true', hash_files=False)
jacobian_file = traits.Either(traits.Bool, File,
argstr='--jout=%s',
                                  desc='name of file for writing out the Jacobian '\
'of the field (for diagnostic or VBM purposes)', hash_files=False)
modulatedref_file = traits.Either(traits.Bool, File,
argstr='--refout=%s',
                                      desc='name of file for writing out intensity modulated '\
'--ref (for diagnostic purposes)', hash_files=False)
out_intensitymap_file = traits.Either(traits.Bool, File,
argstr='--intout=%s',
desc='name of files for writing information pertaining '\
'to intensity mapping', hash_files=False)
log_file = File(argstr='--logout=%s',
desc='Name of log-file', genfile=True, hash_files=False)
config_file = File(exists=True, argstr='--config=%s',
desc='Name of config file specifying command line arguments')
refmask_file = File(exists=True, argstr='--refmask=%s',
desc='name of file with mask in reference space')
inmask_file = File(exists=True, argstr='--inmask=%s',
desc='name of file with mask in input image space')
skip_refmask = traits.Bool(argstr='--applyrefmask=0', xor=['apply_refmask'],
desc='Skip specified refmask if set, default false')
skip_inmask = traits.Bool(argstr='--applyinmask=0', xor=['apply_inmask'],
desc='skip specified inmask if set, default false')
apply_refmask = traits.List(traits.Enum(0, 1), argstr='--applyrefmask=%s', xor=['skip_refmask'],
desc='list of iterations to use reference mask on (1 to use, 0 to skip)', sep=",")
apply_inmask = traits.List(traits.Enum(0, 1), argstr='--applyinmask=%s', xor=['skip_inmask'],
desc='list of iterations to use input mask on (1 to use, 0 to skip)', sep=",")
skip_implicit_ref_masking = traits.Bool(argstr='--imprefm 0',
                                            desc='skip implicit masking based on value '\
'in --ref image. Default = 0')
skip_implicit_in_masking = traits.Bool(argstr='--impinm 0',
                                           desc='skip implicit masking based on value '\
'in --in image. Default = 0')
refmask_val = traits.Float(argstr='--imprefval=%f',
desc='Value to mask out in --ref image. Default =0.0')
inmask_val = traits.Float(argstr='--impinval=%f',
desc='Value to mask out in --in image. Default =0.0')
max_nonlin_iter = traits.List(traits.Int,
argstr='--miter=%s',
desc='Max # of non-linear iterations list, default [5, 5, 5, 5]', sep=",")
subsampling_scheme = traits.List(traits.Int,
argstr='--subsamp=%s',
desc='sub-sampling scheme, list, default [4, 2, 1, 1]',
sep=",")
warp_resolution = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--warpres=%d,%d,%d',
desc='(approximate) resolution (in mm) of warp basis '\
'in x-, y- and z-direction, default 10, 10, 10')
spline_order = traits.Int(argstr='--splineorder=%d',
                              desc='Order of spline, 2->Quadratic spline, 3->Cubic spline. Default=3')
in_fwhm = traits.List(traits.Int, argstr='--infwhm=%s',
desc='FWHM (in mm) of gaussian smoothing kernel for input volume, default [6, 4, 2, 2]', sep=",")
ref_fwhm = traits.List(traits.Int, argstr='--reffwhm=%s',
desc='FWHM (in mm) of gaussian smoothing kernel for ref volume, default [4, 2, 0, 0]', sep=",")
regularization_model = traits.Enum('membrane_energy', 'bending_energy',
argstr='--regmod=%s',
desc='Model for regularisation of warp-field [membrane_energy bending_energy], default bending_energy')
regularization_lambda = traits.List(traits.Float, argstr='--lambda=%s',
desc='Weight of regularisation, default depending on --ssqlambda and --regmod '\
                                        'switches. See user documentation.', sep=",")
skip_lambda_ssq = traits.Bool(argstr='--ssqlambda 0',
desc='If true, lambda is not weighted by current ssq, default false')
jacobian_range = traits.Tuple(traits.Float, traits.Float,
argstr='--jacrange=%f,%f',
desc='Allowed range of Jacobian determinants, default 0.01, 100.0')
derive_from_ref = traits.Bool(argstr='--refderiv',
desc='If true, ref image is used to calculate derivatives. Default false')
    intensity_mapping_model = traits.Enum('none', 'global_linear', 'global_non_linear',
'local_linear', 'global_non_linear_with_bias',
'local_non_linear', argstr='--intmod=%s',
desc='Model for intensity-mapping')
intensity_mapping_order = traits.Int(argstr='--intorder=%d',
                                         desc='Order of polynomial for mapping intensities, default 5')
biasfield_resolution = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--biasres=%d,%d,%d',
desc='Resolution (in mm) of bias-field modelling '\
'local intensities, default 50, 50, 50')
bias_regularization_lambda = traits.Float(argstr='--biaslambda=%f',
desc='Weight of regularisation for bias-field, default 10000')
skip_intensity_mapping = traits.Bool(argstr='--estint=0', xor=['apply_intensity_mapping'],
desc='Skip estimate intensity-mapping default false')
apply_intensity_mapping = traits.List(traits.Enum(0, 1), argstr='--estint=%s', xor=['skip_intensity_mapping'],
desc='List of subsampling levels to apply intensity mapping for (0 to skip, 1 to apply)', sep=",")
hessian_precision = traits.Enum('double', 'float', argstr='--numprec=%s',
desc='Precision for representing Hessian, double or float. Default double')
class FNIRTOutputSpec(TraitedSpec):
fieldcoeff_file = File(exists=True, desc='file with field coefficients')
warped_file = File(exists=True, desc='warped image')
field_file = File(desc='file with warp field')
jacobian_file = File(desc='file containing Jacobian of the field')
modulatedref_file = File(desc='file containing intensity modulated --ref')
    out_intensitymap_file = File(
        desc='file containing info pertaining to intensity mapping')
log_file = File(desc='Name of log-file')
class FNIRT(FSLCommand):
"""Use FSL FNIRT for non-linear registration.
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> fnt = fsl.FNIRT(affine_file=example_data('trans.mat'))
    >>> res = fnt.run(ref_file=example_data('mni.nii'), in_file=example_data('structural.nii')) #doctest: +SKIP
    T1 -> MNI152
>>> from nipype.interfaces import fsl
>>> fnirt_mprage = fsl.FNIRT()
>>> fnirt_mprage.inputs.in_fwhm = [8, 4, 2, 2]
>>> fnirt_mprage.inputs.subsampling_scheme = [4, 2, 1, 1]
Specify the resolution of the warps
>>> fnirt_mprage.inputs.warp_resolution = (6, 6, 6)
>>> res = fnirt_mprage.run(in_file='structural.nii', ref_file='mni.nii', warped_file='warped.nii', fieldcoeff_file='fieldcoeff.nii')#doctest: +SKIP
We can check the command line and confirm that it's what we expect.
>>> fnirt_mprage.cmdline #doctest: +SKIP
'fnirt --cout=fieldcoeff.nii --in=structural.nii --infwhm=8,4,2,2 --ref=mni.nii --subsamp=4,2,1,1 --warpres=6,6,6 --iout=warped.nii'
"""
_cmd = 'fnirt'
input_spec = FNIRTInputSpec
output_spec = FNIRTOutputSpec
filemap = {'warped_file': 'warped',
'field_file': 'field',
'jacobian_file': 'field_jacobian',
'modulatedref_file': 'modulated',
'out_intensitymap_file': 'intmap',
'log_file': 'log.txt',
'fieldcoeff_file': 'fieldwarp'}
def _list_outputs(self):
outputs = self.output_spec().get()
for key, suffix in self.filemap.items():
inval = getattr(self.inputs, key)
change_ext = True
if key in ['warped_file', 'log_file']:
if suffix.endswith('.txt'):
change_ext = False
if isdefined(inval):
outputs[key] = inval
else:
outputs[key] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
elif isdefined(inval):
if isinstance(inval, bool):
if inval:
outputs[key] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
else:
outputs[key] = os.path.abspath(inval)
return outputs
def _format_arg(self, name, spec, value):
if name in self.filemap.keys():
return spec.argstr % self._list_outputs()[name]
return super(FNIRT, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name in ['warped_file', 'log_file']:
return self._list_outputs()[name]
return None
def write_config(self, configfile):
"""Writes out currently set options to specified config file
XX TODO : need to figure out how the config file is written
Parameters
----------
configfile : /path/to/configfile
"""
try:
fid = open(configfile, 'w+')
        except IOError:
            print('unable to create config_file %s' % (configfile))
            return
for item in self.inputs.get().items():
fid.write('%s\n' % (item))
fid.close()
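# A minimal, hypothetical sketch of using write_config (the filename below is
# an assumption for illustration; the method simply dumps the currently set
# inputs to a text file):
# >>> fnt = FNIRT()
# >>> fnt.inputs.ref_fwhm = [4, 2, 0, 0]
# >>> fnt.write_config('fnirt_options.cnf')  # doctest: +SKIP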
class ApplyWarpInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='-i %s',
mandatory=True,position=-4,
desc='image to be warped')
out_file = File(argstr='-o %s', genfile=True,
desc='output filename', position=-3, hash_files=False)
ref_file = File(exists=True, argstr='-r %s',position=-2,
mandatory=True,
desc='reference image')
field_file = File(exists=True, argstr='-w %s', position=-1,
desc='file containing warp field')
abswarp = traits.Bool(argstr='--abs', xor=['relwarp'],
desc="treat warp field as absolute: x' = w(x)")
relwarp = traits.Bool(argstr='--rel', xor=['abswarp'],
desc="treat warp field as relative: x' = x + w(x)")
datatype = traits.Enum('char', 'short', 'int', 'float', 'double',
argstr='--datatype %s',
desc='Force output data type [char short int float double].')
supersample = traits.Bool(argstr='--super',
desc='intermediary supersampling of output, default is off')
superlevel = traits.Either(traits.Enum('a'), traits.Int,
argstr='--superlevel %s',
desc="level of intermediary supersampling, a for 'automatic' or integer level. Default = 2")
premat = File(exists=True, argstr='--premat %s',
desc='filename for pre-transform (affine matrix)')
postmat = File(exists=True, argstr='--postmat %s',
desc='filename for post-transform (affine matrix)')
mask_file = File(exists=True, argstr='--mask %s',
desc='filename for mask image (in reference space)')
interp = traits.Enum('nn', 'trilinear', 'sinc', 'spline', argstr='--interp %s',
desc='interpolation method')
class ApplyWarpOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='Warped output file')
class ApplyWarp(FSLCommand):
"""Use FSL's applywarp to apply the results of a FNIRT registration
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> aw = fsl.ApplyWarp()
>>> aw.inputs.in_file = example_data('structural.nii')
>>> aw.inputs.ref_file = example_data('mni.nii')
>>> aw.inputs.field_file = 'my_coefficients_filed.nii' #doctest: +SKIP
>>> res = aw.run() #doctest: +SKIP
"""
_cmd = 'applywarp'
input_spec = ApplyWarpInputSpec
output_spec = ApplyWarpOutputSpec
def _format_arg(self, name, spec, value):
if name == 'superlevel':
return spec.argstr % str(value)
return super(ApplyWarp, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
if not isdefined(self.inputs.out_file):
outputs['out_file'] = self._gen_fname(self.inputs.in_file,
suffix='_warp')
else:
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
class SliceTimerInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='--in=%s',
mandatory=True, position=0,
desc='filename of input timeseries')
out_file = File(argstr='--out=%s', genfile=True,
desc='filename of output timeseries', hash_files=False)
index_dir = traits.Bool(argstr='--down',
desc='slice indexing from top to bottom')
time_repetition = traits.Float(argstr='--repeat=%f',
desc='Specify TR of data - default is 3s')
slice_direction = traits.Enum(1, 2, 3, argstr='--direction=%d',
desc='direction of slice acquisition (x=1, y=2, z=3) - default is z')
interleaved = traits.Bool(argstr='--odd',
desc='use interleaved acquisition')
custom_timings = File(exists=True, argstr='--tcustom=%s',
desc='slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift)')
global_shift = traits.Float(argstr='--tglobal',
desc='shift in fraction of TR, range 0:1 (default is 0.5 = no shift)')
custom_order = File(exists=True, argstr='--ocustom=%s',
desc='filename of single-column custom interleave order file (first slice is referred to as 1 not 0)')
class SliceTimerOutputSpec(TraitedSpec):
slice_time_corrected_file = File(exists=True, desc='slice time corrected file')
class SliceTimer(FSLCommand):
""" use FSL slicetimer to perform slice timing correction.
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
>>> st = fsl.SliceTimer()
>>> st.inputs.in_file = example_data('functional.nii')
>>> st.inputs.interleaved = True
>>> result = st.run() #doctest: +SKIP
"""
_cmd = 'slicetimer'
input_spec = SliceTimerInputSpec
output_spec = SliceTimerOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.out_file
if not isdefined(out_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_st')
outputs['slice_time_corrected_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()['slice_time_corrected_file']
return None
class SUSANInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='%s',
mandatory=True, position=1,
desc='filename of input timeseries')
brightness_threshold = traits.Float(argstr='%.10f',
position=2, mandatory=True,
desc='brightness threshold and should be greater than '
'noise level and less than contrast of edges to '
'be preserved.')
fwhm = traits.Float(argstr='%.10f',
position=3, mandatory=True,
desc='fwhm of smoothing, in mm, gets converted using sqrt(8*log(2))')
dimension = traits.Enum(3, 2, argstr='%d', position=4, usedefault=True,
desc='within-plane (2) or fully 3D (3)')
use_median = traits.Enum(1, 0, argstr='%d', position=5, usedefault=True,
desc='whether to use a local median filter in the cases where single-point noise is detected')
usans = traits.List(traits.Tuple(File(exists=True), traits.Float), maxlen=2,
argstr='', position=6, default=[], usedefault=True,
desc='determines whether the smoothing area (USAN) is to be '
'found from secondary images (0, 1 or 2). A negative '
'value for any brightness threshold will auto-set the '
'threshold at 10% of the robust range')
out_file = File(argstr='%s', position=-1, genfile=True,
desc='output file name', hash_files=False)
class SUSANOutputSpec(TraitedSpec):
smoothed_file = File(exists=True, desc='smoothed output file')
class SUSAN(FSLCommand):
""" use FSL SUSAN to perform smoothing
Examples
--------
>>> from nipype.interfaces import fsl
>>> from nipype.testing import example_data
    >>> print(anatfile) #doctest: +SKIP
anatomical.nii #doctest: +SKIP
>>> sus = fsl.SUSAN()
>>> sus.inputs.in_file = example_data('structural.nii')
>>> sus.inputs.brightness_threshold = 2000.0
>>> sus.inputs.fwhm = 8.0
>>> result = sus.run() #doctest: +SKIP
"""
_cmd = 'susan'
input_spec = SUSANInputSpec
output_spec = SUSANOutputSpec
def _format_arg(self, name, spec, value):
if name == 'fwhm':
return spec.argstr % (float(value) / np.sqrt(8 * np.log(2)))
if name == 'usans':
if not value:
return '0'
arglist = [str(len(value))]
for filename, thresh in value:
arglist.extend([filename, '%.10f' % thresh])
return ' '.join(arglist)
return super(SUSAN, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.out_file
if not isdefined(out_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_smooth')
outputs['smoothed_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()['smoothed_file']
return None
class FUGUEInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='--in=%s',
desc='filename of input volume')
unwarped_file = File(argstr='--unwarp=%s', genfile=True,
desc='apply unwarping and save as filename', hash_files=False)
phasemap_file = File(exists=True, argstr='--phasemap=%s',
desc='filename for input phase image')
dwell_to_asym_ratio = traits.Float(argstr='--dwelltoasym=%.10f',
desc='set the dwell to asym time ratio')
dwell_time = traits.Float(argstr='--dwell=%.10f',
desc='set the EPI dwell time per phase-encode line - same as echo spacing - (sec)')
asym_se_time = traits.Float(argstr='--asym=%.10f',
desc='set the fieldmap asymmetric spin echo time (sec)')
fmap_out_file = File(argstr='--savefmap=%s',
desc='filename for saving fieldmap (rad/s)', hash_files=False)
fmap_in_file = File(exists=True, argstr='--loadfmap=%s',
desc='filename for loading fieldmap (rad/s)')
shift_out_file = File(argstr='--saveshift=%s',
desc='filename for saving pixel shift volume', hash_files=False)
shift_in_file = File(exists=True, argstr='--loadshift=%s',
desc='filename for reading pixel shift volume')
median_2dfilter = traits.Bool(argstr='--median',
desc='apply 2D median filtering')
despike_2dfilter = traits.Bool(argstr='--despike',
desc='apply a 2D de-spiking filter')
no_gap_fill = traits.Bool(argstr='--nofill',
desc='do not apply gap-filling measure to the fieldmap')
no_extend = traits.Bool(argstr='--noextend',
desc='do not apply rigid-body extrapolation to the fieldmap')
smooth2d = traits.Float(argstr='--smooth2=%.2f',
desc='apply 2D Gaussian smoothing of sigma N (in mm)')
smooth3d = traits.Float(argstr='--smooth3=%.2f',
desc='apply 3D Gaussian smoothing of sigma N (in mm)')
poly_order = traits.Int(argstr='--poly=%d',
desc='apply polynomial fitting of order N')
fourier_order = traits.Int(argstr='--fourier=%d',
desc='apply Fourier (sinusoidal) fitting of order N')
pava = traits.Bool(argstr='--pava',
desc='apply monotonic enforcement via PAVA')
despike_theshold = traits.Float(argstr='--despikethreshold=%s',
desc='specify the threshold for de-spiking (default=3.0)')
unwarp_direction = traits.Enum('x', 'y', 'z', 'x-', 'y-', 'z-',
argstr='--unwarpdir=%s',
desc='specifies direction of warping (default y)')
phase_conjugate = traits.Bool(argstr='--phaseconj',
desc='apply phase conjugate method of unwarping')
icorr = traits.Bool(argstr='--icorr', requires=['shift_in_file'],
desc='apply intensity correction to unwarping (pixel shift method only)')
icorr_only = traits.Bool(argstr='--icorronly', requires=['unwarped_file'],
desc='apply intensity correction only')
mask_file = File(exists=True, argstr='--mask=%s',
desc='filename for loading valid mask')
save_unmasked_fmap = traits.Either(traits.Bool,
traits.File,
argstr='--unmaskfmap=%s',
requires=['fmap_out_file'],
desc='saves the unmasked fieldmap when using --savefmap', hash_files=False)
save_unmasked_shift = traits.Either(traits.Bool,
traits.File,
argstr='--unmaskshift=%s',
requires=['shift_out_file'],
desc='saves the unmasked shiftmap when using --saveshift', hash_files=False)
nokspace = traits.Bool(argstr='--nokspace', desc='do not use k-space forward warping')
class FUGUEOutputSpec(TraitedSpec):
unwarped_file = File(exists=True, desc='unwarped file')
class FUGUE(FSLCommand):
"""Use FSL FUGUE to unwarp epi's with fieldmaps
Examples
--------
Please insert examples for use of this command
"""
_cmd = 'fugue'
input_spec = FUGUEInputSpec
output_spec = FUGUEOutputSpec
def __init__(self, **kwargs):
super(FUGUE, self).__init__(**kwargs)
warn('This interface has not been fully tested. Please report any failures.')
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.unwarped_file
if not isdefined(out_file):
out_file = self._gen_fname(self.inputs.in_file,
suffix='_unwarped')
outputs['unwarped_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'unwarped_file':
return self._list_outputs()['unwarped_file']
return None
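# The FUGUE docstring above still lacks examples; the following is a minimal,
# hypothetical sketch based only on the traits declared in FUGUEInputSpec
# (filenames and the dwell time value are illustrative assumptions):
# >>> from nipype.interfaces import fsl
# >>> fugue = fsl.FUGUE()
# >>> fugue.inputs.in_file = 'epi.nii'
# >>> fugue.inputs.fmap_in_file = 'fieldmap_rads.nii'
# >>> fugue.inputs.dwell_time = 0.0005
# >>> fugue.inputs.unwarp_direction = 'y'
# >>> res = fugue.run()  # doctest: +SKIP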
class PRELUDEInputSpec(FSLCommandInputSpec):
complex_phase_file = File(exists=True, argstr='--complex=%s',
mandatory=True, xor=['magnitude_file', 'phase_file'],
desc='complex phase input volume')
magnitude_file = File(exists=True, argstr='--abs=%s',
mandatory=True,
xor=['complex_phase_file'],
desc='file containing magnitude image')
phase_file = File(exists=True, argstr='--phase=%s',
mandatory=True,
xor=['complex_phase_file'],
desc='raw phase file')
unwrapped_phase_file = File(genfile=True,
argstr='--unwrap=%s',
                                desc='file containing unwrapped phase', hash_files=False)
num_partitions = traits.Int(argstr='--numphasesplit=%d',
desc='number of phase partitions to use')
labelprocess2d = traits.Bool(argstr='--labelslices',
desc='does label processing in 2D (slice at a time)')
process2d = traits.Bool(argstr='--slices',
xor=['labelprocess2d'],
desc='does all processing in 2D (slice at a time)')
process3d = traits.Bool(argstr='--force3D',
xor=['labelprocess2d', 'process2d'],
desc='forces all processing to be full 3D')
threshold = traits.Float(argstr='--thresh=%.10f',
desc='intensity threshold for masking')
mask_file = File(exists=True, argstr='--mask=%s',
desc='filename of mask input volume')
start = traits.Int(argstr='--start=%d',
desc='first image number to process (default 0)')
end = traits.Int(argstr='--end=%d',
desc='final image number to process (default Inf)')
savemask_file = File(argstr='--savemask=%s',
desc='saving the mask volume', hash_files=False)
rawphase_file = File(argstr='--rawphase=%s',
desc='saving the raw phase output', hash_files=False)
label_file = File(argstr='--labels=%s',
desc='saving the area labels output', hash_files=False)
removeramps = traits.Bool(argstr='--removeramps',
desc='remove phase ramps during unwrapping')
class PRELUDEOutputSpec(TraitedSpec):
unwrapped_phase_file = File(exists=True,
desc='unwrapped phase file')
class PRELUDE(FSLCommand):
"""Use FSL prelude to do phase unwrapping
Examples
--------
Please insert examples for use of this command
"""
input_spec = PRELUDEInputSpec
output_spec = PRELUDEOutputSpec
_cmd = 'prelude'
def __init__(self, **kwargs):
super(PRELUDE, self).__init__(**kwargs)
warn('This has not been fully tested. Please report any failures.')
def _list_outputs(self):
outputs = self._outputs().get()
out_file = self.inputs.unwrapped_phase_file
if not isdefined(out_file):
if isdefined(self.inputs.phase_file):
out_file = self._gen_fname(self.inputs.phase_file,
suffix='_unwrapped')
elif isdefined(self.inputs.complex_phase_file):
out_file = self._gen_fname(self.inputs.complex_phase_file,
suffix='_phase_unwrapped')
outputs['unwrapped_phase_file'] = os.path.abspath(out_file)
return outputs
def _gen_filename(self, name):
if name == 'unwrapped_phase_file':
return self._list_outputs()['unwrapped_phase_file']
return None
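# The PRELUDE docstring above still lacks examples; a minimal, hypothetical
# sketch using the magnitude/phase input pair declared in PRELUDEInputSpec
# (filenames are illustrative assumptions):
# >>> from nipype.interfaces import fsl
# >>> prelude = fsl.PRELUDE()
# >>> prelude.inputs.magnitude_file = 'magnitude.nii'
# >>> prelude.inputs.phase_file = 'phase.nii'
# >>> res = prelude.run()  # doctest: +SKIP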
class FIRSTInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, position=-2,
argstr='-i %s',
desc='input data file')
out_file = File('segmented', usedefault=True, mandatory=True, position=-1,
argstr='-o %s',
desc='output data file', hash_files=False)
verbose = traits.Bool(argstr='-v', position=1,
desc="Use verbose logging.")
brain_extracted = traits.Bool(argstr='-b', position=2,
desc="Input structural image is already brain-extracted")
no_cleanup = traits.Bool(argstr='-d', position=3,
desc="Input structural image is already brain-extracted")
method = traits.Enum('auto', 'fast', 'none',
xor=['method_as_numerical_threshold'],
argstr='-m', position=4,
desc=("Method must be one of auto, fast, none, or it can be entered "
"using the 'method_as_numerical_threshold' input"))
method_as_numerical_threshold = traits.Float(argstr='-m', position=4,
desc=("Specify a numerical threshold value or use the 'method' input "
"to choose auto, fast, or none"))
list_of_specific_structures = traits.List(traits.Str, argstr='-s %s',
sep=',', position=5, minlen=1,
                                              desc='Runs only on the specified structures (e.g. L_Hipp, R_Hipp, '
                                              'L_Accu, R_Accu, L_Amyg, R_Amyg, '
                                              'L_Caud, R_Caud, L_Pall, R_Pall, '
                                              'L_Puta, R_Puta, L_Thal, R_Thal, BrStem)')
affine_file = File(exists=True, position=6,
argstr='-a %s',
desc=('Affine matrix to use (e.g. img2std.mat) (does not '
're-run registration)'))
class FIRSTOutputSpec(TraitedSpec):
vtk_surfaces = OutputMultiPath(File(exists=True),
desc='VTK format meshes for each subcortical region')
bvars = OutputMultiPath(File(exists=True),
desc='bvars for each subcortical region')
original_segmentations = File(exists=True,
desc=('3D image file containing the segmented regions as integer '
'values. Uses CMA labelling'))
segmentation_file = File(exists=True,
desc='4D image file containing a single volume per segmented region')
class FIRST(FSLCommand):
"""Use FSL's run_first_all command to segment subcortical volumes
http://www.fmrib.ox.ac.uk/fsl/first/index.html
Examples
--------
>>> from nipype.interfaces import fsl
>>> first = fsl.FIRST()
>>> first.inputs.in_file = 'structural.nii'
>>> first.inputs.out_file = 'segmented.nii'
>>> res = first.run() #doctest: +SKIP
"""
_cmd = 'run_first_all'
input_spec = FIRSTInputSpec
output_spec = FIRSTOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.list_of_specific_structures):
structures = self.inputs.list_of_specific_structures
else:
structures = ['L_Hipp', 'R_Hipp',
'L_Accu', 'R_Accu',
'L_Amyg', 'R_Amyg',
'L_Caud', 'R_Caud',
'L_Pall', 'R_Pall',
'L_Puta', 'R_Puta',
'L_Thal', 'R_Thal',
'BrStem']
outputs['original_segmentations'] = \
self._gen_fname('original_segmentations')
outputs['segmentation_file'] = self._gen_fname('segmentation_file')
outputs['vtk_surfaces'] = self._gen_mesh_names('vtk_surfaces',
structures)
outputs['bvars'] = self._gen_mesh_names('bvars', structures)
return outputs
def _gen_fname(self, name):
path, outname, ext = split_filename(self.inputs.out_file)
if name == 'original_segmentations':
return op.abspath(outname + '_all_fast_origsegs.nii.gz')
if name == 'segmentation_file':
return op.abspath(outname + '_all_fast_firstseg.nii.gz')
return None
def _gen_mesh_names(self, name, structures):
path, prefix, ext = split_filename(self.inputs.out_file)
if name == 'vtk_surfaces':
vtks = list()
for struct in structures:
vtk = prefix + '-' + struct + '_first.vtk'
vtks.append(op.abspath(vtk))
return vtks
if name == 'bvars':
bvars = list()
for struct in structures:
bvar = prefix + '-' + struct + '_first.bvars'
bvars.append(op.abspath(bvar))
return bvars
return None
|
the-stack_0_3195 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for the `aea gui` sub-commands."""
import io
import os
import shutil
import tempfile
import aea.cli_gui
def create_app():
"""Create a debug version of the flask app for testing against."""
app = aea.cli_gui.run_test()
app.debug = True
app.testing = True
return app
class DummyPID:
"""Mimics the behaviour of a process id."""
def __init__(self, return_code, stdout_str, stderr_str):
"""Initialise the class."""
self.return_code = return_code
self.stdout = io.BytesIO(stdout_str.encode(encoding='UTF-8'))
self.stderr = io.BytesIO(stderr_str.encode(encoding='UTF-8'))
def poll(self):
"""Mimic the process id poll function."""
return self.return_code
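# A short sketch (not part of the original tests) of how DummyPID can stand in
# for a subprocess handle when code under test polls a process and reads its
# byte streams:
# pid = DummyPID(0, "all done", "")
# assert pid.poll() == 0
# assert pid.stdout.read() == b"all done"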
class TempCWD:
"""Create a temporary current working directory."""
def __init__(self):
"""Initialise the class."""
self.temp_dir = tempfile.mkdtemp()
self.cwd = os.getcwd()
os.chdir(self.temp_dir)
def destroy(self):
"""Destroy the cwd and restore the old one."""
os.chdir(self.cwd)
try:
shutil.rmtree(self.temp_dir)
except (OSError, IOError):
pass
|
the-stack_0_3196 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: deconv.py
# Author: Qian Ge <[email protected]>
import os
import scipy.misc
import argparse
import numpy as np
import tensorflow as tf
from tensorcv.dataflow.image import ImageFromFile
import config_path as config
import sys
sys.path.append('../')
from lib.nets.vgg import DeconvBaseVGG19, BaseVGG19
import lib.utils.viz as viz
import lib.utils.normalize as normlize
import lib.utils.image as uim
IM_SIZE = 224
def get_parse():
parser = argparse.ArgumentParser()
parser.add_argument('--imtype', type=str, default='.jpg',
help='Image type')
parser.add_argument('--feat', type=str, required=True,
help='Choose of feature map layer')
parser.add_argument('--id', type=int, default=None,
help='feature map id')
return parser.parse_args()
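# Illustrative invocation (hypothetical: the layer name and feature id below
# are assumptions; valid --feat keys depend on the layers exposed by
# DeconvBaseVGG19):
#   python deconv.py --imtype .jpg --feat conv4_1 --id 10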
def im_scale(im):
return uim.im_rescale(im, [IM_SIZE, IM_SIZE])
if __name__ == '__main__':
FLAGS = get_parse()
input_im = ImageFromFile(FLAGS.imtype,
data_dir=config.im_path,
num_channel=3,
shuffle=False,
pf=im_scale,
)
input_im.set_batch_size(1)
vizmodel = DeconvBaseVGG19(config.vgg_path,
feat_key=FLAGS.feat,
pick_feat=FLAGS.id)
vizmap = vizmodel.layers['deconvim']
feat_op = vizmodel.feats
max_act_op = vizmodel.max_act
act_size = vizmodel.receptive_size[FLAGS.feat]
act_scale = vizmodel.stride[FLAGS.feat]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
max_act_list = []
while input_im.epochs_completed < 1:
im = input_im.next_batch()[0]
max_act = sess.run(max_act_op, feed_dict={vizmodel.im: im})
max_act_list.append(max_act)
max_list = np.argsort(max_act_list)[::-1]
im_file_list = input_im.get_data_list()[0]
feat_list = []
im_list = []
for i in range(0, 10):
im = input_im.next_batch()[0]
file_path = os.path.join(config.im_path, im_file_list[max_list[i]])
im = np.array([im_scale(scipy.misc.imread(file_path, mode='RGB'))])
cur_vizmap, feat_map, max_act = sess.run(
[vizmap, feat_op, max_act_op], feed_dict={vizmodel.im: im})
act_ind = np.nonzero((feat_map))
print('Location of max activation {}'.format(act_ind))
# get only the first nonzero element
act_c = (act_ind[1][0], act_ind[2][0])
min_x = max(0, int(act_c[0] * act_scale - act_size / 2))
max_x = min(IM_SIZE, int(act_c[0] * act_scale + act_size / 2))
min_y = max(0, int(act_c[1] * act_scale - act_size / 2))
max_y = min(IM_SIZE, int(act_c[1] * act_scale + act_size / 2))
im_crop = im[0, min_x:max_x, min_y:max_y, :]
act_crop = cur_vizmap[0, min_x:max_x, min_y:max_y, :]
pad_size = (act_size - im_crop.shape[0], act_size - im_crop.shape[1])
im_crop = np.pad(im_crop,
((0, pad_size[0]), (0, pad_size[1]), (0, 0)),
'constant',
constant_values=0)
act_crop = np.pad(act_crop,
((0, pad_size[0]),(0, pad_size[1]), (0, 0)),
'constant',
constant_values=0)
feat_list.append(act_crop)
im_list.append(im_crop)
viz.viz_filters(np.transpose(feat_list, (1, 2, 3, 0)),
[3, 3],
os.path.join(config.save_path, '{}_feat.png'.format(FLAGS.feat)),
gap=2,
gap_color=0,
nf=normlize.indentity,
shuffle=False)
viz.viz_filters(np.transpose(im_list, (1, 2, 3, 0)),
[3, 3],
os.path.join(config.save_path, '{}_im.png'.format(FLAGS.feat)),
gap=2,
gap_color=0,
nf=normlize.indentity,
shuffle=False)
|
the-stack_0_3197 | import subprocess
import typer
from typer.testing import CliRunner
from docs_src.options.name import tutorial005 as mod
runner = CliRunner()
app = typer.Typer()
app.command()(mod.main)
def test_option_help():
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
assert "-n, --name TEXT" in result.output
assert "-f, --formal" in result.output
def test_call():
result = runner.invoke(app, ["-n", "Camila"])
assert result.exit_code == 0
assert "Hello Camila" in result.output
def test_call_formal():
result = runner.invoke(app, ["-n", "Camila", "-f"])
assert result.exit_code == 0
assert "Good day Ms. Camila." in result.output
def test_call_formal_condensed():
result = runner.invoke(app, ["-fn", "Camila"])
assert result.exit_code == 0
assert "Good day Ms. Camila." in result.output
def test_call_condensed_wrong_order():
result = runner.invoke(app, ["-nf", "Camila"])
assert result.exit_code != 0
def test_script():
result = subprocess.run(
["coverage", "run", mod.__file__, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "Usage" in result.stdout
|
the-stack_0_3198 | import csv
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
#from dbn import SupervisedDBNClassification
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
def loaddata(filename, instanceCol):
    """Load a feature CSV and return (features, labels) arrays, skipping the header row."""
    file_reader = csv.reader(open(filename, 'r'), delimiter=',')
x = []
y = []
for row in file_reader:
x.append(row[0:instanceCol])
y.append(row[-1])
    return np.array(x[1:]).astype(np.float32), np.array(y[1:]).astype(int)
def modeldata(filename):
    """Train and evaluate a RandomForestClassifier on the given feature CSV, printing accuracy scores."""
scores = []
print(filename)
X,Y = loaddata(filename, 99)
for i in range(3):
#print('Cross ' + str(i))
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
# relu, sigmoid
classifier = RandomForestClassifier(max_depth=2, random_state=0)
classifier.fit(X_train, Y_train)
Y_pred = classifier.predict(X_test)
scores.append(accuracy_score(Y_test, Y_pred))
#print(classification_report(Y_test, Y_pred))
print('All Accuracy Scores in Cross: ' + str(scores))
print('Mean Accuracy Scores: ' + str(np.mean(scores)))
if __name__ == '__main__':
modeldata('D:\\Databases\\PDA\\CSV\\feature(MFCC-70-30-1400b).csv')
modeldata('D:\\Databases\\PDA\\CSV\\feature(FBank-70-30-1400b).csv')
modeldata('D:\\Databases\\PDA\\CSV\\feature(LogFBank-70-30-1400b).csv')
modeldata('D:\\Databases\\PDA\\CSV\\feature(Fractal-70-30-1400b).csv')
|
the-stack_0_3202 | import scrapy # noqa: F401
import snoop
import isort # noqa: F401
from itertools import zip_longest
class SPIDER_1984(scrapy.Spider):
name = 'spider_1984'
start_urls = ["https://www.reddit.com/r/linux/comments/sjlu6l/version_0345_pipewire/"]
@snoop
def parse(self, response):
        # getall() keeps titles as a list so they line up with the other lists in zip_longest
        srch_titles = response.xpath('//h1/text()').getall()
srch_links = response.xpath('//a[@href]').getall()
srch_content = response.xpath("//p/text()").getall()
for item in zip_longest(srch_titles, srch_links, srch_content, fillvalue='missing'):
results = {
"title": item[0],
"links": item[1],
"content": item[2],
}
yield results
|
the-stack_0_3203 | import sys
sys.path.insert(0, '../../')
import pyrosim
def send_point_mass_example( sim ):
fixed_box = sim.send_box( position = ( -2, 0, 1 ) )
sim.send_slider_joint( -1, fixed_box, joint_range = 0 )
free_box = sim.send_box( position = ( -2.5, 0, 1 ) )
sim.send_point_mass_spring_joint( fixed_box, free_box,
resting_length = 0.5,
stiffness = 1.0 )
def send_hinge_example( sim ):
fixed_box = sim.send_box( position = ( 0.5, 0, 1 ),
color = ( 1, 0, 0 ) )
sim.send_slider_joint( -1, fixed_box, joint_range = 0 )
free_box = sim.send_box( position = ( -0.5, 0, 1 ),
color = ( 1, 0, 0 ) )
sim.send_hinge_spring_joint( fixed_box, free_box,
stiffness = 0.5,
axis1 = ( 0, 1, 0 ),
axis2 = ( 0, 0, 1 ),
damping = 0.01 )
def send_linear_example( sim ):
box1 = sim.send_box( position = ( 2, 0, 1 ),
color = ( 0, 1, 0 ) )
box2 = sim.send_box( position = ( 2.5, 0, 1 ),
color = ( 0, 1, 0 ) )
sim.send_linear_spring_joint( box1, box2,
stiffness = 1.0,
resting_length = 0.75,
damping = 0.01 )
sim = pyrosim.Simulator( eval_steps = -1, play_paused = True, draw_joints = True )
sim.set_friction( mu = 0 )
sim.set_current_collision_group( 'springs' )
send_point_mass_example( sim )
send_linear_example( sim )
send_hinge_example( sim )
sim.set_current_collision_group( 'environment' )
# send env box
env_box = sim.send_box( position = ( -0.6, 0, 4 ),
color = ( 0, 0, 0 ) )
sim.assign_collision( 'springs', 'environment' )
sim.start()
sim.wait_to_finish()
print(sim._raw_cerr) |
the-stack_0_3204 | #!/usr/bin/env python3
import json
import sys
pastafile = "./plugins/copypasta/copypastas.json"
def commandlist(obj):
commands = ""
for key in obj:
commands += str(key) + " "
cmds = "`"+commands.strip().replace(" ", ", ")+"`"
return cmds
with open(pastafile) as pf:
try:
obj = json.load(pf)
    except ValueError:
        print('Error loading JSON from file')
        sys.exit(1)
if (len(sys.argv) < 3):
cmds = commandlist(obj)
reply = "Missing argument. Current available copypasta are: " + cmds
else:
pasta = ""
for key in obj:
if (sys.argv[2] == key):
pasta = obj[key]
if (pasta == ""):
cmds = commandlist(obj)
reply = "Invalid argument. Current available copypasta are: " + cmds
else:
reply = pasta
print(reply)
|
the-stack_0_3205 | #!/usr/bin/env python
r"""
See help text for details.
"""
import sys
import subprocess
import re
save_dir_path = sys.path.pop(0)
modules = ['gen_arg', 'gen_print', 'gen_valid', 'gen_misc', 'gen_cmd', 'var_funcs']
for module in modules:
exec("from " + module + " import *")
sys.path.insert(0, save_dir_path)
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS]',
description="%(prog)s will create a status file path name adhering to the"
+ " following pattern: <status dir path>/<prefix>.yymmdd."
+ "hhmmss.status. It will then run the command string and"
+ " direct its stdout/stderr to the status file and optionally"
+ " to stdout. This dual output streaming will be"
+ " accomplished using either the \"script\" or the \"tee\""
+ " program. %(prog)s will also set and export environment"
+ " variable \"AUTO_STATUS_FILE_PATH\" for the benefit of"
+ " child programs.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-+')
parser.add_argument(
'--status_dir_path',
default='',
help="The path to the directory where the status file will be created."
+ "%(default)s The default value is obtained from environment"
+ " variable \"${STATUS_DIR_PATH}\", if set or from \"${HOME}/"
+ "status/\".")
parser.add_argument(
'--prefix',
default='',
help="The prefix for the generated file name.%(default)s The default value"
+ " is the command portion (i.e. the first token) of the command"
+ " string.")
parser.add_argument(
'--status_file_name',
default='',
help="This allows the user to explicitly specify the status file name. If"
+ " this argument is not used, %(prog)s composes a status file name."
+ " If this argument is specified, the \"--prefix\" argument is"
+ " ignored.")
parser.add_argument(
'--stdout',
default=1,
type=int,
choices=[1, 0],
help="Indicates that stdout/stderr from the command string execution"
+ " should be written to stdout as well as to the status file.")
parser.add_argument(
'--tee',
default=1,
type=int,
choices=[1, 0],
help="Indicates that \"tee\" rather than \"script\" should be used.")
parser.add_argument(
'--show_url',
default=0,
type=int,
choices=[1, 0],
help="Indicates that the status file path shown should be shown in the"
+ " form of a url. If the output is to be viewed from a browser,"
+ " this may well become a clickable link. Note that the"
+ " get_file_path_url.py program must be found in the \"PATH\""
+ " environment variable for this argument to be effective.")
parser.add_argument(
'command_string',
default='',
nargs='*',
help="The command string to be run.%(default)s")
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
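# Illustrative invocation (the prefix and command below are assumptions):
#   auto_status_file.py --prefix=my_tests --stdout=1 --tee=1 run_my_tests.sh
# would stream the command's output to stdout while appending it to a status
# file such as ${HOME}/status/my_tests.yymmdd.hhmmss.status.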
def validate_parms():
r"""
Validate program parameters, etc.
"""
global status_dir_path
global command_string
# Convert command_string from list to string.
command_string = " ".join(command_string)
set_pgm_arg(command_string)
valid_value(command_string)
if status_dir_path == "":
status_dir_path = \
os.environ.get("STATUS_DIR_PATH",
os.environ.get("HOME") + "/status/")
status_dir_path = add_trailing_slash(status_dir_path)
set_pgm_arg(status_dir_path)
valid_dir_path(status_dir_path)
global prefix
global status_file_name
if status_file_name == "":
if prefix == "":
prefix = command_string.split(" ")[0]
# File extensions (e.g. ".sh", ".py", .etc), look clumsy in status file names.
extension_regex = "\\.[a-zA-Z0-9]{1,3}$"
prefix = re.sub(extension_regex, "", prefix)
set_pgm_arg(prefix)
status_file_name = prefix + "." + file_date_time_stamp() + ".status"
set_pgm_arg(status_file_name)
global status_file_path
status_file_path = status_dir_path + status_file_name
# Set environment variable for the benefit of child programs.
os.environ['AUTO_STATUS_FILE_PATH'] = status_file_path
# Set deprecated but still used AUTOSCRIPT_STATUS_FILE_PATH value.
os.environ['AUTOSCRIPT_STATUS_FILE_PATH'] = status_file_path
def script_func(command_string, status_file_path):
r"""
Run the command string producing both stdout and file output via the script command and return the
shell_rc.
Description of argument(s):
command_string The command string to be run.
status_file_path The path to the status file which is to contain a copy of all stdout.
"""
cmd_buf = "script -a -q -f " + status_file_path + " -c '" \
+ escape_bash_quotes(command_string) + " ; printf \"\\n" \
+ sprint_varx(ret_code_str, "${?}").rstrip("\n") + "\\n\"'"
qprint_issuing(cmd_buf)
sub_proc = subprocess.Popen(cmd_buf, shell=True)
sub_proc.communicate()
shell_rc = sub_proc.returncode
# Retrieve return code by examining ret_code_str output statement from status file.
# Example text to be analyzed.
# auto_status_file_ret_code: 127
cmd_buf = "tail -n 10 " + status_file_path + " | egrep -a \"" \
+ ret_code_str + ":[ ]+\""
rc, output = shell_cmd(cmd_buf)
key, value = parse_key_value(output)
shell_rc = int(value)
return shell_rc
def tee_func(command_string, status_file_path):
r"""
Run the command string producing both stdout and file output via the tee command and return the shell_rc.
Description of argument(s):
command_string The command string to be run.
status_file_path The path to the status file which is to contain a copy of all stdout.
"""
cmd_buf = "set -o pipefail ; " + command_string + " 2>&1 | tee -a " \
+ status_file_path
qprint_issuing(cmd_buf)
sub_proc = subprocess.Popen(cmd_buf, shell=True)
sub_proc.communicate()
shell_rc = sub_proc.returncode
    print()
print_varx(ret_code_str, shell_rc)
with open(status_file_path, "a") as status_file:
# Append ret code string and status_file_path to end of status file.
status_file.write("\n" + sprint_varx(ret_code_str, shell_rc))
return shell_rc
def main():
gen_setup()
set_term_options(term_requests={'pgm_names': [command_string.split(" ")[0]]})
global ret_code_str
ret_code_str = re.sub("\\.py$", "", pgm_name) + "_ret_code"
global show_url
if show_url:
shell_rc, output = shell_cmd("which get_file_path_url.py", show_err=0)
if shell_rc != 0:
show_url = 0
set_pgm_arg(show_url)
else:
shell_rc, status_file_url = shell_cmd("get_file_path_url.py "
+ status_file_path)
status_file_url = status_file_url.rstrip("\n")
# Print status file path/url to stdout and to status file.
with open(status_file_path, "w+") as status_file:
if show_url:
print_var(status_file_url)
status_file.write(sprint_var(status_file_url))
else:
print_var(status_file_path)
status_file.write(sprint_var(status_file_path))
if stdout:
if tee:
shell_rc = tee_func(command_string, status_file_path)
else:
shell_rc = script_func(command_string, status_file_path)
if show_url:
print_var(status_file_url)
else:
print_var(status_file_path)
else:
cmd_buf = command_string + " >> " + status_file_path + " 2>&1"
shell_rc, output = shell_cmd(cmd_buf, show_err=0)
with open(status_file_path, "a") as status_file:
# Append ret code string and status_file_path to end of status
# file.
status_file.write("\n" + sprint_varx(ret_code_str, shell_rc))
# Append status_file_path print statement to end of status file.
with open(status_file_path, "a") as status_file:
if show_url:
status_file.write(sprint_var(status_file_url))
else:
status_file.write(sprint_var(status_file_path))
exit(shell_rc)
main()
|
the-stack_0_3209 | import os
import copy
import regex
import asyncio
import logging
import contextlib
import collections
from collections.abc import Mapping
import synapse
import synapse.exc as s_exc
import synapse.axon as s_axon
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.datamodel as s_datamodel
import synapse.lib.base as s_base
import synapse.lib.cell as s_cell
import synapse.lib.chop as s_chop
import synapse.lib.coro as s_coro
import synapse.lib.hive as s_hive
import synapse.lib.view as s_view
import synapse.lib.cache as s_cache
import synapse.lib.layer as s_layer
import synapse.lib.nexus as s_nexus
import synapse.lib.queue as s_queue
import synapse.lib.scope as s_scope
import synapse.lib.storm as s_storm
import synapse.lib.agenda as s_agenda
import synapse.lib.config as s_config
import synapse.lib.parser as s_parser
import synapse.lib.dyndeps as s_dyndeps
import synapse.lib.grammar as s_grammar
import synapse.lib.httpapi as s_httpapi
import synapse.lib.modules as s_modules
import synapse.lib.spooled as s_spooled
import synapse.lib.version as s_version
import synapse.lib.modelrev as s_modelrev
import synapse.lib.stormsvc as s_stormsvc
import synapse.lib.lmdbslab as s_lmdbslab
# Importing these registers their commands
import synapse.lib.stormhttp as s_stormhttp # NOQA
import synapse.lib.stormwhois as s_stormwhois # NOQA
import synapse.lib.provenance as s_provenance
import synapse.lib.stormtypes as s_stormtypes
import synapse.lib.stormlib.json as s_stormlib_json # NOQA
import synapse.lib.stormlib.stix as s_stormlib_stix
import synapse.lib.stormlib.macro as s_stormlib_macro
import synapse.lib.stormlib.model as s_stormlib_model
import synapse.lib.stormlib.backup as s_stormlib_backup # NOQA
import synapse.lib.stormlib.infosec as s_stormlib_infosec # NOQA
import synapse.lib.stormlib.project as s_stormlib_project # NOQA
import synapse.lib.stormlib.version as s_stormlib_version # NOQA
import synapse.lib.stormlib.modelext as s_stormlib_modelext # NOQA
logger = logging.getLogger(__name__)
stormlogger = logging.getLogger('synapse.storm')
'''
A Cortex implements the synapse hypergraph object.
'''
reqver = '>=0.2.0,<3.0.0'
# Constants returned in results from syncLayersEvents and syncIndexEvents
SYNC_NODEEDITS = 0 # A nodeedits: (<offs>, 0, <etyp>, (<etype args>), {<meta>})
SYNC_NODEEDIT = 1 # A nodeedit: (<offs>, 0, <etyp>, (<etype args>))
SYNC_LAYR_ADD = 3 # A layer was added
SYNC_LAYR_DEL = 4 # A layer was deleted
# push/pull def
reqValidPush = s_config.getJsValidator({
'type': 'object',
'properties': {
'url': {'type': 'string'},
'time': {'type': 'number'},
'iden': {'type': 'string', 'pattern': s_config.re_iden},
'user': {'type': 'string', 'pattern': s_config.re_iden},
},
'additionalProperties': True,
'required': ['iden', 'url', 'user', 'time'],
})
reqValidPull = reqValidPush
reqValidTagModel = s_config.getJsValidator({
'type': 'object',
'properties': {
'prune': {'type': 'number', 'minimum': 1},
'regex': {'type': 'array', 'items': {'type': ['string', 'null']}},
},
'additionalProperties': False,
'required': [],
})
def cmprkey_indx(x):
return x[1]
def cmprkey_buid(x):
return x[1][1]
async def wrap_liftgenr(iden, genr):
async for indx, buid, sode in genr:
yield iden, (indx, buid), sode
class CoreApi(s_cell.CellApi):
'''
The CoreApi is exposed when connecting to a Cortex over Telepath.
Many CoreApi methods operate on packed nodes consisting of primitive data structures
which can be serialized with msgpack/json.
    An example of a packed Node::
( (<form>, <valu>), {
"props": {
<name>: <valu>,
...
},
"tags": {
"foo": <time>,
"foo.bar": <time>,
},
})
'''
@s_cell.adminapi()
def getCoreMods(self):
return self.cell.getCoreMods()
def stat(self):
self.user.confirm(('status',))
s_common.deprecated('stat')
return self.cell.stat()
async def getModelDict(self):
'''
Return a dictionary which describes the data model.
Returns:
(dict): A model description dictionary.
'''
return await self.cell.getModelDict()
async def getModelDefs(self):
return await self.cell.getModelDefs()
def getCoreInfo(self):
'''
Return static generic information about the cortex including model definition
'''
return self.cell.getCoreInfo()
async def getCoreInfoV2(self):
'''
Return static generic information about the cortex including model definition
'''
return await self.cell.getCoreInfoV2()
def _reqValidStormOpts(self, opts):
if opts is None:
opts = {}
opts.setdefault('user', self.user.iden)
if opts.get('user') != self.user.iden:
self.user.confirm(('impersonate',))
return opts
async def callStorm(self, text, opts=None):
'''
Return the value expressed in a return() statement within storm.
'''
opts = self._reqValidStormOpts(opts)
return await self.cell.callStorm(text, opts=opts)
async def exportStorm(self, text, opts=None):
'''
Execute a storm query and package nodes for export/import.
NOTE: This API yields nodes after an initial complete lift
in order to limit exported edges.
'''
opts = self._reqValidStormOpts(opts)
async for pode in self.cell.exportStorm(text, opts=opts):
yield pode
async def feedFromAxon(self, sha256, opts=None):
'''
Import a msgpack .nodes file from the axon.
'''
opts = self._reqValidStormOpts(opts)
return await self.cell.feedFromAxon(sha256, opts=opts)
async def addCronJob(self, cdef):
'''
Add a cron job to the cortex
A cron job is a persistently-stored item that causes storm queries to be run in the future. The specification
for the times that the queries run can be one-shot or recurring.
Args:
query (str): The storm query to execute in the future
reqs (Union[Dict[str, Union[int, List[int]]], List[Dict[...]]]):
Either a dict of the fixed time fields or a list of such dicts. The keys are in the set ('year',
'month', 'dayofmonth', 'dayofweek', 'hour', 'minute'. The values must be positive integers, except for
the key of 'dayofmonth' in which it may also be a negative integer which represents the number of days
from the end of the month with -1 representing the last day of the month. All values may also be lists
of valid values.
incunit (Optional[str]):
A member of the same set as above, with an additional member 'day'. If is None (default), then the
appointment is one-shot and will not recur.
            incvals (Union[int, List[int]]):
                An integer or a list of integers of the number of units
Returns (bytes):
An iden that can be used to later modify, query, and delete the job.
Notes:
reqs must have fields present or incunit must not be None (or both)
            The incunit, if not None, must be larger in unit size than all the keys in all reqs elements.
'''
cdef['creator'] = self.user.iden
s_common.deprecated('addCronJob')
self.user.confirm(('cron', 'add'), gateiden='cortex')
return await self.cell.addCronJob(cdef)
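    # A hypothetical cdef sketch matching the docstring above (the 'storm' key
    # name and all values are assumptions for illustration; check the actual
    # cdef schema for exact field names):
    #
    #   cdef = {
    #       'storm': 'inet:ipv4 | count',
    #       'reqs': {'hour': 2, 'minute': 0},  # fixed fields: run at 02:00
    #       'incunit': 'day',                  # recur daily
    #       'incvals': 1,
    #   }
    #   iden = await coreapi.addCronJob(cdef)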
async def delCronJob(self, iden):
'''
Delete a cron job
Args:
iden (bytes): The iden of the cron job to be deleted
'''
s_common.deprecated('delCronJob')
self.user.confirm(('cron', 'del'), gateiden=iden)
await self.cell.delCronJob(iden)
async def updateCronJob(self, iden, query):
'''
Change an existing cron job's query
Args:
iden (bytes): The iden of the cron job to be changed
'''
s_common.deprecated('updateCronJob')
self.user.confirm(('cron', 'set'), gateiden=iden)
await self.cell.updateCronJob(iden, query)
async def enableCronJob(self, iden):
'''
Enable a cron job
Args:
iden (bytes): The iden of the cron job to be changed
'''
s_common.deprecated('enableCronJob')
self.user.confirm(('cron', 'set'), gateiden=iden)
await self.cell.enableCronJob(iden)
async def disableCronJob(self, iden):
'''
Enable a cron job
Args:
iden (bytes): The iden of the cron job to be changed
'''
s_common.deprecated('disableCronJob')
self.user.confirm(('cron', 'set'), gateiden=iden)
await self.cell.disableCronJob(iden)
async def listCronJobs(self):
'''
Get information about all the cron jobs accessible to the current user
'''
s_common.deprecated('listCronJobs')
crons = []
for cron in await self.cell.listCronJobs():
if not self.user.allowed(('cron', 'get'), gateiden=cron.get('iden')):
continue
crons.append(cron)
return crons
async def editCronJob(self, iden, name, valu):
'''
Update a value in a cron definition.
'''
iden = str(iden)
name = str(name)
self.user.confirm(('cron', 'set', name), gateiden=iden)
return await self.cell.editCronJob(iden, name, valu)
async def setStormCmd(self, cdef):
'''
Set the definition of a pure storm command in the cortex.
'''
self.user.confirm(('admin', 'cmds'))
return await self.cell.setStormCmd(cdef)
async def delStormCmd(self, name):
'''
Remove a pure storm command from the cortex.
'''
self.user.confirm(('admin', 'cmds'))
return await self.cell.delStormCmd(name)
async def _reqDefLayerAllowed(self, perms):
view = self.cell.getView()
wlyr = view.layers[0]
self.user.confirm(perms, gateiden=wlyr.iden)
async def addNodeTag(self, iden, tag, valu=(None, None)):
'''
Add a tag to a node specified by iden.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
valu (tuple): A time interval tuple or (None, None).
'''
s_common.deprecated('addNodeTag')
await self._reqDefLayerAllowed(('node', 'tag', 'add', *tag.split('.')))
return await self.cell.addNodeTag(self.user, iden, tag, valu)
async def delNodeTag(self, iden, tag):
'''
Delete a tag from the node specified by iden. Deprecated in 2.0.0.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
'''
s_common.deprecated('delNodeTag')
await self._reqDefLayerAllowed(('node', 'tag', 'del', *tag.split('.')))
return await self.cell.delNodeTag(self.user, iden, tag)
async def setNodeProp(self, iden, name, valu):
'''
Set a property on a single node. Deprecated in 2.0.0.
'''
s_common.deprecated('setNodeProp')
buid = s_common.uhex(iden)
async with await self.cell.snap(user=self.user) as snap:
with s_provenance.claim('coreapi', meth='prop:set', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
prop = node.form.props.get(name)
self.user.confirm(('node', 'prop', 'set', prop.full), gateiden=snap.wlyr.iden)
await node.set(name, valu)
return node.pack()
async def delNodeProp(self, iden, name):
'''
Delete a property from a single node. Deprecated in 2.0.0.
'''
s_common.deprecated('delNodeProp')
buid = s_common.uhex(iden)
async with await self.cell.snap(user=self.user) as snap:
with s_provenance.claim('coreapi', meth='prop:del', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
prop = node.form.props.get(name)
self.user.confirm(('node', 'prop', 'del', prop.full), gateiden=snap.wlyr.iden)
await node.pop(name)
return node.pack()
async def addNode(self, form, valu, props=None):
'''
Deprecated in 2.0.0.
'''
s_common.deprecated('addNode')
async with await self.cell.snap(user=self.user) as snap:
self.user.confirm(('node', 'add', form), gateiden=snap.wlyr.iden)
with s_provenance.claim('coreapi', meth='node:add', user=snap.user.iden):
node = await snap.addNode(form, valu, props=props)
return node.pack()
async def addNodes(self, nodes):
'''
Add a list of packed nodes to the cortex.
Args:
nodes (list): [ ( (form, valu), {'props':{}, 'tags':{}}), ... ]
Yields:
(tuple): Packed node tuples ((form,valu), {'props': {}, 'tags':{}})
Deprecated in 2.0.0
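Example:
    A minimal sketch (assumes ``core`` is a connected CoreApi proxy; the form, value, and tag shown are illustrative placeholders):
        nodes = [
            (('inet:fqdn', 'vertex.link'), {'props': {}, 'tags': {'my.tag': (None, None)}}),
        ]
        async for pode in core.addNodes(nodes):
            print(pode[0])  # the (form, valu) ndef of each packed node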
'''
s_common.deprecated('addNodes')
# First check that the user may add each form
done = {}
for node in nodes:
formname = node[0][0]
if done.get(formname):
continue
await self._reqDefLayerAllowed(('node', 'add', formname))
done[formname] = True
async with await self.cell.snap(user=self.user) as snap:
with s_provenance.claim('coreapi', meth='node:add', user=snap.user.iden):
snap.strict = False
async for node in snap.addNodes(nodes):
if node is not None:
node = node.pack()
yield node
async def getFeedFuncs(self):
'''
Get a list of Cortex feed functions.
Notes:
Each feed dictionary has the name of the feed function, the
full docstring for the feed function, and the first line of
the docstring broken out into their own keys for easy use.
Returns:
tuple: A tuple of dictionaries.
'''
return await self.cell.getFeedFuncs()
async def addFeedData(self, name, items, *, viewiden=None):
view = self.cell.getView(viewiden, user=self.user)
if view is None:
raise s_exc.NoSuchView(iden=viewiden)
wlyr = view.layers[0]
parts = name.split('.')
self.user.confirm(('feed:data', *parts), gateiden=wlyr.iden)
await self.cell.boss.promote('feeddata',
user=self.user,
info={'name': name,
'view': view.iden,
'nitems': len(items),
})
async with await self.cell.snap(user=self.user, view=view) as snap:
with s_provenance.claim('feed:data', name=name, user=snap.user.iden):
snap.strict = False
await snap.addFeedData(name, items)
async def count(self, text, opts=None):
'''
Count the number of nodes which result from a storm query.
Args:
text (str): Storm query text.
opts (dict): Storm query options.
Returns:
(int): The number of nodes resulting from the query.
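Example:
    A brief sketch (assumes ``core`` is a connected CoreApi proxy; the query text is illustrative):
        num = await core.count('inet:ipv4')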
'''
opts = self._reqValidStormOpts(opts)
return await self.cell.count(text, opts=opts)
async def eval(self, text, opts=None):
'''
Evaluate a storm query and yield packed nodes.
NOTE: This API is deprecated as of 2.0.0 and will be removed in 3.0.0
'''
s_common.deprecated('eval')
opts = self._reqValidStormOpts(opts)
view = self.cell._viewFromOpts(opts)
async for pode in view.iterStormPodes(text, opts=opts):
yield pode
async def storm(self, text, opts=None):
'''
Evaluate a storm query and yield result messages.
Yields:
((str,dict)): Storm messages.
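Example:
    A brief sketch (assumes ``core`` is a connected CoreApi proxy; the query text is illustrative):
        async for mesg in core.storm('inet:ipv4 | limit 10'):
            if mesg[0] == 'node':
                print(mesg[1][0])  # the packed node ndef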
'''
opts = self._reqValidStormOpts(opts)
async for mesg in self.cell.storm(text, opts=opts):
yield mesg
async def reqValidStorm(self, text, opts=None):
'''
Parse a Storm query to validate it.
Args:
text (str): The text of the Storm query to parse.
opts (dict): A Storm options dictionary.
Returns:
True: If the query is valid.
Raises:
BadSyntaxError: If the query is invalid.
'''
return await self.cell.reqValidStorm(text, opts)
async def watch(self, wdef):
'''
Hook cortex/view/layer watch points based on a specified watch definition.
Example:
wdef = { 'tags': [ 'foo.bar', 'baz.*' ] }
async for mesg in core.watch(wdef):
dostuff(mesg)
'''
s_common.deprecated('watch')
iden = wdef.get('view', self.cell.view.iden)
self.user.confirm(('watch',), gateiden=iden)
async for mesg in self.cell.watch(wdef):
yield mesg
async def syncLayerNodeEdits(self, offs, layriden=None, wait=True):
'''
Yield (indx, mesg) nodeedit sets for the given layer beginning at offset.
Once caught up, this API will begin yielding nodeedits in real-time.
The generator will only terminate on network disconnect or if the
consumer falls behind the max window size of 10,000 nodeedit messages.
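Example:
    A brief sketch (assumes ``core`` is a connected CoreApi proxy and ``layriden`` is the iden of an existing layer):
        async for offs, edits in core.syncLayerNodeEdits(0, layriden=layriden, wait=False):
            print(offs, edits)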
'''
layr = self.cell.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
self.user.confirm(('sync',), gateiden=layr.iden)
async for item in self.cell.syncLayerNodeEdits(layr.iden, offs, wait=wait):
yield item
@s_cell.adminapi()
async def splices(self, offs=None, size=None, layriden=None):
'''
Return the list of splices at the given offset.
'''
s_common.deprecated('splices')
layr = self.cell.getLayer(layriden)
count = 0
async for mesg in layr.splices(offs=offs, size=size):
count += 1
if not count % 1000:
await asyncio.sleep(0)
yield mesg
@s_cell.adminapi()
async def splicesBack(self, offs=None, size=None):
'''
Return the list of splices backwards from the given offset.
'''
s_common.deprecated('splicesBack')
count = 0
async for mesg in self.cell.view.layers[0].splicesBack(offs=offs, size=size):
count += 1
if not count % 1000: # pragma: no cover
await asyncio.sleep(0)
yield mesg
async def spliceHistory(self):
'''
Yield splices backwards from the end of the splice log.
Will only return the user's own splices unless they are an admin.
'''
s_common.deprecated('spliceHistory')
async for splice in self.cell.spliceHistory(self.user):
yield splice
@s_cell.adminapi()
async def provStacks(self, offs, size):
'''
Return stream of (iden, provenance stack) tuples at the given offset.
'''
count = 0
for iden, stack in self.cell.provstor.provStacks(offs, size):
count += 1
if not count % 1000:
await asyncio.sleep(0)
yield s_common.ehex(iden), stack
@s_cell.adminapi()
async def getProvStack(self, iden: str):
'''
Return the provenance stack associated with the given iden.
Args:
iden (str): the iden of the provenance stack
Note: the iden appears on each splice entry as the 'prov' property
'''
if iden is None:
return None
return self.cell.provstor.getProvStack(s_common.uhex(iden))
async def getPropNorm(self, prop, valu):
'''
Get the normalized property value based on the Cortex data model.
Args:
prop (str): The property to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchProp: If the prop does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
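Example:
    A brief sketch (assumes ``core`` is a connected CoreApi proxy):
        norm, info = await core.getPropNorm('inet:ipv4', '1.2.3.4')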
'''
return await self.cell.getPropNorm(prop, valu)
async def getTypeNorm(self, name, valu):
'''
Get the normalized type value based on the Cortex data model.
Args:
name (str): The type to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchType: If the type does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
return await self.cell.getTypeNorm(name, valu)
async def addForm(self, formname, basetype, typeopts, typeinfo):
'''
Add an extended form to the data model.
Extended forms *must* begin with _
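Example:
    A brief sketch (assumes ``core`` is a connected CoreApi proxy; the form name and type options are illustrative):
        # '_acme:name' is a placeholder extended form name
        await core.addForm('_acme:name', 'str', {'lower': True}, {'doc': 'An example extended form.'})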
'''
self.user.confirm(('model', 'form', 'add', formname))
return await self.cell.addForm(formname, basetype, typeopts, typeinfo)
async def delForm(self, formname):
'''
Remove an extended form from the data model.
'''
self.user.confirm(('model', 'form', 'del', formname))
return await self.cell.delForm(formname)
async def addFormProp(self, form, prop, tdef, info):
'''
Add an extended property to the given form.
Extended properties *must* begin with _
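Example:
    A brief sketch (assumes ``core`` is a connected CoreApi proxy; the property name is illustrative):
        # '_score' is a placeholder extended property name
        await core.addFormProp('inet:ipv4', '_score', ('int', {}), {'doc': 'An example extended property.'})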
'''
self.user.confirm(('model', 'prop', 'add', form))
return await self.cell.addFormProp(form, prop, tdef, info)
async def delFormProp(self, form, name):
'''
Remove an extended property from the given form.
'''
self.user.confirm(('model', 'prop', 'del', form))
return await self.cell.delFormProp(form, name)
async def addUnivProp(self, name, tdef, info):
'''
Add an extended universal property.
Extended properties *must* begin with _
'''
self.user.confirm(('model', 'univ', 'add'))
return await self.cell.addUnivProp(name, tdef, info)
async def delUnivProp(self, name):
'''
Remove an extended universal property.
'''
self.user.confirm(('model', 'univ', 'del'))
return await self.cell.delUnivProp(name)
async def addTagProp(self, name, tdef, info):
'''
Add a tag property to record data about tags on nodes.
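Example:
    A brief sketch (assumes ``core`` is a connected CoreApi proxy; the property name is illustrative):
        await core.addTagProp('score', ('int', {}), {'doc': 'An example tag property.'})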
'''
self.user.confirm(('model', 'tagprop', 'add'))
return await self.cell.addTagProp(name, tdef, info)
async def delTagProp(self, name):
'''
Remove a previously added tag property.
'''
self.user.confirm(('model', 'tagprop', 'del'))
return await self.cell.delTagProp(name)
async def addStormPkg(self, pkgdef):
self.user.confirm(('pkg', 'add'))
return await self.cell.addStormPkg(pkgdef)
async def delStormPkg(self, iden):
self.user.confirm(('pkg', 'del'))
return await self.cell.delStormPkg(iden)
@s_cell.adminapi()
async def getStormPkgs(self):
return await self.cell.getStormPkgs()
@s_cell.adminapi()
async def getStormPkg(self, name):
return await self.cell.getStormPkg(name)
@s_cell.adminapi()
async def addStormDmon(self, ddef):
return await self.cell.addStormDmon(ddef)
@s_cell.adminapi()
async def getStormDmons(self):
return await self.cell.getStormDmons()
@s_cell.adminapi()
async def getStormDmonLog(self, iden):
return await self.cell.getStormDmonLog(iden)
@s_cell.adminapi()
async def getStormDmon(self, iden):
return await self.cell.getStormDmon(iden)
@s_cell.adminapi()
async def bumpStormDmon(self, iden):
return await self.cell.bumpStormDmon(iden)
@s_cell.adminapi()
async def disableStormDmon(self, iden):
return await self.cell.disableStormDmon(iden)
@s_cell.adminapi()
async def enableStormDmon(self, iden):
return await self.cell.enableStormDmon(iden)
@s_cell.adminapi()
async def delStormDmon(self, iden):
return await self.cell.delStormDmon(iden)
@s_cell.adminapi(log=True)
async def enableMigrationMode(self):
await self.cell._enableMigrationMode()
@s_cell.adminapi(log=True)
async def disableMigrationMode(self):
await self.cell._disableMigrationMode()
@s_cell.adminapi()
async def cloneLayer(self, iden, ldef=None):
ldef = ldef or {}
ldef['creator'] = self.user.iden
return await self.cell.cloneLayer(iden, ldef)
async def getStormVar(self, name, default=None):
self.user.confirm(('globals', 'get', name))
return await self.cell.getStormVar(name, default=default)
async def popStormVar(self, name, default=None):
self.user.confirm(('globals', 'pop', name))
return await self.cell.popStormVar(name, default=default)
async def setStormVar(self, name, valu):
self.user.confirm(('globals', 'set', name))
return await self.cell.setStormVar(name, valu)
async def syncLayersEvents(self, offsdict=None, wait=True):
self.user.confirm(('sync',))
async for item in self.cell.syncLayersEvents(offsdict=offsdict, wait=wait):
yield item
async def syncIndexEvents(self, matchdef, offsdict=None, wait=True):
self.user.confirm(('sync',))
async for item in self.cell.syncIndexEvents(matchdef, offsdict=offsdict, wait=wait):
yield item
async def iterFormRows(self, layriden, form, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes of a single form, optionally (re)starting at startvalue
Args:
layriden (str): Iden of the layer to retrieve the nodes
form(str): A form name
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterFormRows(layriden, form, stortype=stortype, startvalu=startvalu):
yield item
async def iterPropRows(self, layriden, form, prop, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes with a particular secondary property, optionally (re)starting at startvalue
Args:
layriden (str): Iden of the layer to retrieve the nodes
form(str): A form name.
prop (str): A secondary property name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterPropRows(layriden, form, prop, stortype=stortype, startvalu=startvalu):
yield item
async def iterUnivRows(self, layriden, prop, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes with a particular universal property, optionally (re)starting at startvalue
Args:
layriden (str): Iden of the layer to retrieve the nodes
prop (str): A universal property name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterUnivRows(layriden, prop, stortype=stortype, startvalu=startvalu):
yield item
async def iterTagRows(self, layriden, tag, form=None, starttupl=None):
'''
Yields (buid, (valu, form)) values that match a tag and optional form, optionally (re)starting at starttupl.
Args:
layriden (str): Iden of the layer to retrieve the nodes
tag (str): the tag to match
form (Optional[str]): if present, only yields buids of nodes that match the form.
starttupl (Optional[Tuple[buid, form]]): if present, (re)starts the stream of values there.
Returns:
AsyncIterator[Tuple(buid, (valu, form))]
Note:
This yields (buid, (tagvalu, form)) instead of just buid, valu in order to allow resuming an interrupted
call by feeding the last value retrieved into starttupl
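Example:
    A brief sketch (assumes ``core`` is a connected CoreApi proxy and ``layriden`` is the iden of an existing layer; the tag is illustrative):
        last = None
        async for buid, (valu, form) in core.iterTagRows(layriden, 'my.tag'):
            last = (buid, form)
        # an interrupted call could be resumed by passing starttupl=last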
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterTagRows(layriden, tag, form=form, starttupl=starttupl):
yield item
async def iterTagPropRows(self, layriden, tag, prop, form=None, stortype=None, startvalu=None):
'''
Yields (buid, valu) that match a tag:prop, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
tag (str): tag name
prop (str): prop name
form (Optional[str]): optional form name
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
self.user.confirm(('layer', 'lift', layriden))
async for item in self.cell.iterTagPropRows(layriden, tag, prop, form=form, stortype=stortype,
startvalu=startvalu):
yield item
class Cortex(s_cell.Cell): # type: ignore
'''
A Cortex implements the synapse hypergraph.
The bulk of the Cortex API lives on the Snap() object which can
be obtained by calling Cortex.snap() in a with block. This allows
callers to manage transaction boundaries explicitly and dramatically
increases performance.
'''
# For the cortex, nexslog:en defaults to True
confbase = copy.deepcopy(s_cell.Cell.confbase)
confbase['nexslog:en']['default'] = True # type: ignore
confdefs = {
'axon': {
'description': 'A telepath URL for a remote axon.',
'type': 'string'
},
'cron:enable': {
'default': True,
'description': 'Enable cron jobs running.',
'type': 'boolean'
},
'trigger:enable': {
'default': True,
'description': 'Enable triggers running.',
'type': 'boolean'
},
'layer:lmdb:map_async': {
'default': True,
'description': 'Set the default lmdb:map_async value in LMDB layers.',
'type': 'boolean'
},
'layers:lockmemory': {
'default': False,
'description': 'Should new layers lock memory for performance by default.',
'type': 'boolean'
},
'layers:logedits': {
'default': True,
'description': 'Whether nodeedits are logged in each layer.',
'type': 'boolean'
},
'provenance:en': {
'default': False,
'description': 'Enable provenance tracking for all writes.',
'type': 'boolean'
},
'max:nodes': {
'description': 'Maximum number of nodes which are allowed to be stored in a Cortex.',
'type': 'integer',
'minimum': 1,
'hidecmdl': True,
},
'modules': {
'default': [],
'description': 'A list of module classes to load.',
'type': 'array'
},
'storm:log': {
'default': False,
'description': 'Log storm queries via system logger.',
'type': 'boolean'
},
'storm:log:level': {
'default': 30,
'description': 'Logging log level to emit storm logs at.',
'type': 'integer'
},
'http:proxy': {
'description': 'An aiohttp-socks compatible proxy URL to use for the storm HTTP API.',
'type': 'string',
},
}
cellapi = CoreApi
viewapi = s_view.ViewApi
layerapi = s_layer.LayerApi
hiveapi = s_hive.HiveApi
viewctor = s_view.View.anit
layrctor = s_layer.Layer.anit
# phase 2 - service storage
async def initServiceStorage(self):
# NOTE: we may not make *any* nexus actions in this method
if self.inaugural:
await self.cellinfo.set('cortex:version', s_version.version)
corevers = self.cellinfo.get('cortex:version')
s_version.reqVersion(corevers, reqver, exc=s_exc.BadStorageVersion,
mesg='cortex version in storage is incompatible with running software')
self.views = {}
self.layers = {}
self.modules = {}
self.splicers = {}
self.feedfuncs = {}
self.stormcmds = {}
self.maxnodes = self.conf.get('max:nodes')
self.nodecount = 0
self.stormmods = {} # name: mdef
self.stormpkgs = {} # name: pkgdef
self.stormvars = None # type: s_hive.HiveDict
self.svcsbyiden = {}
self.svcsbyname = {}
self.svcsbysvcname = {} # remote name, not local name
self._propSetHooks = {}
self._runtLiftFuncs = {}
self._runtPropSetFuncs = {}
self._runtPropDelFuncs = {}
self.ontagadds = collections.defaultdict(list)
self.ontagdels = collections.defaultdict(list)
self.ontagaddglobs = s_cache.TagGlobs()
self.ontagdelglobs = s_cache.TagGlobs()
self.tagvalid = s_cache.FixedCache(self._isTagValid, size=1000)
self.tagprune = s_cache.FixedCache(self._getTagPrune, size=1000)
self.libroot = (None, {}, {})
self.bldgbuids = {} # buid -> (Node, Event) Nodes under construction
self.axon = None # type: s_axon.AxonApi
self.axready = asyncio.Event()
self.view = None # The default/main view
proven = self.conf.get('provenance:en')
self.provstor = await s_provenance.ProvStor.anit(self.dirn, proven=proven)
self.onfini(self.provstor.fini)
# generic fini handler for the Cortex
self.onfini(self._onCoreFini)
await self._initCoreHive()
self._initSplicers()
self._initStormLibs()
self._initFeedFuncs()
self._initCortexHttpApi()
self.model = s_datamodel.Model()
# Perform module loading
await self._loadCoreMods()
await self._loadExtModel()
await self._initStormCmds()
# Initialize our storage and views
await self._initCoreAxon()
await self._initCoreLayers()
await self._initCoreViews()
self.onfini(self._finiStor)
await self._initCoreQueues()
self.addHealthFunc(self._cortexHealth)
self.stormdmons = await s_storm.DmonManager.anit(self)
self.onfini(self.stormdmons)
self.agenda = await s_agenda.Agenda.anit(self)
self.onfini(self.agenda)
await self._initStormDmons()
self.trigson = self.conf.get('trigger:enable')
await self._initRuntFuncs()
taghive = await self.hive.open(('cortex', 'tagmeta'))
cmdhive = await self.hive.open(('cortex', 'storm', 'cmds'))
pkghive = await self.hive.open(('cortex', 'storm', 'packages'))
svchive = await self.hive.open(('cortex', 'storm', 'services'))
self.taghive = await taghive.dict()
self.cmdhive = await cmdhive.dict()
self.pkghive = await pkghive.dict()
self.svchive = await svchive.dict()
self.deprlocks = await self.hive.get(('cortex', 'model', 'deprlocks'), {})
# TODO: 3.0.0 conversion will truncate this hive key
for name, locked in self.deprlocks.items():
form = self.model.form(name)
if form is not None:
form.locked = locked
prop = self.model.prop(name)
if prop is not None:
prop.locked = locked
_type = self.model.type(name)
if _type is not None:
_type.locked = locked
# Finalize coremodule loading & give svchive a shot to load
await self._initPureStormCmds()
self.dynitems.update({
'cron': self.agenda,
'cortex': self,
'multiqueue': self.multiqueue,
})
await self.auth.addAuthGate('cortex', 'cortex')
def _setPropSetHook(self, name, hook):
self._propSetHooks[name] = hook
async def _callPropSetHook(self, node, prop, norm):
hook = self._propSetHooks.get(prop.full)
if hook is None:
return
await hook(node, prop, norm)
async def _execCellUpdates(self):
await self._bumpCellVers('cortex:defaults', (
(1, self._addAllLayrRead),
))
async def _addAllLayrRead(self):
layriden = self.getView().layers[0].iden
role = await self.auth.getRoleByName('all')
await role.addRule((True, ('layer', 'read')), gateiden=layriden)
async def initServiceRuntime(self):
# do any post-nexus initialization here...
if self.isactive:
await self._checkNexsIndx()
await self._checkLayerModels()
await self._initCoreMods()
await self._initStormSvcs()
# share ourself via the cell dmon as "cortex"
# for potential default remote use
self.dmon.share('cortex', self)
async def initServiceActive(self):
if self.conf.get('cron:enable'):
await self.agenda.start()
await self.stormdmons.start()
async def initServicePassive(self):
await self.agenda.stop()
await self.stormdmons.stop()
@s_nexus.Pusher.onPushAuto('model:depr:lock')
async def setDeprLock(self, name, locked):
todo = []
prop = self.model.prop(name)
if prop is not None and prop.deprecated:
todo.append(prop)
_type = self.model.type(name)
if _type is not None and _type.deprecated:
todo.append(_type)
if not todo:
mesg = 'setDeprLock() called on non-existent or non-deprecated form, property, or type.'
raise s_exc.NoSuchProp(name=name, mesg=mesg)
self.deprlocks[name] = locked
await self.hive.set(('cortex', 'model', 'deprlocks'), self.deprlocks)
for elem in todo:
elem.locked = locked
async def getDeprLocks(self):
'''
Return a dictionary of deprecated properties and their lock status.
'''
retn = {}
for prop in self.model.props.values():
if not prop.deprecated:
continue
retn[prop.full] = prop.locked
return retn
async def addCoreQueue(self, name, info):
if self.multiqueue.exists(name):
mesg = f'Queue named {name} already exists!'
raise s_exc.DupName(mesg=mesg)
await self._push('queue:add', name, info)
@s_nexus.Pusher.onPush('queue:add')
async def _addCoreQueue(self, name, info):
if self.multiqueue.exists(name):
return
await self.auth.addAuthGate(f'queue:{name}', 'queue')
creator = info.get('creator')
if creator is not None:
user = await self.auth.reqUser(creator)
await user.setAdmin(True, gateiden=f'queue:{name}', logged=False)
await self.multiqueue.add(name, info)
async def listCoreQueues(self):
return self.multiqueue.list()
async def getCoreQueue(self, name):
return self.multiqueue.status(name)
async def delCoreQueue(self, name):
if not self.multiqueue.exists(name):
mesg = f'No queue named {name} exists!'
raise s_exc.NoSuchName(mesg=mesg)
await self._push('queue:del', name)
await self.auth.delAuthGate(f'queue:{name}')
@s_nexus.Pusher.onPush('queue:del')
async def _delCoreQueue(self, name):
if not self.multiqueue.exists(name):
return
await self.multiqueue.rem(name)
async def coreQueueGet(self, name, offs=0, cull=True, wait=False):
if offs and cull:
await self.coreQueueCull(name, offs - 1)
async for item in self.multiqueue.gets(name, offs, cull=False, wait=wait):
return item
async def coreQueueGets(self, name, offs=0, cull=True, wait=False, size=None):
if offs and cull:
await self.coreQueueCull(name, offs - 1)
count = 0
async for item in self.multiqueue.gets(name, offs, cull=False, wait=wait):
yield item
count += 1
if size is not None and count >= size:
return
async def coreQueuePuts(self, name, items):
await self._push('queue:puts', name, items)
@s_nexus.Pusher.onPush('queue:puts', passitem=True)
async def _coreQueuePuts(self, name, items, nexsitem):
nexsoff, nexsmesg = nexsitem
await self.multiqueue.puts(name, items, reqid=nexsoff)
@s_nexus.Pusher.onPushAuto('queue:cull')
async def coreQueueCull(self, name, offs):
await self.multiqueue.cull(name, offs)
@s_nexus.Pusher.onPushAuto('queue:pop')
async def coreQueuePop(self, name, offs):
return await self.multiqueue.pop(name, offs)
async def coreQueueSize(self, name):
return self.multiqueue.size(name)
@s_nexus.Pusher.onPushAuto('tag:model:set')
async def setTagModel(self, tagname, name, valu):
'''
Set a model specification property for a tag.
Arguments:
tagname (str): The name of the tag.
name (str): The name of the property.
valu (object): The value of the property.
Tag Model Properties:
regex - A list of None or regular expression strings to match each tag level.
prune - A number that determines how many levels of pruning are desired.
Examples:
await core.setTagModel("cno.cve", "regex", (None, None, "[0-9]{4}", "[0-9]{5}"))
'''
meta = self.taghive.get(tagname)
if meta is None:
meta = {}
meta[name] = valu
reqValidTagModel(meta)
await self.taghive.set(tagname, meta)
# clear cached entries
if name == 'regex':
self.tagvalid.clear()
elif name == 'prune':
self.tagprune.clear()
@s_nexus.Pusher.onPushAuto('tag:model:del')
async def delTagModel(self, tagname):
'''
Delete all the model specification properties for a tag.
Arguments:
tagname (str): The name of the tag.
'''
await self.taghive.pop(tagname)
self.tagvalid.clear()
self.tagprune.clear()
@s_nexus.Pusher.onPushAuto('tag:model:pop')
async def popTagModel(self, tagname, name):
'''
Pop a property from the model specification of a tag.
Arguments:
tagname (str): The name of the tag.
name (str): The name of the specification property.
Returns:
(object): The current value of the property.
'''
meta = self.taghive.get(tagname)
if meta is None:
return None
retn = meta.pop(name, None)
await self.taghive.set(tagname, meta)
if name == 'regex':
self.tagvalid.clear()
elif name == 'prune':
self.tagprune.clear()
return retn
async def isTagValid(self, tagname):
'''
Check if a tag name is valid according to tag model regular expressions.
Returns:
(bool): True if the tag is valid.
'''
return self.tagvalid.get(tagname)
def _isTagValid(self, tagname):
parts = s_chop.tagpath(tagname)
for tag in s_chop.tags(tagname):
meta = self.taghive.get(tag)
if meta is None:
continue
regx = meta.get('regex')
if regx is None:
continue
for i in range(min(len(regx), len(parts))):
if regx[i] is None:
continue
if not regex.fullmatch(regx[i], parts[i]):
return False
return True
async def getTagPrune(self, tagname):
return self.tagprune.get(tagname)
def _getTagPrune(self, tagname):
prune = []
pruning = 0
for tag in s_chop.tags(tagname):
if pruning:
pruning -= 1
prune.append(tag)
continue
meta = self.taghive.get(tag)
if meta is None:
continue
pruning = meta.get('prune', 0)
if pruning:
pruning -= 1
prune.append(tag)
# if we don't reach the final tag for pruning, skip it.
if prune and not prune[-1] == tagname:
return ()
return tuple(prune)
async def getTagModel(self, tagname):
'''
Retrieve the tag model specification for a tag.
Returns:
(dict): The tag model specification or None.
'''
retn = self.taghive.get(tagname)
if retn is not None:
return dict(retn)
async def listTagModel(self):
'''
Retrieve a list of the tag model specifications.
Returns:
([(str, dict), ...]): A list of tag model specification tuples.
'''
return list(self.taghive.items())
async def _finiStor(self):
await asyncio.gather(*[view.fini() for view in self.views.values()])
await asyncio.gather(*[layr.fini() for layr in self.layers.values()])
async def _initRuntFuncs(self):
async def onSetTrigDoc(node, prop, valu):
valu = str(valu)
iden = node.ndef[1]
trig = node.snap.view.triggers.get(iden)
node.snap.user.confirm(('trigger', 'set', 'doc'), gateiden=iden)
await trig.set('doc', valu)
node.props[prop.name] = valu
async def onSetTrigName(node, prop, valu):
valu = str(valu)
iden = node.ndef[1]
trig = node.snap.view.triggers.get(iden)
node.snap.user.confirm(('trigger', 'set', 'name'), gateiden=iden)
await trig.set('name', valu)
node.props[prop.name] = valu
async def onSetCronDoc(node, prop, valu):
valu = str(valu)
iden = node.ndef[1]
appt = await self.agenda.get(iden)
node.snap.user.confirm(('cron', 'set', 'doc'), gateiden=iden)
await appt.setDoc(valu, nexs=True)
node.props[prop.name] = valu
async def onSetCronName(node, prop, valu):
valu = str(valu)
iden = node.ndef[1]
appt = await self.agenda.get(iden)
node.snap.user.confirm(('cron', 'set', 'name'), gateiden=iden)
await appt.setName(valu, nexs=True)
node.props[prop.name] = valu
self.addRuntPropSet('syn:cron:doc', onSetCronDoc)
self.addRuntPropSet('syn:cron:name', onSetCronName)
self.addRuntPropSet('syn:trigger:doc', onSetTrigDoc)
self.addRuntPropSet('syn:trigger:name', onSetTrigName)
async def _initStormDmons(self):
node = await self.hive.open(('cortex', 'storm', 'dmons'))
self.stormdmonhive = await node.dict()
for iden, ddef in self.stormdmonhive.items():
try:
await self.runStormDmon(iden, ddef)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'initStormDmon ({iden}) failed: {e}')
async def _initStormSvcs(self):
for iden, sdef in self.svchive.items():
try:
await self._setStormSvc(sdef)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'initStormService ({iden}) failed: {e}')
async def _initCoreQueues(self):
path = os.path.join(self.dirn, 'slabs', 'queues.lmdb')
slab = await s_lmdbslab.Slab.anit(path)
self.onfini(slab.fini)
self.multiqueue = await slab.getMultiQueue('cortex:queue', nexsroot=self.nexsroot)
@s_nexus.Pusher.onPushAuto('cmd:set')
async def setStormCmd(self, cdef):
'''
Set pure storm command definition.
Args:
cdef (dict): A Pure Stormcmd definition dictionary.
Notes:
The definition dictionary is formatted like the following::
{
'name': <name>,
'cmdargs': [
(<name>, <opts>),
]
'cmdconf': {
<str>: <valu>
},
'storm': <text>,
}
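Example:
    A minimal sketch (assumes ``core`` is a Cortex instance; the command name and storm text are illustrative):
        cdef = {
            'name': 'acme.hello',
            'storm': '$lib.print("hello")',
        }
        await core.setStormCmd(cdef)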
'''
name = cdef.get('name')
await self._setStormCmd(cdef)
await self.cmdhive.set(name, cdef)
async def _reqStormCmd(self, cdef):
name = cdef.get('name')
if not s_grammar.isCmdName(name):
raise s_exc.BadCmdName(name=name)
self.getStormQuery(cdef.get('storm'))
async def _getStorNodes(self, buid, layers):
# NOTE: This API lives here to make it easy to optimize
# the cluster case to minimize round trips
return [await layr.getStorNode(buid) for layr in layers]
async def _genSodeList(self, buid, sodes, layers, filtercmpr=None):
sodelist = []
if filtercmpr is not None:
filt = True
for layr in layers[-1::-1]:
sode = sodes.get(layr.iden)
if sode is None:
sode = await layr.getStorNode(buid)
if filt and filtercmpr(sode):
return
else:
filt = False
sodelist.append((layr.iden, sode))
return (buid, sodelist[::-1])
for layr in layers:
sode = sodes.get(layr.iden)
if sode is None:
sode = await layr.getStorNode(buid)
sodelist.append((layr.iden, sode))
return (buid, sodelist)
async def _mergeSodes(self, layers, genrs, cmprkey, filtercmpr=None):
lastbuid = None
sodes = {}
async for layr, (_, buid), sode in s_common.merggenr2(genrs, cmprkey):
if not buid == lastbuid or layr in sodes:
if lastbuid is not None:
sodelist = await self._genSodeList(lastbuid, sodes, layers, filtercmpr)
if sodelist is not None:
yield sodelist
sodes.clear()
lastbuid = buid
sodes[layr] = sode
if lastbuid is not None:
sodelist = await self._genSodeList(lastbuid, sodes, layers, filtercmpr)
if sodelist is not None:
yield sodelist
async def _liftByDataName(self, name, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByDataName(name):
yield (buid, [(layr, sode)])
return
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByDataName(name)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_buid):
yield sodes
async def _liftByProp(self, form, prop, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByProp(form, prop):
yield (buid, [(layr, sode)])
return
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByProp(form, prop)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx):
yield sodes
async def _liftByPropValu(self, form, prop, cmprvals, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByPropValu(form, prop, cmprvals):
yield (buid, [(layr, sode)])
return
def filtercmpr(sode):
props = sode.get('props')
if props is None:
return False
return props.get(prop) is not None
for cval in cmprvals:
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByPropValu(form, prop, (cval,))))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx, filtercmpr):
yield sodes
async def _liftByPropArray(self, form, prop, cmprvals, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByPropArray(form, prop, cmprvals):
yield (buid, [(layr, sode)])
return
if prop is None:
filtercmpr = None
else:
def filtercmpr(sode):
props = sode.get('props')
if props is None:
return False
return props.get(prop) is not None
for cval in cmprvals:
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByPropArray(form, prop, (cval,))))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx, filtercmpr):
yield sodes
async def _liftByFormValu(self, form, cmprvals, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByFormValu(form, cmprvals):
yield (buid, [(layr, sode)])
return
for cval in cmprvals:
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByFormValu(form, (cval,))))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx):
yield sodes
async def _liftByTag(self, tag, form, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByTag(tag, form):
yield (buid, [(layr, sode)])
return
if form is None:
def filtercmpr(sode):
tags = sode.get('tags')
if tags is None:
return False
return tags.get(tag) is not None
else:
filtercmpr = None
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByTag(tag, form)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_buid, filtercmpr):
yield sodes
async def _liftByTagValu(self, tag, cmpr, valu, form, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByTagValu(tag, cmpr, valu, form):
yield (buid, [(layr, sode)])
return
def filtercmpr(sode):
tags = sode.get('tags')
if tags is None:
return False
return tags.get(tag) is not None
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByTagValu(tag, cmpr, valu, form)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_buid, filtercmpr):
yield sodes
async def _liftByTagProp(self, form, tag, prop, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByTagProp(form, tag, prop):
yield (buid, [(layr, sode)])
return
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByTagProp(form, tag, prop)))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx):
yield sodes
async def _liftByTagPropValu(self, form, tag, prop, cmprvals, layers):
if len(layers) == 1:
layr = layers[0].iden
async for _, buid, sode in layers[0].liftByTagPropValu(form, tag, prop, cmprvals):
yield (buid, [(layr, sode)])
return
def filtercmpr(sode):
tagprops = sode.get('tagprops')
if tagprops is None:
return False
props = tagprops.get(tag)
if not props:
return False
return props.get(prop) is not None
for cval in cmprvals:
genrs = []
for layr in layers:
genrs.append(wrap_liftgenr(layr.iden, layr.liftByTagPropValu(form, tag, prop, (cval,))))
async for sodes in self._mergeSodes(layers, genrs, cmprkey_indx, filtercmpr):
yield sodes
async def _setStormCmd(self, cdef):
'''
Note:
No change control or persistence
'''
await self._reqStormCmd(cdef)
def ctor(runt, runtsafe):
return s_storm.PureCmd(cdef, runt, runtsafe)
# TODO unify class ctors and func ctors vs briefs...
def getCmdBrief():
return cdef.get('descr', 'No description').strip().split('\n')[0]
ctor.getCmdBrief = getCmdBrief
ctor.pkgname = cdef.get('pkgname')
ctor.svciden = cdef.get('cmdconf', {}).get('svciden', '')
ctor.forms = cdef.get('forms', {})
def getStorNode(form):
ndef = (form.name, form.type.norm(cdef.get('name'))[0])
buid = s_common.buid(ndef)
props = {
'doc': ctor.getCmdBrief()
}
inpt = ctor.forms.get('input')
outp = ctor.forms.get('output')
nodedata = ctor.forms.get('nodedata')
if inpt:
props['input'] = tuple(inpt)
if outp:
props['output'] = tuple(outp)
if nodedata:
props['nodedata'] = tuple(nodedata)
if ctor.svciden:
props['svciden'] = ctor.svciden
if ctor.pkgname:
props['package'] = ctor.pkgname
pnorms = {}
for prop, valu in props.items():
formprop = form.props.get(prop)
if formprop is not None and valu is not None:
pnorms[prop] = formprop.type.norm(valu)[0]
return (buid, {
'ndef': ndef,
'props': pnorms,
})
ctor.getStorNode = getStorNode
name = cdef.get('name')
self.stormcmds[name] = ctor
await self.fire('core:cmd:change', cmd=name, act='add')
async def _popStormCmd(self, name):
self.stormcmds.pop(name, None)
await self.fire('core:cmd:change', cmd=name, act='del')
async def delStormCmd(self, name):
'''
Remove a previously set pure storm command.
'''
ctor = self.stormcmds.get(name)
if ctor is None:
mesg = f'No storm command named {name}.'
raise s_exc.NoSuchCmd(name=name, mesg=mesg)
return await self._push('cmd:del', name)
@s_nexus.Pusher.onPush('cmd:del')
async def _delStormCmd(self, name):
ctor = self.stormcmds.get(name)
if ctor is None:
return
cdef = self.cmdhive.get(name)
if cdef is None:
mesg = f'The storm command ({name}) is not dynamic.'
raise s_exc.CantDelCmd(mesg=mesg)
await self.cmdhive.pop(name)
self.stormcmds.pop(name, None)
await self.fire('core:cmd:change', cmd=name, act='del')
@s_nexus.Pusher.onPushAuto('pkg:add')
async def addStormPkg(self, pkgdef):
'''
Add the given storm package to the cortex.
This will store the package for future use.
'''
s_storm.reqValidPkgdef(pkgdef)
name = pkgdef.get('name')
olddef = self.pkghive.get(name, None)
if olddef is not None:
await self._dropStormPkg(olddef)
await self.loadStormPkg(pkgdef)
await self.pkghive.set(name, pkgdef)
async def delStormPkg(self, name):
pkgdef = self.pkghive.get(name, None)
if pkgdef is None:
mesg = f'No storm package: {name}.'
raise s_exc.NoSuchPkg(mesg=mesg)
return await self._push('pkg:del', name)
@s_nexus.Pusher.onPush('pkg:del')
async def _delStormPkg(self, name):
'''
Delete a storm package by name.
'''
pkgdef = await self.pkghive.pop(name, None)
if pkgdef is None:
return
await self._dropStormPkg(pkgdef)
async def getStormPkg(self, name):
return self.stormpkgs.get(name)
async def getStormPkgs(self):
return list(self.pkghive.values())
async def getStormMods(self):
return self.stormmods
async def getStormMod(self, name):
return self.stormmods.get(name)
def getDataModel(self):
return self.model
async def _tryLoadStormPkg(self, pkgdef):
try:
await self.loadStormPkg(pkgdef)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
name = pkgdef.get('name', '')
logger.exception(f'Error loading pkg: {name}, {str(e)}')
async def _confirmStormPkg(self, pkgdef):
'''
Validate a storm package for loading. Raises if invalid.
'''
# Validate package def
s_storm.reqValidPkgdef(pkgdef)
pkgname = pkgdef.get('name')
# Check minimum synapse version
minversion = pkgdef.get('synapse_minversion')
if minversion is not None and tuple(minversion) > s_version.version:
mesg = f'Storm package {pkgname} requires Synapse {minversion} but ' \
f'Cortex is running {s_version.version}'
raise s_exc.BadVersion(mesg=mesg)
# Validate storm contents from modules and commands
mods = pkgdef.get('modules', ())
cmds = pkgdef.get('commands', ())
onload = pkgdef.get('onload')
svciden = pkgdef.get('svciden')
if onload is not None:
self.getStormQuery(onload)
for mdef in mods:
modtext = mdef.get('storm')
self.getStormQuery(modtext)
mdef.setdefault('modconf', {})
if svciden:
mdef['modconf']['svciden'] = svciden
for cdef in cmds:
cdef['pkgname'] = pkgname
cdef.setdefault('cmdconf', {})
if svciden:
cdef['cmdconf']['svciden'] = svciden
cmdtext = cdef.get('storm')
self.getStormQuery(cmdtext)
async def loadStormPkg(self, pkgdef):
'''
Load a storm package into the storm library for this cortex.
NOTE: This will *not* persist the package (allowing service dynamism).
'''
await self._confirmStormPkg(pkgdef)
name = pkgdef.get('name')
mods = pkgdef.get('modules', ())
cmds = pkgdef.get('commands', ())
# now actually load...
self.stormpkgs[name] = pkgdef
# copy the mods dict and smash the ref so
# updates are atomic and don't affect running
# storm queries.
stormmods = self.stormmods.copy()
for mdef in mods:
modname = mdef.get('name')
stormmods[modname] = mdef
self.stormmods = stormmods
for cdef in cmds:
await self._setStormCmd(cdef)
onload = pkgdef.get('onload')
if onload is not None and self.isactive:
async def _onload():
try:
async for mesg in self.storm(onload):
if mesg[0] in ('print', 'warn'):
logger.warning(f'onload output: {mesg}')
await asyncio.sleep(0)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception: # pragma: no cover
logger.warning(f'onload failed for package: {name}')
self.schedCoro(_onload())
async def _dropStormPkg(self, pkgdef):
'''
Reverse the process of loadStormPkg()
'''
for mdef in pkgdef.get('modules', ()):
modname = mdef.get('name')
self.stormmods.pop(modname, None)
for cdef in pkgdef.get('commands', ()):
name = cdef.get('name')
await self._popStormCmd(name)
pkgname = pkgdef.get('name')
self.stormpkgs.pop(pkgname, None)
def getStormSvc(self, name):
ssvc = self.svcsbyiden.get(name)
if ssvc is not None:
return ssvc
ssvc = self.svcsbyname.get(name)
if ssvc is not None:
return ssvc
ssvc = self.svcsbysvcname.get(name)
if ssvc is not None:
return ssvc
async def waitStormSvc(self, name, timeout=None):
ssvc = self.getStormSvc(name)
return await s_coro.event_wait(ssvc.ready, timeout=timeout)
async def addStormSvc(self, sdef):
'''
Add a registered storm service to the cortex.
'''
iden = sdef.get('iden')
if iden is None:
iden = sdef['iden'] = s_common.guid()
if self.svcsbyiden.get(iden) is not None:
mesg = f'Storm service already exists: {iden}'
raise s_exc.DupStormSvc(mesg=mesg)
return await self._push('svc:add', sdef)
@s_nexus.Pusher.onPush('svc:add')
async def _addStormSvc(self, sdef):
iden = sdef.get('iden')
ssvc = self.svcsbyiden.get(iden)
if ssvc is not None:
return ssvc.sdef
ssvc = await self._setStormSvc(sdef)
await self.svchive.set(iden, sdef)
return ssvc.sdef
async def delStormSvc(self, iden):
sdef = self.svchive.get(iden)
if sdef is None:
mesg = f'No storm service with iden: {iden}'
raise s_exc.NoSuchStormSvc(mesg=mesg, iden=iden)
return await self._push('svc:del', iden)
@s_nexus.Pusher.onPush('svc:del')
async def _delStormSvc(self, iden):
'''
Delete a registered storm service from the cortex.
'''
sdef = self.svchive.get(iden)
if sdef is None: # pragma: no cover
return
try:
if self.isactive:
await self.runStormSvcEvent(iden, 'del')
except asyncio.CancelledError: # pragma: no cover TODO: remove once py 3.8 only
raise
except Exception as e:
logger.exception(f'service.del hook for service {iden} failed with error: {e}')
sdef = await self.svchive.pop(iden)
await self._delStormSvcPkgs(iden)
name = sdef.get('name')
if name is not None:
self.svcsbyname.pop(name, None)
ssvc = self.svcsbyiden.pop(iden, None)
if ssvc is not None:
self.svcsbysvcname.pop(ssvc.svcname, None)
await ssvc.fini()
async def _delStormSvcPkgs(self, iden):
'''
Delete storm packages associated with a service.
'''
oldpkgs = []
for _, pdef in self.pkghive.items():
pkgiden = pdef.get('svciden')
if pkgiden and pkgiden == iden:
oldpkgs.append(pdef)
for pkg in oldpkgs:
name = pkg.get('name')
if name:
await self._delStormPkg(name)
async def setStormSvcEvents(self, iden, edef):
'''
Set the event callbacks for a storm service. Extends the sdef dict.
Args:
iden (str): The service iden.
edef (dict): The events definition.
Notes:
The edef is formatted like the following::
{
<name> : {
'storm': <storm>
}
}
where ``name`` is one of the following items:
add
Run the given storm *before* the service is first added (a la service.add), but not on a reconnect.
del
Run the given storm *after* the service is removed (a la service.del), but not on a disconnect.
Returns:
dict: An updated storm service definition dictionary.
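Example:
    A brief sketch (assumes ``core`` is a Cortex and ``svciden`` is the iden of a registered storm service; the storm text is illustrative):
        edef = {
            'add': {'storm': '$lib.print("service added")'},
            'del': {'storm': '$lib.print("service removed")'},
        }
        await core.setStormSvcEvents(svciden, edef)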
'''
sdef = self.svchive.get(iden)
if sdef is None:
mesg = f'No storm service with iden: {iden}'
raise s_exc.NoSuchStormSvc(mesg=mesg)
sdef['evts'] = edef
await self.svchive.set(iden, sdef)
return sdef
async def _runStormSvcAdd(self, iden):
sdef = self.svchive.get(iden)
if sdef is None:
mesg = f'No storm service with iden: {iden}'
raise s_exc.NoSuchStormSvc(mesg=mesg)
if sdef.get('added', False):
return
try:
await self.runStormSvcEvent(iden, 'add')
except asyncio.CancelledError: # pragma: no cover TODO: remove once py 3.8 only
raise
except Exception as e:
logger.exception(f'runStormSvcEvent service.add failed with error {e}')
return
sdef['added'] = True
await self.svchive.set(iden, sdef)
async def runStormSvcEvent(self, iden, name):
assert name in ('add', 'del')
sdef = self.svchive.get(iden)
if sdef is None:
mesg = f'No storm service with iden: {iden}'
raise s_exc.NoSuchStormSvc(mesg=mesg)
evnt = sdef.get('evts', {}).get(name, {}).get('storm')
if evnt is None:
return
opts = {'vars': {'cmdconf': {'svciden': iden}}}
coro = s_common.aspin(self.storm(evnt, opts=opts))
if name == 'add':
await coro
else:
self.schedCoro(coro)
async def _setStormSvc(self, sdef):
ssvc = await s_stormsvc.StormSvcClient.anit(self, sdef)
self.onfini(ssvc)
self.svcsbyiden[ssvc.iden] = ssvc
self.svcsbyname[ssvc.name] = ssvc
return ssvc
def getStormSvcs(self):
return list(self.svcsbyiden.values())
# Global stormvars APIs
async def getStormVar(self, name, default=None):
return self.stormvars.get(name, default=default)
@s_nexus.Pusher.onPushAuto('stormvar:pop')
async def popStormVar(self, name, default=None):
return await self.stormvars.pop(name, default=default)
@s_nexus.Pusher.onPushAuto('stormvar:set')
async def setStormVar(self, name, valu):
return await self.stormvars.set(name, valu)
async def itemsStormVar(self):
for item in self.stormvars.items():
yield item
async def _cortexHealth(self, health):
health.update('cortex', 'nominal')
async def _loadExtModel(self):
self.extforms = await (await self.hive.open(('cortex', 'model', 'forms'))).dict()
self.extprops = await (await self.hive.open(('cortex', 'model', 'props'))).dict()
self.extunivs = await (await self.hive.open(('cortex', 'model', 'univs'))).dict()
self.exttagprops = await (await self.hive.open(('cortex', 'model', 'tagprops'))).dict()
for formname, basetype, typeopts, typeinfo in self.extforms.values():
try:
self.model.addType(formname, basetype, typeopts, typeinfo)
form = self.model.addForm(formname, {}, ())
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'Extended form ({formname}) error: {e}')
else:
if form.type.deprecated:
mesg = f'The extended form {formname} is using a deprecated type {form.type.name} which will' \
f' be removed in 3.0.0'
logger.warning(mesg)
for form, prop, tdef, info in self.extprops.values():
try:
prop = self.model.addFormProp(form, prop, tdef, info)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'ext prop ({form}:{prop}) error: {e}')
else:
if prop.type.deprecated:
mesg = f'The extended property {prop.full} is using a deprecated type {prop.type.name} which will' \
f' be removed in 3.0.0'
logger.warning(mesg)
for prop, tdef, info in self.extunivs.values():
try:
self.model.addUnivProp(prop, tdef, info)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'ext univ ({prop}) error: {e}')
for prop, tdef, info in self.exttagprops.values():
try:
self.model.addTagProp(prop, tdef, info)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning(f'ext tag prop ({prop}) error: {e}')
@contextlib.asynccontextmanager
async def watcher(self, wdef):
iden = wdef.get('view', self.view.iden)
view = self.views.get(iden)
if view is None:
raise s_exc.NoSuchView(iden=iden)
async with await s_queue.Window.anit(maxsize=10000) as wind:
tags = wdef.get('tags')
if tags is not None:
tglobs = s_cache.TagGlobs()
[tglobs.add(t, True) for t in tags]
async def ontag(mesg):
name = mesg[1].get('tag')
if not tglobs.get(name):
return
await wind.put(mesg)
for layr in self.view.layers:
layr.on('tag:add', ontag, base=wind)
layr.on('tag:del', ontag, base=wind)
yield wind
async def watch(self, wdef):
'''
Hook cortex/view/layer watch points based on a specified watch definition.
( see CoreApi.watch() docs for details )
'''
async with self.watcher(wdef) as wind:
async for mesg in wind:
yield mesg
@s_nexus.Pusher.onPushAuto('model:univ:add')
async def addUnivProp(self, name, tdef, info):
# the loading function does the actual validation...
if not name.startswith('_'):
mesg = 'ext univ name must start with "_"'
raise s_exc.BadPropDef(name=name, mesg=mesg)
self.model.addUnivProp(name, tdef, info)
await self.extunivs.set(name, (name, tdef, info))
await self.fire('core:extmodel:change', prop=name, act='add', type='univ')
async def addForm(self, formname, basetype, typeopts, typeinfo):
if not formname.startswith('_'):
mesg = 'Extended form must begin with "_"'
raise s_exc.BadFormDef(form=formname, mesg=mesg)
if self.model.form(formname) is not None:
mesg = f'Form name already exists: {formname}'
raise s_exc.DupFormName(mesg=mesg)
return await self._push('model:form:add', formname, basetype, typeopts, typeinfo)
@s_nexus.Pusher.onPush('model:form:add')
async def _addForm(self, formname, basetype, typeopts, typeinfo):
self.model.addType(formname, basetype, typeopts, typeinfo)
self.model.addForm(formname, {}, ())
await self.extforms.set(formname, (formname, basetype, typeopts, typeinfo))
await self.fire('core:extmodel:change', form=formname, act='add', type='form')
async def delForm(self, formname):
if not formname.startswith('_'):
mesg = 'Extended form must begin with "_"'
raise s_exc.BadFormDef(form=formname, mesg=mesg)
if self.model.form(formname) is None:
raise s_exc.NoSuchForm(name=formname)
return await self._push('model:form:del', formname)
@s_nexus.Pusher.onPush('model:form:del')
async def _delForm(self, formname):
for layr in self.layers.values():
async for item in layr.iterFormRows(formname):
mesg = f'Nodes still exist with form: {formname}'
raise s_exc.CantDelForm(mesg=mesg)
self.model.delForm(formname)
self.model.delType(formname)
await self.extforms.pop(formname, None)
await self.fire('core:extmodel:change', form=formname, act='del', type='form')
@s_nexus.Pusher.onPushAuto('model:prop:add')
async def addFormProp(self, form, prop, tdef, info):
if not prop.startswith('_') and not form.startswith('_'):
mesg = 'Extended prop must begin with "_" or be added to an extended form.'
raise s_exc.BadPropDef(prop=prop, mesg=mesg)
_prop = self.model.addFormProp(form, prop, tdef, info)
if _prop.type.deprecated:
mesg = f'The extended property {_prop.full} is using a deprecated type {_prop.type.name} which will' \
f' be removed in 3.0.0'
logger.warning(mesg)
await self.extprops.set(f'{form}:{prop}', (form, prop, tdef, info))
await self.fire('core:extmodel:change', form=form, prop=prop, act='add', type='formprop')
async def delFormProp(self, form, prop):
full = f'{form}:{prop}'
pdef = self.extprops.get(full)
if pdef is None:
mesg = f'No ext prop named {full}'
raise s_exc.NoSuchProp(form=form, prop=prop, mesg=mesg)
return await self._push('model:prop:del', form, prop)
@s_nexus.Pusher.onPush('model:prop:del')
async def _delFormProp(self, form, prop):
'''
Remove an extended property from the cortex.
'''
full = f'{form}:{prop}'
pdef = self.extprops.get(full)
if pdef is None:
return
for layr in self.layers.values():
async for item in layr.iterPropRows(form, prop):
mesg = f'Nodes still exist with prop: {form}:{prop}'
raise s_exc.CantDelProp(mesg=mesg)
self.model.delFormProp(form, prop)
await self.extprops.pop(full, None)
await self.fire('core:extmodel:change',
form=form, prop=prop, act='del', type='formprop')
async def delUnivProp(self, prop):
udef = self.extunivs.get(prop)
if udef is None:
mesg = f'No ext univ named {prop}'
raise s_exc.NoSuchUniv(name=prop, mesg=mesg)
return await self._push('model:univ:del', prop)
@s_nexus.Pusher.onPush('model:univ:del')
async def _delUnivProp(self, prop):
'''
Remove an extended universal property from the cortex.
'''
udef = self.extunivs.get(prop)
if udef is None:
return
univname = '.' + prop
for layr in self.layers.values():
async for item in layr.iterUnivRows(univname):
mesg = f'Nodes still exist with universal prop: {prop}'
raise s_exc.CantDelUniv(mesg=mesg)
self.model.delUnivProp(prop)
await self.extunivs.pop(prop, None)
await self.fire('core:extmodel:change', name=prop, act='del', type='univ')
async def addTagProp(self, name, tdef, info):
if self.exttagprops.get(name) is not None:
raise s_exc.DupPropName(name=name)
return await self._push('model:tagprop:add', name, tdef, info)
@s_nexus.Pusher.onPush('model:tagprop:add')
async def _addTagProp(self, name, tdef, info):
if self.exttagprops.get(name) is not None:
return
self.model.addTagProp(name, tdef, info)
await self.exttagprops.set(name, (name, tdef, info))
await self.fire('core:tagprop:change', name=name, act='add')
async def delTagProp(self, name):
pdef = self.exttagprops.get(name)
if pdef is None:
mesg = f'No tag prop named {name}'
raise s_exc.NoSuchProp(mesg=mesg, name=name)
return await self._push('model:tagprop:del', name)
@s_nexus.Pusher.onPush('model:tagprop:del')
async def _delTagProp(self, name):
pdef = self.exttagprops.get(name)
if pdef is None:
return
for layr in self.layers.values():
if await layr.hasTagProp(name):
mesg = f'Nodes still exist with tagprop: {name}'
raise s_exc.CantDelProp(mesg=mesg)
self.model.delTagProp(name)
await self.exttagprops.pop(name, None)
await self.fire('core:tagprop:change', name=name, act='del')
async def addNodeTag(self, user, iden, tag, valu=(None, None)):
'''
Add a tag to a node specified by iden.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
valu (tuple): A time interval tuple or (None, None).
'''
buid = s_common.uhex(iden)
async with await self.snap(user=user) as snap:
with s_provenance.claim('coreapi', meth='tag:add', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
await node.addTag(tag, valu=valu)
return node.pack()
async def addNode(self, user, form, valu, props=None):
async with await self.snap(user=user) as snap:
node = await snap.addNode(form, valu, props=props)
return node.pack()
async def delNodeTag(self, user, iden, tag):
'''
Delete a tag from the node specified by iden.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
'''
buid = s_common.uhex(iden)
async with await self.snap(user=user) as snap:
with s_provenance.claim('coreapi', meth='tag:del', user=snap.user.iden):
node = await snap.getNodeByBuid(buid)
if node is None:
raise s_exc.NoSuchIden(iden=iden)
await node.delTag(tag)
return node.pack()
async def _onCoreFini(self):
'''
Generic fini handler for cortex components which may change or vary at runtime.
'''
if self.axon:
await self.axon.fini()
async def syncLayerNodeEdits(self, iden, offs, wait=True):
'''
Yield (offs, mesg) tuples for nodeedits in a layer.
'''
layr = self.getLayer(iden)
if layr is None:
raise s_exc.NoSuchLayer(iden=iden)
async for item in layr.syncNodeEdits(offs, wait=wait):
yield item
async def syncLayersEvents(self, offsdict=None, wait=True):
'''
Yield (offs, layriden, STYP, item, meta) tuples for nodeedits for *all* layers, interspersed with add/del
layer messages.
STYP is one of the following constants:
SYNC_NODEEDITS: item is a nodeedits (buid, form, edits)
SYNC_LAYR_ADD: A layer was added (item and meta are empty)
SYNC_LAYR_DEL: A layer was deleted (item and meta are empty)
Args:
offsdict(Optional(Dict[str,int])): starting nexus/editlog offset by layer iden. Defaults to 0 for
unspecified layers or if offsdict is None.
wait(bool): whether to wait for and stream new entries until the layer is fini'd
'''
async def layrgenr(layr, startoff, endoff=None, newlayer=False):
if newlayer:
yield layr.addoffs, layr.iden, SYNC_LAYR_ADD, (), {}
wait = endoff is None
if not layr.isfini:
async for ioff, item, meta in layr.syncNodeEdits2(startoff, wait=wait):
if endoff is not None and ioff >= endoff: # pragma: no cover
break
yield ioff, layr.iden, SYNC_NODEEDITS, item, meta
if layr.isdeleted:
yield layr.deloffs, layr.iden, SYNC_LAYR_DEL, (), {}
# End of layrgenr
async for item in self._syncNodeEdits(offsdict, layrgenr, wait=wait):
yield item
async def syncIndexEvents(self, matchdef, offsdict=None, wait=True):
'''
Yield (offs, layriden, <STYPE>, <item>) tuples from the nodeedit logs of all layers starting
from the given nexus/layer offset (they are synchronized). Only edits that match the filter in matchdef will
be yielded, plus EDIT_PROGRESS (see layer.syncIndexEvents) messages.
The format of the 4th element of the tuple depends on STYPE. STYPE is one of the following constants:
SYNC_LAYR_ADD: item is an empty tuple ()
SYNC_LAYR_DEL: item is an empty tuple ()
SYNC_NODEEDIT: item is (buid, form, ETYPE, VALS, META) or (None, None, s_layer.EDIT_PROGRESS, (), ())
For edits in the past, events are yielded in offset order across all layers. For current data (wait=True),
events across different layers may be emitted slightly out of offset order.
Note:
Will not yield any values from layers created with logedits disabled
Args:
matchdef(Dict[str, Sequence[str]]): a dict describing which events are yielded. See
layer.syncIndexEvents for matchdef specification.
offsdict(Optional(Dict[str,int])): starting nexus/editlog offset by layer iden. Defaults to 0 for
unspecified layers or if offsdict is None.
            wait(bool): whether to pend and stream values until this layer is fini'd
'''
async def layrgenr(layr, startoff, endoff=None, newlayer=False):
''' Yields matching results from a single layer '''
if newlayer:
yield layr.addoffs, layr.iden, SYNC_LAYR_ADD, ()
wait = endoff is None
ioff = startoff
if not layr.isfini:
async for ioff, item in layr.syncIndexEvents(startoff, matchdef, wait=wait):
if endoff is not None and ioff >= endoff: # pragma: no cover
break
yield ioff, layr.iden, SYNC_NODEEDIT, item
if layr.isdeleted:
yield layr.deloffs, layr.iden, SYNC_LAYR_DEL, ()
# End of layrgenr
async for item in self._syncNodeEdits(offsdict, layrgenr, wait=wait):
yield item
async def _syncNodeEdits(self, offsdict, genrfunc, wait=True):
'''
Common guts between syncIndexEvents and syncLayersEvents
First, it streams from the layers up to the current offset, sorted by offset.
Then it streams from all the layers simultaneously.
Args:
offsdict(Dict[str, int]): starting nexus/editlog offset per layer. Defaults to 0 if layer not present
genrfunc(Callable): an async generator function that yields tuples that start with an offset. The input
parameters are:
layr(Layer): a Layer object
startoff(int); the starting offset
endoff(Optional[int]): the ending offset
newlayer(bool): whether to emit a new layer item first
wait(bool): when the end of the log is hit, whether to continue to wait for new entries and yield them
'''
topoffs = await self.getNexsIndx() # The latest offset when this function started
        catchingup = True   # whether we are still catching up to topoffs
        layrsadded = {}     # layr -> True.  Captures all the layers added while catching up
todo = set() # outstanding futures of active live streaming from layers
layrgenrs = {} # layriden -> genr. maps active layers to that layer's async generator
if offsdict is None:
offsdict = {}
newtodoevent = asyncio.Event()
async with await s_base.Base.anit() as base:
def addlayr(layr, newlayer=False):
'''
A new layer joins the live stream
'''
genr = genrfunc(layr, topoffs, newlayer=newlayer)
layrgenrs[layr.iden] = genr
task = base.schedCoro(genr.__anext__())
task.iden = layr.iden
todo.add(task)
newtodoevent.set()
def onaddlayr(mesg):
etyp, event = mesg
layriden = event['iden']
layr = self.getLayer(layriden)
if catchingup:
layrsadded[layr] = True
return
addlayr(layr, newlayer=True)
self.on('core:layr:add', onaddlayr, base=base)
# First, catch up to what was the current offset when we started, guaranteeing order
genrs = [genrfunc(layr, offsdict.get(layr.iden, 0), endoff=topoffs) for layr in self.layers.values()]
async for item in s_common.merggenr(genrs, lambda x, y: x[0] < y[0]):
yield item
catchingup = False
if not wait:
return
# After we've caught up, read on genrs from all the layers simultaneously
todo.clear()
for layr in self.layers.values():
if layr not in layrsadded:
addlayr(layr)
for layr in layrsadded:
addlayr(layr, newlayer=True)
# Also, wake up if we get fini'd
finitask = base.schedCoro(self.waitfini())
todo.add(finitask)
newtodotask = base.schedCoro(newtodoevent.wait())
todo.add(newtodotask)
while not self.isfini:
newtodoevent.clear()
done, _ = await asyncio.wait(todo, return_when=asyncio.FIRST_COMPLETED)
for donetask in done:
try:
todo.remove(donetask)
if donetask is finitask: # pragma: no cover # We were fini'd
return
if donetask is newtodotask:
newtodotask = base.schedCoro(newtodoevent.wait())
todo.add(newtodotask)
continue
layriden = donetask.iden
result = donetask.result()
yield result
# Re-add a task to wait on the next iteration of the generator
genr = layrgenrs[layriden]
task = base.schedCoro(genr.__anext__())
task.iden = layriden
todo.add(task)
except StopAsyncIteration:
# Help out the garbage collector
del layrgenrs[layriden]
async def spliceHistory(self, user):
'''
Yield splices backwards from the end of the nodeedit log.
Will only return user's own splices unless they are an admin.
'''
layr = self.view.layers[0]
count = 0
async for _, mesg in layr.splicesBack():
count += 1
if not count % 1000: # pragma: no cover
await asyncio.sleep(0)
if user.iden == mesg[1]['user'] or user.isAdmin():
yield mesg
async def _initCoreHive(self):
stormvarsnode = await self.hive.open(('cortex', 'storm', 'vars'))
self.stormvars = await stormvarsnode.dict()
self.onfini(self.stormvars)
async def _initCoreAxon(self):
turl = self.conf.get('axon')
if turl is None:
path = os.path.join(self.dirn, 'axon')
conf = {}
proxyurl = self.conf.get('http:proxy')
if proxyurl is not None:
conf['http:proxy'] = proxyurl
self.axon = await s_axon.Axon.anit(path, conf=conf)
self.axon.onfini(self.axready.clear)
self.dynitems['axon'] = self.axon
self.axready.set()
return
async def teleloop():
self.axready.clear()
while not self.isfini:
try:
self.axon = await s_telepath.openurl(turl)
self.axon.onfini(teleloop)
self.dynitems['axon'] = self.axon
self.axready.set()
return
except asyncio.CancelledError: # TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.warning('remote axon error: %r' % (e,))
await self.waitfini(1)
self.schedCoro(teleloop())
async def _initStormCmds(self):
'''
Registration for built-in Storm commands.
'''
self.addStormCmd(s_storm.MaxCmd)
self.addStormCmd(s_storm.MinCmd)
self.addStormCmd(s_storm.TeeCmd)
self.addStormCmd(s_storm.TreeCmd)
self.addStormCmd(s_storm.HelpCmd)
self.addStormCmd(s_storm.IdenCmd)
self.addStormCmd(s_storm.SpinCmd)
self.addStormCmd(s_storm.SudoCmd)
self.addStormCmd(s_storm.UniqCmd)
self.addStormCmd(s_storm.CountCmd)
self.addStormCmd(s_storm.GraphCmd)
self.addStormCmd(s_storm.LimitCmd)
self.addStormCmd(s_storm.MergeCmd)
self.addStormCmd(s_storm.SleepCmd)
self.addStormCmd(s_storm.ScrapeCmd)
self.addStormCmd(s_storm.DelNodeCmd)
self.addStormCmd(s_storm.LiftByVerb)
self.addStormCmd(s_storm.MoveTagCmd)
self.addStormCmd(s_storm.ReIndexCmd)
self.addStormCmd(s_storm.EdgesDelCmd)
self.addStormCmd(s_storm.ParallelCmd)
self.addStormCmd(s_storm.TagPruneCmd)
self.addStormCmd(s_storm.ViewExecCmd)
self.addStormCmd(s_storm.BackgroundCmd)
self.addStormCmd(s_storm.SpliceListCmd)
self.addStormCmd(s_storm.SpliceUndoCmd)
self.addStormCmd(s_stormlib_macro.MacroExecCmd)
for cdef in s_stormsvc.stormcmds:
await self._trySetStormCmd(cdef.get('name'), cdef)
for cdef in s_storm.stormcmds:
await self._trySetStormCmd(cdef.get('name'), cdef)
for cdef in s_stormlib_macro.stormcmds:
await self._trySetStormCmd(cdef.get('name'), cdef)
for cdef in s_stormlib_model.stormcmds:
await self._trySetStormCmd(cdef.get('name'), cdef)
async def _initPureStormCmds(self):
oldcmds = []
for name, cdef in self.cmdhive.items():
cmdiden = cdef.get('cmdconf', {}).get('svciden')
if cmdiden and self.svchive.get(cmdiden) is None:
oldcmds.append(name)
else:
await self._trySetStormCmd(name, cdef)
for name in oldcmds:
logger.warning(f'Removing old command: [{name}]')
await self.cmdhive.pop(name)
for pkgdef in self.pkghive.values():
await self._tryLoadStormPkg(pkgdef)
async def _trySetStormCmd(self, name, cdef):
try:
await self._setStormCmd(cdef)
except (asyncio.CancelledError, Exception):
logger.exception(f'Storm command load failed: {name}')
def _initStormLibs(self):
'''
Registration for built-in Storm Libraries
'''
for path, ctor in s_stormtypes.registry.iterLibs():
# Skip libbase which is registered as a default ctor in the storm Runtime
if path:
self.addStormLib(path, ctor)
def _initSplicers(self):
'''
Registration for splice handlers.
'''
splicers = {
'tag:add': self._onFeedTagAdd,
'tag:del': self._onFeedTagDel,
'node:add': self._onFeedNodeAdd,
'node:del': self._onFeedNodeDel,
'prop:set': self._onFeedPropSet,
'prop:del': self._onFeedPropDel,
'tag:prop:set': self._onFeedTagPropSet,
'tag:prop:del': self._onFeedTagPropDel,
}
self.splicers.update(**splicers)
def _initFeedFuncs(self):
'''
Registration for built-in Cortex feed functions.
'''
self.setFeedFunc('syn.nodes', self._addSynNodes)
self.setFeedFunc('syn.splice', self._addSynSplice)
self.setFeedFunc('syn.nodeedits', self._addSynNodeEdits)
def _initCortexHttpApi(self):
'''
Registration for built-in Cortex httpapi endpoints
'''
self.addHttpApi('/api/v1/storm', s_httpapi.StormV1, {'cell': self})
self.addHttpApi('/api/v1/watch', s_httpapi.WatchSockV1, {'cell': self})
self.addHttpApi('/api/v1/storm/call', s_httpapi.StormCallV1, {'cell': self})
self.addHttpApi('/api/v1/storm/nodes', s_httpapi.StormNodesV1, {'cell': self})
self.addHttpApi('/api/v1/storm/export', s_httpapi.StormExportV1, {'cell': self})
self.addHttpApi('/api/v1/reqvalidstorm', s_httpapi.ReqValidStormV1, {'cell': self})
self.addHttpApi('/api/v1/storm/vars/set', s_httpapi.StormVarsSetV1, {'cell': self})
self.addHttpApi('/api/v1/storm/vars/get', s_httpapi.StormVarsGetV1, {'cell': self})
self.addHttpApi('/api/v1/storm/vars/pop', s_httpapi.StormVarsPopV1, {'cell': self})
self.addHttpApi('/api/v1/model', s_httpapi.ModelV1, {'cell': self})
self.addHttpApi('/api/v1/model/norm', s_httpapi.ModelNormV1, {'cell': self})
self.addHttpApi('/api/v1/core/info', s_httpapi.CoreInfoV1, {'cell': self})
async def getCellApi(self, link, user, path):
if not path:
return await self.cellapi.anit(self, link, user)
# allow an admin to directly open the cortex hive
# (perhaps this should be a Cell() level pattern)
if path[0] == 'hive' and user.isAdmin():
return await self.hiveapi.anit(self.hive, user)
if path[0] == 'layer':
if len(path) == 1:
# get the top layer for the default view
layr = self.getLayer()
return await self.layerapi.anit(self, link, user, layr)
if len(path) == 2:
layr = self.getLayer(path[1])
if layr is None:
raise s_exc.NoSuchLayer(iden=path[1])
return await self.layerapi.anit(self, link, user, layr)
if path[0] == 'view':
view = None
if len(path) == 1:
view = self.getView(user=user)
elif len(path) == 2:
view = self.getView(path[1], user=user)
if view is not None:
return await self.viewapi.anit(self, link, user, view)
raise s_exc.NoSuchPath(path=path)
async def getModelDict(self):
return self.model.getModelDict()
async def getModelDefs(self):
return self.model.getModelDefs()
async def getFormCounts(self):
'''
Return total form counts for all existing layers
'''
counts = collections.defaultdict(int)
for layr in self.layers.values():
layrcounts = await layr.getFormCounts()
for name, valu in layrcounts.items():
counts[name] += valu
return dict(counts)
def onTagAdd(self, name, func):
'''
Register a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
# TODO allow name wild cards
if '*' in name:
self.ontagaddglobs.add(name, func)
else:
self.ontagadds[name].append(func)
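    # Illustrative sketch (not part of the original source): registering a tag-add
    # callback on a hypothetical `core` Cortex instance; the callback contract is
    # assumed from the docstring above.
    #
    #     def ontag(node, tagname, tagval):
    #         ...
    #     core.onTagAdd('foo.bar', ontag)   # exact tag name
    #     core.onTagAdd('foo.*', ontag)     # glob form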
def offTagAdd(self, name, func):
'''
Unregister a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagaddglobs.rem(name, func)
return
cblist = self.ontagadds.get(name)
if cblist is None:
return
try:
cblist.remove(func)
except ValueError:
pass
def onTagDel(self, name, func):
'''
Register a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagdelglobs.add(name, func)
else:
self.ontagdels[name].append(func)
def offTagDel(self, name, func):
'''
Unregister a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
if '*' in name:
self.ontagdelglobs.rem(name, func)
return
cblist = self.ontagdels.get(name)
if cblist is None:
return
try:
cblist.remove(func)
except ValueError:
pass
def addRuntLift(self, prop, func):
'''
Register a runt lift helper for a given prop.
Args:
prop (str): Full property name for the prop to register the helper for.
func:
Returns:
None: None.
'''
self._runtLiftFuncs[prop] = func
async def runRuntLift(self, full, valu=None, cmpr=None):
'''
Execute a runt lift function.
Args:
full (str): Property to lift by.
valu:
cmpr:
Returns:
bytes, list: Yields bytes, list tuples where the list contains a series of
key/value pairs which are used to construct a Node object.
'''
func = self._runtLiftFuncs.get(full)
if func is not None:
async for pode in func(full, valu, cmpr):
yield pode
def addRuntPropSet(self, full, func):
'''
Register a prop set helper for a runt form
'''
self._runtPropSetFuncs[full] = func
async def runRuntPropSet(self, node, prop, valu):
func = self._runtPropSetFuncs.get(prop.full)
if func is None:
raise s_exc.IsRuntForm(mesg='No prop:set func set for runt property.',
prop=prop.full, valu=valu, ndef=node.ndef)
ret = await s_coro.ornot(func, node, prop, valu)
return ret
def addRuntPropDel(self, full, func):
'''
Register a prop set helper for a runt form
'''
self._runtPropDelFuncs[full] = func
async def runRuntPropDel(self, node, prop):
func = self._runtPropDelFuncs.get(prop.full)
if func is None:
raise s_exc.IsRuntForm(mesg='No prop:del func set for runt property.',
prop=prop.full, ndef=node.ndef)
ret = await s_coro.ornot(func, node, prop)
return ret
async def _checkLayerModels(self):
mrev = s_modelrev.ModelRev(self)
await mrev.revCoreLayers()
async def _loadView(self, node):
view = await self.viewctor(self, node)
self.views[view.iden] = view
self.dynitems[view.iden] = view
async def fini():
self.views.pop(view.iden, None)
self.dynitems.pop(view.iden, None)
view.onfini(fini)
return view
async def _initCoreViews(self):
defiden = self.cellinfo.get('defaultview')
for iden, node in await self.hive.open(('cortex', 'views')):
view = await self._loadView(node)
if iden == defiden:
self.view = view
for view in self.views.values():
view.init2()
# if we have no views, we are initializing. Add a default main view and layer.
if not self.views:
assert self.inaugural, 'Cortex initialization failed: there are no views.'
ldef = {'name': 'default'}
ldef = await self.addLayer(ldef=ldef, nexs=False)
layriden = ldef.get('iden')
role = await self.auth.getRoleByName('all')
await role.addRule((True, ('layer', 'read')), gateiden=layriden, nexs=False)
vdef = {
'name': 'default',
'layers': (layriden,),
'worldreadable': True,
}
vdef = await self.addView(vdef, nexs=False)
iden = vdef.get('iden')
await self.cellinfo.set('defaultview', iden)
self.view = self.getView(iden)
async def addView(self, vdef, nexs=True):
vdef['iden'] = s_common.guid()
vdef.setdefault('parent', None)
vdef.setdefault('worldreadable', False)
vdef.setdefault('creator', self.auth.rootuser.iden)
s_view.reqValidVdef(vdef)
if nexs:
return await self._push('view:add', vdef)
else:
return await self._addView(vdef)
@s_nexus.Pusher.onPush('view:add')
async def _addView(self, vdef):
s_view.reqValidVdef(vdef)
iden = vdef['iden']
if iden in self.views:
return
for lyriden in vdef['layers']:
if lyriden not in self.layers:
raise s_exc.NoSuchLayer(iden=lyriden)
creator = vdef.get('creator', self.auth.rootuser.iden)
user = await self.auth.reqUser(creator)
await self.auth.addAuthGate(iden, 'view')
await user.setAdmin(True, gateiden=iden, logged=False)
        # worldreadable is not persisted with the view; the state ends up in perms
worldread = vdef.pop('worldreadable', False)
if worldread:
role = await self.auth.getRoleByName('all')
await role.addRule((True, ('view', 'read')), gateiden=iden, nexs=False)
node = await self.hive.open(('cortex', 'views', iden))
info = await node.dict()
for name, valu in vdef.items():
await info.set(name, valu)
view = await self._loadView(node)
view.init2()
return await view.pack()
async def delView(self, iden):
view = self.views.get(iden)
if view is None:
raise s_exc.NoSuchView(iden=iden)
return await self._push('view:del', iden)
@s_nexus.Pusher.onPush('view:del')
async def _delView(self, iden):
'''
Delete a cortex view by iden.
Note:
This does not delete any of the view's layers
'''
view = self.views.get(iden, None)
if view is None:
return
if iden == self.view.iden:
raise s_exc.SynErr(mesg='Cannot delete the main view')
for cview in self.views.values():
if cview.parent is not None and cview.parent.iden == iden:
raise s_exc.SynErr(mesg='Cannot delete a view that has children')
await self.hive.pop(('cortex', 'views', iden))
await view.delete()
await self.auth.delAuthGate(iden)
async def delLayer(self, iden):
layr = self.layers.get(iden, None)
if layr is None:
raise s_exc.NoSuchLayer(iden=iden)
return await self._push('layer:del', iden)
@s_nexus.Pusher.onPush('layer:del', passitem=True)
async def _delLayer(self, iden, nexsitem):
layr = self.layers.get(iden, None)
if layr is None:
return
for view in self.views.values():
if layr in view.layers:
raise s_exc.LayerInUse(iden=iden)
del self.layers[iden]
for pdef in layr.layrinfo.get('pushs', {}).values():
await self.delActiveCoro(pdef.get('iden'))
for pdef in layr.layrinfo.get('pulls', {}).values():
await self.delActiveCoro(pdef.get('iden'))
await self.auth.delAuthGate(iden)
self.dynitems.pop(iden)
await self.hive.pop(('cortex', 'layers', iden))
await layr.delete()
layr.deloffs = nexsitem[0]
async def setViewLayers(self, layers, iden=None):
'''
Args:
            layers ([str]): A top-down list of layer guids
iden (str): The view iden (defaults to default view).
'''
view = self.getView(iden)
if view is None:
raise s_exc.NoSuchView(iden=iden)
await view.setLayers(layers)
def getLayer(self, iden=None):
'''
Get a Layer object.
Args:
iden (str): The layer iden to retrieve.
Returns:
Layer: A Layer object.
'''
if iden is None:
return self.view.layers[0]
# For backwards compatibility, resolve references to old layer iden == cortex.iden to the main layer
# TODO: due to our migration policy, remove in 3.0.0
if iden == self.iden:
return self.view.layers[0]
return self.layers.get(iden)
def listLayers(self):
return self.layers.values()
async def getLayerDef(self, iden=None):
layr = self.getLayer(iden)
if layr is not None:
return await layr.pack()
async def getLayerDefs(self):
return [await lyr.pack() for lyr in list(self.layers.values())]
def getView(self, iden=None, user=None):
'''
Get a View object.
Args:
iden (str): The View iden to retrieve.
Returns:
View: A View object.
'''
if iden is None:
if user is not None:
iden = user.profile.get('cortex:view')
if iden is None:
iden = self.view.iden
# For backwards compatibility, resolve references to old view iden == cortex.iden to the main view
# TODO: due to our migration policy, remove in 3.0.0
if iden == self.iden:
iden = self.view.iden
view = self.views.get(iden)
if view is None:
return None
if user is not None:
user.confirm(('view', 'read'), gateiden=iden)
return view
def listViews(self):
return list(self.views.values())
async def getViewDef(self, iden):
view = self.getView(iden=iden)
if view is not None:
return await view.pack()
async def getViewDefs(self):
return [await v.pack() for v in list(self.views.values())]
async def addLayer(self, ldef=None, nexs=True):
'''
Add a Layer to the cortex.
Args:
ldef (Optional[Dict]): layer configuration
nexs (bool): whether to record a nexus transaction (internal use only)
'''
ldef = ldef or {}
ldef['iden'] = s_common.guid()
ldef.setdefault('creator', self.auth.rootuser.iden)
ldef.setdefault('lockmemory', self.conf.get('layers:lockmemory'))
ldef.setdefault('logedits', self.conf.get('layers:logedits'))
ldef.setdefault('readonly', False)
s_layer.reqValidLdef(ldef)
if nexs:
return await self._push('layer:add', ldef)
else:
return await self._addLayer(ldef, (None, None))
@s_nexus.Pusher.onPush('layer:add', passitem=True)
async def _addLayer(self, ldef, nexsitem):
s_layer.reqValidLdef(ldef)
iden = ldef.get('iden')
if iden in self.layers:
return
layr = self.layers.get(iden)
if layr is not None:
return await layr.pack()
creator = ldef.get('creator')
user = await self.auth.reqUser(creator)
node = await self.hive.open(('cortex', 'layers', iden))
layrinfo = await node.dict()
for name, valu in ldef.items():
await layrinfo.set(name, valu)
layr = await self._initLayr(layrinfo, nexsoffs=nexsitem[0])
await user.setAdmin(True, gateiden=iden, logged=False)
# forward wind the new layer to the current model version
await layr.setModelVers(s_modelrev.maxvers)
return await layr.pack()
async def _initLayr(self, layrinfo, nexsoffs=None):
'''
Instantiate a Layer() instance via the provided layer info HiveDict.
'''
layr = await self._ctorLayr(layrinfo)
layr.addoffs = nexsoffs
self.layers[layr.iden] = layr
self.dynitems[layr.iden] = layr
if self.maxnodes:
counts = await layr.getFormCounts()
self.nodecount += sum(counts.values())
def onadd():
self.nodecount += 1
def ondel():
self.nodecount -= 1
layr.nodeAddHook = onadd
layr.nodeDelHook = ondel
await self.auth.addAuthGate(layr.iden, 'layer')
for pdef in layrinfo.get('pushs', {}).values():
await self.runLayrPush(layr, pdef)
for pdef in layrinfo.get('pulls', {}).values():
await self.runLayrPull(layr, pdef)
await self.fire('core:layr:add', iden=layr.iden)
return layr
async def _ctorLayr(self, layrinfo):
'''
Actually construct the Layer instance for the given HiveDict.
'''
iden = layrinfo.get('iden')
path = s_common.gendir(self.dirn, 'layers', iden)
        # If we're a mirror follower and we have a downstream layer, disable upstream sync
# TODO allow_upstream needs to be separated out
mirror = self.conf.get('mirror')
return await s_layer.Layer.anit(layrinfo, path, nexsroot=self.nexsroot, allow_upstream=not mirror)
async def _initCoreLayers(self):
node = await self.hive.open(('cortex', 'layers'))
for _, node in node:
layrinfo = await node.dict()
await self._initLayr(layrinfo)
@s_nexus.Pusher.onPushAuto('layer:push:add')
async def addLayrPush(self, layriden, pdef):
reqValidPush(pdef)
iden = pdef.get('iden')
layr = self.layers.get(layriden)
if layr is None:
return
pushs = layr.layrinfo.get('pushs')
if pushs is None:
pushs = {}
# handle last-message replay
if pushs.get(iden) is not None:
return
pushs[iden] = pdef
await layr.layrinfo.set('pushs', pushs)
await self.runLayrPush(layr, pdef)
@s_nexus.Pusher.onPushAuto('layer:push:del')
async def delLayrPush(self, layriden, pushiden):
layr = self.layers.get(layriden)
if layr is None:
return
pushs = layr.layrinfo.get('pushs')
if pushs is None:
return
pdef = pushs.pop(pushiden, None)
if pdef is None:
return
await layr.layrinfo.set('pushs', pushs)
await self.delActiveCoro(pushiden)
@s_nexus.Pusher.onPushAuto('layer:pull:add')
async def addLayrPull(self, layriden, pdef):
reqValidPull(pdef)
iden = pdef.get('iden')
layr = self.layers.get(layriden)
if layr is None:
return
pulls = layr.layrinfo.get('pulls')
if pulls is None:
pulls = {}
# handle last-message replay
if pulls.get(iden) is not None:
return
pulls[iden] = pdef
await layr.layrinfo.set('pulls', pulls)
await self.runLayrPull(layr, pdef)
@s_nexus.Pusher.onPushAuto('layer:pull:del')
async def delLayrPull(self, layriden, pulliden):
layr = self.layers.get(layriden)
if layr is None:
return
pulls = layr.layrinfo.get('pulls')
if pulls is None:
return
pdef = pulls.pop(pulliden, None)
if pdef is None:
return
await layr.layrinfo.set('pulls', pulls)
await self.delActiveCoro(pulliden)
async def runLayrPush(self, layr, pdef):
url = pdef.get('url')
iden = pdef.get('iden')
# push() will refire as needed
async def push():
async with await self.boss.promote(f'layer push: {layr.iden} {iden}', self.auth.rootuser):
async with await s_telepath.openurl(url) as proxy:
await self._pushBulkEdits(layr, proxy, pdef)
self.addActiveCoro(push, iden=iden)
async def runLayrPull(self, layr, pdef):
url = pdef.get('url')
iden = pdef.get('iden')
# pull() will refire as needed
async def pull():
async with await self.boss.promote(f'layer pull: {layr.iden} {iden}', self.auth.rootuser):
async with await s_telepath.openurl(url) as proxy:
await self._pushBulkEdits(proxy, layr, pdef)
self.addActiveCoro(pull, iden=iden)
async def _pushBulkEdits(self, layr0, layr1, pdef):
iden = pdef.get('iden')
user = pdef.get('user')
gvar = f'push:{iden}'
async with await s_base.Base.anit() as base:
queue = s_queue.Queue(maxsize=10000)
async def fill():
try:
filloffs = await self.getStormVar(gvar, -1)
async for item in layr0.syncNodeEdits(filloffs + 1, wait=True):
await queue.put(item)
await queue.close()
except asyncio.CancelledError: # pragma: no cover
raise
except Exception as e:
logger.exception(f'pushBulkEdits fill() error: {e}')
await queue.close()
base.schedCoro(fill())
async for chunk in queue.slices():
meta = {'time': s_common.now(), 'user': user}
alledits = []
for offs, edits in chunk:
# prevent push->push->push nodeedits growth
alledits.extend(edits)
if len(alledits) > 1000:
await layr1.storNodeEdits(alledits, meta)
await self.setStormVar(gvar, offs)
alledits.clear()
if alledits:
await layr1.storNodeEdits(alledits, meta)
await self.setStormVar(gvar, offs)
async def _checkNexsIndx(self):
layroffs = [await layr.getEditIndx() for layr in list(self.layers.values())]
if layroffs:
maxindx = max(layroffs)
if maxindx > await self.getNexsIndx():
await self.setNexsIndx(maxindx)
async def cloneLayer(self, iden, ldef=None):
'''
Make a copy of a Layer in the cortex.
Args:
iden (str): Layer iden to clone
ldef (Optional[Dict]): Layer configuration overrides
Note:
This should only be called with a reasonably static Cortex
due to possible races.
'''
layr = self.layers.get(iden, None)
if layr is None:
raise s_exc.NoSuchLayer(iden=iden)
ldef = ldef or {}
ldef['iden'] = s_common.guid()
ldef.setdefault('creator', self.auth.rootuser.iden)
return await self._push('layer:clone', iden, ldef)
@s_nexus.Pusher.onPush('layer:clone', passitem=True)
async def _cloneLayer(self, iden, ldef, nexsitem):
layr = self.layers.get(iden)
if layr is None:
return
newiden = ldef.get('iden')
if newiden in self.layers:
return
newpath = s_common.gendir(self.dirn, 'layers', newiden)
await layr.clone(newpath)
node = await self.hive.open(('cortex', 'layers', iden))
copynode = await self.hive.open(('cortex', 'layers', newiden))
layrinfo = await node.dict()
copyinfo = await copynode.dict()
for name, valu in layrinfo.items():
await copyinfo.set(name, valu)
for name, valu in ldef.items():
await copyinfo.set(name, valu)
copylayr = await self._initLayr(copyinfo, nexsoffs=nexsitem[0])
creator = copyinfo.get('creator')
user = await self.auth.reqUser(creator)
await user.setAdmin(True, gateiden=newiden, logged=False)
return await copylayr.pack()
def addStormCmd(self, ctor):
'''
Add a synapse.lib.storm.Cmd class to the cortex.
'''
if not s_grammar.isCmdName(ctor.name):
raise s_exc.BadCmdName(name=ctor.name)
self.stormcmds[ctor.name] = ctor
async def addStormDmon(self, ddef):
'''
Add a storm dmon task.
'''
iden = s_common.guid()
ddef['iden'] = iden
return await self._push('storm:dmon:add', ddef)
@s_nexus.Pusher.onPushAuto('storm:dmon:bump')
async def bumpStormDmon(self, iden):
ddef = self.stormdmonhive.get(iden)
if ddef is None:
return False
if self.isactive:
dmon = self.stormdmons.getDmon(iden)
if dmon is not None:
await dmon.bump()
return True
async def _bumpUserDmons(self, iden):
'''
Bump all the Dmons for a given user.
Args:
iden (str): User iden.
'''
for dmoniden, ddef in list(self.stormdmonhive.items()):
if ddef.get('user') == iden:
await self.bumpStormDmon(dmoniden)
@s_nexus.Pusher.onPushAuto('storm:dmon:enable')
async def enableStormDmon(self, iden):
ddef = self.stormdmonhive.get(iden)
if ddef is None:
return False
curv = ddef.get('enabled')
ddef['enabled'] = True
await self.stormdmonhive.set(iden, ddef)
if self.isactive and not curv:
dmon = self.stormdmons.getDmon(iden)
await dmon.run()
return True
@s_nexus.Pusher.onPushAuto('storm:dmon:disable')
async def disableStormDmon(self, iden):
ddef = self.stormdmonhive.get(iden)
if ddef is None:
return False
curv = ddef.get('enabled')
ddef['enabled'] = False
await self.stormdmonhive.set(iden, ddef)
if self.isactive and curv:
dmon = self.stormdmons.getDmon(iden)
await dmon.stop()
return True
@s_nexus.Pusher.onPush('storm:dmon:add')
async def _onAddStormDmon(self, ddef):
iden = ddef['iden']
dmon = self.stormdmons.getDmon(iden)
if dmon is not None:
return dmon.pack()
if ddef.get('user') is None:
user = await self.auth.getUserByName('root')
ddef['user'] = user.iden
dmon = await self.runStormDmon(iden, ddef)
await self.stormdmonhive.set(iden, ddef)
return dmon.pack()
async def delStormDmon(self, iden):
'''
Stop and remove a storm dmon.
'''
ddef = self.stormdmonhive.get(iden)
if ddef is None:
mesg = f'No storm daemon exists with iden {iden}.'
raise s_exc.NoSuchIden(mesg=mesg)
return await self._push('storm:dmon:del', iden)
@s_nexus.Pusher.onPush('storm:dmon:del')
async def _delStormDmon(self, iden):
ddef = await self.stormdmonhive.pop(iden)
if ddef is None: # pragma: no cover
return
await self.stormdmons.popDmon(iden)
def getStormCmd(self, name):
return self.stormcmds.get(name)
async def runStormDmon(self, iden, ddef):
# validate ddef before firing task
s_storm.reqValidDdef(ddef)
dmon = self.stormdmons.getDmon(iden)
if dmon is not None:
return dmon
await self.auth.reqUser(ddef['user'])
# raises if parser failure
self.getStormQuery(ddef.get('storm'))
dmon = await self.stormdmons.addDmon(iden, ddef)
return dmon
async def getStormDmon(self, iden):
return self.stormdmons.getDmonDef(iden)
async def getStormDmons(self):
return self.stormdmons.getDmonDefs()
async def getStormDmonLog(self, iden):
return self.stormdmons.getDmonRunlog(iden)
def addStormLib(self, path, ctor):
root = self.libroot
# (name, {kids}, {funcs})
for name in path:
step = root[1].get(name)
if step is None:
step = (name, {}, {})
root[1][name] = step
root = step
root[2]['ctor'] = ctor
def getStormLib(self, path):
root = self.libroot
for name in path:
step = root[1].get(name)
if step is None:
return None
root = step
return root
def getStormCmds(self):
return list(self.stormcmds.items())
async def getAxon(self):
await self.axready.wait()
return self.axon.iden
def setFeedFunc(self, name, func):
'''
Set a data ingest function.
def func(snap, items):
loaditems...
'''
self.feedfuncs[name] = func
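    # Illustrative sketch (not part of the original source): registering a custom
    # feed function on a hypothetical `core` Cortex instance. The built-in feed
    # functions below are async, so an async callable is assumed here.
    #
    #     async def _addMyFeed(snap, items):
    #         for item in items:
    #             ...  # load each item via the snap
    #     core.setFeedFunc('my.feed', _addMyFeed)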
def getFeedFunc(self, name):
'''
Get a data ingest function.
'''
return self.feedfuncs.get(name)
async def getFeedFuncs(self):
ret = []
for name, ctor in self.feedfuncs.items():
# TODO - Future support for feed functions defined via Storm.
doc = getattr(ctor, '__doc__', None)
if doc is None:
doc = 'No feed docstring'
doc = doc.strip()
desc = doc.split('\n')[0]
ret.append({'name': name,
'desc': desc,
'fulldoc': doc,
})
return tuple(ret)
async def _addSynNodes(self, snap, items):
'''
Add nodes to the Cortex via the packed node format.
'''
async for node in snap.addNodes(items):
yield node
async def _addSynSplice(self, snap, items):
for item in items:
func = self.splicers.get(item[0])
if func is None:
await snap.warn(f'no such splice: {item!r}')
continue
try:
await func(snap, item)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception as e:
logger.exception('splice error')
await snap.warn(f'splice error: {e}')
async def _onFeedNodeAdd(self, snap, mesg):
ndef = mesg[1].get('ndef')
if ndef is None:
await snap.warn(f'Invalid Splice: {mesg!r}')
return
await snap.addNode(*ndef)
async def _onFeedNodeDel(self, snap, mesg):
ndef = mesg[1].get('ndef')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.delete()
async def _onFeedPropSet(self, snap, mesg):
ndef = mesg[1].get('ndef')
name = mesg[1].get('prop')
valu = mesg[1].get('valu')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.set(name, valu)
async def _onFeedPropDel(self, snap, mesg):
ndef = mesg[1].get('ndef')
name = mesg[1].get('prop')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.pop(name)
async def _onFeedTagAdd(self, snap, mesg):
ndef = mesg[1].get('ndef')
tag = mesg[1].get('tag')
valu = mesg[1].get('valu')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.addTag(tag, valu=valu)
async def _onFeedTagDel(self, snap, mesg):
ndef = mesg[1].get('ndef')
tag = mesg[1].get('tag')
node = await snap.getNodeByNdef(ndef)
if node is None:
return
await node.delTag(tag)
async def _onFeedTagPropSet(self, snap, mesg):
tag = mesg[1].get('tag')
prop = mesg[1].get('prop')
ndef = mesg[1].get('ndef')
valu = mesg[1].get('valu')
node = await snap.getNodeByNdef(ndef)
if node is not None:
await node.setTagProp(tag, prop, valu)
async def _onFeedTagPropDel(self, snap, mesg):
tag = mesg[1].get('tag')
prop = mesg[1].get('prop')
ndef = mesg[1].get('ndef')
node = await snap.getNodeByNdef(ndef)
if node is not None:
await node.delTagProp(tag, prop)
async def _addSynNodeEdits(self, snap, items):
for item in items:
item = s_common.unjsonsafe_nodeedits(item)
await snap.applyNodeEdits(item)
async def setUserLocked(self, iden, locked):
retn = await s_cell.Cell.setUserLocked(self, iden, locked)
await self._bumpUserDmons(iden)
return retn
def getCoreMod(self, name):
return self.modules.get(name)
def getCoreMods(self):
ret = []
for modname, mod in self.modules.items():
ret.append((modname, mod.conf))
return ret
def _initStormOpts(self, opts):
if opts is None:
opts = {}
opts.setdefault('user', self.auth.rootuser.iden)
return opts
def _viewFromOpts(self, opts):
user = self._userFromOpts(opts)
viewiden = opts.get('view')
if viewiden is None:
viewiden = user.profile.get('cortex:view')
if viewiden is None:
viewiden = self.view.iden
# For backwards compatibility, resolve references to old view iden == cortex.iden to the main view
# TODO: due to our migration policy, remove in 3.0.0
if viewiden == self.iden: # pragma: no cover
viewiden = self.view.iden
view = self.views.get(viewiden)
if view is None:
raise s_exc.NoSuchView(iden=viewiden)
user.confirm(('view', 'read'), gateiden=viewiden)
return view
def _userFromOpts(self, opts):
if opts is None:
return self.auth.rootuser
useriden = opts.get('user')
if useriden is None:
return self.auth.rootuser
user = self.auth.user(useriden)
if user is None:
mesg = f'No user found with iden: {useriden}'
raise s_exc.NoSuchUser(mesg, iden=useriden)
return user
async def count(self, text, opts=None):
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
i = 0
async for _ in view.eval(text, opts=opts):
i += 1
return i
async def storm(self, text, opts=None):
        '''
        Evaluate a storm query against the resolved view and yield the stream of storm messages.
        '''
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
async for mesg in view.storm(text, opts=opts):
yield mesg
async def callStorm(self, text, opts=None):
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
return await view.callStorm(text, opts=opts)
async def exportStorm(self, text, opts=None):
opts = self._initStormOpts(opts)
user = self._userFromOpts(opts)
view = self._viewFromOpts(opts)
await self.boss.promote('storm:export', user=user, info={'query': text})
spooldict = await s_spooled.Dict.anit()
async with await self.snap(user=user, view=view) as snap:
async for pode in snap.iterStormPodes(text, opts=opts):
await spooldict.set(pode[1]['iden'], pode)
await asyncio.sleep(0)
for iden, pode in spooldict.items():
await asyncio.sleep(0)
edges = []
async for verb, n2iden in snap.iterNodeEdgesN1(s_common.uhex(iden)):
await asyncio.sleep(0)
if not spooldict.has(n2iden):
continue
edges.append((verb, n2iden))
if edges:
pode[1]['edges'] = edges
yield pode
async def feedFromAxon(self, sha256, opts=None):
opts = self._initStormOpts(opts)
user = self._userFromOpts(opts)
view = self._viewFromOpts(opts)
await self.boss.promote('feeddata', user=user, info={'name': 'syn.nodes', 'sha256': sha256})
# ensure that the user can make all node edits in the layer
user.confirm(('node',), gateiden=view.layers[0].iden)
q = s_queue.Queue(maxsize=10000)
feedexc = None
async with await s_base.Base.anit() as base:
async def fill():
nonlocal feedexc
try:
async for item in self.axon.iterMpkFile(sha256):
await q.put(item)
except Exception as e:
logger.exception(f'feedFromAxon.fill(): {e}')
feedexc = e
finally:
await q.close()
base.schedCoro(fill())
count = 0
async with await self.snap(user=user, view=view) as snap:
# feed the items directly to syn.nodes
async for items in q.slices(size=100):
async for node in self._addSynNodes(snap, items):
count += 1
if feedexc is not None:
raise feedexc
return count
async def nodes(self, text, opts=None):
'''
A simple non-streaming way to return a list of nodes.
'''
if self.isfini: # pragma: no cover
raise s_exc.IsFini()
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
return await view.nodes(text, opts=opts)
async def eval(self, text, opts=None):
'''
Evaluate a storm query and yield packed nodes.
NOTE: This API is deprecated as of 2.0.0 and will be removed in 3.0.0
'''
s_common.deprecated('eval')
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
async for node in view.eval(text, opts=opts):
yield node
async def stormlist(self, text, opts=None):
return [m async for m in self.storm(text, opts=opts)]
@s_cache.memoizemethod(size=10000)
def getStormQuery(self, text, mode='storm'):
'''
Parse storm query text and return a Query object.
'''
query = copy.deepcopy(s_parser.parseQuery(text, mode=mode))
query.init(self)
return query
@contextlib.asynccontextmanager
async def getStormRuntime(self, query, opts=None):
opts = self._initStormOpts(opts)
view = self._viewFromOpts(opts)
user = self._userFromOpts(opts)
async with await self.snap(user=user, view=view) as snap:
async with snap.getStormRuntime(query, opts=opts, user=user) as runt:
yield runt
async def reqValidStorm(self, text, opts=None):
'''
Parse a storm query to validate it.
Args:
text (str): The text of the Storm query to parse.
opts (dict): A Storm options dictionary.
Returns:
True: If the query is valid.
Raises:
BadSyntaxError: If the query is invalid.
'''
if opts is None:
opts = {}
mode = opts.get('mode', 'storm')
self.getStormQuery(text, mode)
return True
def _logStormQuery(self, text, user):
'''
Log a storm query.
'''
if self.conf.get('storm:log'):
lvl = self.conf.get('storm:log:level')
stormlogger.log(lvl, 'Executing storm query {%s} as [%s]', text, user.name,
extra={'synapse': {'text': text, 'username': user.name, 'user': user.iden}})
async def getNodeByNdef(self, ndef, view=None):
'''
Return a single Node() instance by (form,valu) tuple.
'''
name, valu = ndef
form = self.model.forms.get(name)
if form is None:
raise s_exc.NoSuchForm(name=name)
norm, info = form.type.norm(valu)
buid = s_common.buid((form.name, norm))
async with await self.snap(view=view) as snap:
return await snap.getNodeByBuid(buid)
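    # Illustrative sketch (not part of the original source): looking up a node by
    # its (form, valu) ndef; 'test:str' is a hypothetical form name.
    #
    #     node = await core.getNodeByNdef(('test:str', 'hello'))
    #     if node is not None:
    #         ...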
def getCoreInfo(self):
s_common.deprecated('Cortex.getCoreInfo')
return {
'version': synapse.version,
'modeldef': self.model.getModelDefs(),
'stormcmds': {cmd: {} for cmd in self.stormcmds.keys()},
}
async def getCoreInfoV2(self):
return {
'version': synapse.version,
'modeldict': await self.getModelDict(),
'stormdocs': await self.getStormDocs(),
}
async def getStormDocs(self):
'''
Get a struct containing the Storm Types documentation.
Returns:
dict: A Dictionary of storm documentation information.
'''
ret = {
'libraries': s_stormtypes.registry.getLibDocs(),
'types': s_stormtypes.registry.getTypeDocs(),
# 'cmds': ... # TODO - support cmd docs
# 'packages': ... # TODO - Support inline information for packages?
}
return ret
async def addNodes(self, nodedefs, view=None):
'''
Quickly add/modify a list of nodes from node definition tuples.
This API is the simplest/fastest way to add nodes, set node props,
and add tags to nodes remotely.
Args:
nodedefs (list): A list of node definition tuples. See below.
A node definition tuple is defined as:
            ( (form, valu), {'props':{}, 'tags':{}} )
The "props" or "tags" keys may be omitted.
'''
async with await self.snap(view=view) as snap:
snap.strict = False
async for node in snap.addNodes(nodedefs):
yield node
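    # Illustrative sketch (not part of the original source): adding nodes via the
    # node definition tuple format described above; the form and tag names here
    # are hypothetical.
    #
    #     nodedefs = [
    #         (('test:str', 'hello'), {'props': {}, 'tags': {'my.tag': (None, None)}}),
    #     ]
    #     async for node in core.addNodes(nodedefs):
    #         ...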
async def addFeedData(self, name, items, *, viewiden=None):
'''
Add data using a feed/parser function.
Args:
name (str): The name of the feed record format.
items (list): A list of items to ingest.
            viewiden (str): The iden of a view to use.
If a view is not specified, the default view is used.
'''
view = self.getView(viewiden)
if view is None:
raise s_exc.NoSuchView(iden=viewiden)
async with await self.snap(view=view) as snap:
snap.strict = False
await snap.addFeedData(name, items)
async def snap(self, user=None, view=None):
'''
Return a transaction object for the default view.
Args:
user (str): The user to get the snap for.
view (View): View object to use when making the snap.
Notes:
This must be used as an asynchronous context manager.
Returns:
s_snap.Snap: A Snap object for the view.
'''
if view is None:
view = self.view
if user is None:
user = await self.auth.getUserByName('root')
snap = await view.snap(user)
return snap
async def loadCoreModule(self, ctor, conf=None):
'''
Load a single cortex module with the given ctor and conf.
Args:
ctor (str): The python module class path
conf (dict):Config dictionary for the module
'''
if conf is None:
conf = {}
modu = self._loadCoreModule(ctor, conf=conf)
try:
await s_coro.ornot(modu.preCoreModule)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
mdefs = modu.getModelDefs()
self.model.addDataModels(mdefs)
cmds = modu.getStormCmds()
[self.addStormCmd(c) for c in cmds]
try:
await s_coro.ornot(modu.initCoreModule)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
await self.fire('core:module:load', module=ctor)
return modu
async def _loadCoreMods(self):
mods = []
cmds = []
mdefs = []
for ctor in list(s_modules.coremods):
await self._preLoadCoreModule(ctor, mods, cmds, mdefs)
for ctor in self.conf.get('modules'):
await self._preLoadCoreModule(ctor, mods, cmds, mdefs, custom=True)
self.model.addDataModels(mdefs)
[self.addStormCmd(c) for c in cmds]
async def _preLoadCoreModule(self, ctor, mods, cmds, mdefs, custom=False):
conf = None
# allow module entry to be (ctor, conf) tuple
if isinstance(ctor, (list, tuple)):
ctor, conf = ctor
modu = self._loadCoreModule(ctor, conf=conf)
if modu is None:
return
mods.append(modu)
try:
await s_coro.ornot(modu.preCoreModule)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
cmds.extend(modu.getStormCmds())
model_defs = modu.getModelDefs()
if custom:
for _mdef, mnfo in model_defs:
mnfo['custom'] = True
mdefs.extend(model_defs)
async def _initCoreMods(self):
with s_provenance.claim('init', meth='_initCoreMods'):
for ctor, modu in list(self.modules.items()):
try:
await s_coro.ornot(modu.initCoreModule)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
def _loadCoreModule(self, ctor, conf=None):
if ctor in self.modules:
raise s_exc.ModAlreadyLoaded(mesg=f'{ctor} already loaded')
try:
modu = s_dyndeps.tryDynFunc(ctor, self, conf=conf)
self.modules[ctor] = modu
return modu
except Exception:
logger.exception('mod load fail: %s' % (ctor,))
return None
async def stat(self):
stats = {
'iden': self.iden,
'layer': await self.getLayer().stat(),
'formcounts': await self.getFormCounts(),
}
return stats
async def getPropNorm(self, prop, valu):
'''
Get the normalized property value based on the Cortex data model.
Args:
prop (str): The property to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchProp: If the prop does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
pobj = self.model.prop(prop)
if pobj is None:
raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.',
prop=prop)
norm, info = pobj.type.norm(valu)
return norm, info
async def getTypeNorm(self, name, valu):
'''
Get the normalized type value based on the Cortex data model.
Args:
name (str): The type to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchType: If the type does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
tobj = self.model.type(name)
if tobj is None:
raise s_exc.NoSuchType(mesg=f'The type {name} does not exist.',
name=name)
norm, info = tobj.norm(valu)
return norm, info
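    # Illustrative sketch (not part of the original source): normalizing values
    # against the data model; the property and type names shown are assumptions.
    #
    #     norm, info = await core.getPropNorm('test:str:size', '10')
    #     norm, info = await core.getTypeNorm('int', '10')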
@staticmethod
def _convert_reqdict(reqdict):
return {s_agenda.TimeUnit.fromString(k): v for (k, v) in reqdict.items()}
async def addCronJob(self, cdef):
'''
Add a cron job to the cortex. Convenience wrapper around agenda.add
A cron job is a persistently-stored item that causes storm queries to be run in the future. The specification
for the times that the queries run can be one-shot or recurring.
Args:
query (str): The storm query to execute in the future
reqs (Union[Dict[str, Union[int, List[int]]], List[Dict[...]]]):
Either a dict of the fixed time fields or a list of such dicts. The keys are in the set ('year',
                'month', 'dayofmonth', 'dayofweek', 'hour', 'minute'). The values must be positive integers, except for
the key of 'dayofmonth' in which it may also be a negative integer which represents the number of days
from the end of the month with -1 representing the last day of the month. All values may also be lists
of valid values.
incunit (Optional[str]):
A member of the same set as above, with an additional member 'day'. If is None (default), then the
appointment is one-shot and will not recur.
            incvals (Union[int, List[int]]):
                An integer or a list of integers of the number of units
Returns (bytes):
An iden that can be used to later modify, query, and delete the job.
Notes:
reqs must have fields present or incunit must not be None (or both)
            If incunit is not None, it must be larger in unit size than all the keys in all reqs elements.
Non-recurring jobs may also have a req of 'now' which will cause the job to also execute immediately.
'''
s_agenda.reqValidCdef(cdef)
incunit = cdef.get('incunit')
reqs = cdef.get('reqs')
try:
if incunit is not None:
if isinstance(incunit, (list, tuple)):
incunit = [s_agenda.TimeUnit.fromString(i) for i in incunit]
else:
incunit = s_agenda.TimeUnit.fromString(incunit)
cdef['incunit'] = incunit
if isinstance(reqs, Mapping):
reqs = self._convert_reqdict(reqs)
else:
reqs = [self._convert_reqdict(req) for req in reqs]
if incunit is not None and s_agenda.TimeUnit.NOW in reqs:
mesg = "Recurring jobs may not be scheduled to run 'now'"
raise s_exc.BadConfValu(mesg)
cdef['reqs'] = reqs
except KeyError:
raise s_exc.BadConfValu('Unrecognized time unit')
cdef['iden'] = s_common.guid()
return await self._push('cron:add', cdef)
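    # Illustrative sketch (not part of the original source): a minimal cron
    # definition. The exact cdef schema is enforced by s_agenda.reqValidCdef; the
    # keys shown here are assumptions based on the surrounding code.
    #
    #     cdef = {
    #         'creator': user.iden,
    #         'storm': '[ test:int=1 ]',
    #         'reqs': {'hour': 2, 'minute': 0},
    #         'incunit': 'day',
    #         'incvals': 1,
    #     }
    #     await core.addCronJob(cdef)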
@s_nexus.Pusher.onPush('cron:add')
async def _onAddCronJob(self, cdef):
iden = cdef['iden']
appt = self.agenda.appts.get(iden)
if appt is not None:
return appt.pack()
user = await self.auth.reqUser(cdef['creator'])
cdef = await self.agenda.add(cdef)
await self.auth.addAuthGate(iden, 'cronjob')
await user.setAdmin(True, gateiden=iden, logged=False)
return cdef
@s_nexus.Pusher.onPushAuto('cron:del')
async def delCronJob(self, iden):
'''
Delete a cron job
Args:
iden (bytes): The iden of the cron job to be deleted
'''
try:
await self.agenda.delete(iden)
except s_exc.NoSuchIden:
return
await self.auth.delAuthGate(iden)
@s_nexus.Pusher.onPushAuto('cron:mod')
async def updateCronJob(self, iden, query):
'''
Change an existing cron job's query
Args:
iden (bytes): The iden of the cron job to be changed
'''
await self.agenda.mod(iden, query)
@s_nexus.Pusher.onPushAuto('cron:enable')
async def enableCronJob(self, iden):
'''
Enable a cron job
Args:
iden (bytes): The iden of the cron job to be changed
'''
await self.agenda.enable(iden)
@s_nexus.Pusher.onPushAuto('cron:disable')
async def disableCronJob(self, iden):
'''
        Disable a cron job
Args:
iden (bytes): The iden of the cron job to be changed
'''
await self.agenda.disable(iden)
async def listCronJobs(self):
'''
Get information about all the cron jobs accessible to the current user
'''
crons = []
for _, cron in self.agenda.list():
info = cron.pack()
user = self.auth.user(cron.creator)
info['username'] = user.name
crons.append(info)
return crons
@s_nexus.Pusher.onPushAuto('cron:edit')
async def editCronJob(self, iden, name, valu):
'''
Modify a cron job definition.
'''
appt = await self.agenda.get(iden)
# TODO make this generic and check cdef
if name == 'name':
await appt.setName(str(valu))
return appt.pack()
if name == 'doc':
await appt.setDoc(str(valu))
return appt.pack()
mesg = f'editCronJob name {name} is not supported for editing.'
raise s_exc.BadArg(mesg=mesg)
async def _enableMigrationMode(self):
'''
Prevents cron jobs and triggers from running
'''
self.agenda.enabled = False
self.trigson = False
async def _disableMigrationMode(self):
'''
Allows cron jobs and triggers to run
'''
if self.conf.get('cron:enable'):
self.agenda.enabled = True
if self.conf.get('trigger:enable'):
self.trigson = True
async def iterFormRows(self, layriden, form, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes of a single form, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
form (str): A form name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterFormRows(form, stortype=stortype, startvalu=startvalu):
yield item
async def iterPropRows(self, layriden, form, prop, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes with a particular secondary property, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
form (str): A form name.
            prop (str): A secondary property name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterPropRows(form, prop, stortype=stortype, startvalu=startvalu):
yield item
async def iterUnivRows(self, layriden, prop, stortype=None, startvalu=None):
'''
Yields buid, valu tuples of nodes with a particular universal property, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
prop (str): A universal property name.
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterUnivRows(prop, stortype=stortype, startvalu=startvalu):
yield item
async def iterTagRows(self, layriden, tag, form=None, starttupl=None):
'''
Yields (buid, (valu, form)) values that match a tag and optional form, optionally (re)starting at starttupl.
Args:
layriden (str): Iden of the layer to retrieve the nodes
tag (str): the tag to match
form (Optional[str]): if present, only yields buids of nodes that match the form.
starttupl (Optional[Tuple[buid, form]]): if present, (re)starts the stream of values there.
Returns:
AsyncIterator[Tuple(buid, (valu, form))]
Note:
This yields (buid, (tagvalu, form)) instead of just buid, valu in order to allow resuming an interrupted
call by feeding the last value retrieved into starttupl
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterTagRows(tag, form=form, starttupl=starttupl):
yield item
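    # Illustrative sketch (not part of the original source): resuming an interrupted
    # iterTagRows() stream by feeding the last value back in as starttupl, per the
    # Note above. The tag name is hypothetical.
    #
    #     last = None
    #     async for buid, (valu, form) in core.iterTagRows(layriden, 'foo.bar'):
    #         last = (buid, form)
    #     async for item in core.iterTagRows(layriden, 'foo.bar', starttupl=last):
    #         ...  # continues after the last row seen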
async def iterTagPropRows(self, layriden, tag, prop, form=None, stortype=None, startvalu=None):
'''
Yields (buid, valu) that match a tag:prop, optionally (re)starting at startvalu.
Args:
layriden (str): Iden of the layer to retrieve the nodes
tag (str): tag name
prop (str): prop name
form (Optional[str]): optional form name
stortype (Optional[int]): a STOR_TYPE_* integer representing the type of form:prop
startvalu (Any): The value to start at. May only be not None if stortype is not None.
Returns:
AsyncIterator[Tuple(buid, valu)]
'''
layr = self.getLayer(layriden)
if layr is None:
raise s_exc.NoSuchLayer(iden=layriden)
async for item in layr.iterTagPropRows(tag, prop, form=form, stortype=stortype, startvalu=startvalu):
yield item
@contextlib.asynccontextmanager
async def getTempCortex(mods=None):
'''
Get a proxy to a cortex backed by a temporary directory.
Args:
mods (list): A list of modules which are loaded into the cortex.
Notes:
        The cortex and temporary directory are torn down on exit.
This should only be called from synchronous code.
Returns:
Proxy to the cortex.
'''
with s_common.getTempDir() as dirn:
async with await Cortex.anit(dirn) as core:
if mods:
for mod in mods:
await core.loadCoreModule(mod)
async with core.getLocalProxy() as prox:
yield prox
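# Illustrative sketch (not part of the original source): given the
# asynccontextmanager decorator above, usage would presumably look like this,
# with module ctor paths passed via mods.
#
#     async with getTempCortex(mods=['some.module.path.CoreModule']) as prox:
#         ...  # use the telepath proxy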
|
the-stack_0_3214 | import asyncio
import collections
import gc
from contextlib import contextmanager, suppress
import copy
import functools
from glob import glob
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from time import sleep
import uuid
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
import pytest
import dask
from tlz import merge, memoize, assoc
from tornado import gen
from tornado.ioloop import IOLoop
from . import system
from .client import default_client, _global_clients, Client
from .compatibility import WINDOWS
from .comm import Comm
from .config import initialize_logging
from .core import connect, rpc, CommClosedError, Status
from .deploy import SpecCluster
from .metrics import time
from .process import _cleanup_dangling
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
log_errors,
mp_context,
get_ip,
get_ipv6,
DequeHandler,
reset_logger_locks,
sync,
iscoroutinefunction,
thread_state,
_offload_executor,
TimeoutError,
)
from .worker import Worker
from .nanny import Nanny
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
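# Illustrative usage sketch (not part of the original module): nodebug() wraps
# timing-sensitive test functions so PYTHONASYNCIODEBUG does not skew their
# measurements. The test name below is hypothetical.
#
#     @nodebug
#     def test_latency_budget():
#         ...  # timing-sensitive assertions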
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
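# Illustrative usage sketch (not part of the original module): varying() builds
# a stateful callable that walks through *items*, raising exception instances
# in place and raising IndexError once the items are exhausted. The values
# below are hypothetical.
#
#     func = varying([1, ZeroDivisionError("boom"), 3])
#     func()  # -> 1
#     func()  # raises ZeroDivisionError
#     func()  # -> 3
#     func()  # raises IndexError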
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
        asyncio.ensure_future(background_read())  # schedule the reader; a bare call would never run the coroutine
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
def run_scheduler(q, nputs, port=0, **kwargs):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, **kwargs):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, **kwargs):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
# Some streams can take a bit of time to notice their peer
# has closed, and keep a coroutine (*) waiting for a CommClosedError
# before calling close_rpc() after a CommClosedError.
# This would happen especially if a non-localhost address is used,
# as Nanny does.
# (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=1,
disconnect_timeout=3,
scheduler_kwargs={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
raise pytest.xfail.Exception("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers],
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=2)
with suppress(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with suppress(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
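# Illustrative usage sketch (not part of the original module): cluster() is the
# process-based counterpart of gen_cluster and is normally consumed through the
# fixtures defined earlier in this module, or directly, e.g.:
#
#     with cluster(nworkers=2) as (scheduler, workers):
#         with Client(scheduler["address"]) as c:
#             assert c.submit(inc, 1).result() == 2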
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with suppress(EnvironmentError, CommClosedError):
with rpc(addr, **rpc_kwargs) as w:
await w.terminate(close=True)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*[disconnect(addr, timeout, rpc_kwargs) for addr in addresses])
def gen_test(timeout=10):
"""Coroutine test
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
def _(func):
def test_func():
with clean() as loop:
if iscoroutinefunction(func):
cor = func
else:
cor = gen.coroutine(func)
loop.run_sync(cor, timeout=timeout)
return test_func
return _
from .scheduler import Scheduler
from .worker import Worker
async def start_cluster(
nthreads,
scheduler_addr,
loop,
security=None,
Worker=Worker,
scheduler_kwargs={},
worker_kwargs={},
):
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs),
)
for i, ncore in enumerate(nthreads)
]
# for w in workers:
# w.rpc = workers[0].rpc
await asyncio.gather(*workers)
start = time()
while len(s.workers) < len(nthreads) or any(
comm.comm is None for comm in s.stream_comms.values()
):
await asyncio.sleep(0.01)
if time() - start > 5:
await asyncio.gather(*[w.close(timeout=1) for w in workers])
await s.close(fast=True)
raise Exception("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*[end_worker(w) for w in workers])
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
ncores=None,
scheduler="127.0.0.1",
timeout=10,
security=None,
Worker=Worker,
client=False,
scheduler_kwargs={},
worker_kwargs={},
client_kwargs={},
active_rpc_timeout=1,
config={},
clean_kwargs={},
allow_unclosed=False,
):
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
async def test_foo(scheduler, worker1, worker2):
await ... # use tornado coroutines
See also:
start
end
"""
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
nthreads = ncores
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
func = gen.coroutine(func)
def test_func():
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for i in range(5):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster, retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
future = func(*args)
if timeout:
future = asyncio.wait_for(future, timeout)
result = await future
if s.validate:
s.validate_state()
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == Status.closed)
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 5:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except EnvironmentError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
return test_func
return _
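# Illustrative usage sketch (not part of the original module): with client=True
# the decorated coroutine receives the client first, then the scheduler and one
# argument per worker. The test body below is hypothetical.
#
#     @gen_cluster(client=True)
#     async def test_submit_roundtrip(c, s, a, b):
#         result = await c.submit(inc, 10)
#         assert result == 11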
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(10)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
                print("\n\nPrint from stderr\n %s\n=================\n" % args[0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
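# Illustrative usage sketch (not part of the original module): popen() launches
# a CLI process and guarantees it is terminated on exit; it is typically paired
# with wait_for_port() below. The port is an assumption (the default scheduler
# port) and should match whatever the launched process actually binds.
#
#     with popen(["dask-scheduler"]) as proc:
#         wait_for_port(("127.0.0.1", 8786), timeout=10)
#         ...  # interact with the running scheduler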
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError("Failed to connect to %s" % (address,))
try:
sock = socket.create_connection(address, timeout=timeout)
except EnvironmentError:
pass
else:
sock.close()
break
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
except EnvironmentError:
return False
else:
return True
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
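# Illustrative usage sketch (not part of the original module): the logger name
# below is an example; any Logger instance or logger name can be captured.
#
#     with captured_logger(logging.getLogger("distributed.scheduler")) as sio:
#         ...  # code expected to emit log records
#     assert "expected message" in sio.getvalue()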
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler."""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
@contextmanager
def save_sys_modules():
    old_modules = dict(sys.modules)  # snapshot (not an alias) so additions made inside the context can be detected
    old_path = list(sys.path)
try:
yield
finally:
        for i, elem in reversed(list(enumerate(sys.path))):
            if elem not in old_path:
                del sys.path[i]
        for elem in list(sys.modules.keys()):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
active_threads_start = set(threading._active)
yield
start = time()
while True:
bad = [
t
for t, v in threading._active.items()
if t not in active_threads_start
and "Threaded" not in v.name
and "watch message" not in v.name
and "TCP-Executor" not in v.name
]
if not bad:
break
else:
sleep(0.01)
if time() > start + 5:
from distributed import profile
tid = bad[0]
thread = threading._active[tid]
call_stacks = profile.call_stack(sys._current_frames()[tid])
assert False, (thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
for proc in mp_context.active_children():
proc.terminate()
yield
if check:
for i in range(200):
if not set(mp_context.active_children()):
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
_cleanup_dangling()
for proc in mp_context.active_children():
proc.terminate()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status == Status.running:
w.loop.add_callback(w.close)
Worker._instances.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(
n.status == Status.closed or n.status == Status.init for n in Nanny._instances
), {n: n.status for n in Nanny._instances}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == Status.closed for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
@contextmanager
def null():
yield
with check_thread_leak() if threads else null():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else null():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
with suppress(AttributeError):
del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
with clean():
yield
|
the-stack_0_3215 | # -*- coding: utf-8 -*-
#
# Database upgrade script
#
# RLPCM Template Version 1.1.5 => 1.1.6
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/BRCMS/RLP/upgrade/1.1.5-1.1.6.py
#
import sys
from uuid import uuid4
#from gluon.storage import Storage
#from gluon.tools import callback
from s3 import S3Duplicate
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
def info(msg):
sys.stderr.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
# Load models for tables
#ftable = s3db.org_facility
IMPORT_XSLT_FOLDER = os.path.join(request.folder, "static", "formats", "s3csv")
TEMPLATE_FOLDER = os.path.join(request.folder, "modules", "templates", "BRCMS")
# -----------------------------------------------------------------------------
# Upgrade user roles
#
if not failed:
info("Upgrade user roles")
bi = s3base.S3BulkImporter()
filename = os.path.join(TEMPLATE_FOLDER, "RLP", "auth_roles.csv")
with open(filename, "r") as File:
try:
bi.import_role(filename)
except Exception as e:
infoln("...failed")
infoln(sys.exc_info()[1])
failed = True
else:
infoln("...done")
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("UPGRADE FAILED - Action rolled back.")
else:
db.commit()
infoln("UPGRADE SUCCESSFUL.")
|
the-stack_0_3217 | # -*- coding: utf-8 -*-
import DIRAC
from DIRAC import gLogger
from DIRAC.Core.Base import Script
Script.setUsageMessage("""
Download LFNs in a dataset
Usage:
%s <dataset name>
""" % Script.scriptName)
Script.registerSwitch("", "save=", "The directory which save files.")
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if (len(args) != 1):
    gLogger.error("Please supply the dataset name")
DIRAC.exit(-1)
dataset = args[0]
dir_save = args[0]
for k,v in Script.getUnprocessedSwitches():
if k.lower() in ["save"]:
dir_save = v
gLogger.info("Dataset Name: ", dataset)
gLogger.info("Save in: ", dir_save)
# Get the list of LFNs in one dataset
from DIRAC.Core.DISET.RPCClient import RPCClient
transferRequest = RPCClient("Transfer/Dataset")
res = transferRequest.list(dataset)
if not res["OK"]:
gLogger.error(res)
DIRAC.exit(-1)
file_list = [v[1] for v in res["Value"]]
gLogger.debug("File List", file_list)
# Begin to save file
# Refer to dirac-dms-get-file.py in DIRAC/Interfaces/scripts
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
res = dirac.getFile( file_list, destDir = dir_save, printOutput = True )
if not res["OK"]:
gLogger.error(res)
DIRAC.exit(-1)
DIRAC.exit(0)
|
the-stack_0_3218 | #!/usr/bin/python
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import serial
import sys
import time
from demo_plot_defs import *
from other_funcs import get_freq_response, get_phase_response
if (sys.argv[1] == "help"):
print ("usage: demo_plot.py MIN MAX options...")
print ("MIN MAX define the range of frequencies to test")
print ("Possible options: linear, phase, calibrate.")
print ("linear will produce a linear plot instead of a logarithmic one")
print ("phase will produce a phase response in addition to the frequency response plot")
print ("calibrate is recommended and allows you to run an extra test with just a copper wire to better calibrate the final output.")
print ("")
sys.exit (1)
elif len(sys.argv) < 3:
print ("usage: demo_plot.py MIN MAX options...")
sys.exit (1)
try:
float (sys.argv[1])
float (sys.argv[2])
except ValueError:
print ("usage: demo_plot.py MIN MAX options...")
print ("MIN and MAX must be floating point!")
sys.exit (1)
#Initialize
s = connect_gpa()
mc_init(s)
synth_init(s)
frontend_init(s)
lower_bound = float (sys.argv[1])
upper_bound = float (sys.argv[2])
freqs_f = np.logspace(np.log10(lower_bound), np.log10(upper_bound), 60) # 1 kHz to 150 MHz
freqs_p = np.logspace(np.log10(lower_bound), np.log10(upper_bound), 30) # 1 kHz to 150 MHz
data_f = []
data_p = []
data_calibrate_f = []
data_calibrate_p = []
if "calibrate" in sys.argv:
input ("Please double check that the wire is connected and press Enter...")
data_calibrate_f = get_freq_response(s, lower_bound, upper_bound, freqs_f)
if "phase" in sys.argv:
data_calibrate_p = get_phase_response(s, lower_bound, upper_bound, freqs_p)
input ("Now connect your filter for testing and press Enter ...")
data_f = get_freq_response(s, lower_bound, upper_bound, freqs_f)
if "calibrate" in sys.argv:
for i in range(len(data_f)):
data_f[i] = data_f[i] - data_calibrate_f[i]
plt.subplot(2, 1, 1)
#ax = plt.axes(xlim=(1e3, 1e9))
if 'linear' in sys.argv:
plot, = plt.plot (freqs_f, data_f)
else:
plot, = plt.semilogx (freqs_f, data_f)
if "phase" not in sys.argv:
plt.xlabel ("Frequency (Hz)")
plt.ylabel ("Amplitude (dB, calibrated)")
plt.title ("Voltage Insertion Gain, calibrated")
plt.grid (True)
if "phase" in sys.argv:
data_p = get_phase_response(s, lower_bound, upper_bound, freqs_p)
if "calibrate" in sys.argv:
for i in range(len(data_p)):
data_p[i] = data_p[i] - data_calibrate_p[i]
plt.subplot(2, 1, 2)
#ax = plt.axes(xlim=(1e3, 1e9))
if 'linear' in sys.argv:
plot, = plt.plot (freqs_p, data_p)
else:
plot, = plt.semilogx (freqs_p, data_p)
plt.xlabel ("Frequency (Hz)")
plt.ylabel ("Phase (deg, calibrated)")
plt.title ("Phase Shift, calibrated")
plt.grid (True)
plt.savefig('out.png')
plt.show ()
|
the-stack_0_3220 | import os
import discord
from discord import Embed
import settings
def prune_participant_name(name):
name = name.split(' ')[0]
name = name.split('/')[0]
name = name.split('\\')[0]
name = name.split('-')[0]
name = name.split('(')[0]
name = name.split(')')[0]
name = name.split('+')[0]
name = name.split('&')[0]
name = name.title()
return name
def fuzzy_string_match(first, second):
if len(first) > 3:
return second.lower() in first.lower()
else:
return first.lower() == second.lower()
async def send_announcement(ctx, announcement):
for channel in ctx.guild.channels:
if channel.name.lower() == settings.ANNOUNCEMENT_CHANNEL_NAME.lower():
await channel.send(announcement)
break
def extract_identifier(member):
for role in member.roles:
if role.name.title() in settings.IDENTIFIERS:
return role.name.title()
return None
def extract_role(member, identifier):
for role in member.roles:
if role.name.title() in settings.ROLES.ALL:
return role.name.title()
return settings.ROLES.from_identifier_default(identifier.title())
async def get_event_message(channel, client):
def is_event_message(m):
# Look for a message that has an embed with a footer that contains the id of the bot
if len(m.embeds) > 0:
footer = m.embeds[0].footer
return False if footer is Embed.Empty else str(client.user.id) == m.embeds[0].footer.text
return False
# Check if the bot has an event message in this channel already
event_message = await channel.history().find(is_event_message)
return event_message
async def show_event(channel, client, embed, new_event=False):
def is_event_message(m):
# Look for a message that has an embed with a footer that contains the id of the bot
if len(m.embeds) > 0:
footer = m.embeds[0].footer
            return False if footer is Embed.Empty else str(client.user.id) == m.embeds[0].footer.text
        return False
await channel.purge(check=lambda m: not is_event_message(m))
event_message = await get_event_message(channel, client)
if event_message is None:
event_message = await channel.send(embed=embed)
new_event = True
else:
await event_message.edit(embed=embed)
if new_event:
await event_message.clear_reactions()
await event_message.add_reaction(emoji=settings.SIGNUP_REACTION)
await event_message.add_reaction(emoji=settings.DECLINE_REACTION)
def log(*args):
is_logging_active = os.getenv('LOGGING')
if is_logging_active:
print(*args)
|
the-stack_0_3221 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 19 09:04:27 2018
@author: jeremiasknoblauch
Description: Well-log data processing
"""
"""System packages/modules"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import csv
import datetime
import matplotlib
"""Modules of the BOCPDMS algorithm"""
from cp_probability_model import CpModel
from BVAR_NIG import BVARNIG
from BVAR_NIG_DPD import BVARNIGDPD
from detector import Detector
from Evaluation_tool import EvaluationTool
baseline_working_directory = ("//Users//jeremiasknoblauch//Documents//OxWaSP"+
"//BOCPDMS/Code//SpatialBOCD//Data//well log")
well_file = baseline_working_directory + "//well.txt"
mode = "DPD" #Two modes available:
#"DPD" -> Density Power Divergence, which is
# the same as the beta-divergence.
#"KL" -> Kullback Leibler Divergence, i.e. standard bayes
"""STEP 1: Read in the nile data from well.txt"""
raw_data = []
count = 0
with open(well_file) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
raw_data += row
raw_data_float = []
for entry in raw_data:
raw_data_float.append(float(entry))
raw_data = raw_data_float
"""STEP 2: Format the data so that it can be processed with a Detector
object and instantiations of ProbabilityModel subclasses"""
T = int(len(raw_data))
S1, S2 = 1,1 #S1, S2 give you spatial dimensions
data = np.array(raw_data).reshape(T,1,1)
"""STEP 3: Set up the optimization parameters. These only apply if mode = "DPD"
since the standard Bayesian inference is exact"""
VB_window_size = 360 #W in pseudo-code of paper
full_opt_thinning = 20 #i.e., whenever the run-length is divisible by 20,
#we perform a full optimization step
SGD_approx_goodness = 10 #In the pure SGD steps, how big is your batch
anchor_approx_goodness_SVRG = 50 #in the SVRG steps, how big is your batch (NOT USED)
anchor_approx_goodness_SCSG = 25 #in the SCSG steps, how big is your batch
first_opt = 10
alpha_param_opt_t = 0 #Indicates how many time periods you wait before learning
#about beta (alpha in DPD notation of Basu et al. ('98))
"""STEP 4: Set up the priors for the model universe's elements"""
a, b = 1, pow(10,7)
alpha_param = 0.05 #Initialization for the parameter-beta (alpha)
alpha_rld = 0.0001 #Initialization for the run-length-beta (alpha)
rld_DPD = "power_divergence" #The run-length robustification can be set inde-
#pendently from the parameter robustness.
a_KL, b_KL = 1, pow(10,4)
rld_KL = "kullback_leibler" #The run-length robustification can be set inde-
#pendently from the parameter robustness.
rld_learning = True #Whether or not we learn about the beta (alpha)
#robustifying the run-length (if rld = "power_divergence")
param_learning = "individual" #Irrelevant for well-log because we only run
#a single model.
#Set Prior mean and variance scales
prior_mean_scale, prior_var_scale = np.mean(data), 0.25
cp_intensity = 100 # cp_intensity = k => prior prob P(CP at time t) = 1/k
np.random.seed(999) #To exactly reproduce paper results/pictures
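#Illustrative note (not part of the original script): with the constant-hazard
#changepoint prior above, cp_intensity = 100 corresponds to a hazard rate of
#h(t) = 1/100 = 0.01 per time step, i.e. an expected segment length of roughly
#100 observations between changepoints under the prior.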
"""STEP 5: Create models"""
model_universe_DPD = []
model_universe_KL = []
model_universe_DPD = model_universe_DPD + [BVARNIGDPD(
prior_a=a,
prior_b=b, #b,
S1=S1,
S2=S2,
alpha_param = alpha_param,
prior_mean_beta=None,
prior_var_beta=None,
prior_mean_scale=prior_mean_scale,
prior_var_scale=prior_var_scale,
nbh_sequence=None,
restriction_sequence = None,
hyperparameter_optimization = "online",
VB_window_size = VB_window_size,
full_opt_thinning = full_opt_thinning,
SGD_batch_size = SGD_approx_goodness,
anchor_batch_size_SCSG = anchor_approx_goodness_SCSG,
anchor_batch_size_SVRG = None,
first_full_opt = first_opt
)]
model_universe_KL = model_universe_KL + [BVARNIG(
prior_a = a_KL,
prior_b = b_KL,
S1 = S1,
S2 = S2,
prior_mean_scale = prior_mean_scale,
prior_var_scale = prior_var_scale,
nbh_sequence = None,
restriction_sequence = None,
hyperparameter_optimization = "online"
)]
"""STEP 6: Set up the detectors from this"""
model_universe_DPD = np.array(model_universe_DPD)
model_universe_KL = np.array(model_universe_KL)
model_prior = np.array([1.0/len(model_universe_DPD)]*len(model_universe_DPD))
cp_model = CpModel(cp_intensity)
detector_DPD = Detector(
data=data,
model_universe=model_universe_DPD,
model_prior = model_prior,
cp_model = cp_model,
S1 = S1,
S2 = S2,
T = T,
store_rl=True,
store_mrl=True,
trim_type="keep_K",
threshold = 50,
notifications = 100,
save_performance_indicators = True,
generalized_bayes_rld = rld_DPD,
alpha_param_learning = param_learning,
alpha_param = alpha_param,
alpha_param_opt_t = 100,
alpha_rld = alpha_rld,
alpha_rld_learning = rld_learning,
loss_der_rld_learning="absolute_loss"
)
detector_DPD.run()
detector_KL = Detector(
data=data,
model_universe=model_universe_KL,
model_prior = model_prior,
cp_model = cp_model,
S1 = S1,
S2 = S2,
T = T,
store_rl=True,
store_mrl=True,
trim_type="keep_K",
threshold = 50,
notifications = 100,
save_performance_indicators = True,
generalized_bayes_rld = rld_KL,
alpha_param_learning = param_learning,
alpha_param = alpha_param,
alpha_param_opt_t = 100,
alpha_rld = alpha_rld,
alpha_rld_learning = rld_learning,
loss_der_rld_learning="absolute_loss"
)
detector_KL.run()
"""STEP 7: Make graphing tool"""
EvTDPD = EvaluationTool()
EvTDPD.build_EvaluationTool_via_run_detector(detector_DPD)
EvTKL = EvaluationTool()
EvTKL.build_EvaluationTool_via_run_detector(detector_KL)
"""STEP 8: Plotting Pictures in paper"""
matplotlib.rcParams.update({'figure.autolayout': False})
"""Get the different CPs"""
CPsDPD = np.array([e[0] for e in EvTDPD.results[EvTDPD.names.index("MAP CPs")][-2]])
CPsKL = np.array([e[0] for e in EvTKL.results[EvTKL.names.index("MAP CPs")][-2]])
k = 25
additional_CPs = []
for cp_kl in CPsKL:
lower = CPsDPD - k < cp_kl
upper = CPsDPD + k > cp_kl
if (not np.any(lower == upper)):
additional_CPs.append([cp_kl,0])
height_ratio =[10,4,8]
KL_CP_color = "crimson"
DPD_CP_color = "darkblue"
max_color_KL = "red"
max_color_DPD = "blue"
max_width = 1
CP_linewidth_DPD = 2
CP_linewidth_KL = 1
CP_style_KL = (0,(1,2.25))
CP_style_DPD = "solid"
CP_transparence_KL = 0.75
CP_transparence_DPD = 0.5
show_CPs_in_rld = False
xlabsize, ylabsize, ticksize = 15,15,12
fig, ax_array = plt.subplots(3,
figsize=(18,10),
sharex = True,
gridspec_kw = {'height_ratios':height_ratio})
fig.subplots_adjust(hspace = .05,
left = None, bottom = None,
right = None, top = None)
ylabel_coords = [0.0, 0.25]
EvTDPD.plot_raw_TS(data.reshape(T,S1*S2), indices = [0], xlab = None,
show_MAP_CPs = True,
time_range = np.linspace(1,T, T, dtype=int),
print_plt = False,
ylab = "Response",
ax = ax_array[0],
custom_colors_series = ["black"]*5,
custom_colors_CPs = [DPD_CP_color]* 100,
custom_linestyles = [CP_style_DPD]*100,
custom_linewidth = CP_linewidth_DPD,
custom_transparency = CP_transparence_DPD,
ylab_fontsize = ylabsize,
yticks_fontsize = ticksize,
ylabel_coords = [-0.06,0.5],
additional_CPs = additional_CPs,
custom_colors_additional_CPs = [KL_CP_color] * 100,
custom_linewidth_additional_CPs = CP_linewidth_KL,
custom_linestyles_additional_CPs = [CP_style_KL] * 10,
custom_transparency_additional_CPs = CP_transparence_KL)
EvTDPD.plot_run_length_distr(buffer=0, show_MAP_CPs = show_CPs_in_rld,
mark_median = False,
mark_max = True,
upper_limit = 1300,
print_colorbar = False,
colorbar_location= None,
xlab = "",
ylab = "",
log_format = False, aspect_ratio = 'auto',
time_range = np.linspace(1,
T-2,
T-2, dtype=int),
start = 1, stop = T,
all_dates = None,
custom_colors = [DPD_CP_color] * 30,
custom_linestyles = [CP_style_DPD]*30,
custom_linewidth = CP_linewidth_DPD,
xlab_fontsize = xlabsize,
ylab_fontsize = ylabsize,
xticks_fontsize = ticksize,
yticks_fontsize = ticksize,
ax = ax_array[1], figure = fig,
no_transform = True,
date_instructions_formatter = None,
date_instructions_locator = None,
arrow_distance = 25,
mark_max_linewidth = max_width,
mark_max_color = max_color_DPD)
EvTKL.plot_run_length_distr(buffer=0, show_MAP_CPs = show_CPs_in_rld,
mark_median = False,
mark_max = True, upper_limit = 1200,
print_colorbar = True,
colorbar_location= 'bottom',
space_to_colorbar = 0.6,
log_format = False, aspect_ratio = 'auto',
C1=0,C2=700,
time_range = np.linspace(1,
T-2,
T-2, dtype=int),
start = 1, stop = T,
all_dates = None,
custom_colors = [KL_CP_color] * 30,
custom_linestyles = [CP_style_KL]*30,
custom_linewidth = CP_linewidth_KL,
xlab_fontsize =xlabsize,
ylab_fontsize = ylabsize,
xticks_fontsize = ticksize,
yticks_fontsize = ticksize,
ylabel_coords = [-0.06, 1.25],
ax = ax_array[2], figure = fig,
no_transform = True,
date_instructions_formatter = None,
date_instructions_locator = None,
xlab = "Time",
ylab = "run length",
arrow_distance = 25,
mark_max_linewidth = max_width,
mark_max_color = max_color_KL)
fig.savefig(baseline_working_directory + "//well.pdf",
format = "pdf", dpi = 800)
fig.savefig(baseline_working_directory + "//well.jpg",
format = "jpg", dpi = 800)
"""STEP 9: Plot some performance metrics"""
def abs_loss_lim(x, lim):
x[np.where(x >= lim)] = lim
return np.abs(x)
sd = np.sqrt(np.var(data))
print("CPs are ", detector_DPD.CPs[-2])
train = 0
until = -2
resids = (data[1:] - EvTKL.results[10].reshape(T,1)[:-1])[train:until]
print("summary MSE KL:",
np.mean(np.power((data[1:] -
EvTKL.results[10].reshape(T,1)[:-1])[train:until],2)))
print("summary MAE KL:",
np.mean(np.abs((data[1:] -
EvTKL.results[10].reshape(T,1)[:-1])[train:until])))
resids = (data - EvTDPD.results[10].reshape(T,1)[:-1])[train:until]
print("summary MSE DPD:",
np.mean(np.power(((data -
EvTDPD.results[10].reshape(T,1)[:-1]))[train:until],2)))
print("summary MAE DPD:",
np.mean(np.abs(((data -
EvTDPD.results[10].reshape(T,1)[:-1]))[train:until])))
|
the-stack_0_3222 | from django.http import HttpResponseForbidden
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from boxes.authorization import load_can_edit
from boxes.forms import BoxForm
from boxes.models import Box
# @@@ problem with this is that the box_edit.html and box_create.html won't have domain objects in context
def get_auth_vars(request):
auth_vars = {}
if request.method == "POST":
keys = [k for k in request.POST.keys() if k.startswith("boxes_auth_")]
for key in keys:
auth_vars[key.replace("boxes_auth_", "")] = request.POST.get(key)
auth_vars["user"] = request.user
return auth_vars
def box_edit(request, pk):
box = get_object_or_404(Box, pk=pk)
if request.method == "POST":
#if not load_can_edit()(request, **get_auth_vars(request)):
# return HttpResponseForbidden()
form = BoxForm(request.POST, instance=box)
if form.is_valid():
form.save()
return render_to_response("boxes/refresh.html", {})
else:
form = BoxForm(instance=box)
ctx = {
"form": form,
"box": box,
}
ctx = RequestContext(request, ctx)
return render_to_response("boxes/box_edit.html", ctx)
def box_create(request, label):
if request.method == "POST":
#if not load_can_edit()(request, **get_auth_vars(request)):
# return HttpResponseForbidden()
form = BoxForm(request.POST)
if form.is_valid():
box = form.save(commit=False)
box.label = label
box.created_by = request.user
box.last_updated_by = request.user
box.save()
return render_to_response("boxes/refresh.html", {})
else:
form = BoxForm()
ctx = {
"form": form,
"label": label
}
ctx = RequestContext(request, ctx)
return render_to_response("boxes/box_create.html", ctx)
|
the-stack_0_3223 | import sst
import os
sst.setProgramOption("timebase", "1ps")
sst_root = os.getenv( "SST_ROOT" )
app = sst_root + "/sst-elements/src/sst/elements/ariel/frontend/simple/examples/stream/stream"
if not os.path.exists(app):
app = os.getenv( "OMP_EXE" )
l2PrefetchParams = {
"prefetcher": "cassini.StridePrefetcher",
"reach": 8
}
ariel = sst.Component("a0", "ariel.ariel")
ariel.addParams({
"verbose" : "0",
"maxcorequeue" : "256",
"maxissuepercycle" : "2",
"pipetimeout" : "0",
"executable" : app,
"arielmode" : "1",
"launchparamcount" : 1,
"launchparam0" : "-ifeellucky",
})
memmgr = ariel.setSubComponent("memmgr", "ariel.MemoryManagerSimple")
corecount = 1;
l1cache = sst.Component("l1cache", "memHierarchy.Cache")
l1cache.addParams({
"cache_frequency" : "2 Ghz",
"cache_size" : "64 KB",
"coherence_protocol" : "MSI",
"replacement_policy" : "lru",
"associativity" : "8",
"access_latency_cycles" : "1",
"cache_line_size" : "64",
"L1" : "1",
"debug" : "0",
})
memctrl = sst.Component("memory", "memHierarchy.MemController")
memctrl.addParams({
"clock" : "1GHz",
})
memory = memctrl.setSubComponent("backend", "memHierarchy.simpleMem")
memory.addParams({
"access_time" : "10ns",
"mem_size" : "2048MiB",
})
cpu_cache_link = sst.Link("cpu_cache_link")
cpu_cache_link.connect( (ariel, "cache_link_0", "50ps"), (l1cache, "high_network_0", "50ps") )
memory_link = sst.Link("mem_bus_link")
memory_link.connect( (l1cache, "low_network_0", "50ps"), (memctrl, "direct_link", "50ps") )
# Set the Statistic Load Level; Statistics with Enable Levels (set in
# elementInfoStatistic) lower or equal to the load can be enabled (default = 0)
sst.setStatisticLoadLevel(5)
# Set the desired Statistic Output (sst.statOutputConsole is default)
sst.setStatisticOutput("sst.statOutputConsole")
#sst.setStatisticOutput("sst.statOutputTXT", {"filepath" : "./TestOutput.txt"
# })
#sst.setStatisticOutput("sst.statOutputCSV", {"filepath" : "./TestOutput.csv",
# "separator" : ", "
# })
# Enable Individual Statistics for the Component with output at end of sim
# Statistic defaults to Accumulator
ariel.enableStatistics([
"cycles",
"instruction_count",
"read_requests",
"write_requests"
])
l1cache.enableStatistics([
"CacheHits",
"CacheMisses"
])
|
the-stack_0_3224 | import taso as ts
import onnx
import os
import argparse
import re
def squeeze(graph, out_channels, input):
weight = graph.new_weight(dims=(out_channels, input.dim(1), 1, 1))
return graph.conv2d(input=input, weight=weight,
strides=(1, 1), padding="SAME",
activation="RELU")
def fit(graph, current, input):
if input.dim(2) == current.dim(2):
return squeeze(graph, current.dim(1), input)
else:
weight = graph.new_weight(dims=(current.dim(1), input.dim(1), 3, 3))
return graph.conv2d(input=input, weight=weight, strides=(2, 2), padding="SAME", activation="RELU")
def seperable_conv(graph, input, out_channels, kernels, strides, padding, activation = "NONE"):
assert input.dim(1) % out_channels == 0, "input.dim(1)={}, out_channels={}".format(input.dim(1), out_channels)
weight1 = graph.new_weight(dims=(out_channels, input.dim(1) // out_channels, kernels[0], kernels[1]))
t = graph.conv2d(input=input, weight=weight1, strides=strides, padding=padding)
weight2 = graph.new_weight(dims=(out_channels, t.dim(1), 1, 1))
return graph.conv2d(input=t, weight=weight2, strides=(1, 1), padding="SAME", activation=activation)
def normal_cell(graph, prev, cur, out_channels):
cur = squeeze(graph, out_channels, cur)
prev = fit(graph, cur, prev)
ts = list()
ts.append(seperable_conv(graph, input=cur, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(cur)
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(seperable_conv(graph, input=cur, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(graph.avgpool2d(input=cur, kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(prev)
ts.append(graph.avgpool2d(input=prev, kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(graph.avgpool2d(input=prev, kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
assert len(ts) == 10, "Expected 10 tensors, got {}".format(len(ts))
outputs = list()
for i in range(5):
outputs.append(graph.add(ts[2*i], ts[2*i+1]))
return graph.concat(1, outputs)
def reduction_cell(graph, prev, cur, out_channels):
cur = squeeze(graph, out_channels, cur)
prev = fit(graph, cur, prev)
ts = list()
outputs = list()
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(7,7), strides=(2,2), padding="SAME"))
ts.append(seperable_conv(graph, input=cur, out_channels=out_channels,
kernels=(5,5), strides=(2,2), padding="SAME"))
outputs.append(graph.add(ts[0], ts[1]))
ts.append(graph.maxpool2d(input=cur, kernels=(3,3), strides=(2,2), padding="SAME"))
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(7,7), strides=(2,2), padding="SAME"))
outputs.append(graph.add(ts[2], ts[3]))
ts.append(graph.avgpool2d(input=cur, kernels=(3,3), strides=(2,2), padding="SAME"))
ts.append(seperable_conv(graph, input=prev, out_channels=out_channels,
kernels=(5,5), strides=(2,2), padding="SAME"))
outputs.append(graph.add(ts[4], ts[5]))
ts.append(graph.maxpool2d(input=cur, kernels=(3,3), strides=(2,2), padding="SAME"))
ts.append(seperable_conv(graph, input=outputs[0], out_channels=out_channels,
kernels=(3,3), strides=(1,1), padding="SAME"))
outputs.append(graph.add(ts[6], ts[7]))
ts.append(graph.avgpool2d(input=outputs[0], kernels=(3,3), strides=(1,1), padding="SAME"))
ts.append(outputs[1])
outputs.append(graph.add(ts[8], ts[9]))
return graph.concat(1, outputs)
#here we need to parse arguments
parser = argparse.ArgumentParser()
# parser.add_argument("-a", "--alpha", help="alpha", default = 1.05)
parser.add_argument("-b", "--budget", help="budget", required=True)
# parser.add_argument("-s", "--sample_size", help="sample_size")
# parser.add_argument("-n", "--block_num", help="block_num", required = True)
parser.add_argument("-c", "--cuda", help="cuda device", default = 0)
parser.add_argument("-r", "--runtimes", help="the number of runs required", required = True)
parser.add_argument("-m", "--method", help="the method to use", required = True)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(int(args.cuda))
budget=int(args.budget)
# block_num=int(args.block_num)
runtimes=int(args.runtimes)
methods=int(args.method)
# BUILD THE ORIGINAL GRAPH
graph = ts.new_graph()
input = graph.new_input(dims=(1,3,224,224))
weight = graph.new_weight(dims=(64,3,7,7))
input = graph.conv2d(input=input, weight=weight, strides=(2,2),
padding="SAME", activation="RELU")
input = graph.maxpool2d(input=input, kernels=(3,3), strides=(2,2), padding="SAME")
out_channels = 128
for i in range(3):
prev = input
cur = input
for j in range(5):
t = normal_cell(graph, prev, cur, out_channels)
prev = cur
cur = t
out_channels *= 2
input = reduction_cell(graph, prev, cur, out_channels)
# new_graph = ts.optimize(graph, alpha=1.0, budget=-1)
import timeit
# this helper function write "to_write" to the file
def write_result(to_write):
f = open('results.py','a')
f.write(to_write)
f.close()
def get_memory():
my_pid = os.getpid()
print(os.system("grep VmHWM /proc/" + str(my_pid)+ "/status > memory.txt"))
print(os.system("grep VmHWM /proc/" + str(my_pid)+ "/status"))
print(str(my_pid))
f2 = open("memory.txt","r")
lines = f2.readlines()
for line3 in lines:
pattern = r'VmHWM:\s*([0-9]+) kB'
matchObj = re.match(pattern, line3)
memory = int(matchObj.group(1))
break
return memory
# repeat_time = 1
# DO OPTIMIZATION AND RECORD RESULTS
# write_result('all_results = dict()\n')
# write_result('\nall_results["sysmlpartition"] = dict()\n')
# write_result('\nall_results["sysmltrick"] = dict()\n')
# write_result('\nall_results["sampletrick"] = dict()\n')
# write_result('\nall_results["sampletrick_truenewreuse"] = dict()\n')
# write_result('\nall_results["reuse"] = dict()\n')
# write_result('\nall_results["prune"] = dict()\n')
write_result('\nall_results = model_results["nasneta' + 'b' + str(budget) + '"]\n')
for repeat_time in range(runtimes, runtimes+1):
write_result('\nrepeat_time = ' + str(repeat_time) + '\n')
# # for sampletrick with true new reuse
# # RUN THIS ALGORITHM TO PREPARE THE OP_DICT
# if ((methods == -1) or (methods == 1)):
# # write_result('result = dict()\n')
# write_result('result = all_results["sampletrick_truenewreuse"][repeat_time]\n')
# new_graph = ts.optimize_sampletrick_truenewreuse(graph, 3, alpha=1.05, budget=budget, print_subst = False, sample_size = 20)
# #record the peak memory
# write_result('result["memory"] = ' + str(get_memory()) + '\n')
# # write_result('all_results["sampletrick_truenewreuse"][repeat_time] = result\n')
# write_result('all_results["sysmlpartition"][repeat_time] = dict()\n')
if ((methods == -1) or (methods == 2)):
# write_result('result = dict()\n')
write_result('result = all_results["sysmlpartition"][repeat_time]\n')
threshold = 30
partitions = list()
#
start_time = timeit.default_timer()
ts.graph_partition(graph, threshold, partitions = partitions)
end_time = timeit.default_timer()
write_result('result["partition_time"] = ' + str(end_time-start_time) + '\n')
#
new_graph = ts.optimize_partition(graph, alpha = 1.05, budget = budget, print_subst = True, eraly_stop_num = -1, partitions = partitions)
#record the peak memory
write_result('result["memory"] = ' + str(get_memory()) + '\n')
# write_result('all_results["sysmlpartition"][repeat_time] = result\n')
# for sysmltrick without partition
if ((methods == -1) or (methods == 3)):
# write_result('result = dict()\n')
write_result('result = all_results["sysmltrick"][repeat_time]\n')
new_graph = ts.optimize_sysmltrick(graph, alpha = 1.05, budget = budget, print_subst = False, eraly_stop_num = -1)
#record the peak memory
write_result('result["memory"] = ' + str(get_memory()) + '\n')
# write_result('all_results["sysmltrick"][repeat_time] = result\n')
# for sampletrick
if ((methods == -1) or (methods == 4)):
# write_result('result = dict()\n')
write_result('result = all_results["sampletrick_optimized"][repeat_time]\n')
# new_graph = ts.optimize_sampletrick(graph, alpha=1.05, budget=budget, print_subst = False, sample_size = 20)
new_graph = ts.optimize_sampletrick_newreuse_2samplestep(graph, alpha=1.05, budget=budget, print_subst = False, sample_size = 20)
#record the peak memory
write_result('result["memory"] = ' + str(get_memory()) + '\n')
# write_result('all_results["sampletrick"][repeat_time] = result\n')
# # for reuse
# write_result('result = dict()\n')
# new_graph = ts.optimize_reuse(graph, alpha=1.05, budget=budget, print_subst = True)
# write_result('all_results["reuse"][repeat_time] = result\n')
# # for prune
# write_result('result = dict()\n')
# new_graph = ts.optimize_prune(graph, alpha=1.05, budget=budget, print_subst = True)
# write_result('all_results["prune"][repeat_time] = result\n')
# STORE THE RESULTS IN THE MODEL_RESULTS VAR
# write_result('\nmodel_results["nasneta' + 'b' + str(budget) + '"] = all_results\n')
|
the-stack_0_3225 | from Task import Task
from Settings import EvolvedConfig
from Interfaces import Evolved5gJenkinsApi
from Helper import Level
class JenkinsBase(Task):
def __init__(self, name, parent, params, logMethod):
super().__init__(name, parent, params, logMethod, None)
self.config = EvolvedConfig().JenkinsApi
self.client = None
def Run(self):
try:
self.client = self.getApiClient()
except Exception as e:
            self.Log(Level.ERROR, f"Unable to create Jenkins API client: {e}")
self.client = None
def getApiClient(self) -> Evolved5gJenkinsApi:
if not self.config.Enabled:
raise RuntimeError(f"Trying to run {self.name} Task while Jenkins API is not enabled")
return Evolved5gJenkinsApi(self.config.Host, self.config.Port,
self.config.User, self.config.Password)
class JenkinsJob(JenkinsBase):
def __init__(self, logMethod, parent, params):
super().__init__("Jenkins Job", parent, params, logMethod)
self.paramRules = {
'Instance': (None, True),
'Job': (None, True),
'GitUrl': (None, True),
'GitBranch': (None, True),
'Version': ('1.0', False),
'PublishKey': ('JenkinsJobId', False),
}
def Run(self):
super().Run()
if self.client is None: return
instance = self.params["Instance"]
job = self.params["Job"]
url = self.params["GitUrl"]
branch = self.params["GitBranch"]
version = self.params["Version"]
self.Log(Level.DEBUG,
f"Trying to trigger job '{job}' on instance '{instance}' ({url}|{branch}|{version})")
try:
jobId = self.client.TriggerJob(instance, job, url, branch, version)
self.Log(Level.INFO, f"Triggered '{job}'. Received Job Id: {jobId}")
self.Publish(self.params["PublishKey"], jobId)
except Exception as e:
self.Log(Level.ERROR, f"Unable to trigger job: {e}")
self.SetVerdictOnError()
class JenkinsStatus(JenkinsBase):
def __init__(self, logMethod, parent, params):
super().__init__("Jenkins Status", parent, params, logMethod)
self.paramRules = {
'JobId': (None, True),
'PublishKey': ('JenkinsJobStatus', False),
}
def Run(self):
super().Run()
if self.client is None: return
jobId = self.params['JobId']
try:
status, message = self.client.CheckJob(jobId)
message = message if message is not None else "<No details>"
self.Log(Level.INFO, f"Status of job '{jobId}': {status} ('{message}')")
self.Publish(self.params["PublishKey"], status)
except Exception as e:
self.Log(Level.ERROR, f"Unable to check job '{jobId}' status: {e}")
self.SetVerdictOnError()
|
the-stack_0_3226 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ==============================================================================
#
# author: Gabriele Girelli
# email: [email protected]
# version: 1.1.1dev
# date: 180308
# project: pre-processing sequencing data
#
# credits:
# Dr. F. Agostini for the nice chats and for providing an initial prototype.
#
# aim:
# Deduplicate FASTQ file: remove duplicate sequence by keeping the higher
# quality one. Moreover, remove reads with "N" in the initial portion of a
# read, if requested by the user.
#
# description:
# Initially, the records in the FASTQ are quickly counted with bash "wc -l".
# Then, the full FASTQ file is read and parsed with Bio.SeqIO. Each record is
# stored in plain text format alongside its quality in a dictionary, with its
# sequence as key. If "N" (i.e., any nucleotide) is found in the initial
# portion (user-defined) of a sequence, the sequence is discarded. Each
# sequence is compared to the encountered ones and replaces it if and only
# if its quality is higher (either sum or mean). It is also possible to
# manually set an upper limit of resident memory using the --max-mem option.
#
# notes:
# The current implementation requires less RAM than previous ones, and shorter
# times to compute. Instead of storing each FASTQ record as parsed, it stores
# them as plain text alongside sequence and its quality (minor redundancy).
# For a 20 GB plain FASTQ, approx. 15 GB of resident memory are required.
#
# ==============================================================================
# DEPENDENCIES =================================================================
import argparse
import binascii
from Bio import SeqIO # type: ignore
import gzip
import numpy as np
import os
import resource
from subprocess import check_output
import sys
from tqdm import tqdm # type: ignore
# PARAMETERS ===================================================================
# Add script description
parser = argparse.ArgumentParser(
description="""
author: Gabriele Girelli
email: [email protected]
version: 1.1.1dev
date: 180308
project: pre-processing sequencing data
credits:
Dr. F. Agostini for the nice chats and for providing an initial prototype.
aim:
Deduplicate FASTQ file: remove duplicate sequence by keeping the higher
quality one. Moreover, remove reads with "N" in the initial portion of a
read, if requested by the user.
description:
Initially, the records in the FASTQ are quickly counted with bash "wc -l".
Then, the full FASTQ file is read and parsed with Bio.SeqIO. Each record is
stored in plain text format alongside its quality in a dictionary, with its
sequence as key. If "N" (i.e., any nucleotide) is found in the initial
portion (user-defined) of a sequence, the sequence is discarded. Each
    sequence is compared to the encountered ones and replaces it if and only
if its quality is higher (either sum or mean). It is also possible to
manually set an upper limit of resident memory using the --max-mem option.
notes:
The current implementation requires less RAM than previous ones, and shorter
times to compute. Instead of storing each FASTQ record as parsed, it stores
them as plain text alongside sequence and its quality (minor redundancy).
For a 20 GB plain FASTQ, approx. 15 GB of resident memory are required.
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
# Add mandatory arguments
parser.add_argument(
"fastq",
type=str,
nargs=1,
help="""Path to input FASTQ file.
Both gzipped and plain FASTQ formats are supported""",
)
# Add arguments with default value
parser.add_argument(
"-n",
type=int,
nargs=1,
metavar="nt",
default=[0],
help="""Length [nt] of sequence initial portion to search for N.
Default: 0.""",
)
parser.add_argument(
"--max-mem",
type=int,
nargs=1,
metavar="MB",
help="""Upper limit (in MB) of resident memory for the deduplication
process. Use -1 for unlimited. Not compatible with MacOS. Default: -1.""",
default=[-1],
)
# Add flags
parser.add_argument(
"--use-mean-qual",
action="store_const",
dest="doMean",
const=True,
default=False,
help="Select sequences based on mean quality instead of quality sum.",
)
# Version flag
version = "1.1.1dev"
parser.add_argument(
"--version",
action="version",
version="%s v%s"
% (
sys.argv[0],
version,
),
)
# Parse arguments
args = parser.parse_args()
# Assign to in-script variables
ipath = args.fastq[0]
basename = os.path.splitext(os.path.basename(ipath))[0]
linker_length = args.n[0]
max_mem = args.max_mem[0]
if max_mem < 0:
max_mem = np.inf
def floatMean(x):
return float(np.mean(x))
def intMean(x):
return int(np.mean(x))
doMean = args.doMean
if doMean:
qCalc = floatMean
else:
qCalc = intMean
# FUNCTIONS ====================================================================
def get_mem():
# Memory profiling
# From https://goo.gl/HkfNpu
if sys.platform == "darwin":
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024.0 ** 2)
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0
def check_mem():
# Print memory profiling
print("%f MB" % (get_mem(),))
def set_default(v, default):
# Set default value for function argument
if v is None:
return default
return v
def is_gz_file(filepath):
# https://stackoverflow.com/a/47080739
with open(filepath, "rb") as test_f:
return binascii.hexlify(test_f.read(2)) == b"1f8b"
def write_output(oh, records):
"""
Write output after filtering.
Args:
oh (file/gzip): output file handle.
records (dict): records dictionary after filtering.
"""
for k in tqdm(list(records)):
# Pop to empty mem sooner
oh.write(records.pop(k)[0])
def cmp_record(rec, records, ncounter, linker_length):
"""
Compares a record to stored one, replace previous ones based on quality and
discard if "N" is present in the initial portion of the sequence.
Args:
rec (SeqRecord): single FASTQ record.
records (dict): records dictionary during filtering.
ncounter (int): number of records discarded due to "N".
linker_length (int): Length [nt] of sequence portion to search for N.
Returns:
dict: records dictionary after comparison.
"""
# Extract record's sequence, let's make it comfy
seq = str(rec.seq)
# Skip if N in linker sequence
if "N" not in seq[:linker_length]:
# Prepare record for storage
q = qCalc(rec.letter_annotations["phred_quality"])
if seq not in records.keys():
# Store record
records[seq] = (rec.format("fastq"), q)
elif q > records[seq][1]:
# Replace stored record
records[seq] = (rec.format("fastq"), q)
else:
ncounter += 1
return (records, ncounter)
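# Illustrative sketch of the rule above (hypothetical values): after seeing two
# reads with the same sequence "ACGT", `records` keeps only the higher-quality
# one, e.g. records["ACGT"] == ("@read2\nACGT\n+\nIIII\n", 40) if read2 scored
# 40 while the previously stored read scored 30.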
def log_result(ncounter, nrecs):
print("%d records removed due to presence of 'N'." % ncounter)
print("%d records after deduplication." % nrecs)
print("Peaked at %.1f MB of resident memory." % get_mem())
def run(ih, oh, linker_length, nrecs):
"""
Run the script on the input file: remove records with Ns in the initial
portion of the sequence, if linker_length is larger than 0.
Args:
ih (file/gzip): input file handle.
oh (file/gzip): output file handle.
linker_length (int): Length [nt] of sequence portion to search for N.
nrecs (int): expected number of records.
"""
# Read all records
print("Reading and filtering...")
records = {}
ncounter = 0
# Parse FASTQ records
gen = SeqIO.parse(ih, "fastq")
with tqdm(total=nrecs) as pbar:
for i in range(nrecs):
# Compare current record with stored ones
records, ncounter = cmp_record(next(gen), records, ncounter, linker_length)
# Update progress bar
pbar.update(1)
log_result(ncounter, len(records))
# Remove duplicates and write output
print("Writing...")
write_output(oh, records)
def run_mm(ih, oh, linker_length, nrecs, max_mem=None):
"""
Run the script on the input file: remove records with Ns in the initial
portion of the sequence, if linker_length is larger than 0.
Performs resident memory profiling with upper limit set by the user.
Args:
ih (file/gzip): input file handle.
oh (file/gzip): output file handle.
linker_length (int): Length [nt] of sequence portion to search for N.
nrecs (int): expected number of records.
max_mem (int): upper resident memory limit in MB.
"""
# Default memory limit to infinity
if max_mem < 0:
max_mem = None
max_mem = set_default(max_mem, np.inf)
# Read all records
print("Reading and filtering...")
records = {}
ncounter = 0
# Parse FASTQ records
gen = SeqIO.parse(ih, "fastq")
with tqdm(total=nrecs) as pbar:
for i in range(nrecs):
# Stop when the mem limit is hit
if get_mem() >= max_mem:
sys.exit("!ABORTED! Hit resident memory limit of %d MB." % (max_mem,))
# Compare current record with stored ones
records, ncounter = cmp_record(next(gen), records, ncounter, linker_length)
# Update progress bar
pbar.update(1)
log_result(ncounter, len(records))
# Remove duplicates and write output
print("Writing...")
write_output(oh, records)
# RUN ==========================================================================
# Log input --------------------------------------------------------------------
print("\n# fqdedup.py v%s - Single-end FASTQ deduplication" % version)
print("Input: %s" % (ipath,))
if is_gz_file(ipath):
print("! Gzipped FASTQ deduplication style.")
# Prepare to parse a gzipped FASTQ input
catter = "zcat"
opath = "%s/%s.dedup.gz" % (os.path.dirname(ipath), basename)
oh = gzip.open(opath, "wt")
ih = gzip.open(ipath, "rt")
else:
print("! Plain FASTQ deduplication style.")
# Prepare to parse a plain FASTQ input
catter = "cat"
opath = "%s/%s.dedup.fastq" % (os.path.dirname(ipath), basename)
oh = open(opath, "wt")
ih = open(ipath, "rt")
if doMean:
print("!Using average quality for sequence selection.")
else:
print("! Using quality sum for sequence selection.")
if 0 != linker_length:
print("! Discarding sequences with N in the first %d bases." % (linker_length,))
if np.inf != max_mem:
print("! Upper resident memory limit set to %d MB." % (max_mem,))
print()
# Count records in input -------------------------------------------------------
print("Counting records...")
nrecs = int(
int(check_output(["bash", "-c", "%s '%s' | wc -l" % (catter, ipath)])) / 4.0
)
print("> Found %d records." % (nrecs,))
# Run --------------------------------------------------------------------------
if np.inf == max_mem:
# No memory management
run(ih, oh, linker_length, nrecs)
else:
# With memory management
run_mm(ih, oh, linker_length, nrecs, max_mem)
# END ==========================================================================
################################################################################
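# Example invocation (illustrative; the file name is hypothetical):
#   ./fqdedup.py -n 20 --max-mem 8000 sample.fastq.gz
# This would discard reads with an N in the first 20 nt, cap resident memory at
# ~8000 MB, and write the deduplicated output next to the input as
# sample.fastq.dedup.gz (plain FASTQ inputs get a .dedup.fastq suffix instead).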
|
the-stack_0_3227 | """Tests for certbot.cli."""
import argparse
import unittest
import os
import tempfile
import mock
import six
from six.moves import reload_module # pylint: disable=import-error
from acme import challenges
from certbot import cli
from certbot import constants
from certbot import errors
from certbot.plugins import disco
import certbot.tests.util as test_util
from certbot.tests.util import TempDirTestCase
PLUGINS = disco.PluginsRegistry.find_all()
class TestReadFile(TempDirTestCase):
'''Test cli.read_file'''
_multiprocess_can_split_ = True
def test_read_file(self):
rel_test_path = os.path.relpath(os.path.join(self.tempdir, 'foo'))
self.assertRaises(
argparse.ArgumentTypeError, cli.read_file, rel_test_path)
test_contents = b'bar\n'
with open(rel_test_path, 'wb') as f:
f.write(test_contents)
path, contents = cli.read_file(rel_test_path)
self.assertEqual(path, os.path.abspath(path))
self.assertEqual(contents, test_contents)
class ParseTest(unittest.TestCase): # pylint: disable=too-many-public-methods
'''Test the cli args entrypoint'''
_multiprocess_can_split_ = True
def setUp(self):
reload_module(cli)
@staticmethod
def _unmocked_parse(*args, **kwargs):
"""Get result of cli.prepare_and_parse_args."""
return cli.prepare_and_parse_args(PLUGINS, *args, **kwargs)
@staticmethod
def parse(*args, **kwargs):
"""Mocks zope.component.getUtility and calls _unmocked_parse."""
with test_util.patch_get_utility():
return ParseTest._unmocked_parse(*args, **kwargs)
def _help_output(self, args):
"Run a command, and return the output string for scrutiny"
output = six.StringIO()
def write_msg(message, *args, **kwargs): # pylint: disable=missing-docstring,unused-argument
output.write(message)
with mock.patch('certbot.main.sys.stdout', new=output):
with test_util.patch_get_utility() as mock_get_utility:
mock_get_utility().notification.side_effect = write_msg
with mock.patch('certbot.main.sys.stderr'):
self.assertRaises(SystemExit, self._unmocked_parse, args, output)
return output.getvalue()
@mock.patch("certbot.cli.flag_default")
def test_cli_ini_domains(self, mock_flag_default):
tmp_config = tempfile.NamedTemporaryFile()
# use a shim to get ConfigArgParse to pick up tmp_config
shim = lambda v: constants.CLI_DEFAULTS[v] if v != "config_files" else [tmp_config.name]
mock_flag_default.side_effect = shim
namespace = self.parse(["certonly"])
self.assertEqual(namespace.domains, [])
tmp_config.write(b"domains = example.com")
tmp_config.flush()
namespace = self.parse(["certonly"])
self.assertEqual(namespace.domains, ["example.com"])
namespace = self.parse(["renew"])
self.assertEqual(namespace.domains, [])
def test_no_args(self):
namespace = self.parse([])
for d in ('config_dir', 'logs_dir', 'work_dir'):
self.assertEqual(getattr(namespace, d), cli.flag_default(d))
def test_install_abspath(self):
cert = 'cert'
key = 'key'
chain = 'chain'
fullchain = 'fullchain'
with mock.patch('certbot.main.install'):
namespace = self.parse(['install', '--cert-path', cert,
'--key-path', 'key', '--chain-path',
'chain', '--fullchain-path', 'fullchain'])
self.assertEqual(namespace.cert_path, os.path.abspath(cert))
self.assertEqual(namespace.key_path, os.path.abspath(key))
self.assertEqual(namespace.chain_path, os.path.abspath(chain))
self.assertEqual(namespace.fullchain_path, os.path.abspath(fullchain))
def test_help(self):
self._help_output(['--help']) # assert SystemExit is raised here
out = self._help_output(['--help', 'all'])
self.assertTrue("--configurator" in out)
self.assertTrue("how a certificate is deployed" in out)
self.assertTrue("--webroot-path" in out)
self.assertTrue("--text" not in out)
self.assertTrue("--dialog" not in out)
self.assertTrue("%s" not in out)
self.assertTrue("{0}" not in out)
self.assertTrue("--renew-hook" not in out)
out = self._help_output(['-h', 'nginx'])
if "nginx" in PLUGINS:
# may be false while building distributions without plugins
self.assertTrue("--nginx-ctl" in out)
self.assertTrue("--webroot-path" not in out)
self.assertTrue("--checkpoints" not in out)
out = self._help_output(['-h'])
self.assertTrue("letsencrypt-auto" not in out) # test cli.cli_command
if "nginx" in PLUGINS:
self.assertTrue("Use the Nginx plugin" in out)
else:
self.assertTrue("(the certbot nginx plugin is not" in out)
out = self._help_output(['--help', 'plugins'])
self.assertTrue("--webroot-path" not in out)
self.assertTrue("--prepare" in out)
self.assertTrue('"plugins" subcommand' in out)
# test multiple topics
out = self._help_output(['-h', 'renew'])
self.assertTrue("--keep" in out)
out = self._help_output(['-h', 'automation'])
self.assertTrue("--keep" in out)
out = self._help_output(['-h', 'revoke'])
self.assertTrue("--keep" not in out)
out = self._help_output(['--help', 'install'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
out = self._help_output(['--help', 'revoke'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
self.assertTrue("--reason" in out)
out = self._help_output(['-h', 'config_changes'])
self.assertTrue("--cert-path" not in out)
self.assertTrue("--key-path" not in out)
out = self._help_output(['-h'])
self.assertTrue(cli.SHORT_USAGE in out)
self.assertTrue(cli.COMMAND_OVERVIEW[:100] in out)
self.assertTrue("%s" not in out)
self.assertTrue("{0}" not in out)
def test_help_no_dashes(self):
self._help_output(['help']) # assert SystemExit is raised here
out = self._help_output(['help', 'all'])
self.assertTrue("--configurator" in out)
self.assertTrue("how a certificate is deployed" in out)
self.assertTrue("--webroot-path" in out)
self.assertTrue("--text" not in out)
self.assertTrue("--dialog" not in out)
self.assertTrue("%s" not in out)
self.assertTrue("{0}" not in out)
out = self._help_output(['help', 'install'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
out = self._help_output(['help', 'revoke'])
self.assertTrue("--cert-path" in out)
self.assertTrue("--key-path" in out)
def test_parse_domains(self):
short_args = ['-d', 'example.com']
namespace = self.parse(short_args)
self.assertEqual(namespace.domains, ['example.com'])
short_args = ['-d', 'trailing.period.com.']
namespace = self.parse(short_args)
self.assertEqual(namespace.domains, ['trailing.period.com'])
short_args = ['-d', 'example.com,another.net,third.org,example.com']
namespace = self.parse(short_args)
self.assertEqual(namespace.domains, ['example.com', 'another.net',
'third.org'])
long_args = ['--domains', 'example.com']
namespace = self.parse(long_args)
self.assertEqual(namespace.domains, ['example.com'])
long_args = ['--domains', 'trailing.period.com.']
namespace = self.parse(long_args)
self.assertEqual(namespace.domains, ['trailing.period.com'])
long_args = ['--domains', 'example.com,another.net,example.com']
namespace = self.parse(long_args)
self.assertEqual(namespace.domains, ['example.com', 'another.net'])
def test_preferred_challenges(self):
short_args = ['--preferred-challenges', 'http, tls-sni-01, dns']
namespace = self.parse(short_args)
expected = [challenges.HTTP01.typ,
challenges.TLSSNI01.typ, challenges.DNS01.typ]
self.assertEqual(namespace.pref_challs, expected)
short_args = ['--preferred-challenges', 'jumping-over-the-moon']
# argparse.ArgumentError makes argparse print more information
# to stderr and call sys.exit()
with mock.patch('sys.stderr'):
self.assertRaises(SystemExit, self.parse, short_args)
def test_server_flag(self):
namespace = self.parse('--server example.com'.split())
self.assertEqual(namespace.server, 'example.com')
def test_must_staple_flag(self):
short_args = ['--must-staple']
namespace = self.parse(short_args)
self.assertTrue(namespace.must_staple)
self.assertTrue(namespace.staple)
def test_no_gui(self):
args = ['renew', '--dialog']
stderr = six.StringIO()
with mock.patch('certbot.main.sys.stderr', new=stderr):
namespace = self.parse(args)
self.assertTrue(namespace.noninteractive_mode)
self.assertTrue("--dialog is deprecated" in stderr.getvalue())
def _check_server_conflict_message(self, parser_args, conflicting_args):
try:
self.parse(parser_args)
self.fail( # pragma: no cover
"The following flags didn't conflict with "
'--server: {0}'.format(', '.join(conflicting_args)))
except errors.Error as error:
self.assertTrue('--server' in str(error))
for arg in conflicting_args:
self.assertTrue(arg in str(error))
def test_staging_flag(self):
short_args = ['--staging']
namespace = self.parse(short_args)
self.assertTrue(namespace.staging)
self.assertEqual(namespace.server, constants.STAGING_URI)
short_args += '--server example.com'.split()
self._check_server_conflict_message(short_args, '--staging')
def _assert_dry_run_flag_worked(self, namespace, existing_account):
self.assertTrue(namespace.dry_run)
self.assertTrue(namespace.break_my_certs)
self.assertTrue(namespace.staging)
self.assertEqual(namespace.server, constants.STAGING_URI)
if existing_account:
self.assertTrue(namespace.tos)
self.assertTrue(namespace.register_unsafely_without_email)
else:
self.assertFalse(namespace.tos)
self.assertFalse(namespace.register_unsafely_without_email)
def test_dry_run_flag(self):
config_dir = tempfile.mkdtemp()
short_args = '--dry-run --config-dir {0}'.format(config_dir).split()
self.assertRaises(errors.Error, self.parse, short_args)
self._assert_dry_run_flag_worked(
self.parse(short_args + ['auth']), False)
self._assert_dry_run_flag_worked(
self.parse(short_args + ['certonly']), False)
self._assert_dry_run_flag_worked(
self.parse(short_args + ['renew']), False)
account_dir = os.path.join(config_dir, constants.ACCOUNTS_DIR)
os.mkdir(account_dir)
os.mkdir(os.path.join(account_dir, 'fake_account_dir'))
self._assert_dry_run_flag_worked(self.parse(short_args + ['auth']), True)
self._assert_dry_run_flag_worked(self.parse(short_args + ['renew']), True)
short_args += ['certonly']
self._assert_dry_run_flag_worked(self.parse(short_args), True)
short_args += '--server example.com'.split()
conflicts = ['--dry-run']
self._check_server_conflict_message(short_args, '--dry-run')
short_args += ['--staging']
conflicts += ['--staging']
self._check_server_conflict_message(short_args, conflicts)
def test_option_was_set(self):
key_size_option = 'rsa_key_size'
key_size_value = cli.flag_default(key_size_option)
self.parse('--rsa-key-size {0}'.format(key_size_value).split())
self.assertTrue(cli.option_was_set(key_size_option, key_size_value))
self.assertTrue(cli.option_was_set('no_verify_ssl', True))
config_dir_option = 'config_dir'
self.assertFalse(cli.option_was_set(
config_dir_option, cli.flag_default(config_dir_option)))
def test_encode_revocation_reason(self):
for reason, code in constants.REVOCATION_REASONS.items():
namespace = self.parse(['--reason', reason])
self.assertEqual(namespace.reason, code)
for reason, code in constants.REVOCATION_REASONS.items():
namespace = self.parse(['--reason', reason.upper()])
self.assertEqual(namespace.reason, code)
def test_force_interactive(self):
self.assertRaises(
errors.Error, self.parse, "renew --force-interactive".split())
self.assertRaises(
errors.Error, self.parse, "-n --force-interactive".split())
def test_deploy_hook_conflict(self):
with mock.patch("certbot.cli.sys.stderr"):
self.assertRaises(SystemExit, self.parse,
"--renew-hook foo --deploy-hook bar".split())
def test_deploy_hook_matches_renew_hook(self):
value = "foo"
namespace = self.parse(["--renew-hook", value,
"--deploy-hook", value,
"--disable-hook-validation"])
self.assertEqual(namespace.deploy_hook, value)
self.assertEqual(namespace.renew_hook, value)
def test_deploy_hook_sets_renew_hook(self):
value = "foo"
namespace = self.parse(
["--deploy-hook", value, "--disable-hook-validation"])
self.assertEqual(namespace.deploy_hook, value)
self.assertEqual(namespace.renew_hook, value)
def test_renew_hook_conflict(self):
with mock.patch("certbot.cli.sys.stderr"):
self.assertRaises(SystemExit, self.parse,
"--deploy-hook foo --renew-hook bar".split())
def test_renew_hook_matches_deploy_hook(self):
value = "foo"
namespace = self.parse(["--deploy-hook", value,
"--renew-hook", value,
"--disable-hook-validation"])
self.assertEqual(namespace.deploy_hook, value)
self.assertEqual(namespace.renew_hook, value)
def test_renew_hook_does_not_set_renew_hook(self):
value = "foo"
namespace = self.parse(
["--renew-hook", value, "--disable-hook-validation"])
self.assertEqual(namespace.deploy_hook, None)
self.assertEqual(namespace.renew_hook, value)
def test_max_log_backups_error(self):
with mock.patch('certbot.cli.sys.stderr'):
self.assertRaises(
SystemExit, self.parse, "--max-log-backups foo".split())
self.assertRaises(
SystemExit, self.parse, "--max-log-backups -42".split())
def test_max_log_backups_success(self):
value = "42"
namespace = self.parse(["--max-log-backups", value])
self.assertEqual(namespace.max_log_backups, int(value))
class DefaultTest(unittest.TestCase):
"""Tests for certbot.cli._Default."""
_multiprocess_can_split_ = True
def setUp(self):
# pylint: disable=protected-access
self.default1 = cli._Default()
self.default2 = cli._Default()
def test_boolean(self):
self.assertFalse(self.default1)
self.assertFalse(self.default2)
def test_equality(self):
self.assertEqual(self.default1, self.default2)
def test_hash(self):
self.assertEqual(hash(self.default1), hash(self.default2))
class SetByCliTest(unittest.TestCase):
"""Tests for certbot.set_by_cli and related functions."""
_multiprocess_can_split_ = True
def setUp(self):
reload_module(cli)
def test_webroot_map(self):
args = '-w /var/www/html -d example.com'.split()
verb = 'renew'
self.assertTrue(_call_set_by_cli('webroot_map', args, verb))
def test_report_config_interaction_str(self):
cli.report_config_interaction('manual_public_ip_logging_ok',
'manual_auth_hook')
cli.report_config_interaction('manual_auth_hook', 'manual')
self._test_report_config_interaction_common()
def test_report_config_interaction_iterable(self):
cli.report_config_interaction(('manual_public_ip_logging_ok',),
('manual_auth_hook',))
cli.report_config_interaction(('manual_auth_hook',), ('manual',))
self._test_report_config_interaction_common()
def _test_report_config_interaction_common(self):
"""Tests implied interaction between manual flags.
--manual implies --manual-auth-hook which implies
--manual-public-ip-logging-ok. These interactions don't actually
exist in the client, but are used here for testing purposes.
"""
args = ['--manual']
verb = 'renew'
for v in ('manual', 'manual_auth_hook', 'manual_public_ip_logging_ok'):
self.assertTrue(_call_set_by_cli(v, args, verb))
cli.set_by_cli.detector = None
args = ['--manual-auth-hook', 'command']
for v in ('manual_auth_hook', 'manual_public_ip_logging_ok'):
self.assertTrue(_call_set_by_cli(v, args, verb))
self.assertFalse(_call_set_by_cli('manual', args, verb))
def _call_set_by_cli(var, args, verb):
with mock.patch('certbot.cli.helpful_parser') as mock_parser:
with test_util.patch_get_utility():
mock_parser.args = args
mock_parser.verb = verb
return cli.set_by_cli(var)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
the-stack_0_3228 | import torch
from torch.nn.functional import cross_entropy
def check_accuracy(loader, model, device):
num_correct = 0
num_samples = 0
model.eval() # set model to evaluation mode
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device, dtype=torch.long)
scores = model(x)
_, preds = scores.max(1)
num_correct += (preds == y).sum()
num_samples += preds.size(0)
acc = float(num_correct) / num_samples
print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
def train(model, loader_train, loader_val, optimizer, device, epochs=1, log=False, print_every=100):
"""
Train a model on CIFAR-10 using the PyTorch Module API.
Inputs:
- model: A PyTorch Module giving the model to train.
- optimizer: An Optimizer object we will use to train the model
- epochs: (Optional) A Python integer giving the number of epochs to train for
Returns: Nothing, but prints model accuracies during training.
"""
model = model.to(device=device)
for _ in range(epochs):
for t, (x, y) in enumerate(loader_train):
model.train()
x = x.to(device=device)
y = y.to(device=device, dtype=torch.long)
scores = model(x)
loss = cross_entropy(scores, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if t % print_every == 0:
print('Iteration %d, loss = %.4f' % (t, loss.item()))
#check_accuracy(loader_val, model, device)
#print()
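# Illustrative end-to-end usage of these helpers (a sketch -- `model`, the data
# loaders, and `device` are assumed to be constructed by the caller):
#
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   train(model, loader_train, loader_val, optimizer, device, epochs=2)
#   check_accuracy(loader_val, model, device)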
def eval_model(loader, model, device):
model = model.to(device=device)
pred = []
groundtruth = []
model.eval() # set model to evaluation mode
with torch.no_grad():
for x, y in loader:
x = x.to(device=device)
y = y.to(device=device, dtype=torch.long)
scores = model(x)
_, preds = scores.max(1)
pred += preds.tolist()
groundtruth += y.tolist()
    return pred, groundtruth
|
the-stack_0_3229 |
# import the necessary packages
import win32gui
#import keyboard as keyboard
#import pygame as pygame
#import pythoncom
#import win32con
from PIL import ImageGrab, Image
from imutils.video import VideoStream, FPS
import numpy as np
import argparse
import imutils
import time
import cv2
import pyautogui
import logging
import keyboard
import destiny2_bot_ui_state
from destiny2_bot_osd import destiny2_bot_osd
# construct the argument parse and parse the arguments
# replace this with the url generated by the Wyze app
rtsp_url = "rtsp://wyzecampan:[email protected]/live"
ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image",
# default="test2.jpg", help="path to the input image")
# ap.add_argument("--cascade",
# default="opencv\data\haarcascades\haarcascade_frontalcatface_extended.xml",
# help="path to cat detector haar cascade")
ap.add_argument("-d", "--debug", action="store_true", default=False,
help="debugging output")
args = ap.parse_args()
if args.debug:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)
def main():
# initialize the video stream
# and initialize the FPS counter
#logging.info("starting video stream...")
frame = None
# src=0 is default web cam
#vs = VideoStream(src=0).start()
screenWidth, screenHeight = pyautogui.size()
screenRatio = screenWidth/screenHeight
logging.info("screenWith: {}x{}, format: {:.2f}:1".format(screenWidth,
screenHeight, screenRatio))
logging.info("Creating output window")
cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
# scaling the screen to 70% for second monitor...
cv2.resizeWindow('Output', (int(screenWidth*.70), int(screenHeight*.70)))
cv2.moveWindow('Output', -1440, 200)
try:
destiny_window = win32gui.FindWindow(None, "Destiny 2")
win32gui.SetForegroundWindow(destiny_window)
except:
logging.debug("Couldn't find Destiny 2 window, is it running?")
cv2.destroyAllWindows()
#exit(1)
osd = destiny2_bot_osd(screenWidth, screenHeight)
# Add keyboard hotkeys
keyboard.add_hotkey('ctrl+shift+a', osd.add_console, args=['ctrl+shift+a pressed'])
# START EVENT LOOP
while True:
# grab a screenshot of the desktop
frame = np.array(ImageGrab.grab(bbox=(0, 40,
screenWidth, screenHeight)))
osd.fps_update(frame)
osd.write_console(frame)
# show the output frame
# scale frame to window
rect = cv2.getWindowImageRect('Output')
im_scaled = cv2.resize(frame, (rect[2], rect[3]))
# convert frame back to RGB to display correctly
RGB_img = cv2.cvtColor(im_scaled, cv2.COLOR_BGR2RGB)
cv2.imshow("Output", RGB_img)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
logging.info("exiting")
cv2.destroyAllWindows()
if __name__=="__main__":
main()
|
the-stack_0_3230 | import gym
import numpy as np
from gym.spaces.box import Box
import pdb
# Taken from https://github.com/openai/universe-starter-agent
def create_atari_env(env_id):
env = gym.make(env_id)
return env
# process each frame
def _process_frame42(frame):
frame = frame[34:34 + 160, :160]
# Resize by half, then down to 42x42 (essentially mipmapping). If
# we resize directly we lose pixels that, when mapped to 42x42,
# aren't close enough to the pixel boundary.
frame = cv2.resize(frame, (80, 80))
frame = cv2.resize(frame, (42, 42))
frame = frame.mean(2)
frame = frame.astype(np.float32)
frame *= (1.0 / 255.0)
frame = np.reshape(frame, [1, 42, 42])
return frame
|
the-stack_0_3232 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        # Bottom-up DP for regex matching with '.' and '*':
        # matrix[i][j] is truthy iff the first i chars of s match the first j chars of p.
        # Both strings get a leading padding space so that index 0 means "empty".
        self.s = " "+s
        self.p = " "+p
        self.matrix = [[0]*len(self.p) for x in self.s]
        self.matrix[0][0]=1
for i in range(len(s)+1):
for j in range(1,len(p)+1):
if self.matched(i,j):
self.matrix[i][j]=1
return self.matrix[len(s)][len(p)]
    def matched(self,i:int,j:int)->bool:
        # A direct (or '.') match consumes one char from both s and p;
        # a '*' either drops the preceding "x*" pair (j-2) or lets it absorb s[i] (i-1).
        if self.equal(i, j):
            return self.matrix[i-1][j-1]
        elif self.p[j]=='*':
            if self.equal(i,j-1):
                return self.matrix[i][j-2] or self.matrix[i-1][j]
            else:
                return self.matrix[i][j-2]
def equal(self,i:int,j:int)->bool:
return i != 0 and self.p[j]== '.' or self.s[i]==self.p[j]
a=Solution()
print(a.isMatch("aa","a*"))
print(a.isMatch("ba","a*"))
print(a.isMatch("","*"))
print(a.isMatch("","."))
|
the-stack_0_3234 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from .mixins import deferred, Base
class PopulationSample(Base, db.Model):
__tablename__ = 'population_samples'
response_id = deferred(
db.Column(db.Integer, db.ForeignKey('responses.id'), nullable=False),
'PopulationSample')
population_document_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id')), 'PopulationSample')
population = deferred(db.Column(db.Integer), 'PopulationSample')
sample_worksheet_document_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id')), 'PopulationSample')
samples = deferred(db.Column(db.Integer), 'PopulationSample')
sample_evidence_document_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id')), 'PopulationSample')
_publish_attrs = [
'response',
'population_document',
'population',
'sample_worksheet_document',
'sample_evidence_document',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(PopulationSample, cls).eager_query()
return query.options(
orm.subqueryload('response'),
orm.subqueryload('population_document'),
orm.subqueryload('sample_worksheet_document'),
orm.subqueryload('sample_evidence_document'))
|
the-stack_0_3237 | """Common methods used across tests for Bond."""
from asyncio import TimeoutError as AsyncIOTimeoutError
from contextlib import nullcontext
from datetime import timedelta
from typing import Any, Dict, Optional
from homeassistant import core
from homeassistant.components.bond.const import DOMAIN as BOND_DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST, STATE_UNAVAILABLE
from homeassistant.setup import async_setup_component
from homeassistant.util import utcnow
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
def patch_setup_entry(domain: str, *, enabled: bool = True):
"""Patch async_setup_entry for specified domain."""
if not enabled:
return nullcontext()
return patch(f"homeassistant.components.bond.{domain}.async_setup_entry")
async def setup_bond_entity(
hass: core.HomeAssistant,
config_entry: MockConfigEntry,
*,
patch_version=False,
patch_device_ids=False,
patch_platforms=False,
):
"""Set up Bond entity."""
config_entry.add_to_hass(hass)
with patch_bond_version(enabled=patch_version), patch_bond_device_ids(
enabled=patch_device_ids
), patch_setup_entry("cover", enabled=patch_platforms), patch_setup_entry(
"fan", enabled=patch_platforms
), patch_setup_entry(
"light", enabled=patch_platforms
), patch_setup_entry(
"switch", enabled=patch_platforms
):
return await hass.config_entries.async_setup(config_entry.entry_id)
async def setup_platform(
hass: core.HomeAssistant,
platform: str,
discovered_device: Dict[str, Any],
bond_device_id: str = "bond-device-id",
props: Dict[str, Any] = None,
):
"""Set up the specified Bond platform."""
mock_entry = MockConfigEntry(
domain=BOND_DOMAIN,
data={CONF_HOST: "1.1.1.1", CONF_ACCESS_TOKEN: "test-token"},
)
mock_entry.add_to_hass(hass)
with patch("homeassistant.components.bond.PLATFORMS", [platform]):
with patch_bond_version(), patch_bond_device_ids(
return_value=[bond_device_id]
), patch_bond_device(
return_value=discovered_device
), patch_bond_device_state(), patch_bond_device_properties(
return_value=props
), patch_bond_device_state():
assert await async_setup_component(hass, BOND_DOMAIN, {})
await hass.async_block_till_done()
return mock_entry
def patch_bond_version(
enabled: bool = True, return_value: Optional[dict] = None, side_effect=None
):
"""Patch Bond API version endpoint."""
if not enabled:
return nullcontext()
if return_value is None:
return_value = {"bondid": "test-bond-id"}
return patch(
"homeassistant.components.bond.Bond.version",
return_value=return_value,
side_effect=side_effect,
)
def patch_bond_device_ids(enabled: bool = True, return_value=None, side_effect=None):
"""Patch Bond API devices endpoint."""
if not enabled:
return nullcontext()
if return_value is None:
return_value = []
return patch(
"homeassistant.components.bond.Bond.devices",
return_value=return_value,
side_effect=side_effect,
)
def patch_bond_device(return_value=None):
"""Patch Bond API device endpoint."""
return patch(
"homeassistant.components.bond.Bond.device", return_value=return_value,
)
def patch_bond_action():
"""Patch Bond API action endpoint."""
return patch("homeassistant.components.bond.Bond.action")
def patch_bond_device_properties(return_value=None):
"""Patch Bond API device properties endpoint."""
if return_value is None:
return_value = {}
return patch(
"homeassistant.components.bond.Bond.device_properties",
return_value=return_value,
)
def patch_bond_device_state(return_value=None, side_effect=None):
"""Patch Bond API device state endpoint."""
if return_value is None:
return_value = {}
return patch(
"homeassistant.components.bond.Bond.device_state",
return_value=return_value,
side_effect=side_effect,
)
async def help_test_entity_available(
hass: core.HomeAssistant, domain: str, device: Dict[str, Any], entity_id: str
):
"""Run common test to verify available property."""
await setup_platform(hass, domain, device)
assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
with patch_bond_device_state(side_effect=AsyncIOTimeoutError()):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
with patch_bond_device_state(return_value={}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
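# Illustrative use from a platform test (a sketch; the device dict and entity id
# are hypothetical and only follow the shape expected by the helpers above):
#
#   async def test_fan_available(hass):
#       device = {"name": "name-1", "type": "CF", "actions": ["SetSpeed"]}
#       await help_test_entity_available(hass, "fan", device, "fan.name_1")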
|
the-stack_0_3238 | import discord
import datetime
import pytz
from discord.ext import commands
from utils.bot import EpicBot
from config import MAIN_COLOR
from utils.time import convert_int_to_weekday
from utils.custom_checks import mutual_guild
from handler import slash_command, InteractionContext
stream_schedule = {
0: True, # Monday
1: False, # Tuesday
2: True, # Wednesday
    3: True,  # Thursday
4: False, # Friday
5: True, # Saturday
6: False # Sunday
}
live_text = "Ramaziz will be live today!"
not_live_text = "Ramaziz will not be live today!"
be_sure = "Be sure to check <#762550256918724640> in case of any stream cancellations!"
class RamTimeView(discord.ui.View):
def __init__(self, author_id: int, time_embed: discord.Embed, current_time: datetime.datetime):
super().__init__(timeout=None)
self.author_id = author_id
self.time_embed = time_embed
self.current_time = current_time
@discord.ui.button(label="Time", emoji='⏰', style=discord.ButtonStyle.blurple, disabled=True)
async def time(self, button: discord.ui.Button, interaction: discord.Interaction):
for item in self.children:
item.disabled = False
button.disabled = True
await interaction.message.edit(embed=self.time_embed, view=self)
@discord.ui.button(label="Stream Schedule", emoji='📝', style=discord.ButtonStyle.blurple)
async def stream_schedule(self, button: discord.ui.Button, interaction: discord.Interaction):
for item in self.children:
item.disabled = False
button.disabled = True
stream_schedule_embed = discord.Embed(
title="Stream Schedule",
description="Ramaziz's twitch stream schedule: **[Go follow!](https://twitch.tv/ramaziz)**",
color=MAIN_COLOR
).add_field(
name="Current Stream",
value=f"{live_text if stream_schedule[self.current_time.weekday()] else not_live_text}\n{be_sure}",
inline=False
).add_field(
name="Schedule",
value='\n'.join([f"**{convert_int_to_weekday(i)}** • {stream_schedule[i]}" for i in stream_schedule]),
inline=False
)
await interaction.message.edit(embed=stream_schedule_embed, view=self)
@discord.ui.button(label="Close menu", emoji='⏹️', style=discord.ButtonStyle.danger)
async def close(self, button: discord.ui.Button, interaction: discord.Interaction):
await interaction.message.delete()
async def interaction_check(self, interaction: discord.Interaction):
if interaction.user.id == self.author_id:
return True
else:
return await interaction.response.send_message("Not your command o_o", ephemeral=True)
class PrivateCmds(commands.Cog):
def __init__(self, client: EpicBot):
self.client = client
@commands.command(
aliases=['ram-time', 'time-ram', 'timeram', 'time_ram', 'ramaziztime', 'ramaziz_time', 'ramaziz-time', 'ramtime'],
help="Ever wonder what time is it for Ramaziz?"
)
@mutual_guild(719157704467152977)
@slash_command(name='ramtime', guild_ids=[719157704467152977, 749996055369875456], help="Check what time it is for Ramaziz!")
async def ram_time(self, ctx: InteractionContext):
dt_utc = datetime.datetime.now(tz=pytz.UTC)
dt_nzt = dt_utc.astimezone(pytz.timezone("NZ"))
time_embed = discord.Embed(title="⏰ Ram Time", color=MAIN_COLOR)
time_embed.add_field(name="Time", value=f"{dt_nzt.strftime('%I : %M : %S %p')}", inline=False)
time_embed.add_field(name="Date", value=f"{convert_int_to_weekday(dt_nzt.weekday())} | {dt_nzt.day} / {dt_nzt.month} / {dt_nzt.year}", inline=False)
view = RamTimeView(ctx.author.id, time_embed, dt_nzt)
await ctx.reply(embed=time_embed, view=view)
@slash_command(guild_ids=[746202728031584358], help="Very very secret command, don't tell Kitten btw! 👀")
async def kitten(self, ctx: InteractionContext):
await ctx.reply("Don't tell kitten 👀 but dogs are kinda cute uwu", ephemeral=True)
def setup(client: EpicBot):
client.add_cog(PrivateCmds(client))
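# Loading this cog from the bot entry point would look roughly like the line
# below (the module path is hypothetical):
#   client.load_extension("cogs.private_cmds")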
|
the-stack_0_3242 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Infrastructure of options for Koalas.
"""
from contextlib import contextmanager
import json
from typing import Union, Any, Tuple, Callable, List, Dict
from pyspark._globals import _NoValue, _NoValueType
from databricks.koalas.utils import default_session
__all__ = ["get_option", "set_option", "reset_option", "options", "option_context"]
class Option:
"""
Option class that defines an option with related properties.
This class holds all information relevant to the one option. Also,
Its instance can validate if the given value is acceptable or not.
It is currently for internal usage only.
Parameters
----------
key: str, keyword-only argument
the option name to use.
doc: str, keyword-only argument
the documentation for the current option.
default: Any, keyword-only argument
default value for this option.
types: Union[Tuple[type, ...], type], keyword-only argument
default is str. It defines the expected types for this option. It is
used with `isinstance` to validate the given value to this option.
check_func: Tuple[Callable[[Any], bool], str], keyword-only argument
default is a function that always returns `True` with a empty string.
It defines:
- a function to check the given value to this option
- the error message to show when this check is failed
When new value is set to this option, this function is called to check
if the given value is valid.
Examples
--------
>>> option = Option(
... key='option.name',
... doc="this is a test option",
... default="default",
... types=(float, int),
... check_func=(lambda v: v > 0, "should be a positive float"))
>>> option.validate('abc') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The value for option 'option.name' was <class 'str'>;
however, expected types are [(<class 'float'>, <class 'int'>)].
>>> option.validate(-1.1)
Traceback (most recent call last):
...
ValueError: should be a positive float
>>> option.validate(1.1)
"""
def __init__(
self,
*,
key: str,
doc: str,
default: Any,
types: Union[Tuple[type, ...], type] = str,
check_func: Tuple[Callable[[Any], bool], str] = (lambda v: True, "")
):
self.key = key
self.doc = doc
self.default = default
self.types = types
self.check_func = check_func
def validate(self, v: Any) -> None:
"""
Validate the given value and throw an exception with related information such as key.
"""
if not isinstance(v, self.types):
raise ValueError(
"The value for option '%s' was %s; however, expected types are "
"[%s]." % (self.key, type(v), str(self.types))
)
if not self.check_func[0](v):
raise ValueError(self.check_func[1])
# Available options.
#
# NOTE: if you are fixing or adding an option here, make sure you execute `show_options()` and
# copy & paste the results into show_options 'docs/source/user_guide/options.rst' as well.
# See the examples below:
# >>> from databricks.koalas.config import show_options
# >>> show_options()
_options = [
Option(
key="display.max_rows",
doc=(
"This sets the maximum number of rows Koalas should output when printing out "
"various output. For example, this value determines the number of rows to be "
"shown at the repr() in a dataframe. Set `None` to unlimit the input length. "
"Default is 1000."
),
default=1000,
types=(int, type(None)),
check_func=(
lambda v: v is None or v >= 0,
"'display.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="compute.max_rows",
doc=(
"'compute.max_rows' sets the limit of the current DataFrame. Set `None` to unlimit "
"the input length. When the limit is set, it is executed by the shortcut by "
"collecting the data into driver side, and then using pandas API. If the limit is "
"unset, the operation is executed by PySpark. Default is 1000."
),
default=1000,
types=(int, type(None)),
check_func=(
lambda v: v is None or v >= 0,
"'compute.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="compute.shortcut_limit",
doc=(
"'compute.shortcut_limit' sets the limit for a shortcut. "
"It computes specified number of rows and use its schema. When the dataframe "
"length is larger than this limit, Koalas uses PySpark to compute."
),
default=1000,
types=int,
check_func=(
lambda v: v >= 0,
"'compute.shortcut_limit' should be greater than or equal to 0.",
),
),
Option(
key="compute.ops_on_diff_frames",
doc=(
"This determines whether or not to operate between two different dataframes. "
"For example, 'combine_frames' function internally performs a join operation which "
"can be expensive in general. So, if `compute.ops_on_diff_frames` variable is not "
"True, that method throws an exception."
),
default=False,
types=bool,
),
Option(
key="compute.default_index_type",
doc=("This sets the default index type: sequence, distributed and distributed-sequence."),
default="sequence",
types=str,
check_func=(
lambda v: v in ("sequence", "distributed", "distributed-sequence"),
"Index type should be one of 'sequence', 'distributed', 'distributed-sequence'.",
),
),
Option(
key="compute.ordered_head",
doc=(
"'compute.ordered_head' sets whether or not to operate head with natural ordering. "
"Koalas does not guarantee the row ordering so `head` could return some rows from "
"distributed partitions. If 'compute.ordered_head' is set to True, Koalas performs "
"natural ordering beforehand, but it will cause a performance overhead."
),
default=False,
types=bool,
),
Option(
key="plotting.max_rows",
doc=(
"'plotting.max_rows' sets the visual limit on top-n-based plots such as `plot.bar` "
"and `plot.pie`. If it is set to 1000, the first 1000 data points will be used "
"for plotting. Default is 1000."
),
default=1000,
types=int,
check_func=(
            lambda v: v >= 0,
"'plotting.max_rows' should be greater than or equal to 0.",
),
),
Option(
key="plotting.sample_ratio",
doc=(
"'plotting.sample_ratio' sets the proportion of data that will be plotted for sample-"
"based plots such as `plot.line` and `plot.area`. "
"This option defaults to 'plotting.max_rows' option."
),
default=None,
types=(float, type(None)),
check_func=(
lambda v: v is None or 1 >= v >= 0,
"'plotting.sample_ratio' should be 1.0 >= value >= 0.0.",
),
),
Option(
key="plotting.backend",
doc=(
"Backend to use for plotting. Default is matplotlib. "
"Supports any package that has a top-level `.plot` method. "
"Some options are: [matplotlib, plotly, pandas_bokeh, pandas_altair]."
),
default="matplotlib",
types=str,
),
] # type: List[Option]
_options_dict = dict(zip((option.key for option in _options), _options)) # type: Dict[str, Option]
_key_format = "koalas.{}".format
class OptionError(AttributeError, KeyError):
pass
def show_options():
"""
Make a pretty table that can be copied and pasted into public documentation.
This is currently for an internal purpose.
Examples
--------
>>> show_options() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
================... =======... =====================...
Option Default Description
================... =======... =====================...
display.max_rows 1000 This sets the maximum...
...
================... =======... =====================...
"""
import textwrap
header = ["Option", "Default", "Description"]
row_format = "{:<31} {:<14} {:<53}"
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
print(row_format.format(*header))
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
for option in _options:
doc = textwrap.fill(option.doc, 53)
formatted = "".join([line + "\n" + (" " * 47) for line in doc.split("\n")]).rstrip()
print(row_format.format(option.key, repr(option.default), formatted))
print(row_format.format("=" * 31, "=" * 14, "=" * 53))
def get_option(key: str, default: Union[Any, _NoValueType] = _NoValue) -> Any:
"""
Retrieves the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
default : object
The default value if the option is not set yet. The value should be JSON serializable.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists and the default is not provided
"""
_check_option(key)
if default is _NoValue:
default = _options_dict[key].default
_options_dict[key].validate(default)
return json.loads(default_session().conf.get(_key_format(key), default=json.dumps(default)))
def set_option(key: str, value: Any) -> None:
"""
Sets the value of the specified option.
Parameters
----------
key : str
The key which should match a single option.
value : object
New value of option. The value should be JSON serializable.
Returns
-------
None
"""
_check_option(key)
_options_dict[key].validate(value)
default_session().conf.set(_key_format(key), json.dumps(value))
def reset_option(key: str) -> None:
"""
Reset one option to their default value.
Pass "all" as argument to reset all options.
Parameters
----------
key : str
If specified only option will be reset.
Returns
-------
None
"""
_check_option(key)
default_session().conf.unset(_key_format(key))
@contextmanager
def option_context(*args):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'compute.max_rows', 5):
... print(get_option('display.max_rows'), get_option('compute.max_rows'))
10 5
>>> print(get_option('display.max_rows'), get_option('compute.max_rows'))
1000 1000
"""
if len(args) == 0 or len(args) % 2 != 0:
raise ValueError("Need to invoke as option_context(pat, val, [(pat, val), ...]).")
opts = dict(zip(args[::2], args[1::2]))
orig_opts = {key: get_option(key) for key in opts}
try:
for key, value in opts.items():
set_option(key, value)
yield
finally:
for key, value in orig_opts.items():
set_option(key, value)
def _check_option(key: str) -> None:
if key not in _options_dict:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
class DictWrapper:
""" provide attribute-style access to a nested dict"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix:
prefix += "."
canonical_key = prefix + key
candidates = [
k for k in d.keys() if all(x in k.split(".") for x in canonical_key.split("."))
]
if len(candidates) == 1 and candidates[0] == canonical_key:
return set_option(canonical_key, val)
else:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix:
prefix += "."
canonical_key = prefix + key
candidates = [
k for k in d.keys() if all(x in k.split(".") for x in canonical_key.split("."))
]
if len(candidates) == 1 and candidates[0] == canonical_key:
return get_option(canonical_key)
elif len(candidates) == 0:
raise OptionError(
"No such option: '{}'. Available options are [{}]".format(
key, ", ".join(list(_options_dict.keys()))
)
)
else:
return DictWrapper(d, canonical_key)
def __dir__(self):
prefix = object.__getattribute__(self, "prefix")
d = object.__getattribute__(self, "d")
if prefix == "":
candidates = d.keys()
offset = 0
else:
candidates = [k for k in d.keys() if all(x in k.split(".") for x in prefix.split("."))]
offset = len(prefix) + 1 # prefix (e.g. "compute.") to trim.
return [c[offset:] for c in candidates]
options = DictWrapper(_options_dict)
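# Illustrative attribute-style access through the wrapper above (equivalent to
# get_option/set_option; shown as a sketch):
#
# >>> import databricks.koalas as ks
# >>> ks.options.display.max_rows = 100   # same as ks.set_option("display.max_rows", 100)
# >>> ks.options.display.max_rows         # same as ks.get_option("display.max_rows")
# 100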
|
the-stack_0_3243 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
MAIN_REQUIREMENTS = [
"airbyte-protocol",
"base-python",
"backoff==1.10.0",
"pendulum==1.2.0",
"requests==2.25.1",
]
TEST_REQUIREMENTS = ["pytest", "requests_mock==1.8.0"]
setup(
name="source_zendesk_talk",
description="Source implementation for Zendesk Talk.",
author="Airbyte",
author_email="[email protected]",
packages=find_packages(),
install_requires=MAIN_REQUIREMENTS + TEST_REQUIREMENTS,
package_data={"": ["*.json", "schemas/*.json"]},
)
|
the-stack_0_3247 | # Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module generates a docker environment for a job'''
from __future__ import division
from fabric.api import sudo, run, settings
from logging import getLogger
from os.path import join as join_path
from time import sleep
from tests.comparison.leopard.controller import (
SHOULD_BUILD_IMPALA,
SHOULD_LOAD_DATA,
SHOULD_PULL_DOCKER_IMAGE)
import random
import os
IMPALA_HOME = '/home/dev/Impala'
CORE_PATH = '/tmp/core_files'
DEFAULT_BRANCH_NAME = 'origin/cdh5-trunk'
DEFAULT_DOCKER_IMAGE_NAME = 'impala-desktop.ca.cloudera.com:5000/ubuntu-14.04:cdh5-trunk'
DOCKER_USER_NAME = 'dev'
NUM_START_ATTEMPTS = 50
NUM_FABRIC_ATTEMPTS = 50
LOG = getLogger('ImpalaDockerEnv')
def retry(func):
'''Retry decorator.'''
def wrapper(*args, **kwargs):
attempt_num = 0
while True:
attempt_num += 1
try:
return func(*args, **kwargs)
except:
LOG.exception('{0} exception [{1}] (try: {2})'.format(
func.__name__, args[0], attempt_num))
if attempt_num == NUM_FABRIC_ATTEMPTS:
raise
sleep_time = random.randint(1, attempt_num)
sleep(sleep_time)
return wrapper
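# Usage sketch for the retry helper above (illustrative only; in this module it
# is applied call-site style to fabric's run/sudo, e.g. retry(run)(command)).
# The wrapped callable is retried up to NUM_FABRIC_ATTEMPTS times with a short
# random sleep between attempts before the last exception is re-raised.
def _retry_usage_example():
    state = {'calls': 0}
    def flaky_command(label):
        state['calls'] += 1
        if state['calls'] < 3:
            raise RuntimeError('transient failure')
        return 'succeeded for {0} after {1} calls'.format(label, state['calls'])
    # Succeeds on the third attempt instead of propagating the first error.
    return retry(flaky_command)('demo')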
class ImpalaDockerEnv(object):
    '''Represents an Impala environment inside a Docker container. Used for starting
Impala, getting stack traces after a crash and keeping track of the ports on which SSH,
Postgres and Impala are running.
'''
def __init__(self, git_command):
self.ssh_port = None
self.impala_port = None
self.postgres_port = None
self.container_id = None
self.git_command = git_command
self.host = os.environ['TARGET_HOST']
self.host_username = os.environ['TARGET_HOST_USERNAME']
self.docker_image_name = os.environ.get(
'DOCKER_IMAGE_NAME', DEFAULT_DOCKER_IMAGE_NAME)
def stop_docker(self):
with settings(warn_only = True, host_string = self.host, user = self.host_username):
retry(sudo)('docker stop {0}'.format(self.container_id), pty=True)
retry(sudo)('docker rm {0}'.format(self.container_id), pty=True)
def start_new_container(self):
'''Starts a container with port forwarding for ssh, impala and postgres. '''
for _ in range(NUM_START_ATTEMPTS):
with settings(warn_only = True, host_string = self.host, user = self.host_username):
set_core_dump_location_command = \
"echo '/tmp/core_files/core.%e.%p' | sudo tee /proc/sys/kernel/core_pattern"
sudo(set_core_dump_location_command, pty=True)
port = random.randint(0, 999)
self.ssh_port = 55000 + port
self.impala_port = 56000 + port
self.postgres_port = 57000 + port
start_command = ''
if SHOULD_PULL_DOCKER_IMAGE:
start_command = 'docker pull {docker_image_name} && '.format(
docker_image_name = self.docker_image_name)
start_command += (
'docker run -d -t -p {postgres_port}:5432 -p {ssh_port}:22 '
'-p {impala_port}:21050 {docker_image_name} /bin/docker-boot-daemon').format(
ssh_port = self.ssh_port,
impala_port = self.impala_port,
postgres_port = self.postgres_port,
docker_image_name = self.docker_image_name)
try:
self.container_id = sudo(start_command, pty=True)
except:
LOG.exception('start_new_container')
if self.container_id is not None:
break
else:
LOG.error('Container failed to start after {0} attempts'.format(NUM_START_ATTEMPTS))
def get_git_hash(self):
        '''Returns the Git hash of the current commit.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
git_hash = retry(run)('cd {IMPALA_HOME} && git rev-parse --short HEAD'.format(
IMPALA_HOME = IMPALA_HOME))
return git_hash
def run_all(self):
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
run_all_command = (
'mkdir -p {CORE_PATH} && chmod 777 {CORE_PATH} && cd {IMPALA_HOME} '
'&& source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/bin/create-test-configuration.sh '
'&& {IMPALA_HOME}/testdata/bin/run-all.sh').format(
IMPALA_HOME = IMPALA_HOME,
CORE_PATH=CORE_PATH)
retry(run)(run_all_command, pty=False)
def build_impala(self):
        '''Fetches and builds Impala. If git_command is not present, the latest
        version is fetched by default.'''
build_command = None
if self.git_command:
build_command = (
'docker-boot && cd {IMPALA_HOME} && {git_command} '
'&& source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/buildall.sh -notests').format(
git_command = self.git_command,
IMPALA_HOME = IMPALA_HOME,
CORE_PATH = CORE_PATH)
elif SHOULD_BUILD_IMPALA:
build_command = (
'docker-boot && cd {IMPALA_HOME} '
'&& git fetch --all && git checkout origin/cdh5-trunk '
'&& source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/buildall.sh -notests').format(
IMPALA_HOME = IMPALA_HOME,
CORE_PATH = CORE_PATH)
if build_command:
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
result = retry(run)(build_command, pty=False)
LOG.info('Build Complete, Result: {0}'.format(result))
def load_data(self):
if SHOULD_LOAD_DATA:
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
self.start_impala()
load_command = '''cd {IMPALA_HOME} \
&& source bin/impala-config.sh \
&& ./tests/comparison/data_generator.py \
--use-postgresql --db-name=functional \
--migrate-table-names=alltypes,alltypestiny,alltypesagg migrate \
&& ./tests/comparison/data_generator.py --use-postgresql'''.format(
IMPALA_HOME=IMPALA_HOME)
result = retry(run)(load_command, pty=False)
def start_impala(self):
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
start_command = ('source {IMPALA_HOME}/bin/impala-config.sh '
'&& {IMPALA_HOME}/bin/start-impala-cluster.py').format(IMPALA_HOME = IMPALA_HOME)
result = retry(run)(start_command, pty=False)
return result
def is_impala_running(self):
'''Check that exactly 3 impalads are running inside the docker instance.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
return retry(run)('ps aux | grep impalad').count('/service/impalad') == 3
def get_stack(self):
'''Finds the newest core file and extracts the stack trace from it using gdb. '''
IMPALAD_PATH = '{IMPALA_HOME}/be/build/debug/service/impalad'.format(
IMPALA_HOME = IMPALA_HOME)
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
core_file_name = retry(run)('ls {0} -t1 | head -1'.format(CORE_PATH))
LOG.info('Core File Name: {0}'.format(core_file_name))
if 'core' not in core_file_name:
return None
core_full_path = join_path(CORE_PATH, core_file_name)
stack_trace = retry(run)('gdb {0} {1} --batch --quiet --eval-command=bt'.format(
IMPALAD_PATH, core_full_path))
self.delete_core_files()
return stack_trace
def delete_core_files(self):
'''Delete all core files. This is usually done after the stack was extracted.'''
with settings(
warn_only = True,
host_string = '{0}@{1}:{2}'.format(DOCKER_USER_NAME, self.host, self.ssh_port),
password = os.environ['DOCKER_PASSWORD']):
retry(run)('rm -f {0}/core.*'.format(CORE_PATH))
def prepare(self):
'''Create a new Impala Environment. Starts a docker container and builds Impala in it.
'''
self.start_new_container()
LOG.info('Container Started')
# Wait for the SSH service to start inside the docker instance. Usually takes 1
# second. This is simple and reliable. An alternative implementation is to poll with
# timeout if SSH was started.
sleep(10)
self.build_impala()
        try:
            result = self.run_all()
            LOG.info('Run All Complete, Result: {0}'.format(result))
        except Exception:
            LOG.exception('run_all failed')
self.load_data()
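# Illustrative driver (not part of the original module). It assumes the
# TARGET_HOST, TARGET_HOST_USERNAME and DOCKER_PASSWORD environment variables
# used above are set, and that the docker image is reachable from this host.
if __name__ == '__main__':
    env = ImpalaDockerEnv(git_command=None)
    try:
        env.prepare()                      # container + build + run-all + data load
        if not env.is_impala_running():
            env.start_impala()
        LOG.info('Impala ready at git hash {0}'.format(env.get_git_hash()))
    finally:
        env.stop_docker()                  # always clean up the container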
|
the-stack_0_3248 | import sys
class VirtualMachine:
def __init__(self, name, ram=1, cpu=1.3, hdd=100, os="debian"):
self.name = name
self.ram = ram
self.cpu = cpu
self.hdd = hdd
self.os = os
self.status = 0
self.proc = []
def stop(self):
self.status = 0
self.proc = []
def start(self):
self.status = 1
def suspend(self):
self.status = 2
def reboot(self):
self.stop()
self.start()
def run(self, pid, ram, cpu, hdd):
self.proc.append({
'pid' : pid,
'ram' : ram,
'cpu' : cpu,
'hdd' : hdd
}
)
        print(f' -Running process {pid}')
def ram_usage(self):
uso_ram = 0
for proceso in self.proc:
uso_ram += proceso['ram']
return round(uso_ram / self.ram * 100, 2)
def cpu_usage (self):
uso_cpu = 0
for proceso in self.proc:
uso_cpu += proceso['cpu']
return round(uso_cpu / self.cpu * 100, 2)
def hdd_usage (self):
uso_hdd = 0
for proceso in self.proc:
uso_hdd += proceso['hdd']
return round(uso_hdd / self.hdd * 100, 2)
def __str__(self):
estado = ''
if self.status == 0:
estado = 'Stopped'
elif self.status == 1:
estado = 'Running'
else:
estado = 'Suspended'
        return f'Name: {self.name} | OS: {self.os} | {estado} | RAM: {self.ram} | CPU: {self.cpu} | HDD: {self.hdd} | {self.ram_usage()}% RAM used | {self.cpu_usage()}% CPU used | {self.hdd_usage()}% HDD used'
if __name__ == '__main__':
    print('═════════════════')
    print('Virtual machine 1')
    print('═════════════════')
    print('1. Create the virtual machine Minas Tirith')
    vm1 = VirtualMachine('Minas Tirith', 8, 2.3, 380, 'ubuntu')
    print(vm1)
    print('2. Start the virtual machine')
    vm1.start()
    print(vm1)
    print('3. Launch processes 1, 4 and 7')
    vm1.run(1, 1.7, 0.3, 20)
    vm1.run(4, 4, 0.9, 100)
    vm1.run(7, 0.4, 1.1, 250)
    print(vm1)
    print('4. Stop the virtual machine')
vm1.stop()
print(vm1)
print(' ')
    print('═════════════════')
    print('Virtual machine 2')
    print('═════════════════')
    print('1. Create the virtual machine Rohan')
    vm2 = VirtualMachine('Rohan', 6, 1.9, 250, 'debian')
    print(vm2)
    print('2. Start the virtual machine')
    vm2.start()
    print(vm2)
    print('3. Launch processes 2, 5 and 8')
    vm2.run(2, 0.6, 0.7, 50)
    vm2.run(5, 2.1, 0.2, 75)
    vm2.run(8, 2.5, 0.4, 30)
    print(vm2)
    print('4. Stop the virtual machine')
vm2.stop()
print(vm2)
print(' ')
    print('═════════════════')
    print('Virtual machine 3')
    print('═════════════════')
    print('1. Create the virtual machine Rivendel')
    vm3 = VirtualMachine('Rivendel', 16, 3, 1000, 'opensuse')
    print(vm3)
    print('2. Start the virtual machine')
    vm3.start()
    print(vm3)
    print('3. Launch processes 3, 6 and 9')
    vm3.run(3, 2, 1, 25)
    vm3.run(6, 0.3, 0.5, 12)
    vm3.run(9, 1.4, 0.8, 65)
    print(vm3)
    print('4. Stop the virtual machine')
vm3.stop()
print(vm3)
|
the-stack_0_3250 | """
This file offers the methods to automatically retrieve the graph friendster.
The graph is automatically retrieved from the NetworkRepository repository.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def Friendster(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/networkrepository",
version: str = "latest",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the friendster graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "latest"
The version of the graph to retrieve.
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of friendster graph.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="Friendster",
repository="networkrepository",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
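# Usage sketch (not part of the generated module). Note that Friendster is a
# very large graph, so the first call downloads and preprocesses a lot of data.
if __name__ == "__main__":
    graph = Friendster(directed=False, verbose=2)
    # Printing an ensmallen Graph yields a textual report; the exact contents
    # depend on the installed ensmallen version.
    print(graph)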
|
the-stack_0_3251 | """
Common options for ``minidcos docker`` commands.
"""
from typing import Callable
import click
from dcos_e2e.backends import Docker
from dcos_e2e.node import Transport
def node_transport_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for node transport options.
"""
transports = {
'ssh': Transport.SSH,
'docker-exec': Transport.DOCKER_EXEC,
}
backend_default = Docker().transport
[default_option] = [
transport for transport in transports
if transports[transport] == backend_default
]
function = click.option(
'--transport',
type=click.Choice(sorted(transports.keys())),
callback=lambda ctx, param, value: transports[value],
default=default_option,
show_default=True,
envvar='MINIDCOS_DOCKER_TRANSPORT',
help=(
'The communication transport to use. '
'On macOS the SSH transport requires IP routing to be set up. '
'See "minidcos docker setup-mac-network". '
'It also requires the "ssh" command to be available. '
'This can be provided by setting the `MINIDCOS_DOCKER_TRANSPORT` '
'environment variable. '
'When using a TTY, different transports may use different line '
'endings.'
),
)(command) # type: Callable[..., None]
return function
def wait_for_dcos_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for waiting for DC/OS to be up.
"""
function = click.option(
'--wait-for-dcos',
is_flag=True,
help=(
'Wait for DC/OS after creating the cluster. '
'This is equivalent to using "minidcos docker wait" after this '
'command. '
'"minidcos docker wait" has various options available and so may '
'be more appropriate for your use case. '
'If the chosen transport is "docker-exec", this will skip HTTP '
'checks and so the cluster may not be fully ready.'
),
)(command) # type: Callable[..., None]
return function
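# Sketch of how the two decorators are meant to be stacked on a click command
# (``_example_create`` here is a hypothetical command, not part of the real CLI).
@click.command()
@node_transport_option
@wait_for_dcos_option
def _example_create(transport: Transport, wait_for_dcos: bool) -> None:
    """Hypothetical command showing the options injected by the decorators."""
    click.echo('transport={0} wait_for_dcos={1}'.format(transport, wait_for_dcos))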
|
the-stack_0_3252 | from django.core.urlresolvers import reverse
from django.core import mail
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import UserFactory
from oscar.test.testcases import WebTestCase
class TestAnAdmin(WebTestCase):
def setUp(self):
self.staff = UserFactory(is_staff=True, username='1234')
self.commtype = CommunicationEventType.objects.create(
name="Password reset",
category=CommunicationEventType.USER_RELATED)
def test_can_preview_an_email(self):
list_page = self.app.get(reverse('dashboard:comms-list'),
user=self.staff)
update_page = list_page.click('Edit')
form = update_page.form
form['email_subject_template'] = 'Hello {{ user.username }}'
form['email_body_template'] = 'Hello {{ user.username }}'
form['email_body_html_template'] = 'Hello {{ user.username }}'
preview = form.submit('show_preview')
self.assertTrue('Hello 1234' in preview.content.decode('utf8'))
def test_can_send_a_preview_email(self):
list_page = self.app.get(reverse('dashboard:comms-list'),
user=self.staff)
update_page = list_page.click('Edit')
form = update_page.form
form['email_subject_template'] = 'Hello {{ user.username }}'
form['email_body_template'] = 'Hello {{ user.username }}'
form['email_body_html_template'] = 'Hello {{ user.username }}'
form['preview_email'] = '[email protected]'
form.submit('send_preview')
self.assertEqual(len(mail.outbox), 1)
|
the-stack_0_3253 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import json
import six
from heat.api.aws import utils as aws_utils
from heat.common import exception
from heat.engine import function
from heat.engine import resource
class FindInMap(function.Function):
'''
A function for resolving keys in the template mappings.
Takes the form::
{ "Fn::FindInMap" : [ "mapping",
"key",
"value" ] }
'''
def __init__(self, stack, fn_name, args):
super(FindInMap, self).__init__(stack, fn_name, args)
try:
self._mapname, self._mapkey, self._mapvalue = self.args
except ValueError as ex:
raise KeyError(six.text_type(ex))
def result(self):
mapping = self.stack.t.maps[function.resolve(self._mapname)]
key = function.resolve(self._mapkey)
value = function.resolve(self._mapvalue)
return mapping[key][value]
class GetAZs(function.Function):
'''
A function for retrieving the availability zones.
Takes the form::
{ "Fn::GetAZs" : "<region>" }
'''
def result(self):
# TODO(therve): Implement region scoping
#region = function.resolve(self.args)
if self.stack is None:
return ['nova']
else:
return self.stack.get_availability_zones()
class ParamRef(function.Function):
'''
A function for resolving parameter references.
Takes the form::
{ "Ref" : "<param_name>" }
'''
def __init__(self, stack, fn_name, args):
super(ParamRef, self).__init__(stack, fn_name, args)
self.parameters = self.stack.parameters
def result(self):
param_name = function.resolve(self.args)
try:
return self.parameters[param_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=param_name,
key='unknown')
class ResourceRef(function.Function):
'''
A function for resolving resource references.
Takes the form::
{ "Ref" : "<resource_name>" }
'''
def _resource(self, path='unknown'):
resource_name = function.resolve(self.args)
try:
return self.stack[resource_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=resource_name,
key=path)
def dependencies(self, path):
return itertools.chain(super(ResourceRef, self).dependencies(path),
[self._resource(path)])
def result(self):
return self._resource().FnGetRefId()
def Ref(stack, fn_name, args):
'''
A function for resolving parameters or resource references.
Takes the form::
{ "Ref" : "<param_name>" }
or::
{ "Ref" : "<resource_name>" }
'''
if args in stack:
RefClass = ResourceRef
else:
RefClass = ParamRef
return RefClass(stack, fn_name, args)
class GetAtt(function.Function):
'''
A function for resolving resource attributes.
Takes the form::
{ "Fn::GetAtt" : [ "<resource_name>",
"<attribute_name" ] }
'''
def __init__(self, stack, fn_name, args):
super(GetAtt, self).__init__(stack, fn_name, args)
self._resource_name, self._attribute = self._parse_args()
def _parse_args(self):
try:
resource_name, attribute = self.args
except ValueError:
raise ValueError(_('Arguments to "%s" must be of the form '
'[resource_name, attribute]') % self.fn_name)
return resource_name, attribute
def _resource(self, path='unknown'):
resource_name = function.resolve(self._resource_name)
try:
return self.stack[resource_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=resource_name,
key=path)
def dependencies(self, path):
return itertools.chain(super(GetAtt, self).dependencies(path),
[self._resource(path)])
def validate(self):
super(GetAtt, self).validate()
res = self._resource()
attr = function.resolve(self._attribute)
if (type(res).FnGetAtt == resource.Resource.FnGetAtt and
attr not in res.attributes_schema.keys()):
raise exception.InvalidTemplateAttribute(
resource=self._resource_name, key=attr)
def result(self):
attribute = function.resolve(self._attribute)
r = self._resource()
if (r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME, r.UPDATE)):
return r.FnGetAtt(attribute)
else:
return None
class Select(function.Function):
'''
A function for selecting an item from a list or map.
Takes the form (for a list lookup)::
{ "Fn::Select" : [ "<index>", [ "<value_1>", "<value_2>", ... ] ] }
Takes the form (for a map lookup)::
{ "Fn::Select" : [ "<index>", { "<key_1>": "<value_1>", ... } ] }
If the selected index is not found, this function resolves to an empty
string.
'''
def __init__(self, stack, fn_name, args):
super(Select, self).__init__(stack, fn_name, args)
try:
self._lookup, self._strings = self.args
except ValueError:
raise ValueError(_('Arguments to "%s" must be of the form '
'[index, collection]') % self.fn_name)
def result(self):
index = function.resolve(self._lookup)
try:
index = int(index)
except (ValueError, TypeError):
pass
strings = function.resolve(self._strings)
if strings == '':
# an empty string is a common response from other
# functions when result is not currently available.
# Handle by returning an empty string
return ''
if isinstance(strings, basestring):
# might be serialized json.
try:
strings = json.loads(strings)
except ValueError as json_ex:
fmt_data = {'fn_name': self.fn_name,
'err': json_ex}
raise ValueError(_('"%(fn_name)s": %(err)s') % fmt_data)
if isinstance(strings, collections.Mapping):
if not isinstance(index, basestring):
raise TypeError(_('Index to "%s" must be a string') %
self.fn_name)
return strings.get(index, '')
if (isinstance(strings, collections.Sequence) and
not isinstance(strings, basestring)):
if not isinstance(index, (int, long)):
raise TypeError(_('Index to "%s" must be an integer') %
self.fn_name)
try:
return strings[index]
except IndexError:
return ''
if strings is None:
return ''
raise TypeError(_('Arguments to %s not fully resolved') %
self.fn_name)
class Join(function.Function):
'''
A function for joining strings.
Takes the form::
{ "Fn::Join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
'''
def __init__(self, stack, fn_name, args):
super(Join, self).__init__(stack, fn_name, args)
example = '"%s" : [ " ", [ "str1", "str2"]]' % self.fn_name
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (basestring, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim, self._strings = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
strings = function.resolve(self._strings)
if strings is None:
strings = []
if (isinstance(strings, basestring) or
not isinstance(strings, collections.Sequence)):
raise TypeError(_('"%s" must operate on a list') % self.fn_name)
delim = function.resolve(self._delim)
if not isinstance(delim, basestring):
raise TypeError(_('"%s" delimiter must be a string') %
self.fn_name)
def ensure_string(s):
if s is None:
return ''
if not isinstance(s, basestring):
raise TypeError(
_('Items to join must be strings %s') % (repr(s)[:200]))
return s
return delim.join(ensure_string(s) for s in strings)
class Split(function.Function):
'''
A function for splitting strings.
Takes the form::
{ "Fn::Split" : [ "<delim>", "<string_1><delim><string_2>..." ] }
And resolves to::
[ "<string_1>", "<string_2>", ... ]
'''
def __init__(self, stack, fn_name, args):
super(Split, self).__init__(stack, fn_name, args)
example = '"%s" : [ ",", "str1,str2"]]' % self.fn_name
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (basestring, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim, self._strings = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
strings = function.resolve(self._strings)
if not isinstance(self._delim, basestring):
raise TypeError(_("Delimiter for %s must be string") %
self.fn_name)
if not isinstance(strings, basestring):
raise TypeError(_("String to split must be string; got %s") %
type(strings))
return strings.split(self._delim)
class Replace(function.Function):
'''
A function for performing string substitutions.
Takes the form::
{ "Fn::Replace" : [
{ "<key_1>": "<value_1>", "<key_2>": "<value_2>", ... },
"<key_1> <key_2>"
] }
And resolves to::
"<value_1> <value_2>"
This is implemented using python str.replace on each key. The order in
which replacements are performed is undefined.
'''
def __init__(self, stack, fn_name, args):
super(Replace, self).__init__(stack, fn_name, args)
self._mapping, self._string = self._parse_args()
if not isinstance(self._mapping, collections.Mapping):
raise TypeError(_('"%s" parameters must be a mapping') %
self.fn_name)
def _parse_args(self):
example = ('{"%s": '
'[ {"$var1": "foo", "%%var2%%": "bar"}, '
'"$var1 is %%var2%%"]}' % self.fn_name)
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (basestring, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
mapping, string = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
else:
return mapping, string
def result(self):
template = function.resolve(self._string)
mapping = function.resolve(self._mapping)
if not isinstance(template, basestring):
raise TypeError(_('"%s" template must be a string') % self.fn_name)
if not isinstance(mapping, collections.Mapping):
raise TypeError(_('"%s" params must be a map') % self.fn_name)
def replace(string, change):
placeholder, value = change
if not isinstance(placeholder, basestring):
raise TypeError(_('"%s" param placeholders must be strings') %
self.fn_name)
if value is None:
value = ''
if not isinstance(value, (basestring, int, long, float, bool)):
raise TypeError(_('"%s" params must be strings or numbers') %
self.fn_name)
return string.replace(placeholder, unicode(value))
return reduce(replace, six.iteritems(mapping), template)
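# Worked example of the Fn::Replace semantics above (illustrative only; the
# placeholder names and values are made up). It mirrors Replace.result() with
# plain string operations instead of a parsed template.
def _replace_example():
    mapping = {'$NAME$': 'web01', '%PORT%': 8080}
    template = 'host=$NAME$ port=%PORT%'
    for placeholder, value in mapping.items():
        template = template.replace(placeholder, six.text_type(value))
    # Yields 'host=web01 port=8080'; the order of replacements is undefined,
    # so placeholders should not overlap.
    return template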
class Base64(function.Function):
'''
A placeholder function for converting to base64.
Takes the form::
{ "Fn::Base64" : "<string>" }
This function actually performs no conversion. It is included for the
benefit of templates that convert UserData to Base64. Heat accepts UserData
in plain text.
'''
def result(self):
resolved = function.resolve(self.args)
if not isinstance(resolved, basestring):
raise TypeError(_('"%s" argument must be a string') % self.fn_name)
return resolved
class MemberListToMap(function.Function):
'''
A function for converting lists containing enumerated keys and values to
a mapping.
Takes the form::
{ 'Fn::MemberListToMap' : [ 'Name',
'Value',
[ '.member.0.Name=<key_0>',
'.member.0.Value=<value_0>',
... ] ] }
And resolves to::
{ "<key_0>" : "<value_0>", ... }
The first two arguments are the names of the key and value.
'''
def __init__(self, stack, fn_name, args):
super(MemberListToMap, self).__init__(stack, fn_name, args)
try:
self._keyname, self._valuename, self._list = self.args
except ValueError:
correct = '''
{'Fn::MemberListToMap': ['Name', 'Value',
['.member.0.Name=key',
'.member.0.Value=door']]}
'''
raise TypeError(_('Wrong Arguments try: "%s"') % correct)
if not isinstance(self._keyname, basestring):
raise TypeError(_('%s Key Name must be a string') % self.fn_name)
if not isinstance(self._valuename, basestring):
raise TypeError(_('%s Value Name must be a string') % self.fn_name)
def result(self):
member_list = function.resolve(self._list)
if not isinstance(member_list, collections.Iterable):
raise TypeError(_('Member list must be a list'))
def item(s):
if not isinstance(s, basestring):
raise TypeError(_("Member list items must be strings"))
return s.split('=', 1)
partials = dict(item(s) for s in member_list)
return aws_utils.extract_param_pairs(partials,
prefix='',
keyname=self._keyname,
valuename=self._valuename)
class ResourceFacade(function.Function):
'''
A function for obtaining data from the facade resource from within the
corresponding provider template.
Takes the form::
{ "Fn::ResourceFacade": "<attribute_type>" }
where the valid attribute types are "Metadata", "DeletionPolicy" and
"UpdatePolicy".
'''
_RESOURCE_ATTRIBUTES = (
METADATA, DELETION_POLICY, UPDATE_POLICY,
) = (
'Metadata', 'DeletionPolicy', 'UpdatePolicy'
)
def __init__(self, stack, fn_name, args):
super(ResourceFacade, self).__init__(stack, fn_name, args)
if self.args not in self._RESOURCE_ATTRIBUTES:
fmt_data = {'fn_name': self.fn_name,
'allowed': ', '.join(self._RESOURCE_ATTRIBUTES)}
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be one of: %(allowed)s') % fmt_data)
def result(self):
attr = function.resolve(self.args)
if attr == self.METADATA:
return self.stack.parent_resource.metadata_get()
elif attr == self.UPDATE_POLICY:
up = self.stack.parent_resource.t.get('UpdatePolicy', {})
return function.resolve(up)
elif attr == self.DELETION_POLICY:
dp = self.stack.parent_resource.t.deletion_policy()
return function.resolve(dp)
|
the-stack_0_3254 | import hashlib
import itertools
import logging
from collections import OrderedDict
from typing import Any, Generator, Iterable, List, Mapping, Type, Union
logger = logging.getLogger(__name__)
def make_hash(data: Union[List, OrderedDict]) -> str:
return hashlib.md5(str(data).encode()).hexdigest()
def ensure_list(value: Any) -> List[Any]:
""" Convert or unpack any iterable into a list, with the exception of mappings.
If the passed value is either a mapping or not an iterable, it is returned
wrapped in a list.
Example:
>>> iterable = [1,2,3]
>>> ensure_iterable(iterable)
>>> [1,2,3]
>>> mapping = {"a": 1, "b": 2}
>>> ensure_iterable(mapping)
>>> [{"a": 1, "b": 2}]
>>> scalar = "hello world!"
>>> ensure_iterable(scalar)
>>> ["hello world!"]
"""
if isinstance(value, (Mapping, str)): # do not unpack dictionaries
return [value]
elif isinstance(value, Iterable):
return list(value)
else:
return [value]
def reduce(values: Iterable) -> Union[Iterable, Any]:
""" Reduce an iterable to a scalar if length is 1. Returns None if iterable
is empty. """
try:
while isinstance(values, Iterable) and not isinstance(values, (Mapping, str)):
values = list(values)
if len(values) <= 1:
values = values[0]
else:
break
return values
except IndexError:
return None
def chunks(iterable: Iterable, n: int = 1000, cls: Type = list) -> Generator:
""" Slice and unpack a nested iterable into a flat iterable containing a
maximum of n elements.
Arguments:
iterable {Iterable} -- items to process
Keyword Arguments:
n {int} -- max number of elements per chunk (default: 1000)
cls {Type} -- iterable type in which to cast chunks (default: list)
Yields:
Generator -- generator of iterables
"""
it = iter(iterable)
while True:
chunked = itertools.islice(it, n)
try:
first_element = next(chunked)
except StopIteration:
return
yield cls(itertools.chain((first_element,), chunked))
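# Quick illustration of the helpers above (not part of the original module).
if __name__ == "__main__":
    print(ensure_list({"a": 1}))        # [{'a': 1}] - mappings are wrapped, not unpacked
    print(ensure_list((1, 2, 3)))       # [1, 2, 3]  - other iterables are unpacked
    print(reduce([["only item"]]))      # 'only item' - single-element nesting collapses
    print(list(chunks(range(7), n=3)))  # [[0, 1, 2], [3, 4, 5], [6]]
    print(make_hash([1, 2, 3]))         # md5 hex digest of str([1, 2, 3])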
|
the-stack_0_3255 | import discord
from discord.ext import commands
from logger import getLogger
l = getLogger("main")
class Curation(commands.Cog, description="Information about curating games for Flashpoint."):
def __init__(self, bot):
self.bot = bot
@commands.command(name="curation", aliases=["ct", "curation-tutorial"], brief="Curation tutorial.",
description="Curation tutorial.")
async def curation_tutorial(self, ctx: discord.ext.commands.Context):
l.debug(
f"curation tutorial command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("Curation tutorial:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Curation_Tutorial>")
@commands.command(name="not-accepted", aliases=["notaccepted", "disallowed", "blacklist", "blacklisted", "na"],
brief="Not accepted curations.", description="A list of curations not accepted in Flashpoint.")
async def not_accepted(self, ctx: discord.ext.commands.Context):
l.debug(
f"not-accepted command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("These are games/animations not allowed in Flashpoint for any reason:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Not_Accepted_Curations>")
@commands.command(name="meta", aliases=["curation-format", "format", "metadata", "cf"], brief="Metadata file.")
async def meta(self, ctx: discord.ext.commands.Context):
l.debug(f"meta command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("List of Metadata Fields:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Curation_Format#List_of_Metadata_Fields>")
@commands.command(name="tags", brief="Tags in Flashpoint.", description="A list of tags in Flashpoint.")
async def tags(self, ctx: discord.ext.commands.Context):
l.debug(f"tags command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("List of Tags:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Tags>")
@commands.command(name="lang", aliases=["langs", "languages"], brief="Language codes.",
description="A list of ISO631-1 codes from Wikipedia.")
async def lang(self, ctx: discord.ext.commands.Context):
l.debug(f"lang command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("List of Language Codes:\n"
"🔗 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>")
@commands.command(name="edits", aliases=["pending", "fixes", "pendies"], brief="Pending fixes.",
description="Information about making metadata fixes.")
async def edits(self, ctx: discord.ext.commands.Context):
l.debug(f"edits command invoked from {ctx.author.id} in channel {ctx.channel.id} - {ctx.message.jump_url}")
await ctx.channel.send("Making metadata edits:\n"
"🔗 <https://bluemaxima.org/flashpoint/datahub/Metadata_Edits>")
def setup(bot: commands.Bot):
bot.add_cog(Curation(bot))
|
the-stack_0_3256 | """
Using a while loop with an and conditional
"""
if __name__ == "__main__":
i = 0
while( (input("Enter your name: ")!= "your name") and i < 10):
print("Nope sorry try again")
i = i + 1
print("Done with program") |
the-stack_0_3257 | # -*- coding: utf-8 -*-
from __future__ import print_function
import collections
from operator import itemgetter
import os.path
import oursql
import phpserialize as php
import wmflabs
def is_autopatrol(log_params):
p = php.loads(log_params)
return p["6::auto"] == 1
def get_patrol_stats(db, oldest_ts):
with db.cursor() as c:
c.execute("""select log_user_text, log_params from logging
where log_action = 'patrol' and log_timestamp > ?
order by log_timestamp
desc""",
params=[oldest_ts])
patrols = collections.Counter()
for log_user_text, log_params in c:
if not is_autopatrol(log_params):
patrols[log_user_text.decode("utf-8")] += 1
return collections.OrderedDict(reversed(sorted(patrols.items(), key=itemgetter(1))))
def connect(db_name, addr=None):
"""
Connect to database.
Args:
db_name (str): name of db without '_p' suffix
addr (tuple): tuple like (host, port)
"""
if addr is None:
return wmflabs.db.connect(db_name)
host, port = addr
port = int(port)
return oursql.connect(db=db_name + '_p',
host=host,
port=port,
read_default_file=os.path.expanduser("~/replica.my.cnf"),
charset=None,
use_unicode=False,
)
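# Usage sketch (not part of the original script; database name, host and
# timestamp are placeholders). Prints non-autopatrol patrol counts per user
# since the given MediaWiki timestamp.
if __name__ == '__main__':
    db = connect('enwiki')  # or connect('enwiki', ('<replica-host>', '3306'))
    try:
        for user, count in get_patrol_stats(db, '20240101000000').items():
            print(user, count)
    finally:
        db.close()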
|
the-stack_0_3259 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" PauliSumOp Class """
import logging
from typing import Dict, List, Optional, Set, Tuple, Union, cast
import numpy as np
from scipy.sparse import spmatrix
from qiskit.circuit import Instruction, ParameterExpression
from qiskit.quantum_info import Pauli, SparsePauliOp
from qiskit.quantum_info.operators.symplectic.pauli_table import PauliTable
from qiskit.quantum_info.operators.custom_iterator import CustomIterator
from ..exceptions import OpflowError
from ..list_ops.summed_op import SummedOp
from ..list_ops.tensored_op import TensoredOp
from ..operator_base import OperatorBase
from .primitive_op import PrimitiveOp
logger = logging.getLogger(__name__)
class PauliSumOp(PrimitiveOp):
"""Class for Operators backend by Terra's ``SparsePauliOp`` class."""
def __init__(
self,
primitive: SparsePauliOp,
coeff: Union[int, float, complex, ParameterExpression] = 1.0,
) -> None:
"""
Args:
primitive: The SparsePauliOp which defines the behavior of the underlying function.
coeff: A coefficient multiplying the primitive.
Raises:
TypeError: invalid parameters.
"""
if not isinstance(primitive, SparsePauliOp):
raise TypeError(
f"PauliSumOp can only be instantiated with SparsePauliOp, not {type(primitive)}"
)
super().__init__(primitive, coeff=coeff)
def primitive_strings(self) -> Set[str]:
return {"SparsePauliOp"}
@property
def num_qubits(self) -> int:
return self.primitive.num_qubits # type: ignore
@property
def coeffs(self):
"""Return the Pauli coefficients."""
return self.coeff * self.primitive.coeffs
def matrix_iter(self, sparse=False):
"""Return a matrix representation iterator.
This is a lazy iterator that converts each term in the PauliSumOp
into a matrix as it is used. To convert to a single matrix use the
:meth:`to_matrix` method.
Args:
sparse (bool): optionally return sparse CSR matrices if True,
otherwise return Numpy array matrices
(Default: False)
Returns:
MatrixIterator: matrix iterator object for the PauliTable.
"""
class MatrixIterator(CustomIterator):
"""Matrix representation iteration and item access."""
def __repr__(self):
return "<PauliSumOp_matrix_iterator at {}>".format(hex(id(self)))
def __getitem__(self, key):
sumopcoeff = self.obj.coeff * self.obj.primitive.coeffs[key]
mat = PauliTable._to_matrix(self.obj.primitive.table.array[key],
sparse=sparse)
return sumopcoeff * mat
return MatrixIterator(self)
def add(self, other: OperatorBase) -> OperatorBase:
if not self.num_qubits == other.num_qubits:
raise ValueError(
f"Sum of operators with different numbers of qubits, {self.num_qubits} and "
f"{other.num_qubits}, is not well defined"
)
if isinstance(other, PauliSumOp):
return PauliSumOp(
self.coeff * self.primitive + other.coeff * other.primitive, coeff=1 # type: ignore
)
from .pauli_op import PauliOp
if isinstance(other, PauliOp):
return PauliSumOp(
self.coeff * self.primitive # type: ignore
+ other.coeff * SparsePauliOp(other.primitive)
)
return SummedOp([self, other])
def mul(self, scalar: Union[int, float, complex, ParameterExpression]) -> OperatorBase:
if isinstance(scalar, (int, float, complex)) and scalar != 0:
return PauliSumOp(scalar * self.primitive, coeff=self.coeff) # type: ignore
return super().mul(scalar)
def adjoint(self) -> OperatorBase:
return PauliSumOp(
self.primitive.conjugate(), coeff=self.coeff.conjugate() # type:ignore
)
def equals(self, other: OperatorBase) -> bool:
self_reduced, other_reduced = self.reduce(), other.reduce()
if not isinstance(other_reduced, PauliSumOp):
return False
if isinstance(self_reduced.coeff, ParameterExpression) or isinstance(
other_reduced.coeff, ParameterExpression
):
return (
self_reduced.coeff == other_reduced.coeff
and self_reduced.primitive == other_reduced.primitive # type:ignore
)
return (
len(self_reduced) == len(other_reduced)
and self_reduced.primitive == other_reduced.primitive
)
def _expand_dim(self, num_qubits: int) -> "PauliSumOp":
return PauliSumOp(
self.primitive.tensor( # type:ignore
SparsePauliOp(Pauli("I" * num_qubits))
),
coeff=self.coeff,
)
def tensor(self, other: OperatorBase) -> OperatorBase:
if isinstance(other, PauliSumOp):
return PauliSumOp(
self.primitive.tensor(other.primitive), # type:ignore
coeff=self.coeff * other.coeff,
)
return TensoredOp([self, other])
def permute(self, permutation: List[int]) -> "PauliSumOp":
"""Permutes the sequence of ``PauliSumOp``.
Args:
permutation: A list defining where each Pauli should be permuted. The Pauli at index
j of the primitive should be permuted to position permutation[j].
Returns:
A new PauliSumOp representing the permuted operator. For operator (X ^ Y ^ Z) and
indices=[1,2,4], it returns (X ^ I ^ Y ^ Z ^ I).
Raises:
OpflowError: if indices do not define a new index for each qubit.
"""
if len(permutation) != self.num_qubits:
raise OpflowError("List of indices to permute must have the "
"same size as Pauli Operator")
length = max(permutation) + 1
spop = self.primitive.tensor( # type:ignore
SparsePauliOp(Pauli("I" * (length - self.num_qubits)))
)
permutation = [i for i in range(length) if i not in permutation] + permutation
permutation = np.arange(length)[np.argsort(permutation)]
permutation = np.hstack([permutation, permutation + length]) # type: ignore
spop.table.array = spop.table.array[:, permutation]
return PauliSumOp(spop, self.coeff)
def compose(
self,
other: OperatorBase,
permutation: Optional[List[int]] = None,
front: bool = False,
) -> OperatorBase:
new_self, other = self._expand_shorter_operator_and_permute(other, permutation)
new_self = cast(PauliSumOp, new_self)
if front:
return other.compose(new_self)
# If self is identity, just return other.
if not np.any(new_self.primitive.table.array): # type: ignore
return other * new_self.coeff * sum(new_self.coeffs) # type: ignore
# Both PauliSumOps
if isinstance(other, PauliSumOp):
return PauliSumOp(
new_self.primitive * other.primitive, # type:ignore
coeff=new_self.coeff * other.coeff,
)
# TODO: implement compose with PauliOp
# pylint: disable=cyclic-import,import-outside-toplevel
from ..state_fns.circuit_state_fn import CircuitStateFn
from .circuit_op import CircuitOp
if isinstance(other, (CircuitOp, CircuitStateFn)):
return new_self.to_pauli_op().to_circuit_op().compose(other) # type: ignore
return super(PauliSumOp, new_self).compose(other)
def to_matrix(self, massive: bool = False) -> np.ndarray:
OperatorBase._check_massive("to_matrix", True, self.num_qubits, massive)
if isinstance(self.coeff, ParameterExpression):
return (self.primitive.to_matrix(sparse=True)).toarray() * self.coeff # type: ignore
return (self.primitive.to_matrix(sparse=True) * self.coeff).toarray() # type: ignore
def __str__(self) -> str:
def format_sign(x):
return x.real if np.isreal(x) else x
def format_number(x):
x = format_sign(x)
if isinstance(x, (int, float)) and x < 0:
return f"- {-x}"
return f"+ {x}"
indent = "" if self.coeff == 1 else " "
prim_list = self.primitive.to_list() # type: ignore
if prim_list:
first = prim_list[0]
if isinstance(first[1], (int, float)) and first[1] < 0:
main_string = indent + f"- {-first[1].real} * {first[0]}"
else:
main_string = indent + f"{format_sign(first[1])} * {first[0]}"
main_string += "".join([f"\n{indent}{format_number(c)} * {p}" for p, c in prim_list[1:]])
return f"{main_string}" if self.coeff == 1 else f"{self.coeff} * (\n{main_string}\n)"
def eval(
self,
front: Optional[Union[str, Dict[str, complex], np.ndarray, OperatorBase]] = None,
) -> Union[OperatorBase, float, complex]:
if front is None:
return self.to_matrix_op()
# pylint: disable=import-outside-toplevel,cyclic-import
from ..list_ops.list_op import ListOp
from ..state_fns.circuit_state_fn import CircuitStateFn
from ..state_fns.dict_state_fn import DictStateFn
from ..state_fns.state_fn import StateFn
from .circuit_op import CircuitOp
from .pauli_op import PauliOp
# For now, always do this. If it's not performant, we can be more granular.
if not isinstance(front, OperatorBase):
front = StateFn(front, is_measurement=False)
if isinstance(front, ListOp) and front.distributive:
return front.combo_fn(
[self.eval(front.coeff * front_elem) for front_elem in front.oplist] # type: ignore
)
else:
if self.num_qubits != front.num_qubits:
raise ValueError(
"eval does not support operands with differing numbers of qubits, "
"{} and {}, respectively.".format(self.num_qubits, front.num_qubits)
)
if isinstance(front, DictStateFn):
new_dict = {} # type: Dict
corrected_x_bits = self.primitive.table.X[::-1] # type: ignore
corrected_z_bits = self.primitive.table.Z[::-1] # type: ignore
coeffs = self.primitive.coeffs # type:ignore
for bstr, v in front.primitive.items():
bitstr = np.asarray(list(bstr)).astype(np.int).astype(np.bool)
new_b_str = np.logical_xor(bitstr, corrected_x_bits)
new_str = ["".join(map(str, 1 * bs)) for bs in new_b_str]
z_factor = np.product(1 - 2 * np.logical_and(bitstr, corrected_z_bits), axis=1)
y_factor = np.product(
np.sqrt(1 - 2 * np.logical_and(corrected_x_bits, corrected_z_bits) + 0j),
axis=1,
)
for i, n_str in enumerate(new_str):
new_dict[n_str] = (
v * z_factor[i] * y_factor[i] * coeffs[i]
) + new_dict.get(n_str, 0)
return DictStateFn(new_dict, coeff=self.coeff * front.coeff)
elif isinstance(front, StateFn) and front.is_measurement:
raise ValueError("Operator composed with a measurement is undefined.")
# Composable types with PauliOp
elif isinstance(front, (PauliSumOp, PauliOp, CircuitOp, CircuitStateFn)):
return self.compose(front).eval() # type: ignore
# Covers VectorStateFn and OperatorStateFn
return self.to_matrix_op().eval(front.to_matrix_op()) # type: ignore
def exp_i(self) -> OperatorBase:
""" Return a ``CircuitOp`` equivalent to e^-iH for this operator H. """
# TODO: optimize for some special cases
from ..evolutions.evolved_op import EvolvedOp
return EvolvedOp(self)
def to_instruction(self) -> Instruction:
return self.to_matrix_op().to_circuit().to_instruction() # type: ignore
def to_pauli_op(self, massive: bool = False) -> OperatorBase:
from .pauli_op import PauliOp
def to_real(x):
return x.real if np.isreal(x) else x
def to_native(x):
return x.item() if isinstance(x, np.generic) else x
if len(self.primitive) == 1:
return PauliOp(
Pauli((self.primitive.table.Z[0], self.primitive.table.X[0])), # type: ignore
to_native(to_real(self.primitive.coeffs[0])) * self.coeff, # type: ignore
)
return SummedOp(
[
PauliOp(
Pauli((s.table.Z[0], s.table.X[0])),
to_native(to_real(s.coeffs[0])),
)
for s in self.primitive
],
coeff=self.coeff,
)
def __getitem__(self, offset: Union[int, slice]) -> "PauliSumOp":
"""Allows array-indexing style access to the ``PauliSumOp``.
Args:
offset: The index of ``PauliSumOp``.
Returns:
            The ``PauliSumOp`` at index ``offset``.
"""
return PauliSumOp(self.primitive[offset], self.coeff)
def __len__(self) -> int:
"""Length of ``SparsePauliOp``.
Returns:
An int equal to the length of SparsePauliOp.
"""
return len(self.primitive)
# pylint: disable=arguments-differ
def reduce(self, atol: Optional[float] = None, rtol: Optional[float] = None) -> "PauliSumOp":
"""Simplify the primitive ``SparsePauliOp``.
Args:
atol: Absolute tolerance for checking if coefficients are zero (Default: 1e-8).
rtol: Relative tolerance for checking if coefficients are zero (Default: 1e-5).
Returns:
The simplified ``PauliSumOp``.
"""
if isinstance(self.coeff, (int, float, complex)):
primitive = self.coeff * self.primitive # type: ignore
return PauliSumOp(primitive.simplify(atol=atol, rtol=rtol)) # type: ignore
return PauliSumOp(self.primitive.simplify(atol=atol, rtol=rtol), self.coeff) # type: ignore
def to_spmatrix(self) -> spmatrix:
"""Returns SciPy sparse matrix representation of the ``PauliSumOp``.
Returns:
CSR sparse matrix representation of the ``PauliSumOp``.
Raises:
ValueError: invalid parameters.
"""
return self.primitive.to_matrix(sparse=True) * self.coeff # type: ignore
@classmethod
def from_list(
cls,
pauli_list: List[Tuple[str, Union[int, float, complex]]],
coeff: Union[int, float, complex, ParameterExpression] = 1.0,
) -> "PauliSumOp":
"""Construct from a pauli_list with the form [(pauli_str, coeffs)]
Args:
pauli_list: A list of Tuple of pauli_str and coefficient.
coeff: A coefficient multiplying the primitive.
Returns:
The PauliSumOp constructed from the pauli_list.
"""
return cls(SparsePauliOp.from_list(pauli_list), coeff=coeff)
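# Usage sketch (illustrative, not part of the original class). Builds a small
# two-qubit operator and exercises a few of the methods defined above.
if __name__ == "__main__":
    op = PauliSumOp.from_list([("XX", 1.0), ("ZZ", 0.5), ("II", -0.25)])
    print(op)                    # pretty-printed weighted sum of Pauli strings
    print(op.num_qubits)         # 2
    print(op.to_matrix().shape)  # (4, 4)
    print((2 * op).coeffs)       # coefficients are scaled by the multiplier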
|
the-stack_0_3260 | from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='sample',
version='0.1.0',
description='RPyOpenCL',
long_description=readme,
author='Shazz',
author_email='[email protected]',
url='https://github.com/shazz/DistributedOpenCL',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
|
the-stack_0_3264 | import utils as util
import os
import ImgSplit_multi_process
import SplitOnlyImage_multi_process
import shutil
from multiprocessing import Pool
from DOTA2COCO import DOTA2COCOTest, DOTA2COCOTrain
import argparse
wordname_5 = ['1', '2', '3', '4', '5']
def parse_args():
parser = argparse.ArgumentParser(description='prepare dota1')
parser.add_argument('--srcpath', default='/media/adminer/data/Rocketforce/Little/')
parser.add_argument('--dstpath', default=r'/media/adminer/data/Rocketforce/Little_mmdet/',
help='prepare data')
args = parser.parse_args()
return args
def single_copy(src_dst_tuple):
shutil.copyfile(*src_dst_tuple)
def filecopy(srcpath, dstpath, num_process=32):
pool = Pool(num_process)
filelist = util.GetFileFromThisRootDir(srcpath)
name_pairs = []
for file in filelist:
basename = os.path.basename(file.strip())
dstname = os.path.join(dstpath, basename)
name_tuple = (file, dstname)
name_pairs.append(name_tuple)
pool.map(single_copy, name_pairs)
def single_move(src_dst_tuple):
shutil.move(*src_dst_tuple)
def filemove(srcpath, dstpath, num_process=32):
pool = Pool(num_process)
filelist = util.GetFileFromThisRootDir(srcpath)
name_pairs = []
for file in filelist:
basename = os.path.basename(file.strip())
dstname = os.path.join(dstpath, basename)
name_tuple = (file, dstname)
name_pairs.append(name_tuple)
    pool.map(single_move, name_pairs)
def getnamelist(srcpath, dstfile):
filelist = util.GetFileFromThisRootDir(srcpath)
with open(dstfile, 'w') as f_out:
for file in filelist:
basename = util.mybasename(file)
f_out.write(basename + '\n')
def prepare(srcpath, dstpath):
"""
:param srcpath: train, val, test
train --> trainval1024, val --> trainval1024, test --> test1024
:return:
"""
if not os.path.exists(os.path.join(dstpath, 'test1024_2')):
os.mkdir(os.path.join(dstpath, 'test1024_2'))
if not os.path.exists(os.path.join(dstpath, 'trainval1024')):
os.mkdir(os.path.join(dstpath, 'trainval1024'))
split_train = ImgSplit_multi_process.splitbase(os.path.join(srcpath, 'train'),
os.path.join(dstpath, 'trainval1024'),
gap=200,
subsize=1024,
num_process=32,
ext='.tif'
)
split_train.splitdata(1)
split_val = ImgSplit_multi_process.splitbase(os.path.join(srcpath, 'val'),
os.path.join(dstpath, 'trainval1024'),
gap=200,
subsize=1024,
num_process=32,
ext='.tif'
)
split_val.splitdata(1)
# split_test = SplitOnlyImage_multi_process.splitbase(os.path.join(srcpath, 'test2', 'images'),
# os.path.join(dstpath, 'test1024_2', 'images'),
# gap=200,
# subsize=1024,
# num_process=32,
# ext='.tif'
# )
# split_test.splitdata(1)
DOTA2COCOTrain(os.path.join(dstpath, 'trainval1024'), os.path.join(dstpath, 'trainval1024', 'DOTA_trainval1024.json'), wordname_5, difficult='-1')
# DOTA2COCOTest(os.path.join(dstpath, 'test1024_2'), os.path.join(dstpath, 'test1024_2', 'DOTA_test1024_2.json'), wordname_5)
if __name__ == '__main__':
args = parse_args()
srcpath = args.srcpath
dstpath = args.dstpath
prepare(srcpath, dstpath) |
the-stack_0_3268 | import pandas as pd
from os import path
import vowpalwabbit
import unittest
import platform
import math
import re
def helper_get_test_dir():
curr_path = path.dirname(path.realpath(__file__))
return path.join(path.dirname(path.dirname(curr_path)), "test")
def helper_get_data():
train_data = [
{
"action": 1,
"cost": 2,
"probability": 0.4,
"feature1": "a",
"feature2": "c",
"feature3": "",
},
{
"action": 3,
"cost": 0,
"probability": 0.2,
"feature1": "b",
"feature2": "d",
"feature3": "",
},
{
"action": 4,
"cost": 1,
"probability": 0.5,
"feature1": "a",
"feature2": "b",
"feature3": "",
},
{
"action": 2,
"cost": 1,
"probability": 0.3,
"feature1": "a",
"feature2": "b",
"feature3": "c",
},
{
"action": 3,
"cost": 1,
"probability": 0.7,
"feature1": "a",
"feature2": "d",
"feature3": "",
},
]
train_df = pd.DataFrame(train_data)
train_df["index"] = range(1, len(train_df) + 1)
train_df = train_df.set_index("index")
test_data = [
{"feature1": "b", "feature2": "c", "feature3": ""},
{"feature1": "a", "feature2": "", "feature3": "b"},
{"feature1": "b", "feature2": "b", "feature3": ""},
{"feature1": "a", "feature2": "", "feature3": "b"},
]
test_df = pd.DataFrame(test_data)
# Add index to data frame
test_df["index"] = range(1, len(test_df) + 1)
test_df = test_df.set_index("index")
return train_df, test_df
def test_getting_started_example_cb():
return helper_getting_started_example("--cb")
def test_getting_started_example_legacy_cb():
return helper_getting_started_example("--cb_force_legacy --cb")
# Returns true if they are close enough to be considered equal.
def are_floats_equal(float_one_str: str, float_two_str: str, epsilon: float) -> bool:
float_one = float(float_one_str)
float_two = float(float_two_str)
# Special case handle these two as they will not be equal when checking absolute difference.
# But for the purposes of comparing the diff they are equal.
if math.isinf(float_one) and math.isinf(float_two):
return True
if math.isnan(float_one) and math.isnan(float_two):
return True
delta = abs(float_one - float_two)
if delta < epsilon:
return True
# Large number comparison code migrated from Perl RunTests
# We have a 'big enough' difference, but this difference
# may still not be meaningful in all contexts. Big numbers should be compared by ratio rather than
# by difference
# Must ensure we can divide (avoid div-by-0)
# If numbers are so small (close to zero),
# ($delta > $Epsilon) suffices for deciding that
# the numbers are meaningfully different
if abs(float_two) <= 1.0:
return False
# Now we can safely divide (since abs($word2) > 0) and determine the ratio difference from 1.0
ratio_delta = abs(float_one / float_two - 1.0)
return ratio_delta < epsilon
def is_float(value: str) -> bool:
try:
float(value)
return True
except ValueError:
return False
def is_line_different(output_line: str, ref_line: str, epsilon: float) -> bool:
output_tokens = re.split("[ \t:,@]+", output_line)
ref_tokens = re.split("[ \t:,@]+", ref_line)
if len(output_tokens) != len(ref_tokens):
return True
for output_token, ref_token in zip(output_tokens, ref_tokens):
output_is_float = is_float(output_token)
ref_is_float = is_float(ref_token)
if output_is_float and ref_is_float:
are_equal = are_floats_equal(output_token, ref_token, epsilon)
if not are_equal:
return True
else:
if output_token != ref_token:
return True
return False
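# Small illustration of the diff helpers above (not part of the test suite):
# numeric tokens are compared with an epsilon, everything else must match exactly.
def _diff_helpers_example():
    assert not is_line_different("average loss = 0.666670", "average loss = 0.666668", 0.001)
    assert is_line_different("average loss = 0.5", "average loss = 0.7", 0.001)
    assert is_line_different("average loss", "total loss", 0.001)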
@unittest.skipIf(
platform.machine() == "aarch64", "skipping due to floating-point error on aarch64"
)
def helper_getting_started_example(which_cb):
train_df, test_df = helper_get_data()
vw = vowpalwabbit.Workspace(which_cb + " 4 --log_level off", enable_logging=True)
for i in train_df.index:
action = train_df.loc[i, "action"]
cost = train_df.loc[i, "cost"]
probability = train_df.loc[i, "probability"]
feature1 = train_df.loc[i, "feature1"]
feature2 = train_df.loc[i, "feature2"]
feature3 = train_df.loc[i, "feature3"]
learn_example = (
str(action)
+ ":"
+ str(cost)
+ ":"
+ str(probability)
+ " | "
+ str(feature1)
+ " "
+ str(feature2)
+ " "
+ str(feature3)
)
vw.learn(learn_example)
assert (
vw.get_prediction_type() == vw.pMULTICLASS
), "prediction_type should be multiclass"
for j in test_df.index:
feature1 = test_df.loc[j, "feature1"]
feature2 = test_df.loc[j, "feature2"]
feature3 = test_df.loc[j, "feature3"]
choice = vw.predict(
"| " + str(feature1) + " " + str(feature2) + " " + str(feature3)
)
assert isinstance(choice, int), "choice should be int"
assert choice == 3, "predicted action should be 3 instead of " + str(choice)
# test that metrics is empty since "--extra_metrics filename" was not supplied
assert len(vw.get_learner_metrics()) == 0
vw.finish()
output = vw.get_log()
if which_cb.find("legacy") != -1:
test_file = "test-sets/ref/python_test_cb_legacy.stderr"
else:
test_file = "test-sets/ref/python_test_cb.stderr"
with open(path.join(helper_get_test_dir(), test_file), "r") as file:
expected = file.readlines()
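    # Compare the captured driver log line by line against the reference stderr, ignoring '...' and tolerating small float differences.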
for expected_line, output_line in zip(expected, output):
output_line = output_line.replace("...", "").strip()
expected_line = expected_line.replace("...", "").strip()
assert not is_line_different(output_line, expected_line, 0.001)
def test_getting_started_example_with():
train_df, test_df = helper_get_data()
# with syntax calls into vw.finish() automatically.
# you actually want to use 'with vowpalwabbit.Workspace("--cb 4") as vw:'
# but we need to assert on vw.finished for test purposes
vw = vowpalwabbit.Workspace("--cb 4")
with vw as vw:
for i in train_df.index:
action = train_df.loc[i, "action"]
cost = train_df.loc[i, "cost"]
probability = train_df.loc[i, "probability"]
feature1 = train_df.loc[i, "feature1"]
feature2 = train_df.loc[i, "feature2"]
feature3 = train_df.loc[i, "feature3"]
learn_example = (
str(action)
+ ":"
+ str(cost)
+ ":"
+ str(probability)
+ " | "
+ str(feature1)
+ " "
+ str(feature2)
+ " "
+ str(feature3)
)
vw.learn(learn_example)
assert (
vw.get_prediction_type() == vw.pMULTICLASS
), "prediction_type should be multiclass"
for j in test_df.index:
feature1 = test_df.loc[j, "feature1"]
feature2 = test_df.loc[j, "feature2"]
feature3 = test_df.loc[j, "feature3"]
choice = vw.predict(
"| " + str(feature1) + " " + str(feature2) + " " + str(feature3)
)
assert isinstance(choice, int), "choice should be int"
assert choice == 3, "predicted action should be 3"
assert vw.finished == True, "with syntax should finish() vw instance"
|
the-stack_0_3271 | # -*- coding: utf-8 -*-
from datetime import datetime
import requests
from shipane_sdk.base_quant_client import BaseQuantClient
from shipane_sdk.joinquant.transaction import JoinQuantTransaction
class JoinQuantClient(BaseQuantClient):
BASE_URL = 'https://www.joinquant.com'
def __init__(self, **kwargs):
super(JoinQuantClient, self).__init__('JoinQuant')
self._session = requests.Session()
self._username = kwargs.get('username', None)
self._password = kwargs.get('password', None)
self._backtest_id = kwargs.get('backtest_id', None)
def login(self):
self._session.headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36',
'Referer': '{}/user/login/index'.format(self.BASE_URL),
'X-Requested-With': 'XMLHttpRequest',
'Origin': self.BASE_URL,
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
}
self._session.get(self.BASE_URL)
response = self._session.post('{}/user/login/doLogin?ajax=1'.format(self.BASE_URL), data={
'CyLoginForm[username]': self._username,
'CyLoginForm[pwd]': self._password,
'ajax': 1
})
self._session.headers.update({
'cookie': response.headers['Set-Cookie']
})
super(JoinQuantClient, self).login()
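    # Fetch today's transaction details for the configured backtest and normalize each record.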
def query(self):
today_str = datetime.today().strftime('%Y-%m-%d')
response = self._session.get('{}/algorithm/live/transactionDetail'.format(self.BASE_URL), params={
'backtestId': self._backtest_id,
'date': today_str,
'ajax': 1
})
transaction_detail = response.json()
raw_transactions = transaction_detail['data']['transaction']
transactions = []
for raw_transaction in raw_transactions:
transaction = JoinQuantTransaction(raw_transaction).normalize()
transactions.append(transaction)
return transactions
|
the-stack_0_3272 | #!/usr/bin/env python
# Description: access topcons.net via WSDL service
# Copyright Nanjiang Shu ([email protected])
from __future__ import print_function
import os
import sys
import argparse
progname = os.path.basename(sys.argv[0])
wspace = ''.join([" "]*len(progname))
no_suds_message="""\
suds is not installed!
Please install suds by
$ pip install suds (for Python2)
$ pip install suds-jurko (for Python3)
"""
try:
from suds.client import Client
except ImportError:
print(no_suds_message, file=sys.stderr)
sys.exit(1)
try:
    from urllib import urlretrieve  # Python 2
except ImportError:
    from urllib.request import urlretrieve  # Python 3
MAX_FILESIZE_IN_MB = 9
MAX_FILESIZE = MAX_FILESIZE_IN_MB*1024*1024
def ReadFile(infile, mode="r"):#{{{
try:
fpin = open(infile, mode)
content = fpin.read()
fpin.close()
return content
except IOError:
print("Failed to read file %s with mode '%s'"%(infile, mode), file=sys.stderr)
return ""
#}}}
def main(g_params):#{{{
wsdl_url = "https://topcons.net/pred/api_submitseq/?wsdl"
parser = argparse.ArgumentParser(
description='Access topcons2 web-server (https://topcons.net) through WSDL service ',
#formatter_class=argparse.RawDescriptionHelpFormatter,
formatter_class=argparse.RawTextHelpFormatter,
epilog='''\
Created 2015-02-04, updated 2018-01-12, Nanjiang Shu
Examples:
# submit test.fa with jobname 'test' to the server
%s -m submit -seq test.fa -jobname test
# try to retrieve the result for jobid 'rst_TTT' and save it to the current directory
%s -m get -jobid rst_TTT
'''%(progname, progname))
parser.add_argument('-m', action='store',
dest='mode', default='submit', choices=['submit','get'], required=True,
help='Set the mode of API\nsubmit - submit a job to WSDL\nget - retrieve the result from the server')
parser.add_argument('-seq', metavar='FILE', dest='seqfile',
help='Supply input sequence in FASTA format')
parser.add_argument('-jobname', metavar='STR', dest='jobname',
help='Give the job a name')
parser.add_argument('-jobid', metavar='STR', dest='jobid',
help='Retrieve the result by supplying a valid jobid')
parser.add_argument('-email', metavar='STR', dest='email',
help='Send a notification to the email when the result is ready')
parser.add_argument('-outpath', metavar='DIR', dest='outpath',
help='Save the retrieved data to outpath, (default: ./)')
args = parser.parse_args()
mode = args.mode
jobid = ""
email = ""
jobname = ""
fixtopfile = ""
seqfile = ""
outpath = "."
if args.jobid != None:
jobid = args.jobid
if args.email != None:
email = args.email
if args.jobname != None:
jobname = args.jobname
if args.seqfile != None:
seqfile = args.seqfile
if args.outpath != None:
outpath = args.outpath
if mode == "submit":
if seqfile == "":
print("You want to submit a job but seqfile is not set. Exit!", file=sys.stderr)
return 1
elif not os.path.exists(seqfile):
print("seqfile %s does not exist. Exit!"%(seqfile),file=sys.stderr)
return 1
try:
filesize = os.path.getsize(seqfile)
except OSError:
print("failed to get the size of seqfile %s. Exit"%(seqfile), file=sys.stderr)
return 1
if filesize >= MAX_FILESIZE:
print("You input seqfile %s exceeds the upper limit %d Mb."%(
seqfile, MAX_FILESIZE_IN_MB), file=sys.stderr)
print("Please split your seqfile and submit again.", file=sys.stderr)
return 1
seq = ReadFile(seqfile)
fixtop = ""
if fixtopfile != "":
fixtop = ReadFile(fixtopfile)
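        # Submit the sequence (plus optional fixed-topology constraints) via the WSDL service and report the returned jobid.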
myclient = Client(wsdl_url, cache=None)
retValue = myclient.service.submitjob(seq, fixtop, jobname, email)
if len(retValue) >= 1:
strs = retValue[0]
jobid = strs[0]
result_url = strs[1]
numseq_str = strs[2]
errinfo = strs[3]
warninfo = strs[4]
if jobid != "None" and jobid != "":
print("You have successfully submitted your job "\
"with %s sequences. jobid = %s\n"%(numseq_str, jobid))
if warninfo != "" and warninfo != "None":
print("Warning message: %s\n"%str(warninfo))
else:
print("Failed to submit job!\n")
if errinfo != "" and errinfo != "None":
print("Error message:%s\n"% str(errinfo))
if warninfo != "" and warninfo != "None":
print("Warning message:%s\n"% str(warninfo))
else:
print("Failed to submit job!")
return 1
else:
if jobid == "":
print("You want to get the result of a job but jobid is not set. Exit!", file=sys.stderr )
return 1
myclient = Client(wsdl_url, cache=None)
retValue = myclient.service.checkjob(jobid)
if len(retValue) >= 1:
strs = retValue[0]
status = strs[0]
result_url = strs[1]
errinfo = strs[2]
if status == "Failed":
print("Your job with jobid %s is failed!"%(jobid))
if errinfo != "" and errinfo != "None":
print("Error message:\n"%str(errinfo))
elif status == "Finished":
print("Your job with jobid %s is finished!"%(jobid))
if not os.path.exists(outpath):
try:
os.makedirs(outpath)
except OSError:
print("Failed to create the outpath %s"%(outpath))
return 1
outfile = "%s/%s.zip"%(outpath, jobid)
                urlretrieve(result_url, outfile)
if os.path.exists(outfile):
print("The result file %s has been retrieved for jobid %s"%(outfile, jobid))
else:
print("Failed to retrieve result for jobid %s"%(jobid))
elif status == "None":
print("Your job with jobid %s does not exist! Please check you typing!"%(jobid))
else:
print("Your job with jobid %s is not ready, status = %s"%(jobid, status))
else:
print("Failed to get job!")
return 1
return 0
#}}}
def InitGlobalParameter():#{{{
g_params = {}
g_params['isQuiet'] = True
return g_params
#}}}
if __name__ == '__main__' :
g_params = InitGlobalParameter()
sys.exit(main(g_params))
|
the-stack_0_3273 | import random
import math
import torch
from torch import nn, Tensor
import torchvision
from torch.jit.annotations import List, Tuple, Dict, Optional
from torchvision.ops import misc as misc_nn_ops
from .image_list import ImageList
from .roi_heads import paste_masks_in_image
@torch.jit.unused
def _resize_image_and_masks_onnx(image, self_min_size, self_max_size, target):
# type: (Tensor, float, float, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
from torch.onnx import operators
im_shape = operators.shape_as_tensor(image)[-2:]
min_size = torch.min(im_shape).to(dtype=torch.float32)
max_size = torch.max(im_shape).to(dtype=torch.float32)
scale_factor = torch.min(self_min_size / min_size, self_max_size / max_size)
image = torch.nn.functional.interpolate(
image[None], scale_factor=scale_factor, mode='bilinear',
align_corners=False)[0]
if target is None:
return image, target
if "masks" in target:
mask = target["masks"]
mask = misc_nn_ops.interpolate(mask[None].float(), scale_factor=scale_factor)[0].byte()
target["masks"] = mask
return image, target
def _resize_image_and_masks(image, self_min_size, self_max_size, target):
# type: (Tensor, float, float, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
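    # Scale so that the shorter image side matches self_min_size, capped so the longer side does not exceed self_max_size.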
im_shape = torch.tensor(image.shape[-2:])
min_size = float(torch.min(im_shape))
max_size = float(torch.max(im_shape))
scale_factor = self_min_size / min_size
if max_size * scale_factor > self_max_size:
scale_factor = self_max_size / max_size
image = torch.nn.functional.interpolate(
image[None], scale_factor=scale_factor, mode='bilinear',
align_corners=False)[0]
if target is None:
return image, target
if "masks" in target:
mask = target["masks"]
mask = misc_nn_ops.interpolate(mask[None].float(), scale_factor=scale_factor)[0].byte()
target["masks"] = mask
return image, target
class GeneralizedRCNNTransform(nn.Module):
"""
Performs input / target transformation before feeding the data to a GeneralizedRCNN
model.
The transformations it perform are:
- input normalization (mean subtraction and std division)
- input / target resizing to match min_size / max_size
It returns a ImageList for the inputs, and a List[Dict[Tensor]] for the targets
"""
def __init__(self, min_size, max_size, image_mean, image_std):
super(GeneralizedRCNNTransform, self).__init__()
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
self.image_mean = image_mean
self.image_std = image_std
def forward(self,
images, # type: List[Tensor]
targets=None # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[ImageList, Optional[List[Dict[str, Tensor]]]]
images = [img for img in images]
for i in range(len(images)):
image = images[i]
target_index = targets[i] if targets is not None else None
if image.dim() != 3:
raise ValueError("images is expected to be a list of 3d tensors "
"of shape [C, H, W], got {}".format(image.shape))
image = self.normalize(image)
image, target_index = self.resize(image, target_index)
images[i] = image
if targets is not None and target_index is not None:
targets[i] = target_index
image_sizes = [img.shape[-2:] for img in images]
images = self.batch_images(images)
image_sizes_list = torch.jit.annotate(List[Tuple[int, int]], [])
for image_size in image_sizes:
assert len(image_size) == 2
image_sizes_list.append((image_size[0], image_size[1]))
image_list = ImageList(images, image_sizes_list)
return image_list, targets
def normalize(self, image):
dtype, device = image.dtype, image.device
mean = torch.as_tensor(self.image_mean, dtype=dtype, device=device)
std = torch.as_tensor(self.image_std, dtype=dtype, device=device)
return (image - mean[:, None, None]) / std[:, None, None]
def torch_choice(self, k):
# type: (List[int]) -> int
"""
Implements `random.choice` via torch ops so it can be compiled with
TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803
is fixed.
"""
index = int(torch.empty(1).uniform_(0., float(len(k))).item())
return k[index]
def resize(self, image, target):
# type: (Tensor, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
h, w = image.shape[-2:]
if self.training:
size = float(self.torch_choice(self.min_size))
else:
# FIXME assume for now that testing uses the largest scale
size = float(self.min_size[-1])
if torchvision._is_tracing():
image, target = _resize_image_and_masks_onnx(image, size, float(self.max_size), target)
else:
image, target = _resize_image_and_masks(image, size, float(self.max_size), target)
if target is None:
return image, target
bbox = target["boxes"]
bbox = resize_boxes(bbox, (h, w), image.shape[-2:])
target["boxes"] = bbox
if "keypoints" in target:
keypoints = target["keypoints"]
keypoints = resize_keypoints(keypoints, (h, w), image.shape[-2:])
target["keypoints"] = keypoints
return image, target
# _onnx_batch_images() is an implementation of
# batch_images() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_batch_images(self, images, size_divisible=32):
# type: (List[Tensor], int) -> Tensor
max_size = []
for i in range(images[0].dim()):
max_size_i = torch.max(torch.stack([img.shape[i] for img in images]).to(torch.float32)).to(torch.int64)
max_size.append(max_size_i)
stride = size_divisible
max_size[1] = (torch.ceil((max_size[1].to(torch.float32)) / stride) * stride).to(torch.int64)
max_size[2] = (torch.ceil((max_size[2].to(torch.float32)) / stride) * stride).to(torch.int64)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# which is not yet supported in onnx
padded_imgs = []
for img in images:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
return torch.stack(padded_imgs)
def max_by_axis(self, the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def batch_images(self, images, size_divisible=32):
# type: (List[Tensor], int) -> Tensor
if torchvision._is_tracing():
# batch_images() does not export well to ONNX
# call _onnx_batch_images() instead
return self._onnx_batch_images(images, size_divisible)
max_size = self.max_by_axis([list(img.shape) for img in images])
stride = float(size_divisible)
max_size = list(max_size)
max_size[1] = int(math.ceil(float(max_size[1]) / stride) * stride)
max_size[2] = int(math.ceil(float(max_size[2]) / stride) * stride)
batch_shape = [len(images)] + max_size
batched_imgs = images[0].new_full(batch_shape, 0)
for img, pad_img in zip(images, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
return batched_imgs
def postprocess(self,
result, # type: List[Dict[str, Tensor]]
image_shapes, # type: List[Tuple[int, int]]
original_image_sizes # type: List[Tuple[int, int]]
):
# type: (...) -> List[Dict[str, Tensor]]
if self.training:
return result
for i, (pred, im_s, o_im_s) in enumerate(zip(result, image_shapes, original_image_sizes)):
boxes = pred["boxes"]
boxes = resize_boxes(boxes, im_s, o_im_s)
result[i]["boxes"] = boxes
if "masks" in pred:
masks = pred["masks"]
masks = paste_masks_in_image(masks, boxes, o_im_s)
result[i]["masks"] = masks
if "keypoints" in pred:
keypoints = pred["keypoints"]
keypoints = resize_keypoints(keypoints, im_s, o_im_s)
result[i]["keypoints"] = keypoints
return result
def __repr__(self):
format_string = self.__class__.__name__ + '('
_indent = '\n '
format_string += "{0}Normalize(mean={1}, std={2})".format(_indent, self.image_mean, self.image_std)
format_string += "{0}Resize(min_size={1}, max_size={2}, mode='bilinear')".format(_indent, self.min_size,
self.max_size)
format_string += '\n)'
return format_string
def resize_keypoints(keypoints, original_size, new_size):
# type: (Tensor, List[int], List[int]) -> Tensor
ratios = [
torch.tensor(s, dtype=torch.float32, device=keypoints.device) /
torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
for s, s_orig in zip(new_size, original_size)
]
ratio_h, ratio_w = ratios
resized_data = keypoints.clone()
if torch._C._get_tracing_state():
resized_data_0 = resized_data[:, :, 0] * ratio_w
resized_data_1 = resized_data[:, :, 1] * ratio_h
resized_data = torch.stack((resized_data_0, resized_data_1, resized_data[:, :, 2]), dim=2)
else:
resized_data[..., 0] *= ratio_w
resized_data[..., 1] *= ratio_h
return resized_data
def resize_boxes(boxes, original_size, new_size):
# type: (Tensor, List[int], List[int]) -> Tensor
ratios = [
torch.tensor(s, dtype=torch.float32, device=boxes.device) /
torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
for s, s_orig in zip(new_size, original_size)
]
ratio_height, ratio_width = ratios
xmin, ymin, xmax, ymax = boxes.unbind(1)
xmin = xmin * ratio_width
xmax = xmax * ratio_width
ymin = ymin * ratio_height
ymax = ymax * ratio_height
return torch.stack((xmin, ymin, xmax, ymax), dim=1)
|
the-stack_0_3274 | import time
import os
import argparse
import sys
import datetime
sys.path.append('../../python/src')
from libnyumaya import AudioRecognition, FeatureExtractor
from auto_platform import AudiostreamSource, play_command, default_libpath
def detectKeywords(libpath):
audio_stream = AudiostreamSource()
extractor = FeatureExtractor(libpath)
detector = AudioRecognition(libpath)
extactor_gain = 1.0
#Add one or more keyword models
keywordIdFirefox = detector.addModel('../../models/Hotword/firefox_v1.4.5.premium',0.6)
keywordIdSheila = detector.addModel('../../models/Hotword/sheila_v1.4.5.premium',0.6)
keywordIdMarvin = detector.addModel('../../models/Hotword/marvin_v1.4.5.premium',0.6)
keywordIdAlexa = detector.addModel('../../models/Hotword/alexa_v1.4.5.premium',0.6)
bufsize = detector.getInputDataSize()
print("Audio Recognition Version: " + detector.getVersionString())
audio_stream.start()
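    # Main loop: read raw audio frames, convert them to mel features and run hotword detection on each chunk.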
try:
while(True):
frame = audio_stream.read(bufsize*2,bufsize*2)
if(not frame):
time.sleep(0.01)
continue
features = extractor.signalToMel(frame,extactor_gain)
prediction = detector.runDetection(features)
if(prediction != 0):
now = datetime.datetime.now().strftime("%d.%b %Y %H:%M:%S")
if(prediction == keywordIdFirefox):
print("Firefox detected:" + now)
elif(prediction == keywordIdSheila):
print("Sheila detected:" + now)
elif(prediction == keywordIdMarvin):
print("Marvin detected:" + now)
elif(prediction == keywordIdAlexa):
print("Alexa detected:" + now)
os.system(play_command + " ../resources/ding.wav")
except KeyboardInterrupt:
print("Terminating")
audio_stream.stop()
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--libpath', type=str,
default=default_libpath,
help='Path to Platform specific nyumaya_lib.')
FLAGS, unparsed = parser.parse_known_args()
detectKeywords(FLAGS.libpath)
|
the-stack_0_3276 | import os
import torch
import torch.nn as nn
from torch_geometric.nn.conv import RGCNConv, GCNConv, GINConv
import torch_geometric.nn as gnn
import numpy as np
from torch_geometric.data import Data
import torch.nn.functional as F
class TemporalExtGCN(nn.Module):
def __init__(self, lfd_params, is_training=False, filename=None,
node_size=500, num_relations=1, output_size=4):
super().__init__()
self.lfd_params = lfd_params
# model filenames
self.filename = os.path.join(filename, ".".join(["model", "temporal_gcn", "pt"]))
# constants params
self.num_relations = num_relations # should be 7?
self.node_size = node_size
self.hidden_size = 512
self.output_size = output_size
# define model vars
# CONSIDER STACKED (will need ReLU, check on actual ITR data)
#self.gcn = GCNConv(self.node_size, self.hidden_size)
#self.gcn1 = GCNConv(self.node_size, self.hidden_size)
#self.gcn2 = GCNConv(self.hidden_size, self.hidden_size)
#self.gcn3 = GCNConv(self.hidden_size, self.hidden_size)
#self.gcn4 = GCNConv(self.hidden_size, self.hidden_size)
#print("self.node_size, self.hidden_size, self.output_size:",
# self.node_size, self.hidden_size, self.output_size)
self.gcn1 = RGCNConv(self.node_size, self.hidden_size, num_relations=self.num_relations)
self.gcn2 = RGCNConv(self.hidden_size, self.hidden_size, num_relations=self.num_relations)
#self.gcn3 = RGCNConv(self.hidden_size, self.hidden_size, num_relations=self.num_relations)#
#self.gcn4 = RGCNConv(self.hidden_size, self.hidden_size, num_relations=self.num_relations)#
#self.densegcn = gnn.DenseGCNConv(self.hidden_size, self.output_size)
#nn1 = nn.Sequential(nn.Linear(self.node_size, self.hidden_size), nn.ReLU(), nn.Linear(self.hidden_size, self.hidden_size))
#self.gcn = GINConv(nn1)
#self.drop = torch.nn.Dropout(p=0.25)
# print("temp_ext_gcn.py:", self.node_size, int(self.node_size/2) * self.output_size)
self.fc = nn.Linear(self.hidden_size, self.output_size)
# load model parameters
if not is_training:
assert self.filename is not None, \
"ERROR: temporal_ext_linear.py: filename must be defined when is_training is False"
self.load_model(self.filename)
else:
print("TemporalExtLinear is training")
def forward(self, x):
x, edge_idx, edge_attr, batch = x.x, x.edge_index, x.edge_attr, x.batch
#print("x9:", x.shape)
#print("edge_idx:", edge_idx.shape)
#print("edge_attr:", edge_attr.shape)
#print("batch:", batch.shape)
x = x.float().cuda()
edge_idx = edge_idx.long().cuda()
edge_attr = edge_attr.cuda()
batch = batch.cuda()
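        # Two stacked relational graph convolutions, global add pooling over each graph, then a linear classification head.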
x = F.relu(self.gcn1(x, edge_idx, edge_attr))
x = F.relu(self.gcn2(x, edge_idx, edge_attr))
#x = F.relu(self.gcn3(x, edge_idx, edge_attr))#
#x = F.relu(self.gcn4(x, edge_idx, edge_attr))#
x = gnn.global_add_pool(x, batch)
x = self.fc(x)
return x
def save_model(self):
torch.save(self.state_dict(), self.filename)
print("TemporalExtLinear Linear model saved to: ", self.filename)
def load_model(self, filename):
        assert os.path.exists(filename), "ERROR: temporal_ext_gcn.py: Cannot locate saved model - "+filename
        print("Loading TemporalExtGCN from: " + filename)
checkpoint = torch.load(filename)
self.load_state_dict(checkpoint, strict=True)
for param in self.parameters():
param.requires_grad = False
|
the-stack_0_3277 | # coding: utf-8
#
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.activity.gae_models."""
from __future__ import annotations
from core.constants import constants
from core.platform import models
from core.tests import test_utils
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import activity_models
from mypy_imports import base_models
(base_models, activity_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.activity])
class ActivityListModelTest(test_utils.GenericTestBase):
"""Tests the ActivityListModel class."""
def test_get_deletion_policy(self) -> None:
self.assertEqual(
activity_models.ActivityReferencesModel.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_featured_activity_list_always_exists(self) -> None:
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertIsNotNone(featured_model_instance)
self.assertEqual(featured_model_instance.id, 'featured')
self.assertEqual(featured_model_instance.activity_references, [])
def test_retrieving_non_existent_list(self) -> None:
with self.assertRaisesRegexp(Exception, 'Invalid ActivityListModel'): # type: ignore[no-untyped-call]
activity_models.ActivityReferencesModel.get_or_create(
'nonexistent_key')
def test_updating_featured_activity_list(self) -> None:
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertEqual(featured_model_instance.activity_references, [])
featured_model_instance.activity_references = [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'id': '0',
}]
featured_model_instance.update_timestamps()
featured_model_instance.put()
featured_model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.assertEqual(featured_model_instance.id, 'featured')
self.assertEqual(
featured_model_instance.activity_references, [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'id': '0',
}])
|
the-stack_0_3278 | # this builtin is needed so we can overwrite in test
import asyncio
import json
import logging
import os
import aiohttp
import questionary
from aiohttp import ClientTimeout
from prompt_toolkit.styles import Style
from typing import Any
from typing import Text, Optional, Dict, List
from rasa.cli import utils as cli_utils
from rasa.core import utils
from rasa.core.channels.channel import RestInput
from rasa.core.constants import DEFAULT_SERVER_URL
from rasa.core.interpreter import INTENT_MESSAGE_PREFIX
from rasa.utils.io import DEFAULT_ENCODING
logger = logging.getLogger(__name__)
STREAM_READING_TIMEOUT_ENV = "RASA_SHELL_STREAM_READING_TIMEOUT_IN_SECONDS"
DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS = 10
def print_buttons(
message: Dict[Text, Any],
is_latest_message: bool = False,
color=cli_utils.bcolors.OKBLUE,
) -> Optional[questionary.Question]:
if is_latest_message:
choices = cli_utils.button_choices_from_message_data(
message, allow_free_text_input=True
)
question = questionary.select(
message.get("text"),
choices,
style=Style([("qmark", "#6d91d3"), ("", "#6d91d3"), ("answer", "#b373d6")]),
)
return question
else:
cli_utils.print_color("Buttons:", color=color)
for idx, button in enumerate(message.get("buttons")):
cli_utils.print_color(cli_utils.button_to_string(button, idx), color=color)
def print_bot_output(
message: Dict[Text, Any],
is_latest_message: bool = False,
color=cli_utils.bcolors.OKBLUE,
) -> Optional[questionary.Question]:
if "buttons" in message:
question = print_buttons(message, is_latest_message, color)
if question:
return question
if "text" in message:
cli_utils.print_color(message.get("text"), color=color)
if "image" in message:
cli_utils.print_color("Image: " + message.get("image"), color=color)
if "attachment" in message:
cli_utils.print_color("Attachment: " + message.get("attachment"), color=color)
if "elements" in message:
cli_utils.print_color("Elements:", color=color)
for idx, element in enumerate(message.get("elements")):
cli_utils.print_color(
cli_utils.element_to_string(element, idx), color=color
)
if "quick_replies" in message:
cli_utils.print_color("Quick Replies:", color=color)
for idx, element in enumerate(message.get("quick_replies")):
cli_utils.print_color(cli_utils.button_to_string(element, idx), color=color)
if "custom" in message:
cli_utils.print_color("Custom json:", color=color)
cli_utils.print_color(json.dumps(message.get("custom"), indent=2), color=color)
def get_user_input(previous_response: Optional[Dict[str, Any]]) -> Optional[Text]:
button_response = None
if previous_response is not None:
button_response = print_bot_output(previous_response, is_latest_message=True)
if button_response is not None:
response = cli_utils.payload_from_button_question(button_response)
if response == cli_utils.FREE_TEXT_INPUT_PROMPT:
# Re-prompt user with a free text input
response = get_user_input({})
else:
response = questionary.text(
"",
qmark="Your input ->",
style=Style([("qmark", "#b373d6"), ("", "#b373d6")]),
).ask()
return response.strip() if response is not None else None
async def send_message_receive_block(
server_url, auth_token, sender_id, message
) -> List[Dict[Text, Any]]:
payload = {"sender": sender_id, "message": message}
url = f"{server_url}/webhooks/rest/webhook?token={auth_token}"
async with aiohttp.ClientSession() as session:
async with session.post(url, json=payload, raise_for_status=True) as resp:
return await resp.json()
async def send_message_receive_stream(
server_url: Text, auth_token: Text, sender_id: Text, message: Text
):
payload = {"sender": sender_id, "message": message}
url = f"{server_url}/webhooks/rest/webhook?stream=true&token={auth_token}"
# Define timeout to not keep reading in case the server crashed in between
timeout = _get_stream_reading_timeout()
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.post(url, json=payload, raise_for_status=True) as resp:
async for line in resp.content:
if line:
yield json.loads(line.decode(DEFAULT_ENCODING))
def _get_stream_reading_timeout() -> ClientTimeout:
timeout_in_seconds = int(
os.environ.get(
STREAM_READING_TIMEOUT_ENV, DEFAULT_STREAM_READING_TIMEOUT_IN_SECONDS
)
)
return ClientTimeout(timeout_in_seconds)
async def record_messages(
sender_id,
server_url=DEFAULT_SERVER_URL,
auth_token="",
max_message_limit=None,
use_response_stream=True,
) -> int:
"""Read messages from the command line and print bot responses."""
exit_text = INTENT_MESSAGE_PREFIX + "stop"
cli_utils.print_success(
"Bot loaded. Type a message and press enter "
"(use '{}' to exit): ".format(exit_text)
)
num_messages = 0
previous_response = None
await asyncio.sleep(0.5) # Wait for server to start
while not utils.is_limit_reached(num_messages, max_message_limit):
text = get_user_input(previous_response)
if text == exit_text or text is None:
break
if use_response_stream:
bot_responses = send_message_receive_stream(
server_url, auth_token, sender_id, text
)
previous_response = None
async for response in bot_responses:
if previous_response is not None:
print_bot_output(previous_response)
previous_response = response
else:
bot_responses = await send_message_receive_block(
server_url, auth_token, sender_id, text
)
previous_response = None
for response in bot_responses:
if previous_response is not None:
print_bot_output(previous_response)
previous_response = response
num_messages += 1
        await asyncio.sleep(0)  # Yield the event loop to other coroutines
return num_messages
class CmdlineInput(RestInput):
@classmethod
def name(cls) -> Text:
return "cmdline"
def url_prefix(self) -> Text:
return RestInput.name()
|
the-stack_0_3279 | # https://rosalind.info/problems/sims/
def fmtfa(fasta: list):
prev = True
header = []
seq = []
for f in fasta:
if ">" in f:
header.append(f[1:])
prev = True
elif prev:
seq.append(f)
prev = False
else:
seq[-1] += f
return header, seq
# INPUT -------------------------------------------
file_in = "sample/dataset/sims.txt"
file_out = "sample/output/sims.txt"
# file_in = "case/dataset/sims.txt"
with open(file_in) as f:
data = f.read().splitlines()
with open(file_out) as f:
outcome = f.read().splitlines()
# MAIN -------------------------------------------
# OUTPUT -------------------------------------------
with open("case/output/sims.txt", "w") as f:
    f.write("")  # TODO: write the computed result once the MAIN section above is implemented
# END
|
the-stack_0_3280 | import matplotlib.pyplot as plt
plt.figure(1, figsize=(3,3))
ax = plt.subplot(111)
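# Draw a bare arrow (annotation with empty text) from (0.8, 0.8) to (0.2, 0.2) in data coordinates using a straight 'arc3' connection.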
ax.annotate("",
xy=(0.2, 0.2), xycoords='data',
xytext=(0.8, 0.8), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
)
plt.show()
|
the-stack_0_3281 | # JupyterHub configuration
#
## If you update this file, do not forget to delete the `jupyterhub_data` volume before restarting the jupyterhub service:
##
## docker volume rm jupyterhub_jupyterhub_data
##
## or, if you changed the COMPOSE_PROJECT_NAME to <name>:
##
## docker volume rm <name>_jupyterhub_data
##
import os
import sys
## Generic
c.JupyterHub.admin_access = True
c.Spawner.default_url = '/lab'
## Authenticator
from oauthenticator.generic import GenericOAuthenticator
#c.Application.log_level = 'DEBUG'
c.JupyterHub.authenticator_class = GenericOAuthenticator
c.GenericOAuthenticator.client_id = os.environ['OAUTH2_CLIENT_ID']
c.GenericOAuthenticator.client_secret = os.environ['OAUTH2_CLIENT_SECRET']
c.GenericOAuthenticator.token_url = 'https://gymnasium-ditzingen.de/iserv/oauth/v2/token'
c.GenericOAuthenticator.userdata_url = os.environ['OAUTH2_USERDATA_URL']
c.GenericOAuthenticator.userdata_params = {'state': 'state'}
# the next can be a callable as well, e.g.: lambda t: t.get('complex').get('structure').get('username')
#c.GenericOAuthenticator.username_key = 'preferred_username'
c.GenericOAuthenticator.login_service = 'IServ'
c.GenericOAuthenticator.scope = ['openid', 'profile', 'email', 'groups']
c.GenericOAuthenticator.admin_groups = ['Admins', 'admins']
c.GenericOAuthenticator.oauth_callback_url = 'https://jupyter.gymnasium-ditzingen.de/hub/oauth_callback'
c.OAuthenticator.tls_verify = False
# from oauthenticator.oauth2 import OAuthLoginHandler
# from oauthenticator.generic import GenericOAuthenticator
# from tornado.auth import OAuth2Mixin
# # OAuth2 endpoints
# class MyOAuthMixin(OAuth2Mixin):
# _OAUTH_AUTHORIZE_URL = 'https://gymnasium-ditzingen.de/iserv/oauth/v2/auth' ## Better move this to .env!
# _OAUTH_ACCESS_TOKEN_URL = 'https://gymnasium-ditzingen.de/iserv/oauth/v2/token'
# class MyOAuthLoginHandler(OAuthLoginHandler, MyOAuthMixin):
# pass
# # Authenticator configuration
# class MyOAuthAuthenticator(GenericOAuthenticator):
# login_service = 'IServ'
# login_handler = MyOAuthLoginHandler
# userdata_url = 'https://gymnasium-ditzingen.de/iserv/public/oauth/userinfo'
# token_url = 'https://gymnasium-ditzingen.de/iserv/oauth/v2/token'
# oauth_callback_url = 'https://jupyter.gymnasium-ditzingen.de/hub/oauth_callback'
# client_id = os.environ['OAUTH2_CLIENT_ID'] # Your client ID and secret, as provided to you
# client_secret = os.environ['OAUTH2_CLIENT_SECRET'] # by the OAuth2 service.
# c.JupyterHub.authenticator_class = MyOAuthAuthenticator
## Docker spawner
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.DockerSpawner.image = os.environ['DOCKER_JUPYTER_CONTAINER']
c.DockerSpawner.network_name = os.environ['DOCKER_NETWORK_NAME']
# See https://github.com/jupyterhub/dockerspawner/blob/master/examples/oauth/jupyterhub_config.py
c.JupyterHub.hub_ip = os.environ['HUB_IP']
# user data persistence
# see https://github.com/jupyterhub/dockerspawner#data-persistence-and-dockerspawner
notebook_dir = os.environ.get('DOCKER_NOTEBOOK_DIR') or '/home/jovyan' # THIS NEEDS TO CHANGE?
c.DockerSpawner.notebook_dir = notebook_dir
c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir }
# Other stuff
c.Spawner.cpu_limit = 1
c.Spawner.mem_limit = '10G'
## Services
c.JupyterHub.load_roles = [
{
"name": "jupyterhub-idle-culler-role",
"scopes": [
"list:users",
"read:users:activity",
"delete:servers",
# "admin:users", # if using --cull-users
],
# assignment of role's permissions to:
"services": ["jupyterhub-idle-culler-service"],
}
]
c.JupyterHub.services = [
{
"name": "jupyterhub-idle-culler-service",
"command": [
sys.executable,
"-m", "jupyterhub_idle_culler",
"--timeout=3600",
],
"admin": True, # Has to be disabled version>2.0
}
]
|
the-stack_0_3282 | #!/usr/bin/env python
# encoding: utf-8
r"""
Module containing all Pyclaw solution objects
"""
from __future__ import absolute_import
import six
class State(object):
r"""
A PyClaw State object contains the current state on a particular patch,
including the unkowns q, the time t, and the auxiliary coefficients aux.
The variables num_eqn and num_aux determine the length of the first
dimension of the q and aux arrays.
:State Data:
The arrays :attr:`q`, and :attr:`aux` have variable
extents based on the patch dimensions and the values of
:attr:`num_eqn` and :attr:`num_aux`.
A State object is automatically created upon instantiation of a Solution object
from a Domain object:
>>> from clawpack import pyclaw
>>> x = pyclaw.Dimension('x',0.0,1.0,100)
>>> domain = pyclaw.Domain(x)
>>> num_eqn = 1
>>> solution = pyclaw.Solution(num_eqn,domain)
>>> print solution.state
PyClaw State object
Patch dimensions: [100]
Time t=0.0
Number of conserved quantities: 1
<BLANKLINE>
A State lives on a Patch, and can be instantiated directly
by first creating a Patch:
>>> x = pyclaw.Dimension('x',0.,1.,100)
>>> patch = pyclaw.Patch((x))
The arguments to the constructor are the patch, the number of equations,
and the number of auxiliary fields:
>>> state = pyclaw.State(patch,3,2)
>>> state.q.shape
(3, 100)
>>> state.aux.shape
(2, 100)
>>> state.t
0.0
Note that state.q and state.aux are initialized as empty arrays (not zeroed).
Additional parameters, such as scalar values that are used in the Riemann solver,
can be set using the dictionary state.problem_data.
"""
def __getattr__(self, key):
if key in ('num_dim', 'p_centers', 'p_edges', 'c_centers', 'c_edges',
'num_cells', 'lower', 'upper', 'delta', 'centers', 'edges',
'gauges'):
return self._get_grid_attribute(key)
else:
raise AttributeError("'State' object has no attribute '"+key+"'")
def _get_grid_attribute(self, name):
r"""
Return grid attribute
:Output:
- (id) - Value of attribute from ``grid``
"""
return getattr(self.grid,name)
# ========== Property Definitions ========================================
@property
def num_eqn(self):
r"""(int) - Number of unknowns (components of q)"""
if self.q is None:
raise Exception('state.num_eqn has not been set.')
else: return self.q.shape[0]
@property
def num_aux(self):
r"""(int) - Number of auxiliary fields"""
if self.aux is not None: return self.aux.shape[0]
else: return 0
@property
def grid(self):
return self.patch.grid
@property
def mp(self):
r"""(int) - Number of derived quantities"""
if self.p is not None: return self.p.shape[0]
else: return 0
@mp.setter
def mp(self,mp):
if self.p is not None:
            raise Exception('Cannot change state.mp after p is initialized.')
else:
self.p = self.new_array(mp)
@property
def mF(self):
r"""(int) - Number of output functionals"""
if self.F is not None: return self.F.shape[0]
else: return 0
@mF.setter
def mF(self,mF):
if self.F is not None:
            raise Exception('Cannot change state.mF after F is initialized.')
else:
self.F = self.new_array(mF)
# ========== Class Methods ===============================================
def __init__(self,geom,num_eqn,num_aux=0):
from clawpack.pyclaw import geometry
if isinstance(geom,geometry.Patch):
self.patch = geom
elif isinstance(geom,geometry.Domain):
self.patch = geom.patches[0]
else:
raise Exception("""A PyClaw State object must be initialized with
a PyClaw Patch object.""")
# ========== Attribute Definitions ===================================
r"""pyclaw.Patch.patch - The patch this state lives on"""
self.p = None
r"""(ndarray(mp,...)) - Cell averages of derived quantities."""
self.F = None
r"""(ndarray(mF,...)) - Cell averages of output functional densities."""
self.problem_data = {}
r"""(dict) - Dictionary of global values for this patch,
``default = {}``"""
self.t=0.
r"""(float) - Current time represented on this patch,
``default = 0.0``"""
self.index_capa = -1
self.keep_gauges = False
r"""(bool) - Keep gauge values in memory for every time step,
``default = False``"""
self.gauge_data = []
r"""(list) - List of numpy.ndarray objects. Each element of the list
stores the values of the corresponding gauge if ``keep_gauges`` is set
to ``True``"""
self.q = self.new_array(num_eqn)
self.aux = self.new_array(num_aux)
def __str__(self):
output = "PyClaw State object\n"
output += "Patch dimensions: %s\n" % str(self.patch.num_cells_global)
output += "Time t=%s\n" % (self.t)
output += "Number of conserved quantities: %s\n" % str(self.q.shape[0])
if self.aux is not None:
output += "Number of auxiliary fields: %s\n" % str(self.aux.shape[0])
if self.problem_data != {}:
output += "problem_data: "+self.problem_data.__str__()
return output
def is_valid(self):
r"""
Checks to see if this state is valid
The state is declared valid based on the following criteria:
- :attr:`q` is Fortran contiguous
- :attr:`aux` is Fortran contiguous
A debug logger message will be sent documenting exactly what was not
valid.
:Output:
- (bool) - True if valid, false otherwise.
"""
import logging
valid = True
logger = logging.getLogger('pyclaw.solution')
if not self.q.flags['F_CONTIGUOUS']:
logger.debug('q array is not Fortran contiguous.')
valid = False
if self.aux is not None:
if not self.aux.flags['F_CONTIGUOUS']:
logger.debug('q array is not Fortran contiguous.')
valid = False
return valid
def set_cparam(self,fortran_module):
"""
Set the variables in fortran_module.cparam to the corresponding values in
patch.problem_data. This is the mechanism for passing scalar variables to the
Fortran Riemann solvers; cparam must be defined as a common block in the
Riemann solver.
This function should be called from solver.setup(). This seems like a fragile
interdependency between solver and state; perhaps problem_data should belong
to solver instead of state.
This function also checks that the set of variables defined in cparam
all appear in problem_data.
"""
if hasattr(fortran_module,'cparam'):
try:
paramlist = [parm for parm in fortran_module.cparam.__dir__()
if '__' not in parm]
except AttributeError: # Python 2
paramlist = dir(fortran_module.cparam)
if not set(paramlist) <= set(self.problem_data.keys()):
raise Exception("""Some required value(s) in the cparam common
block in the Riemann solver have not been
set in problem_data.""")
for global_var_name,global_var_value in six.iteritems(self.problem_data):
setattr(fortran_module.cparam,global_var_name,global_var_value)
def set_num_ghost(self,num_ghost):
"""
Virtual routine (does nothing). Overridden in the petclaw.state class.
"""
pass
def set_q_from_qbc(self,num_ghost,qbc):
"""
Set the value of q using the array qbc. Typically this is called
after qbc has been updated by the solver.
"""
num_dim = self.patch.num_dim
if num_dim == 1:
self.q = qbc[:,num_ghost:-num_ghost]
elif num_dim == 2:
self.q = qbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost]
elif num_dim == 3:
self.q = qbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost,num_ghost:-num_ghost]
else:
raise Exception("Assumption (1 <= num_dim <= 3) violated.")
def set_aux_from_auxbc(self,num_ghost,auxbc):
"""
Set the value of aux using the array auxbc.
"""
patch = self.patch
if patch.num_dim == 1:
self.aux = auxbc[:,num_ghost:-num_ghost]
elif patch.num_dim == 2:
self.aux = auxbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost]
elif patch.num_dim == 3:
self.aux = auxbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost,num_ghost:-num_ghost]
else:
raise Exception("Assumption (1 <= num_dim <= 3) violated.")
def get_qbc_from_q(self,num_ghost,qbc):
"""
Fills in the interior of qbc by copying q to it.
"""
num_dim = self.patch.num_dim
if num_dim == 1:
qbc[:,num_ghost:-num_ghost] = self.q
elif num_dim == 2:
qbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost] = self.q
elif num_dim == 3:
qbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost,num_ghost:-num_ghost] = self.q
else:
raise Exception("Assumption (1 <= num_dim <= 3) violated.")
return qbc
def get_auxbc_from_aux(self,num_ghost,auxbc):
"""
Fills in the interior of auxbc by copying aux to it.
"""
num_dim = self.patch.num_dim
if num_dim == 1:
auxbc[:,num_ghost:-num_ghost] = self.aux
elif num_dim == 2:
auxbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost] = self.aux
elif num_dim == 3:
auxbc[:,num_ghost:-num_ghost,num_ghost:-num_ghost,num_ghost:-num_ghost] = self.aux
else:
raise Exception("Assumption (1 <= num_dim <= 3) violated.")
return auxbc
# ========== Copy functionality ==========================================
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self,memo={}):
import copy
result = self.__class__(copy.deepcopy(self.patch),self.num_eqn,self.num_aux)
result.__init__(copy.deepcopy(self.patch),self.num_eqn,self.num_aux)
for attr in ('t'):
setattr(result,attr,copy.deepcopy(getattr(self,attr)))
if self.q is not None:
result.q = copy.deepcopy(self.q)
if self.aux is not None:
result.aux = copy.deepcopy(self.aux)
result.problem_data = copy.deepcopy(self.problem_data)
return result
def sum_F(self,i):
import numpy as np
return np.sum(np.abs(self.F[i,...]))
def new_array(self,dof):
import numpy as np
if dof==0: return None
shape = [dof]
shape.extend(self.grid.num_cells)
return np.empty(shape,order='F')
def get_q_global(self):
r"""
Returns a copy of state.q.
"""
return self.q.copy()
def get_aux_global(self):
r"""
Returns a copy of state.aux.
"""
return self.aux.copy()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
the-stack_0_3284 | from hydroDL import kPath, utils
from hydroDL.app import waterQuality
from hydroDL.master import basins
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.master import slurm
from hydroDL.post import axplot, figplot
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy
codeLst = sorted(usgs.varC)
ep = 500
reTest = False
wqData = waterQuality.DataModelWQ('sbWTQ')
siteNoLst = wqData.info.siteNo.unique()
nSite = len(siteNoLst)
# single
labelLst = ['ntnonly', 'q', 'ntnq']
cLst = 'bgr'
labLst2 = ['input NTN', 'input Q', 'input NTN+Q']
corrMat = np.full([nSite, len(codeLst), len(labelLst)], np.nan)
rmseMat = np.full([nSite, len(codeLst), len(labelLst)], np.nan)
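# Evaluate every (input label, water-quality code) model on its Y2 test subset and store per-site correlation and RMSE.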
for iLab, label in enumerate(labelLst):
for iCode, code in enumerate(codeLst):
trainSet = '{}-Y1'.format(code)
testSet = '{}-Y2'.format(code)
if label == 'qpred':
outName = '{}-{}-{}-{}'.format('sbWTQ', code, label, trainSet)
else:
outName = '{}-{}-{}-{}'.format('sbWT', code, label, trainSet)
master = basins.loadMaster(outName)
ic = wqData.varC.index(code)
# for iT, subset in enumerate([trainSet, testSet]):
subset = testSet
yP, ycP = basins.testModel(
outName, subset, wqData=wqData, ep=ep, reTest=reTest)
ind = wqData.subset[subset]
info = wqData.info.iloc[ind].reset_index()
p = yP[-1, :, master['varY'].index(code)]
o = wqData.c[-1, ind, ic]
for iS, siteNo in enumerate(siteNoLst):
indS = info[info['siteNo'] == siteNo].index.values
rmse, corr = utils.stat.calErr(p[indS], o[indS])
corrMat[iS, iCode, iLab] = corr
rmseMat[iS, iCode, iLab] = rmse
# plot box
labLst1 = [usgs.codePdf.loc[code]['shortName'] +
'\n'+code for code in codeLst]
dataBox = list()
for k in range(len(codeLst)):
code = codeLst[k]
temp = list()
for i in range(len(labelLst)):
temp.append(corrMat[:, k, i])
dataBox.append(temp)
fig = figplot.boxPlot(dataBox, label1=labLst1, widths=0.5, cLst=cLst,
label2=labLst2, figsize=(12, 4), yRange=[0, 1])
# fig = figplot.boxPlot(dataBox, label1=labLst1, widths=0.5,
# label2=labLst2, figsize=(12, 4), sharey=False)
fig.show()
# significance test
testLst = ['add Q', 'add NTN']
indLst = [[0, 2], [1, 2]]
codeStrLst = ['{} {}'.format(
code, usgs.codePdf.loc[code]['shortName']) for code in codeLst]
dfS = pd.DataFrame(index=codeStrLst, columns=testLst)
for (test, ind) in zip(testLst, indLst):
for k, code in enumerate(codeLst):
data = [corrMat[:, k, x] for x in ind]
[a, b], _ = utils.rmNan(data)
s, p = scipy.stats.ttest_ind(a, b, equal_var=False)
# s, p = scipy.stats.ttest_rel(a, b)
dfS.loc[codeStrLst[k]][test] = p
dfS['aver R'] = np.nanmean(corrMat[:, :, 2], axis=0)
pd.options.display.float_format = '{:,.2f}'.format
print(dfS)
|
the-stack_0_3285 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0003_subscription'),
]
operations = [
migrations.AlterModelOptions(
name='subscription',
options={'ordering': ('-created',)},
),
]
|
the-stack_0_3286 | # Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# War packaging.
jar_filetype = FileType([".jar"])
LIBS = [
"//java/com/google/gerrit/common:version",
"//java/com/google/gerrit/httpd/init",
"//lib:postgresql",
"//lib/bouncycastle:bcpkix",
"//lib/bouncycastle:bcprov",
"//lib/bouncycastle:bcpg",
"//lib/log:impl-log4j",
"//resources:log4j-config",
]
PGMLIBS = [
"//java/com/google/gerrit/pgm",
]
def _add_context(in_file, output):
input_path = in_file.path
return [
"unzip -qd %s %s" % (output, input_path),
]
def _add_file(in_file, output):
output_path = output
input_path = in_file.path
short_path = in_file.short_path
n = in_file.basename
if short_path.startswith("gerrit-"):
n = short_path.split("/")[0] + "-" + n
elif short_path.startswith("java/"):
n = short_path[5:].replace("/", "_")
output_path += n
return [
"test -L %s || ln -s $(pwd)/%s %s" % (output_path, input_path, output_path),
]
def _make_war(input_dir, output):
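    # Zip the staged directory with normalized timestamps (UTC, fixed mtime) so the resulting war is reproducible.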
return "(%s)" % " && ".join([
"root=$(pwd)",
"TZ=UTC",
"export TZ",
"cd %s" % input_dir,
"find . -exec touch -t 198001010000 '{}' ';' 2> /dev/null",
"zip -X -9qr ${root}/%s ." % (output.path),
])
def _war_impl(ctx):
war = ctx.outputs.war
build_output = war.path + ".build_output"
inputs = []
# Create war layout
cmd = [
"set -e;rm -rf " + build_output,
"mkdir -p " + build_output,
"mkdir -p %s/WEB-INF/lib" % build_output,
"mkdir -p %s/WEB-INF/pgm-lib" % build_output,
]
# Add lib
transitive_lib_deps = depset()
for l in ctx.attr.libs:
if hasattr(l, "java"):
transitive_lib_deps += l.java.transitive_runtime_deps
elif hasattr(l, "files"):
transitive_lib_deps += l.files
for dep in transitive_lib_deps:
cmd += _add_file(dep, build_output + "/WEB-INF/lib/")
inputs.append(dep)
# Add pgm lib
transitive_pgmlib_deps = depset()
for l in ctx.attr.pgmlibs:
transitive_pgmlib_deps += l.java.transitive_runtime_deps
for dep in transitive_pgmlib_deps:
if dep not in inputs:
cmd += _add_file(dep, build_output + "/WEB-INF/pgm-lib/")
inputs.append(dep)
# Add context
transitive_context_deps = depset()
if ctx.attr.context:
for jar in ctx.attr.context:
if hasattr(jar, "java"):
transitive_context_deps += jar.java.transitive_runtime_deps
elif hasattr(jar, "files"):
transitive_context_deps += jar.files
for dep in transitive_context_deps:
cmd += _add_context(dep, build_output)
inputs.append(dep)
# Add zip war
cmd.append(_make_war(build_output, war))
ctx.actions.run_shell(
inputs = inputs,
outputs = [war],
mnemonic = "WAR",
command = "\n".join(cmd),
use_default_shell_env = True,
)
# context: go to the root directory
# libs: go to the WEB-INF/lib directory
# pgmlibs: go to the WEB-INF/pgm-lib directory
_pkg_war = rule(
attrs = {
"context": attr.label_list(allow_files = True),
"libs": attr.label_list(allow_files = jar_filetype),
"pgmlibs": attr.label_list(allow_files = False),
},
outputs = {"war": "%{name}.war"},
implementation = _war_impl,
)
def pkg_war(name, ui = "ui_optdbg", context = [], doc = False, **kwargs):
doc_ctx = []
doc_lib = []
ui_deps = []
if ui == "polygerrit" or ui == "ui_optdbg" or ui == "ui_optdbg_r":
ui_deps.append("//polygerrit-ui/app:polygerrit_ui")
if ui and ui != "polygerrit":
ui_deps.append("//gerrit-gwtui:%s" % ui)
if doc:
doc_ctx.append("//Documentation:html")
doc_lib.append("//Documentation:index")
_pkg_war(
name = name,
libs = LIBS + doc_lib,
pgmlibs = PGMLIBS,
context = doc_ctx + context + ui_deps + [
"//java:gerrit-main-class_deploy.jar",
"//webapp:assets",
],
**kwargs
)
|
the-stack_0_3288 | '''
QUESTION 5
A palindrome is a sequence of characters that reads the same from right to left as
from left to right. For example: OSSO and OVO are palindromes. In more complex
texts, spaces and punctuation are ignored. The phrase SUBI NO ONIBUS is an example
of a palindromic sentence once the spaces are ignored. Write a program that reads a
sequence of characters, displays it and says whether or not it is a palindrome.
'''
import pilha as pilha
from unidecode import unidecode
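# 'pilha' is assumed to be a local stack helper module providing isEmpty, size and push operations.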
def palindromoPilha(array):
auxA = []
auxB = []
if pilha.isEmpty(array):
print("String vazia")
else:
print("\n*************** teste de palíndromo ***************")
print('Verificar: ' + array)
# Remover acentos
array = unidecode(array)
        # Convert to lowercase to avoid case-mismatch errors in the comparison
array = array.lower()
        # Remove spaces, if any
if " " in array:
array = array.replace(" ", "")
loop = ((pilha.size(array)) - 1)
for i in range(loop, -1, -1):
auxA = pilha.push(array[i], auxA)
auxB = pilha.push(array[loop - i], auxB)
if auxA == auxB:
print("Teste Verdadeiro")
return True
else:
print("Teste Falso")
return False
# Função Palíndromo utilizando estrutura de pilha
print("\n********** Função de Palíndromo **********")
palin = ['ralo do dólar', 'até o poeta', 'tomarei café após a sopa']
for i in range(len(palin)):
palindromoPilha(palin[i]) |
the-stack_0_3289 | from abc import ABC, abstractmethod
from typing import List, Tuple, Union
import torch
from torch.nn import functional as F
class Bandit(ABC):
"""Abstract Base class for bandits"""
@abstractmethod
def step(self, action: int) -> Tuple[torch.Tensor, int]:
"""Generate reward for given action and select next context.
Args:
action (int): Selected action.
Returns:
Tuple[torch.Tensor, int]: Tuple of the next context and the
reward generated for given action
"""
@abstractmethod
def reset(self) -> torch.Tensor:
"""Reset bandit.
Returns:
torch.Tensor: Current context selected by bandit.
"""
class BanditAgent(ABC):
"""Abstract Base class for bandit solving agents"""
@abstractmethod
def select_action(self, context: torch.Tensor) -> int:
"""Select an action based on given context
Args:
context (torch.Tensor): The context vector to select action for
Returns:
int: The action to take
"""
class MultiArmedBandit(Bandit):
"""
Base Class for a Contextual Multi-armed Bandit
:param bandits: Number of bandits
:param arms: Number of arms in each bandit
:param context_type: Give context as either tensor or int
:type bandits: int
:type arms: int
:type context_type: str
"""
def __init__(self, bandits: int = 1, arms: int = 1, context_type: str = "tensor"):
self._nbandits = bandits
self._narms = arms
self.n_actions = arms
self.context_dim = bandits
if not (context_type == "int" or context_type == "tensor"):
raise ValueError(
f"context_type should be either tensor or int, found {context_type}"
)
self.context_type = context_type
self._reset_metrics()
self._reset_bandit()
@property
def reward_hist(self) -> List[float]:
"""
Get the history of rewards received at each step
:returns: List of rewards
:rtype: list
"""
return self._reward_hist
@property
def regret_hist(self) -> List[float]:
"""
Get the history of regrets incurred at each step
:returns: List of regrest
:rtype: list
"""
return self._regret_hist
@property
def cum_regret_hist(self) -> Union[List[int], List[float]]:
return self._cum_regret_hist
@property
def cum_reward_hist(self) -> Union[List[int], List[float]]:
return self._cum_reward_hist
@property
def cum_regret(self) -> Union[int, float]:
return self._cum_regret
@property
def cum_reward(self) -> Union[int, float]:
return self._cum_reward
@property
def arms(self) -> int:
"""
Get the number of arms in each bandit
:returns: Number of arms in each bandit
:rtype: int
"""
return self._narms
@property
def bandits(self) -> int:
"""
Get the number of bandits
:returns: Number of bandits
:rtype: int
"""
return self._nbandits
def _reset_metrics(self) -> None:
"""
Resets the various metrics to empty
"""
self._regret_hist = []
self._reward_hist = []
self._cum_regret_hist = []
self._cum_reward_hist = []
self._cum_regret = 0
self._cum_reward = 0
def _reset_bandit(self) -> None:
"""
Resets the current bandit and context
"""
self.curr_bandit = torch.randint(self.bandits, (1,))
self.curr_context = F.one_hot(
self.curr_bandit, num_classes=self.context_dim
).to(torch.float)
def reset(self) -> torch.Tensor:
"""
Resets metrics to empty the current bandit randomly
:returns: The current bandit as observation
:rtype: int
"""
self._reset_metrics()
self._reset_bandit()
if self.context_type == "tensor":
return self.curr_context.view(-1)
elif self.context_type == "int":
return self.curr_bandit.item()
def step(self, action: int) -> Tuple[Union[int, torch.Tensor], Union[int, float]]:
"""
Takes an action in the bandit and returns the sampled reward
This method needs to be implemented in the specific bandit.
:param action: The action to take
:type action: int
:returns: Reward sampled for the action taken
:rtype: int, float ...
"""
reward, max_reward = self._compute_reward(action)
regret = max_reward - reward
self._cum_regret += regret
self.cum_regret_hist.append(self._cum_regret)
self.regret_hist.append(regret)
self._cum_reward += reward
self.cum_reward_hist.append(self._cum_reward)
self.reward_hist.append(reward)
self._reset_bandit()
if self.context_type == "tensor":
return self.curr_context.view(-1), reward
elif self.context_type == "int":
return self.curr_bandit, reward
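# --- Illustrative sketch (not part of the original module) ------------------
# MultiArmedBandit.step() above relies on a `_compute_reward(action)` hook
# returning `(reward, max_reward)`, which concrete bandits are expected to
# provide.  The hypothetical Bernoulli bandit below only demonstrates the
# reset()/step() flow and the cumulative reward/regret bookkeeping.
class _ExampleBernoulliBandit(MultiArmedBandit):
    def __init__(self, bandits: int = 2, arms: int = 3):
        super().__init__(bandits=bandits, arms=arms, context_type="tensor")
        self._probs = torch.rand(bandits, arms)  # P(reward=1) per (bandit, arm)
    def _compute_reward(self, action: int) -> Tuple[int, int]:
        p = self._probs[self.curr_bandit.item(), action]
        reward = int(torch.bernoulli(p).item())
        return reward, 1  # 1 is the best achievable Bernoulli reward
if __name__ == "__main__":
    bandit = _ExampleBernoulliBandit()
    context = bandit.reset()
    for _ in range(5):
        action = int(torch.randint(bandit.arms, (1,)).item())  # random policy
        context, reward = bandit.step(action)
    print(bandit.cum_reward, bandit.cum_regret)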
|
the-stack_0_3292 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, check_consistent_length
from .utils.extmath import safe_sparse_dot
from .utils.fixes import logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB']
class BaseNB(BaseEstimator, ClassifierMixin, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like, shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
var_smoothing : float, optional (default=1e-9)
Portion of the largest variance of all features that is added to
variances for calculation stability.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
epsilon_ : float
absolute additive value to variances
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB(priors=None, var_smoothing=1e-09)
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB(priors=None, var_smoothing=1e-09)
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
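    # Worked numeric check of the merge above (illustration, not sklearn code):
    # past batch [0, 2] gives n_past=2, mu=1, var=1; new batch X=[4, 6] gives
    # n_new=2, new_mu=5, new_var=1.  Then total_mu = (2*5 + 2*1)/4 = 3 and
    # total_ssd = 2*1 + 2*1 + (2/(2*4))*(2*1 - 2*5)**2 = 20, so total_var =
    # 20/4 = 5, matching np.var([0, 2, 4, 6]) computed on the pooled data.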
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, optional (default=False)
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
                # Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
                # Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += self.epsilon_
# Update if only no priors is provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
_ALPHA_MIN = 1e-10
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_) -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
if np.min(self.alpha) < 0:
raise ValueError('Smoothing parameter alpha = %.1e. '
'alpha should be > 0.' % np.min(self.alpha))
if isinstance(self.alpha, np.ndarray):
if not self.alpha.shape[0] == self.feature_count_.shape[1]:
raise ValueError("alpha should be a scalar or a numpy array "
"with shape [n_features]")
if np.min(self.alpha) < _ALPHA_MIN:
warnings.warn('alpha too small will result in numeric errors, '
'setting alpha = %.1e' % _ALPHA_MIN)
return np.maximum(self.alpha, _ALPHA_MIN)
return self.alpha
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes] (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples] (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : array, shape (n_classes, )
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : array, shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
class ComplementNB(BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
fit_prior : boolean, optional (default=True)
Only used in edge case with a single class in the training set.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. Not used.
norm : boolean, optional (default=False)
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical weights for class complements.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_all_ : array, shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB()
>>> clf.fit(X, y)
ComplementNB(alpha=1.0, class_prior=None, fit_prior=True, norm=False)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None,
norm=False):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.norm = norm
def _count(self, X, Y):
"""Count feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# BaseNB.predict uses argmax, but ComplementNB operates with argmin.
feature_log_prob = -logged
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = -feature_log_prob / summed
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse="csr")
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional (default=0.0)
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,], optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
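    # Why the expression above works (illustration): for binary x_i with
    # p_i = P(x_i = 1 | y), the per-class log-likelihood is
    #   sum_i [x_i*log(p_i) + (1 - x_i)*log(1 - p_i)]
    #     = sum_i x_i*(log(p_i) - log(1 - p_i)) + sum_i log(1 - p_i),
    # i.e. X @ (feature_log_prob_ - neg_prob).T + neg_prob.sum(axis=1),
    # which avoids materialising the dense matrix (1 - X).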
|
the-stack_0_3294 | from __future__ import annotations
import typing
from types import TracebackType
import httpx
from ._endpoints_mappings import MappingsEndpoint
from ._endpoints_near_misses import NearMissesEndpoint
from ._endpoints_recordings import RecordingsEndpoint
from ._endpoints_requests import RequestsEndpoint
from ._endpoints_scenarios import ScenariosEndpoint
from ._endpoints_system import SystemEndpoint
from ._exceptions import WiremockConnectionException
from ._exceptions import WiremockForbiddenException
from ._exceptions import WiremockMalformedRequest
from ._exceptions import WiremockNotFoundException
from ._exceptions import WiremockServerException
from ._exceptions import WiremockTimeoutException
from ._response import WiremockResponse
from ._schemas import WiremockSchema
from ._types import TimeoutTypes
from ._types import VerifyTypes
class WiremockClient:
"""
A (synchronous) python client for the wiremock admin API.
The WiremockClient instance is a facade of various wiremock endpoints; to access the endpoints
refer to:
https://wiremock.org/docs/api/
:param host: The host of the running wiremock instance
:param port: The port wiremock is listening on
:param timeout: Configuration for connect, read, write & pool timeouts.
Timeout can be either a tuple of up to length 4; a single float (for all equal timeouts)
or a httpx.Timeout instance.
:param client_verify: configure ssl configurations; False by default and not checking SSL certs.
"""
def __init__(
self,
https: bool = False,
host: str = "localhost",
port: int = 8080,
timeout: TimeoutTypes = 30.00,
client_verify: VerifyTypes = False,
) -> None:
protocol = "http" if not https else "https"
self.host = f"{protocol}://{host}:{port}/__admin/"
self.client = httpx.Client(base_url=self.host, timeout=timeout, verify=client_verify)
self.dispatcher = Dispatcher(self.client, self.host)
self.stubs = MappingsEndpoint(self.dispatcher)
self.requests = RequestsEndpoint(self.dispatcher)
self.near_misses = NearMissesEndpoint(self.dispatcher)
self.recordings = RecordingsEndpoint(self.dispatcher)
self.scenarios = ScenariosEndpoint(self.dispatcher)
self.settings = SystemEndpoint(self.dispatcher)
def __enter__(self) -> WiremockClient:
return self
def __exit__(
self,
exc_type: typing.Optional[typing.Type[BaseException]] = None,
exc_val: typing.Optional[BaseException] = None,
exc_tb: typing.Optional[TracebackType] = None,
):
self.client.close()
def __del__(self) -> None:
self.client.close()
class Dispatcher:
def __init__(self, client: httpx.Client, host: str) -> None:
self.client = client
self.host = host
def __call__( # type: ignore[return]
self,
*,
method: str,
url: str,
payload: typing.Optional[typing.Any] = None,
params: typing.Optional[typing.Dict[str, typing.Any]] = None,
schema: typing.Optional[typing.Type[WiremockSchema]] = None,
schema_kw: typing.Optional[typing.Dict[typing.Any, typing.Any]] = None,
) -> WiremockResponse:
"""Dispatches HTTP requests. We could implement this via __call__ but it should be private."""
if schema is not None:
payload = schema(**schema_kw or {}).dump(payload)
try:
httpx_response = self.client.request(method=method, url=url, json=payload)
print(httpx_response.request.content, httpx_response.request.url)
status = httpx_response.status_code
if status in (200, 201):
# Successfully fetching/creating a resource.
return WiremockResponse(httpx_response)
elif status == 401:
raise WiremockForbiddenException(httpx_response.text, status)
elif status == 404:
raise WiremockNotFoundException(
f"No wiremock instance running, {httpx_response.request.url} not found.", status
)
elif status == 422:
raise WiremockMalformedRequest(httpx_response.text, status)
elif status == 500:
raise WiremockServerException(httpx_response.extensions["reason_phrase"], status)
except httpx.TimeoutException as exc:
raise WiremockTimeoutException(str(exc)) from None
except httpx.ConnectError:
raise WiremockConnectionException(self.host) from None
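def _example_usage() -> None:  # pragma: no cover - hypothetical sketch only
    """Minimal usage sketch; the host/port are assumptions for a local wiremock."""
    with WiremockClient(host="localhost", port=8080, timeout=10.0) as client:
        # The admin API is reached through the endpoint facades created in
        # __init__ (client.stubs, client.requests, client.scenarios, ...);
        # their methods live in the imported *Endpoint classes, not here.
        print(client.host)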
|
the-stack_0_3297 | from abc import ABC
import numpy as np
from numpy.lib.function_base import copy
from scipy.signal import convolve2d
from typing import List
import re
import sys
class Filter(ABC):
def apply(self, data: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class ConvFilter(Filter):
def __init__(self, weights: np.ndarray, bias: float = 0) -> None:
super().__init__()
assert len(weights.shape) == 2
assert weights.shape[0] == weights.shape[1]
self.weights = weights
self.bias = bias
def apply(self, data: np.ndarray) -> np.ndarray:
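        # convolve2d flips the kernel internally, so passing the pre-flipped
        # weights makes this an (unflipped) cross-correlation with
        # self.weights, followed by adding a scalar bias.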
return convolve2d(data, self.weights[::-1,::-1], mode='same') + self.bias
class Activation(ABC):
def apply(self, data: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class ReLUActivation(Activation):
def __init__(self, threshold: float = 0) -> None:
super().__init__()
self.threshold = threshold
def apply(self, data: np.ndarray) -> np.ndarray:
data = data.copy()
data[data < self.threshold] = self.threshold
return data
class ConvUsingProgram:
def __init__(self, filters: List[Filter], activation: Activation) -> None:
self.filters = filters
print('filters:')
print('\n'.join([str(filter.weights) for filter in filters]))
self.activation = activation
def run(self, data: np.ndarray) -> np.ndarray:
states = set()
print('run:')
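        # Iterate until a grid state repeats (the raw array bytes serve as
        # the visited-state key), i.e. until a fixed point or cycle is hit.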
while True:
print(data)
#input('step?')
result = self.step(data)
res_key = result.tobytes()
if res_key in states:
return result
states.add(res_key)
data = result
def step(self, data: np.ndarray) -> np.ndarray:
filter_result = np.array([filter.apply(data) for filter in self.filters])
#print(filter_result)
activation_result = self.activation.apply(filter_result)
#print(activation_result)
return np.sum(activation_result, axis=0)
def parse_program(code: str) -> ConvUsingProgram:
code_lines = _parse_lines(code)
filters = []
i = 0
while i < len(code_lines):
size = len(code_lines[i])
if size == 2:
size = 1
assert size % 2 != 0
weights = np.nan_to_num(np.array(code_lines[i:i+size]), copy=False)
bias = code_lines[i+size][0]
filters.append(ConvFilter(weights, bias=bias))
i += size + 1
activation = ReLUActivation(threshold=0)
return ConvUsingProgram(filters, activation)
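# Example of the text format parse_program() accepts (an assumption inferred
# from the loop above): each filter is an odd-sized square matrix, one row per
# line, followed by a single line holding its bias; lines without any numbers
# are dropped by _parse_lines().  A 3x3 Laplacian-like filter with zero bias:
#
#   0  1  0
#   1 -4  1
#   0  1  0
#   0
#
# parse_data() below reads the input grid with the same numbers-per-line rule.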
def parse_data(data: str) -> np.ndarray:
data_lines = _parse_lines(data)
return np.nan_to_num(np.array(data_lines), copy=False)
def _parse_lines(text: str) -> List[List[float]]:
return [
code_line
for code_line in [
            [float(number) for number in re.findall(r'([+-]?(?:\d+\.?\d*|\.\d+|inf))', line)]
for line in text.splitlines()
]
if len(code_line) > 0
]
if __name__ == '__main__':
with open(sys.argv[1]) as f:
program = parse_program(f.read())
with open(sys.argv[2]) as f:
data = parse_data(f.read())
result = program.run(data)
print('result:')
print(result) |
the-stack_0_3298 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Faithcoin should be started with the command line arguments:
arvcoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if (sys.version_info.major, sys.version_info.minor) < (3, 4):
print("This example only works with Python 3.4 and greater")
sys.exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
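    # The module docstring notes that @asyncio.coroutine / `yield from` are
    # deprecated in favour of async/await; an equivalent sketch of the handler
    # above (illustration only, not wired in) would be:
    #
    #   async def handle(self):
    #       msg = await self.zmqSubSocket.recv_multipart()
    #       ...  # same topic/body dispatch as above
    #       asyncio.ensure_future(self.handle())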
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
|
the-stack_0_3299 | import ms2pip.peptides
class TestModifications:
def test_add_from_ms2pip_modstrings(self):
mods = ms2pip.peptides.Modifications()
mods.add_from_ms2pip_modstrings([
"Oxidation,15.994915,opt,M",
"Acetyl,42.010565,opt,N-term",
"Methyl,14.01565,opt,L",
])
assert mods.modifications['ptm']["Oxidation"]["amino_acid"] == "M"
assert mods.modifications['ptm']["Acetyl"]["mass_shift"] == 42.010565
assert mods.modifications['ptm']["Methyl"]["mass_shift"] == 14.01565
def test_get_mass_shifts(self):
mods = ms2pip.peptides.Modifications()
mods.add_from_ms2pip_modstrings([
"Oxidation,15.994915,opt,M"
])
assert mods.mass_shifts["Oxidation"] == 15.994915
# Test cache clear after adding new modifications
mods.add_from_ms2pip_modstrings([
"Acetyl,42.010565,opt,N-term",
])
assert mods.mass_shifts["Acetyl"] == 42.010565
|
the-stack_0_3301 | # SPDX-FileCopyrightText: 2021 Dylan Herrada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import math
import board
import busio
from digitalio import DigitalInOut
import displayio
from adafruit_display_shapes.rect import Rect
import adafruit_imageload
import adafruit_touchscreen
# ESP32 SPI
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
# Import NeoPixel Library
import neopixel
# Import Adafruit IO HTTP Client
from adafruit_io.adafruit_io import IO_HTTP, AdafruitIO_RequestError
ts = adafruit_touchscreen.Touchscreen(
board.TOUCH_XL,
board.TOUCH_XR,
board.TOUCH_YD,
board.TOUCH_YU,
calibration=((5200, 59000), (5800, 57000)),
size=(480, 320),
)
RED = 0xFF0000
YELLOW = 0xFF9600
ORANGE = 0xFF2800
GREEN = 0x00FF00
TEAL = 0x00FF78
CYAN = 0x00FFFF
BLUE = 0x0000FF
PURPLE = 0xB400FF
MAGENTA = 0xFF0014
WHITE = 0xFFFFFF
BLACK = 0x000000
GOLD = 0xFFDE1E
PINK = 0xF15AFF
AQUA = 0x32FFFF
JADE = 0x00FF28
AMBER = 0xFF6400
"""
colors = [None, None, None, None,
None, None, None, None,
GREEN, ORANGE, YELLOW, RED,
PURPLE, BLUE, CYAN, TEAL,
GOLD, BLACK, WHITE, MAGENTA,
AMBER, JADE, AQUA, PINK]
"""
colors = [
None,
None,
GREEN,
PURPLE,
GOLD,
AMBER,
None,
None,
ORANGE,
BLUE,
BLACK,
JADE,
None,
None,
YELLOW,
CYAN,
WHITE,
AQUA,
None,
None,
RED,
TEAL,
MAGENTA,
PINK,
]
print(colors)
group = displayio.Group()
background, palette = adafruit_imageload.load(
"pyportal_setter.bmp", bitmap=displayio.Bitmap, palette=displayio.Palette
)
tile_grid = displayio.TileGrid(background, pixel_shader=palette)
group.append(tile_grid)
rect = Rect(0, 0, 160, 320, fill=0x000000)
group.append(rect)
print(len(group))
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# PyPortal ESP32 Setup
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Set your Adafruit IO Username and Key in secrets.py
# (visit io.adafruit.com if you need to create an account,
# or if you need your Adafruit IO key.)
ADAFRUIT_IO_USER = secrets["aio_username"]
ADAFRUIT_IO_KEY = secrets["aio_key"]
# Create an instance of the Adafruit IO HTTP client
io = IO_HTTP(ADAFRUIT_IO_USER, ADAFRUIT_IO_KEY, wifi)
try:
# Get the 'temperature' feed from Adafruit IO
neopixel_feed = io.get_feed("neopixel")
except AdafruitIO_RequestError:
neopixel_feed = io.create_new_feed("neopixel")
board.DISPLAY.show(group)
print("ready")
last_color = 257
last_index = 0
while True:
p = ts.touch_point
if p:
x = math.floor(p[0] / 80)
y = math.floor(p[1] / 80)
index = 6 * y + x
# Used to prevent the touchscreen sending incorrect results
if last_index == index:
color = colors[index]
if colors[index]:
group[1].fill = color
if last_color != color:
color_str = "#{:06x}".format(color)
print(color_str)
io.send_data(neopixel_feed["key"], color_str)
last_color = color
last_index = index
time.sleep(0.1)
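    # Worked example of the touch-to-colour mapping above (illustration): the
    # 480x320 screen is a 6x4 grid of 80px cells, so a touch at (250, 170)
    # gives x = 250 // 80 = 3, y = 170 // 80 = 2, index = 6*2 + 3 = 15, i.e.
    # CYAN in the `colors` table; the None cells (the two left-hand columns
    # covered by the preview rectangle) are skipped by `if colors[index]`.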
|
the-stack_0_3302 | from django.urls import reverse
import pytest
from pytest_django.asserts import assertTemplateUsed
from Post.views import get_post_by_query_text
class TestViews:
@pytest.mark.django_db
def test_view_posts_GET(self, client):
response = client.get(reverse('view posts'))
assert response.status_code == 200
assertTemplateUsed(response, 'Post/postList.html')
@pytest.mark.parametrize(
"query_text, expected_output",
[
("Sea", ["Dead Sea", "Sea of Galilee", "Eilat"]),
("beautiful", ["Dead Sea", "Eilat"]),
("nice", ["`En Yorqe`am"]),
("place", ["`En Yorqe`am", "Eilat", "Dead Sea"]),
("Tal aviv", []),
("", ["Dead Sea", "Sea of Galilee", "Eilat", "`En Yorqe`am"]),
],
)
@pytest.mark.django_db
def test_post_exists_after_query(self, query_text, expected_output):
posts = get_post_by_query_text(query_text)
assert all(post.nameOfLocation in expected_output for post in posts)
# assert all(course.location in expected_output for course in courses)
@pytest.mark.django_db
def test_verify_respone_GET(self, client):
response = client.get(reverse('post_list_Search'), {'query_text': 'Galilee'})
posts_not_found = [b'Eilat', b'Dead Sea', b'`En Yorqe`am']
assert response.status_code == 200
assert b'Galilee' in response.content
assert all(post not in response.content for post in posts_not_found)
|
the-stack_0_3305 | import os
import pickle
import time
import torch
from torch.utils.data.dataset import Dataset
from filelock import FileLock
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class TextDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
def __init__(
self,
tokenizer: PreTrainedTokenizer,
file_path: str,
block_size: int,
overwrite_cache=False,
):
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
directory,
"cached_lm_{}_{}_{}".format(
tokenizer.__class__.__name__,
str(block_size),
filename,
),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
logger.info(f"Creating features from dataset file at {directory}")
self.examples = []
with open(file_path, encoding="utf-8") as f:
text = f.read()
tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size
self.examples.append(
tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
)
# Note that we are losing the last truncated example here for the sake of simplicity (no padding)
                # If your dataset is small, first you should look for a bigger one :-) and second you
# can change this behavior by adding (model specific) padding.
start = time.time()
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> torch.Tensor:
return torch.tensor(self.examples[i], dtype=torch.long)
class LineByLineTextDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
# Here, we do not cache the features, operating under the assumption
# that we will soon use fast multithreaded tokenizers from the
# `tokenizers` repo everywhere =)
logger.info("Creating features from dataset file at %s", file_path)
with open(file_path, encoding="utf-8") as f:
lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
self.examples = batch_encoding["input_ids"]
def __len__(self):
return len(self.examples)
def __getitem__(self, i) -> torch.Tensor:
return torch.tensor(self.examples[i], dtype=torch.long)
class TextDatasetForNextSentencePrediction(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
def __init__(
self,
tokenizer: PreTrainedTokenizer,
file_path: str,
block_size: int,
overwrite_cache=False,
):
assert os.path.isfile(file_path), f"Input file path {file_path} not found"
block_size = block_size - tokenizer.num_special_tokens_to_add(pair=True)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
directory,
"cached_nsp_{}_{}_{}".format(
tokenizer.__class__.__name__,
str(block_size),
filename,
),
)
self.tokenizer = tokenizer
self.examples = []
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
#
# Example:
# I am very happy.
# Here is the second sentence.
#
# A new document.
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
logger.info(f"Creating features from dataset file at {directory}")
self.examples = [[]]
with open(file_path, encoding="utf-8") as f:
while True:
line = f.readline()
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line and len(self.examples[-1]) != 0:
self.examples.append([])
tokens = tokenizer.tokenize(line)
tokens = tokenizer.convert_tokens_to_ids(tokens)
if tokens:
self.examples[-1].append(tokens)
start = time.time()
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return self.examples[i]
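# --- Hypothetical usage sketch (not part of the original module) ------------
# The datasets above only need a PreTrainedTokenizer plus a text file; `tok`
# below is assumed to be any already-loaded tokenizer and "train.txt" a plain
# text file with one sentence per line.
#
#   dataset = LineByLineTextDataset(tokenizer=tok, file_path="train.txt",
#                                   block_size=128)
#   print(len(dataset), dataset[0].shape)  # each item is a 1-D LongTensor
#
# TextDataset instead concatenates the whole file and cuts it into block_size
# chunks, caching the result next to the file behind a FileLock.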
|
the-stack_0_3307 | #!/usr/bin/env python3
""" for testing the module awsbuild """
import sys
import logging
from bao_config import AwsConfig
from bao_connector import AwsConnector
from bao_vpc import AwsVPC
from bao_target_group import AwsTargetGroup
def main():
""" main """
my_logfile = 'logs/awsbuild.log'
my_region = 'us-east-1'
#my_vpc = 'vpc-xxx'
my_tag = 'momo-us-east-1'
# setup logging
log_formatter = logging.Formatter("%(asctime)s %(filename)s %(name)s %(levelname)s %(message)s")
root_logger = logging.getLogger()
file_handler = logging.FileHandler(my_logfile)
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
config = AwsConfig(cfgdir='configs',\
cfgfile='target_group.yaml',\
cfgkey='target_groups')
conn = AwsConnector(credentials=config.settings['aws_cfg']['credentials'], region=my_region)
aws_conn = conn.get_all_conn()
if not aws_conn:
print('error AwsConnector\n')
sys.exit(-1)
vpc_conn = AwsVPC(aws_conn=aws_conn, tag=my_tag)
if not vpc_conn:
print('error AwsVPC\n')
sys.exit(-1)
vpc_id = vpc_conn.get_vpc_id()
target_grp_conn = AwsTargetGroup(aws_conn=aws_conn, target_group=config.settings['target_groups'], \
vpc_id=vpc_id, tag='tau-dev' \
)
if not target_grp_conn:
print('error AwsTargetGroup\n')
sys.exit(-1)
target_grp_conn.create()
if __name__ == '__main__':
main()
|
the-stack_0_3308 | #!/usr/bin/env python
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='PLTable',
version='1.0.2',
license='BSD (3 clause)',
description='Python library for easily displaying tabular data in a visually appealing text table format',
long_description=long_description,
long_description_content_type='text/markdown',
author='Luke Maurits',
author_email='[email protected]',
maintainer='Plato Mavropoulos',
url='https://github.com/platomav/PLTable',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Text Processing'
],
) |
the-stack_0_3309 | from flask import Response, request, stream_with_context, abort
from fHDHR.exceptions import TunerError
class Tuner():
endpoints = ['/tuner<tuner_number>/<channel>']
endpoint_name = "tuner"
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, tuner_number, channel, *args):
return self.get(tuner_number, channel, *args)
def get(self, tuner_number, channel, *args):
full_url = request.url
if channel.startswith("v"):
channel_number = channel.replace('v', '')
elif channel.startswith("ch"):
channel_freq = channel.replace('ch', '').split("-")[0]
subchannel = 0
if "-" in channel:
subchannel = channel.replace('ch', '').split("-")[1]
abort(501, "Not Implemented %s-%s" % (str(channel_freq), str(subchannel)))
if channel_number not in list(self.fhdhr.device.channels.list.keys()):
response = Response("Not Found", status=404)
response.headers["X-fHDHR-Error"] = "801 - Unknown Channel"
abort(response)
method = request.args.get('method', default=self.fhdhr.config.dict["fhdhr"]["stream_type"], type=str)
duration = request.args.get('duration', default=0, type=int)
transcode = request.args.get('transcode', default=None, type=str)
valid_transcode_types = [None, "heavy", "mobile", "internet720", "internet480", "internet360", "internet240"]
if transcode not in valid_transcode_types:
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = "802 - Unknown Transcode Profile"
abort(response)
stream_args = {
"channel": channel_number,
"method": method,
"duration": duration,
"transcode": transcode,
"accessed": full_url,
}
try:
tunernum = self.fhdhr.device.tuners.tuner_grab(tuner_number)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
abort(response)
tuner = self.fhdhr.device.tuners.tuners[int(tunernum)]
try:
stream_args = self.fhdhr.device.tuners.get_stream_info(stream_args)
except TunerError as e:
self.fhdhr.logger.info("A %s stream request for channel %s was rejected due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
tuner.close()
abort(response)
self.fhdhr.logger.info("Tuner #" + str(tunernum) + " to be used for stream.")
tuner.set_status(stream_args)
if stream_args["method"] == "direct":
return Response(tuner.get_stream(stream_args, tuner), content_type=stream_args["content_type"], direct_passthrough=True)
elif stream_args["method"] in ["ffmpeg", "vlc"]:
return Response(stream_with_context(tuner.get_stream(stream_args, tuner)), mimetype=stream_args["content_type"])
"""
try:
if stream_args["method"] == "direct":
return Response(tuner.get_stream(stream_args, tuner), content_type=stream_args["content_type"], direct_passthrough=True)
elif stream_args["method"] in ["ffmpeg", "vlc"]:
return Response(stream_with_context(tuner.get_stream(stream_args, tuner)), mimetype=stream_args["content_type"])
except TunerError as e:
tuner.close()
self.fhdhr.logger.info("A %s stream request for channel %s failed due to %s"
% (stream_args["method"], str(stream_args["channel"]), str(e)))
response = Response("Service Unavailable", status=503)
response.headers["X-fHDHR-Error"] = str(e)
abort(response)
"""
|
the-stack_0_3312 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to the `/twist_cmd` message, which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important, or refer to the document for the list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While it is enabled all the time in the simulator, that will not be the case in the
real car. This may cause your PID controller to accumulate error because the car could temporarily
be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle-specific values (like vehicle_mass and
wheel_base) should not be altered in these files.
We have also provided reference implementations for the PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish them on the various publishers
that we have created in the `__init__` function.
'''
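# Rough data flow of this node, summarized from the code below (not part of the
# original project documentation): /current_velocity and /twist_cmd provide the
# current and target velocities, Controller.control() turns them into throttle,
# brake and steering values, and those are published at 50Hz -- but only while
# /vehicle/dbw_enabled reports True.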
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
self.controller = Controller(vehicle_mass=vehicle_mass,
fuel_capacity=fuel_capacity,
brake_deadband=brake_deadband,
decel_limit=decel_limit,
accel_limit=accel_limit,
wheel_radius=wheel_radius,
wheel_base=wheel_base,
steer_ratio=steer_ratio,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
# TODO: Subscribe to all the topics you need to
rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
self.current_vel = None
self.curr_ang_vel = None
self.dbw_enabled = None
self.linear_vel = None
self.angular_vel = None
self.throttle = self.steering = self.brake = None
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd', SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd', ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd', BrakeCmd, queue_size=1)
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
while not rospy.is_shutdown():
# You should only publish the control commands if dbw is enabled
if not None in (self.current_vel, self.linear_vel, self.angular_vel):
self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
self.dbw_enabled, self.linear_vel, self.angular_vel)
if self.dbw_enabled:
self.publish(self.throttle, self.brake, self.steering)
rate.sleep()
def dbw_enabled_cb(self, msg):
# msg type: Bool
self.dbw_enabled = msg
def twist_cb(self, msg):
# msg type: TwistStamped
self.linear_vel = msg.twist.linear.x
self.angular_vel = msg.twist.angular.z
def velocity_cb(self, msg):
# msg type: TwistStamped
self.current_vel = msg.twist.linear.x
self.curr_ang_vel = msg.twist.angular.z
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = True
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
scmd.enable = True
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = True
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
if __name__ == '__main__':
DBWNode()
|
the-stack_0_3313 | #!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import namedtuple
import textwrap
import sys
SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js"
# Generates 2 files. Found by trial and error.
SHARD_SIZE = 97
PREAMBLE = """
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --turbo --no-always-opt
// This test file was generated by tools/gen-inlining-tests.py .
// Global variables
var deopt = undefined; // either true or false
var counter = 0;
function resetState() {
counter = 0;
}
function warmUp(f) {
try {
f();
} catch (ex) {
// ok
}
try {
f();
} catch (ex) {
// ok
}
}
function resetOptAndAssertResultEquals(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
assertEquals(expected, f());
}
function resetOptAndAssertThrowsWith(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
try {
var result = f();
fail("resetOptAndAssertThrowsWith",
"exception: " + expected,
"result: " + result);
} catch (ex) {
assertEquals(expected, ex);
}
}
function increaseAndReturn15() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
function increaseAndThrow42() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
function increaseAndReturn15_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
function increaseAndThrow42_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
// Alternative 1
function returnOrThrow(doReturn) {
if (doReturn) {
return increaseAndReturn15();
} else {
return increaseAndThrow42();
}
}
// Alternative 2
function increaseAndReturn15_calls_noopt() {
return increaseAndReturn15_noopt_inner();
}
function increaseAndThrow42_calls_noopt() {
return increaseAndThrow42_noopt_inner();
}
// Alternative 3.
// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
// as the other one.
function invertFunctionCall(f) {
var result;
try {
result = f();
} catch (ex) {
return ex - 27;
}
throw result + 27;
}
// Alternative 4: constructor
function increaseAndStore15Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 15;
}
function increaseAndThrow42Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 42;
throw this.x;
}
// Alternative 5: property
var magic = {};
Object.defineProperty(magic, 'prop', {
get: function () {
if (deopt) %DeoptimizeFunction(f);
return 15 + 0 * ++counter;
},
set: function(x) {
// argument should be 37
if (deopt) %DeoptimizeFunction(f);
counter -= 36 - x; // increments counter
throw 42;
}
})
// Generate type feedback.
assertEquals(15, increaseAndReturn15_calls_noopt());
assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
assertEquals(15, (new increaseAndStore15Constructor()).x);
assertThrowsEquals(function() {
return (new increaseAndThrow42Constructor()).x;
},
42);
function runThisShard() {
""".strip()
def booltuples(n):
"""booltuples(2) yields 4 tuples: (False, False), (False, True),
(True, False), (True, True)."""
assert isinstance(n, int)
if n <= 0:
yield ()
else:
for initial in booltuples(n-1):
yield initial + (False,)
yield initial + (True,)
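# For reference (an equivalence noted here for clarity; the script itself does
# not rely on it): booltuples(n) yields the same sequence as
# itertools.product((False, True), repeat=n), e.g. booltuples(2) gives
# (False, False), (False, True), (True, False), (True, True).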
def fnname(flags):
assert len(FLAGLETTERS) == len(flags)
return "f_" + ''.join(
FLAGLETTERS[i] if b else '_'
for (i, b) in enumerate(flags))
NUM_TESTS_PRINTED = 0
NUM_TESTS_IN_SHARD = 0
def printtest(flags):
"""Print a test case. Takes a couple of boolean flags, on which the
printed Javascript code depends."""
assert all(isinstance(flag, bool) for flag in flags)
# The alternative flags are in reverse order so that if we take all possible
# tuples, ordered lexicographically from false to true, we get first the
# default, then alternative 1, then 2, etc.
(
alternativeFn5, # use alternative #5 for returning/throwing:
# return/throw using property
alternativeFn4, # use alternative #4 for returning/throwing:
# return/throw using constructor
alternativeFn3, # use alternative #3 for returning/throwing:
# return/throw indirectly, based on function argument
alternativeFn2, # use alternative #2 for returning/throwing:
# return/throw indirectly in unoptimized code,
# no branching
alternativeFn1, # use alternative #1 for returning/throwing:
# return/throw indirectly, based on boolean arg
tryThrows, # in try block, call throwing function
tryReturns, # in try block, call returning function
tryFirstReturns, # in try block, returning goes before throwing
tryResultToLocal, # in try block, result goes to local variable
doCatch, # include catch block
catchReturns, # in catch block, return
catchWithLocal, # in catch block, modify or return the local variable
catchThrows, # in catch block, throw
doFinally, # include finally block
finallyReturns, # in finally block, return local variable
finallyThrows, # in finally block, throw
endReturnLocal, # at very end, return variable local
deopt, # deopt inside inlined function
) = flags
# BASIC RULES
# Only one alternative can be applied at any time.
if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4
+ alternativeFn5 > 1):
return
# In try, return or throw, or both.
if not (tryReturns or tryThrows): return
# Either doCatch or doFinally.
if not doCatch and not doFinally: return
# Catch flags only make sense when catching
if not doCatch and (catchReturns or catchWithLocal or catchThrows):
return
# Finally flags only make sense when finallying
if not doFinally and (finallyReturns or finallyThrows):
return
# tryFirstReturns is only relevant when both tryReturns and tryThrows are
# true.
if tryFirstReturns and not (tryReturns and tryThrows): return
  # From the catch and finally blocks, we can return or throw, but not both.
if catchReturns and catchThrows: return
if finallyReturns and finallyThrows: return
# If at the end we return the local, we need to have touched it.
if endReturnLocal and not (tryResultToLocal or catchWithLocal): return
# PRUNING
anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3,
alternativeFn4, alternativeFn5])
specificAlternative = any([alternativeFn2, alternativeFn3])
rareAlternative = not specificAlternative
# If try returns and throws, then don't catchWithLocal, endReturnLocal, or
# deopt, or do any alternative.
if (tryReturns and tryThrows and
(catchWithLocal or endReturnLocal or deopt or anyAlternative)):
return
# We don't do any alternative if we do a finally.
if doFinally and anyAlternative: return
# We only use the local variable if we do alternative #2 or #3.
if ((tryResultToLocal or catchWithLocal or endReturnLocal) and
not specificAlternative):
return
# We don't need to test deopting into a finally.
if doFinally and deopt: return
# We're only interested in alternative #2 if we have endReturnLocal, no
# catchReturns, and no catchThrows, and deopt.
if (alternativeFn2 and
(not endReturnLocal or catchReturns or catchThrows or not deopt)):
return
# Flag check succeeded.
trueFlagNames = [name for (name, value) in flags._asdict().items() if value]
flagsMsgLine = " // Variant flags: [{}]".format(', '.join(trueFlagNames))
write(textwrap.fill(flagsMsgLine, subsequent_indent=' // '))
write("")
if not anyAlternative:
fragments = {
'increaseAndReturn15': 'increaseAndReturn15()',
'increaseAndThrow42': 'increaseAndThrow42()',
}
elif alternativeFn1:
fragments = {
'increaseAndReturn15': 'returnOrThrow(true)',
'increaseAndThrow42': 'returnOrThrow(false)',
}
elif alternativeFn2:
fragments = {
'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()',
'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()',
}
elif alternativeFn3:
fragments = {
'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)',
'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)',
}
elif alternativeFn4:
fragments = {
'increaseAndReturn15': '(new increaseAndStore15Constructor()).x',
'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x',
}
else:
assert alternativeFn5
fragments = {
'increaseAndReturn15': 'magic.prop /* returns 15 */',
'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)',
}
# As we print code, we also maintain what the result should be. Variable
# {result} can be one of three things:
#
  # - None, indicating returning JS undefined
# - ("return", n) with n an integer
# - ("throw", n), with n an integer
result = None
# We also maintain what the counter should be at the end.
# The counter is reset just before f is called.
counter = 0
write( " f = function {} () {{".format(fnname(flags)))
write( " var local = 888;")
write( " deopt = {};".format("true" if deopt else "false"))
local = 888
write( " try {")
write( " counter++;")
counter += 1
resultTo = "local +=" if tryResultToLocal else "return"
if tryReturns and not (tryThrows and not tryFirstReturns):
write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
if result == None:
counter += 1
if tryResultToLocal:
local += 19
else:
result = ("return", 19)
if tryThrows:
write( " {} 4 + {increaseAndThrow42};".format(resultTo, **fragments))
if result == None:
counter += 1
result = ("throw", 42)
if tryReturns and tryThrows and not tryFirstReturns:
write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
if result == None:
counter += 1
if tryResultToLocal:
local += 19
else:
result = ("return", 19)
write( " counter++;")
if result == None:
counter += 1
if doCatch:
write( " } catch (ex) {")
write( " counter++;")
if isinstance(result, tuple) and result[0] == 'throw':
counter += 1
if catchThrows:
write(" throw 2 + ex;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('throw', 2 + result[1])
elif catchReturns and catchWithLocal:
write(" return 2 + local;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + local)
elif catchReturns and not catchWithLocal:
write(" return 2 + ex;");
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + result[1])
elif catchWithLocal:
write(" local += ex;");
if isinstance(result, tuple) and result[0] == "throw":
local += result[1]
result = None
counter += 1
else:
if isinstance(result, tuple) and result[0] == "throw":
result = None
counter += 1
write( " counter++;")
if doFinally:
write( " } finally {")
write( " counter++;")
counter += 1
if finallyThrows:
write(" throw 25;")
result = ('throw', 25)
elif finallyReturns:
write(" return 3 + local;")
result = ('return', 3 + local)
elif not finallyReturns and not finallyThrows:
write(" local += 2;")
local += 2
counter += 1
else: assert False # unreachable
write( " counter++;")
write( " }")
write( " counter++;")
if result == None:
counter += 1
if endReturnLocal:
write( " return 5 + local;")
if result == None:
result = ('return', 5 + local)
write( " }")
if result == None:
write( " resetOptAndAssertResultEquals(undefined, f);")
else:
tag, value = result
if tag == "return":
write( " resetOptAndAssertResultEquals({}, f);".format(value))
else:
assert tag == "throw"
write( " resetOptAndAssertThrowsWith({}, f);".format(value))
write( " assertEquals({}, counter);".format(counter))
write( "")
global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD
NUM_TESTS_PRINTED += 1
NUM_TESTS_IN_SHARD += 1
FILE = None # to be initialised to an open file
SHARD_NUM = 1
def write(*args):
return print(*args, file=FILE)
def rotateshard():
global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE
if MODE != 'shard':
return
if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE:
return
if FILE != None:
finishshard()
assert FILE == None
FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w')
write_shard_header()
NUM_TESTS_IN_SHARD = 0
def finishshard():
global FILE, SHARD_NUM, MODE
assert FILE
write_shard_footer()
if MODE == 'shard':
print("Wrote shard {}.".format(SHARD_NUM))
FILE.close()
FILE = None
SHARD_NUM += 1
def write_shard_header():
if MODE == 'shard':
write("// Shard {}.".format(SHARD_NUM))
write("")
write(PREAMBLE)
write("")
def write_shard_footer():
write("}")
write("%NeverOptimizeFunction(runThisShard);")
write("")
write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD))
write("// {} tests up to here.".format(NUM_TESTS_PRINTED))
write("")
write("runThisShard();")
FLAGLETTERS="54321trflcrltfrtld"
flagtuple = namedtuple('flagtuple', (
"alternativeFn5",
"alternativeFn4",
"alternativeFn3",
"alternativeFn2",
"alternativeFn1",
"tryThrows",
"tryReturns",
"tryFirstReturns",
"tryResultToLocal",
"doCatch",
"catchReturns",
"catchWithLocal",
"catchThrows",
"doFinally",
"finallyReturns",
"finallyThrows",
"endReturnLocal",
"deopt"
))
emptyflags = flagtuple(*((False,) * len(flagtuple._fields)))
f1 = emptyflags._replace(tryReturns=True, doCatch=True)
# You can test function printtest with f1.
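# For example (illustrating the comment above), calling
#   printtest(f1)
# emits a single generated test whose function name encodes the two set flags
# ('r' for tryReturns, 'c' for doCatch); output goes through write(), i.e. to
# FILE (or to stdout while FILE is still None).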
allFlagCombinations = [
flagtuple(*bools)
for bools in booltuples(len(flagtuple._fields))
]
if __name__ == '__main__':
global MODE
if sys.argv[1:] == []:
MODE = 'stdout'
print("// Printing all shards together to stdout.")
print("")
write_shard_header()
FILE = sys.stdout
elif sys.argv[1:] == ['--shard-and-overwrite']:
MODE = 'shard'
else:
print("Usage:")
print("")
print(" python {}".format(sys.argv[0]))
print(" print all tests to standard output")
print(" python {} --shard-and-overwrite".format(sys.argv[0]))
print(" print all tests to {}".format(SHARD_FILENAME_TEMPLATE))
print("")
print(sys.argv[1:])
print("")
sys.exit(1)
rotateshard()
for flags in allFlagCombinations:
printtest(flags)
rotateshard()
finishshard()
if MODE == 'shard':
print("Total: {} tests.".format(NUM_TESTS_PRINTED))
|
the-stack_0_3314 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lint changed code in current branch."""
from __future__ import print_function
import os
import sys
import yaml
from local.butler import appengine
from local.butler import common
_GOLINT_EXCEPTIONS = [
'types.go' # Not all model names conform to Go naming conventions.
]
_LICENSE_CHECK_FILENAMES = ['Dockerfile']
_LICENSE_CHECK_EXTENSIONS = [
'.bash',
'.c',
'.cc',
'.cpp',
'.css',
'.h',
'.htm',
'.html',
'.js',
'.go',
'.proto',
'.ps1',
'.py',
'.sh',
'.yaml',
]
_LICENSE_CHECK_IGNORE_FILENAMES = ['technology.css']
_LICENSE_CHECK_IGNORE_DIRECTORIES = [
'third_party',
'templates', # Generated code.
]
_LICENSE_CHECK_STRING = 'http://www.apache.org/licenses/LICENSE-2.0'
_PY_TEST_SUFFIX = '_test.py'
_PY_INIT_FILENAME = '__init__.py'
_YAML_EXCEPTIONS = ['bad.yaml']
_error_occurred = False
def _error(message=None):
"""Print error and track state via a global."""
if message:
print(message)
global _error_occurred
_error_occurred = True
def _execute_command_and_track_error(command):
"""Executes command, tracks error state."""
returncode, output = common.execute(command, exit_on_error=False)
if returncode != 0:
_error()
return output
def license_validate(file_path):
"""Run license header validation."""
filename = os.path.basename(file_path)
extension = os.path.splitext(file_path)[1]
if (filename not in _LICENSE_CHECK_FILENAMES and
extension not in _LICENSE_CHECK_EXTENSIONS):
return
path_directories = file_path.split(os.sep)
if any(d in _LICENSE_CHECK_IGNORE_DIRECTORIES for d in path_directories):
return
source_filename = os.path.basename(file_path)
if source_filename in _LICENSE_CHECK_IGNORE_FILENAMES:
return
with open(file_path) as f:
if _LICENSE_CHECK_STRING in f.read():
return
_error('Failed: Missing license header for %s.' % file_path)
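# Illustration of the filtering above (hypothetical path, for clarity only): a
# file such as src/python/bot/tasks/task.py has a checked extension and so must
# contain the Apache license URL, while anything under a third_party or
# templates directory, or a name in _LICENSE_CHECK_IGNORE_FILENAMES, is skipped.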
def py_import_order(file_path):
"""Validate that python imports are alphabetized."""
def _validate_block(import_block):
"""Ensure that a single block is ordered properly."""
if not import_block:
return []
sorted_import_block = sorted(import_block, key=lambda i: i.lower())
if sorted_import_block == import_block:
return []
return ['\n'.join(sorted_import_block)]
with open(file_path) as f:
file_content = f.read()
imports = []
from_imports = []
corrected_import_blocks = []
for line in file_content.splitlines():
if line.startswith('import '):
imports.append(line)
else:
corrected_import_blocks += _validate_block(imports)
imports = []
if line.startswith('from '):
from_imports.append(line)
else:
corrected_import_blocks += _validate_block(from_imports)
from_imports = []
# Though rare, if a file ends with an import we must still validate them.
corrected_import_blocks += _validate_block(imports)
corrected_import_blocks += _validate_block(from_imports)
if not corrected_import_blocks:
return
suggestions = '\n\n--------\n\n'.join(corrected_import_blocks)
_error(('Failed: File {filename} has non-alphabetized import blocks. '
'Suggested order:\n\n{suggestions}').format(
filename=file_path, suggestions=suggestions))
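# Small illustration of the check above (hypothetical input): the block
# ['import sys', 'import os'] is not sorted case-insensitively, so
# _validate_block() returns ['import os\nimport sys'] as the suggested order,
# whereas an already-sorted block yields [].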
def py_test_init_check(file_path):
"""Check test directory has a __init__.py file. Otherwise, the test does not
execute at all."""
if not file_path.endswith(_PY_TEST_SUFFIX):
return
test_directory = os.path.dirname(file_path)
if _PY_INIT_FILENAME not in os.listdir(test_directory):
_error('Failed: Missing {filename} file in test directory {dir}.'.format(
filename=_PY_INIT_FILENAME, dir=test_directory))
def yaml_validate(file_path):
"""Run yaml validation."""
if os.path.basename(file_path) in _YAML_EXCEPTIONS:
return
try:
with open(file_path) as f:
yaml.safe_load(f.read())
except Exception as e:
_error('Failed: Invalid yaml file %s.\n\n%s' % (file_path, e))
def execute(_):
"""Lint changed code."""
pythonpath = os.getenv('PYTHONPATH', '')
os.environ['PYTHONPATH'] = appengine.find_sdk_path() + ':' + pythonpath
if 'GOOGLE_CLOUDBUILD' in os.environ:
# Explicitly compare against master if we're running on the CI
_, output = common.execute('git diff --name-only master FETCH_HEAD')
elif 'TRAVIS_BRANCH' in os.environ:
_, output = common.execute(
'git diff --name-only HEAD $(git merge-base HEAD FETCH_HEAD)')
else:
_, output = common.execute('git diff --name-only FETCH_HEAD')
file_paths = [f for f in output.splitlines() if os.path.exists(f)]
py_changed_file_paths = [
f for f in file_paths if f.endswith('.py') and
# Exclude auto-generated files.
not f.endswith('_pb2.py') and not f.endswith('_pb2_grpc.py')
]
go_changed_file_paths = [f for f in file_paths if f.endswith('.go')]
yaml_changed_file_paths = [f for f in file_paths if f.endswith('.yaml')]
for file_path in py_changed_file_paths:
_execute_command_and_track_error('pylint ' + file_path)
_execute_command_and_track_error('yapf -d ' + file_path)
py_import_order(file_path)
py_test_init_check(file_path)
golint_path = os.path.join('local', 'bin', 'golint')
for file_path in go_changed_file_paths:
if not os.path.basename(file_path) in _GOLINT_EXCEPTIONS:
_execute_command_and_track_error(golint_path + ' ' + file_path)
output = _execute_command_and_track_error('gofmt -d ' + file_path)
if output.strip():
_error()
for file_path in yaml_changed_file_paths:
yaml_validate(file_path)
for file_path in file_paths:
license_validate(file_path)
if _error_occurred:
print('Linting failed, see errors above.')
sys.exit(1)
else:
print('Linting passed.')
|
the-stack_0_3316 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The restores api."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from webob import exc
from karbor.api import common
from karbor.api.openstack import wsgi
from karbor.api.schemas import restores as restore_schema
from karbor.api import validation
from karbor.common import constants
from karbor.common import notification
from karbor.common.notification import StartNotification
from karbor import exception
from karbor.i18n import _
from karbor import objects
from karbor.objects import base as objects_base
from karbor.policies import restores as restore_policy
from karbor.services.protection import api as protection_api
from karbor import utils
import six
query_restore_filters_opt = cfg.ListOpt(
'query_restore_filters',
default=['status'],
help="Restore filter options which "
"non-admin user could use to "
"query restores. Default values "
"are: ['status']")
CONF = cfg.CONF
CONF.register_opt(query_restore_filters_opt)
LOG = logging.getLogger(__name__)
class RestoreViewBuilder(common.ViewBuilder):
"""Model a server API response as a python dictionary."""
_collection_name = "restores"
def detail(self, request, restore):
"""Detailed view of a single restore."""
restore_ref = {
'restore': {
'id': restore.get('id'),
'project_id': restore.get('project_id'),
'provider_id': restore.get('provider_id'),
'checkpoint_id': restore.get('checkpoint_id'),
'restore_target': restore.get('restore_target'),
'parameters': restore.get('parameters'),
'status': restore.get('status'),
'resources_status': restore.get('resources_status'),
'resources_reason': restore.get('resources_reason'),
}
}
return restore_ref
def detail_list(self, request, restores, restore_count=None):
"""Detailed view of a list of restores."""
return self._list_view(self.detail, request, restores,
restore_count,
self._collection_name)
def _list_view(self, func, request, restores, restore_count,
coll_name=_collection_name):
"""Provide a view for a list of restores.
:param func: Function used to format the restore data
:param request: API request
:param restores: List of restores in dictionary format
:param restore_count: Length of the original list of restores
:param coll_name: Name of collection, used to generate the next link
for a pagination query
:returns: restore data in dictionary format
"""
restores_list = [func(request, restore)['restore']
for restore in restores]
restores_links = self._get_collection_links(request,
restores,
coll_name,
restore_count)
restores_dict = {
'restores': restores_list
}
if restores_links:
restores_dict['restores_links'] = restores_links
return restores_dict
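        # Sketch of the returned structure, based on the code above (field
        # contents depend on the view builder and the pagination parameters):
        #   {'restores': [{'id': ..., 'status': ..., ...}, ...],
        #    'restores_links': [...]}  # links key present only when a next
        #                              # page link is generated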
class RestoresController(wsgi.Controller):
"""The Restores API controller for the OpenStack API."""
_view_builder_class = RestoreViewBuilder
def __init__(self):
self.protection_api = protection_api.API()
super(RestoresController, self).__init__()
def show(self, req, id):
"""Return data about the given restore."""
context = req.environ['karbor.context']
LOG.info("Show restore with id: %s", id, context=context)
if not uuidutils.is_uuid_like(id):
msg = _("Invalid restore id provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
restore = self._restore_get(context, id)
except exception.RestoreNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
LOG.info("Show restore request issued successfully.",
resource={'id': restore.id})
return self._view_builder.detail(req, restore)
def index(self, req):
"""Returns a list of restores, transformed through view builder."""
context = req.environ['karbor.context']
LOG.info("Show restore list", context=context)
params = req.params.copy()
marker, limit, offset = common.get_pagination_params(params)
sort_keys, sort_dirs = common.get_sort_params(params)
filters = params
utils.remove_invalid_filter_options(
context,
filters,
self._get_restore_filter_options())
utils.check_filters(filters)
restores = self._get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
retval_restores = self._view_builder.detail_list(req, restores)
LOG.info("Show restore list request issued successfully.")
return retval_restores
def _get_all(self, context, marker=None, limit=None, sort_keys=None,
sort_dirs=None, filters=None, offset=None):
context.can(restore_policy.GET_ALL_POLICY)
if filters is None:
filters = {}
all_tenants = utils.get_bool_param('all_tenants', filters)
if filters:
LOG.debug("Searching by: %s.", six.text_type(filters))
if context.is_admin and all_tenants:
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
restores = objects.RestoreList.get_all(
context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters,
offset=offset)
else:
restores = objects.RestoreList.get_all_by_project(
context, context.project_id, marker, limit,
sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters,
offset=offset)
LOG.info("Get all restores completed successfully.")
return restores
def _get_restore_filter_options(self):
"""Return restores search options allowed by non-admin."""
return CONF.query_restore_filters
@validation.schema(restore_schema.create)
def create(self, req, body):
"""Creates a new restore."""
LOG.debug('Create restore request body: %s', body)
context = req.environ['karbor.context']
context.can(restore_policy.CREATE_POLICY)
context.notification = notification.KarborRestoreCreate(
context, request=req)
restore = body['restore']
LOG.debug('Create restore request : %s', restore)
parameters = restore.get("parameters")
restore_auth = restore.get("restore_auth", None)
restore_properties = {
'project_id': context.project_id,
'provider_id': restore.get('provider_id'),
'checkpoint_id': restore.get('checkpoint_id'),
'restore_target': restore.get('restore_target'),
'parameters': parameters,
'status': constants.RESTORE_STATUS_IN_PROGRESS,
}
restoreobj = objects.Restore(context=context,
**restore_properties)
restoreobj.create()
LOG.debug('call restore RPC : restoreobj:%s', restoreobj)
# call restore rpc API of protection service
try:
with StartNotification(context, parameters=parameters):
self.protection_api.restore(context, restoreobj, restore_auth)
except exception.AccessCheckpointNotAllowed as error:
raise exc.HTTPForbidden(explanation=error.msg)
except Exception:
# update the status of restore
update_dict = {
"status": constants.RESTORE_STATUS_FAILURE
}
context.can(restore_policy.UPDATE_POLICY, restoreobj)
restoreobj = self._restore_update(context,
restoreobj.get("id"),
update_dict)
retval = self._view_builder.detail(req, restoreobj)
return retval
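        # For illustration only -- a request body accepted by create() would look
        # roughly like the following (key names follow the code above; all values
        # are hypothetical):
        #   {"restore": {"provider_id": "<uuid>", "checkpoint_id": "<uuid>",
        #                "restore_target": "<target endpoint>",
        #                "parameters": {...}, "restore_auth": {...}}}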
def _restore_get(self, context, restore_id):
if not uuidutils.is_uuid_like(restore_id):
msg = _("Invalid restore id provided.")
raise exc.HTTPBadRequest(explanation=msg)
restore = objects.Restore.get_by_id(context, restore_id)
try:
context.can(restore_policy.GET_POLICY, restore)
except exception.PolicyNotAuthorized:
# raise RestoreNotFound instead to make sure karbor behaves
# as it used to
raise exception.RestoreNotFound(restore_id=restore_id)
LOG.info("Restore info retrieved successfully.")
return restore
def _restore_update(self, context, restore_id, fields):
try:
restore = self._restore_get(context, restore_id)
except exception.RestoreNotFound as error:
raise exc.HTTPNotFound(explanation=error.msg)
if isinstance(restore, objects_base.KarborObject):
restore.update(fields)
restore.save()
LOG.info("restore updated successfully.")
return restore
else:
msg = _("The parameter restore must be a object of "
"KarborObject class.")
raise exception.InvalidInput(reason=msg)
def create_resource():
return wsgi.Resource(RestoresController())
|
the-stack_0_3318 | ''' Header reading / writing functions for nifti1 image format
Author: Matthew Brett
'''
import numpy as np
import numpy.linalg as npl
from nifti.volumeutils import Recoder, make_dt_codes, \
HeaderDataError, HeaderTypeError, allopen
from nifti.batteryrunners import Report
from nifti.quaternions import fillpositive, quat2mat, mat2quat
from nifti import analyze # module import
from nifti.spm99analyze import SpmAnalyzeHeader
from nifti import filetuples # module import
from nifti.spatialimages import SpatialImage
from nifti.header_ufuncs import write_data, adapt_header
# nifti1 flat header definition for Analyze-like first 348 bytes
# first number in comments indicates offset in file header in bytes
header_dtd = [
('sizeof_hdr', 'i4'), # 0; must be 348
('data_type', 'S10'), # 4; unused
('db_name', 'S18'), # 14; unused
('extents', 'i4'), # 32; unused
('session_error', 'i2'), # 36; unused
('regular', 'S1'), # 38; unused
('dim_info', 'u1'), # 39; MRI slice ordering code
('dim', 'i2', 8), # 40; data array dimensions
('intent_p1', 'f4'), # 56; first intent parameter
('intent_p2', 'f4'), # 60; second intent parameter
('intent_p3', 'f4'), # 64; third intent parameter
('intent_code', 'i2'),# 68; NIFTI intent code
('datatype', 'i2'), # 70; it's the datatype
('bitpix', 'i2'), # 72; number of bits per voxel
('slice_start', 'i2'),# 74; first slice index
('pixdim', 'f4', 8), # 76; grid spacings (units below)
('vox_offset', 'f4'), # 108; offset to data in image file
('scl_slope', 'f4'), # 112; data scaling slope
('scl_inter', 'f4'), # 116; data scaling intercept
('slice_end', 'i2'), # 120; last slice index
('slice_code', 'u1'), # 122; slice timing order
    ('xyzt_units', 'u1'), # 123; units of pixdim[1..4]
('cal_max', 'f4'), # 124; max display intensity
('cal_min', 'f4'), # 128; min display intensity
('slice_duration', 'f4'), # 132; time for 1 slice
('toffset', 'f4'), # 136; time axis shift
('glmax', 'i4'), # 140; unused
('glmin', 'i4'), # 144; unused
('descrip', 'S80'), # 148; any text
('aux_file', 'S24'), # 228; auxiliary filename
('qform_code', 'i2'), # 252; xform code
('sform_code', 'i2'), # 254; xform code
('quatern_b', 'f4'), # 256; quaternion b param
('quatern_c', 'f4'), # 260; quaternion c param
('quatern_d', 'f4'), # 264; quaternion d param
('qoffset_x', 'f4'), # 268; quaternion x shift
('qoffset_y', 'f4'), # 272; quaternion y shift
('qoffset_z', 'f4'), # 276; quaternion z shift
('srow_x', 'f4', 4), # 280; 1st row affine transform
('srow_y', 'f4', 4), # 296; 2nd row affine transform
('srow_z', 'f4', 4), # 312; 3rd row affine transform
('intent_name', 'S16'), # 328; name or meaning of data
('magic', 'S4') # 344; must be 'ni1\0' or 'n+1\0'
]
# Full header numpy dtype
header_dtype = np.dtype(header_dtd)
# datatypes not in analyze format, with codes
try:
_float128t = np.float128
except AttributeError:
_float128t = np.void
try:
_complex256t = np.complex256
except AttributeError:
_complex256t = np.void
_added_dtdefs = ( # code, label, dtype definition
(256, 'int8', np.int8),
(512, 'uint16', np.uint16),
(768, 'uint32', np.uint32),
(1024,'int64', np.int64),
    (1280, 'uint64', np.uint64),
(1536, 'float128', _float128t), # Only numpy defined on 64 bit
(1792, 'complex128', np.complex128),
(2048, 'complex256', _complex256t), # 64 bit again
(2304, 'RGBA', np.dtype([('R','u1'),
('G', 'u1'),
('B', 'u1'),
('A', 'u1')]))
)
# Make full code alias bank, including dtype column
data_type_codes = make_dt_codes(analyze._dtdefs + _added_dtdefs)
# Transform (qform, sform) codes
xform_codes = Recoder(( # code, label
(0, 'unknown'), # Code for transform unknown or absent
(1, 'scanner'),
(2, 'aligned'),
(3, 'talairach'),
(4, 'mni')), fields=('code', 'label'))
# unit codes
unit_codes = Recoder(( # code, label
(0, 'unknown'),
(1, 'meter'),
(2, 'mm'),
(3, 'micron'),
(8, 'sec'),
(16, 'msec'),
(24, 'usec'),
(32, 'hz'),
(40, 'ppm'),
(48, 'rads')), fields=('code', 'label'))
slice_order_codes = Recoder(( # code, label
(0, 'unknown'),
(1, 'sequential increasing', 'seq inc'),
(2, 'sequential decreasing', 'seq dec'),
(3, 'alternating increasing', 'alt inc'),
(4, 'alternating decreasing', 'alt dec'),
(5, 'alternating increasing 2', 'alt inc 2'),
(6, 'alternating decreasing 2', 'alt dec 2')),
fields=('code', 'label'))
intent_codes = Recoder((
# code, label, parameters description tuple
(0, 'none', ()),
(2, 'correlation',('p1 = DOF',)),
(3, 't test', ('p1 = DOF',)),
(4, 'f test', ('p1 = numerator DOF', 'p2 = denominator DOF')),
(5, 'z score', ()),
(6, 'chi2', ('p1 = DOF',)),
(7, 'beta', ('p1=a', 'p2=b')), # two parameter beta distribution
(8, 'binomial', ('p1 = number of trials', 'p2 = probability per trial')),
# Prob(x) = (p1 choose x) * p2^x * (1-p2)^(p1-x), for x=0,1,...,p1
(9, 'gamma', ('p1 = shape, p2 = scale', 2)), # 2 parameter gamma
(10, 'poisson', ('p1 = mean',)), # Density(x) proportional to x^(p1-1) * exp(-p2*x)
(11, 'normal', ('p1 = mean', 'p2 = standard deviation',)),
(12, 'non central f test', ('p1 = numerator DOF',
'p2 = denominator DOF',
'p3 = numerator noncentrality parameter',)),
(13, 'non central chi2', ('p1 = DOF', 'p2 = noncentrality parameter',)),
(14, 'logistic', ('p1 = location', 'p2 = scale',)),
(15, 'laplace', ('p1 = location', 'p2 = scale')),
(16, 'uniform', ('p1 = lower end', 'p2 = upper end')),
(17, 'non central t test', ('p1 = DOF', 'p2 = noncentrality parameter')),
(18, 'weibull', ('p1 = location', 'p2 = scale, p3 = power')),
(19, 'chi', ('p1 = DOF',)),
# p1 = 1 = 'half normal' distribution
# p1 = 2 = Rayleigh distribution
    # p1 = 3 = Maxwell-Boltzmann distribution.
    (20, 'inverse gaussian', ('p1 = mu', 'p2 = lambda')),
(21, 'extreme value 1', ('p1 = location', 'p2 = scale')),
(22, 'p value', ()),
(23, 'log p value', ()),
(24, 'log10 p value', ()),
(1001, 'estimate', ()),
(1002, 'label', ()),
(1003, 'neuroname', ()),
(1004, 'general matrix', ('p1 = M', 'p2 = N')),
(1005, 'symmetric matrix', ('p1 = M',)),
(1006, 'displacement vector', ()),
(1007, 'vector', ()),
    (1008, 'pointset', ()),
(1009, 'triangle', ()),
(1010, 'quaternion', ()),
(1011, 'dimensionless', ()),
(2001, 'time series', ()),
(2002, 'node index', ()),
(2003, 'rgb vector', ()),
(2004, 'rgba vector', ()),
(2005, 'shape', ())),
fields=('code', 'label', 'parameters'))
class Nifti1Extension(object):
"""Baseclass for NIfTI1 header extensions.
This class is sufficient to handle very simple text-based extensions, such
as `comment`. More sophisticated extensions should/will be supported by
dedicated subclasses.
"""
def __init__(self, code, content):
"""
Parameters
----------
code : int|str
Canonical extension code as defined in the NIfTI standard, given
either as integer or corresponding label
(see :data:`~nifti.nifti1.extension_codes`)
content : str
Extension content as read from the NIfTI file header. This content is
converted into a runtime representation.
"""
try:
self._code = extension_codes.code[code]
except KeyError:
# XXX or fail or at least complain?
self._code = code
self._content = self._unmangle(content)
def _unmangle(self, value):
"""Convert the extension content into its runtime representation.
The default implementation does nothing at all.
Parameters
----------
value : str
Extension content as read from file.
Returns
-------
The same object that was passed as `value`.
Notes
-----
Subclasses should reimplement this method to provide the desired
unmangling procedure and may return any type of object.
"""
return value
def _mangle(self, value):
"""Convert the extension content into NIfTI file header representation.
The default implementation does nothing at all.
Parameters
----------
value : str
Extension content in runtime form.
Returns
-------
str
Notes
-----
Subclasses should reimplement this method to provide the desired
mangling procedure.
"""
return value
def get_code(self):
"""Return the canonical extension type code."""
return self._code
def get_content(self):
"""Return the extension content in its runtime representation."""
return self._content
def get_sizeondisk(self):
"""Return the size of the extension in the NIfTI file.
"""
# need raw value size plus 8 bytes for esize and ecode
size = len(self._mangle(self._content))
size += 8
# extensions size has to be a multiple of 16 bytes
size += 16 - (size % 16)
return size
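        # Worked example of the padding above: 5 bytes of content gives
        # 5 + 8 = 13, padded up to 16. Note that a size already on a 16-byte
        # boundary (e.g. 8 + 8 = 16) still gets a further 16 bytes added,
        # because ``16 - (size % 16)`` evaluates to 16 in that case.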
def __repr__(self):
try:
code = extension_codes.label[self._code]
except KeyError:
# deal with unknown codes
code = self._code
s = "Nifti1Extension('%s', '%s')" % (code, self._content)
return s
def __eq__(self, other):
if self._code != other._code \
or self._content != other._content:
return False
else:
return True
def write_to(self, fileobj):
''' Write header extensions to fileobj
Write starts at fileobj current file position.
Parameters
----------
fileobj : file-like object
Should implement ``write`` method
Returns
-------
None
'''
extstart = fileobj.tell()
rawsize = self.get_sizeondisk()
# write esize and ecode first
fileobj.write(np.array((rawsize, self._code),
dtype=np.int32).tostring())
# followed by the actual extension content
# XXX if mangling upon load is implemented, it should be reverted here
fileobj.write(self._mangle(self._content))
# be nice and zero out remaining part of the extension till the
# next 16 byte border
fileobj.write('\x00' * (extstart + rawsize - fileobj.tell()))
# NIfTI header extension type codes (ECODE)
# see nifti1_io.h for a complete list of all known extensions and
# references to their description or contacts of the respective
# initiators
extension_codes = Recoder((
(0, "ignore", Nifti1Extension),
(2, "dicom", Nifti1Extension),
(4, "afni", Nifti1Extension),
(6, "comment", Nifti1Extension),
(8, "xcede", Nifti1Extension),
(10, "jimdiminfo", Nifti1Extension),
(12, "workflow_fwds", Nifti1Extension),
(14, "freesurfer", Nifti1Extension),
(16, "pypickle", Nifti1Extension)
),
fields=('code', 'label', 'handler'))
class Nifti1Extensions(list):
"""Simple extension collection, implemented as a list-subclass.
"""
def count(self, ecode):
"""Returns the number of extensions matching a given *ecode*.
        Parameters
        ----------
        ecode : int | str
          The ecode can be specified either as a literal or as a numerical value.
"""
count = 0
code = extension_codes.code[ecode]
for e in self:
if e.get_code() == code:
count += 1
return count
def get_codes(self):
"""Return a list of the extension code of all available extensions"""
return [e.get_code() for e in self]
def get_sizeondisk(self):
"""Return the size of the complete header extensions in the NIfTI file.
"""
# add four bytes for the NIfTI extension flag!
return np.sum([e.get_sizeondisk() for e in self]) + 4
def __repr__(self):
s = "Nifti1Extensions(%s)" \
% ', '.join([str(e) for e in self])
return s
def __eq__(self, other):
for i, e in enumerate(self):
if not e == other[i]:
return False
return True
def write_to(self, fileobj):
''' Write header extensions to fileobj
Write starts at fileobj current file position.
Parameters
----------
fileobj : file-like object
Should implement ``write`` method
Returns
-------
None
'''
        # no extensions -> nothing to do
if not len(self):
return
# since we have extensions write the appropriate flag
fileobj.write(np.array((1,0,0,0), dtype=np.int8).tostring())
# and now each extension
for e in self:
e.write_to(fileobj)
@classmethod
def from_fileobj(klass, fileobj, size):
'''Read header extensions from a fileobj
Parameters
----------
fileobj : file-like object
It is assumed to be positions right after the NIfTI magic field.
size : int
Number of bytes to read. If negative, fileobj will be read till its
end.
Returns
-------
        An extension list. This list might be empty in case no extensions
were present in fileobj.
'''
# make empty extension list
extensions = klass()
# assume the fileptr is just after header (magic field)
# try reading the next 4 bytes after the initial header
extension_status = fileobj.read(4)
if not len(extension_status):
            # if there is nothing, the NIfTI standard requires us to assume zeros
extension_status = np.zeros((4,), dtype=np.int8)
else:
extension_status = np.fromstring(extension_status, dtype=np.int8)
# NIfTI1 says: if first element is non-zero there are extensions present
# if not there is nothing left to do
if not extension_status[0]:
return extensions
# note that we read the extension flag
if not size < 0:
size = size - 4
# read until the whole header is parsed (each extension is a multiple
# of 16 bytes) or in case of a separate header file till the end
# (break inside the body)
# XXX not sure if the separate header behavior is sane
while size >= 16 or size < 0:
# the next 8 bytes should have esize and ecode
ext_def = fileobj.read(8)
# nothing was read and instructed to read till the end
            # -> assume all extensions were parsed and break
if not len(ext_def) and size < 0:
break
# otherwise there should be a full extension header
if not len(ext_def) == 8:
raise HeaderDataError('failed to read extension header')
ext_def = np.fromstring(ext_def, dtype=np.int32)
# be extra verbose
ecode = ext_def[1]
esize = ext_def[0]
if esize % 16:
raise HeaderDataError(
'extension size is not a multiple of 16 bytes')
# read extension itself; esize includes the 8 bytes already read
evalue = fileobj.read(esize - 8)
if not len(evalue) == esize - 8:
raise HeaderDataError('failed to read extension content')
# note that we read a full extension
size -= esize
# store raw extension content, but strip trailing NULL chars
evalue = evalue.rstrip('\x00')
# 'extension_codes' also knows the best implementation to handle
# a particular extension type
try:
ext = extension_codes.handler[ecode](ecode, evalue)
except KeyError:
# unknown extension type
# XXX complain or fail or go with a generic extension
ext = Nifti1Extension(ecode, evalue)
extensions.append(ext)
return extensions
class Nifti1Header(SpmAnalyzeHeader):
''' Class for NIFTI1 header '''
# Copies of module level definitions
_dtype = header_dtype
_data_type_codes = data_type_codes
_xform_codes = xform_codes
_unit_codes = unit_codes
_intent_codes = intent_codes
_slice_order_codes = slice_order_codes
# data scaling capabilities
has_data_slope = True
has_data_intercept = True
def get_best_affine(self):
''' Select best of available transforms '''
hdr = self._header_data
if hdr['sform_code']:
return self.get_sform()
if hdr['qform_code']:
return self.get_qform()
return self.get_base_affine()
def _empty_headerdata(self, endianness=None):
''' Create empty header binary block with given endianness '''
hdr_data = analyze.AnalyzeHeader._empty_headerdata(self, endianness)
hdr_data['scl_slope'] = 1
hdr_data['magic'] = 'n+1'
hdr_data['vox_offset'] = 352
return hdr_data
def get_qform_quaternion(self):
''' Compute quaternion from b, c, d of quaternion
Fills a value by assuming this is a unit quaternion
'''
hdr = self._header_data
bcd = [hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d']]
return fillpositive(bcd)
def get_qform(self):
''' Return 4x4 affine matrix from qform parameters in header '''
hdr = self._header_data
quat = self.get_qform_quaternion()
R = quat2mat(quat)
vox = hdr['pixdim'][1:4].copy()
        if np.any(vox < 0):
raise HeaderDataError('pixdims[1,2,3] should be positive')
qfac = hdr['pixdim'][0]
if qfac not in (-1,1):
raise HeaderDataError('qfac (pixdim[0]) should be 1 or -1')
vox[-1] *= qfac
S = np.diag(vox)
M = np.dot(R, S)
out = np.eye(4)
out[0:3,0:3] = M
out[0:3,3] = [hdr['qoffset_x'], hdr['qoffset_y'], hdr['qoffset_z']]
return out
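        # In other words (a summary of the code above), the returned affine is
        #   [ R . diag(pixdim[1:4] * [1, 1, qfac])   qoffset_{x,y,z} ]
        #   [ 0 0 0                                  1               ]
        # where R is the rotation matrix encoded by the quaternion.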
def set_qform(self, affine, code=None):
''' Set qform header values from 4x4 affine
Parameters
----------
hdr : nifti1 header
affine : 4x4 array
affine transform to write into qform
code : None, string or integer
String or integer giving meaning of transform in *affine*.
The default is None. If code is None, then {if current
qform code is not 0, leave code as it is in the header; else
set to 1 ('scanner')}.
Notes
-----
The qform transform only encodes translations, rotations and
zooms. If there are shear components to the *affine* transform,
the written qform gives the closest approximation where the
rotation matrix is orthogonal. This is to allow quaternion
representation. The orthogonal representation enforces orthogonal
axes.
Examples
--------
>>> hdr = Nifti1Header()
>>> int(hdr['qform_code']) # gives 0 - unknown
0
>>> affine = np.diag([1,2,3,1])
>>> np.all(hdr.get_qform() == affine)
False
>>> hdr.set_qform(affine)
>>> np.all(hdr.get_qform() == affine)
True
>>> int(hdr['qform_code']) # gives 1 - scanner
1
>>> hdr.set_qform(affine, code='talairach')
>>> int(hdr['qform_code'])
3
>>> hdr.set_qform(affine, code=None)
>>> int(hdr['qform_code'])
3
>>> hdr.set_qform(affine, code='scanner')
>>> int(hdr['qform_code'])
1
'''
hdr = self._header_data
if code is None:
code = hdr['qform_code']
if code == 0:
hdr['qform_code'] = 1
else:
code = self._xform_codes[code]
hdr['qform_code'] = code
if not affine.shape == (4,4):
raise TypeError('Need 4x4 affine as input')
trans = affine[:3,3]
RZS = affine[:3,:3]
zooms = np.sqrt(np.sum(RZS * RZS, axis=0))
R = RZS / zooms
# Set qfac to make R determinant positive
if npl.det(R) > 0:
qfac = 1
else:
qfac = -1
R[:,-1] *= -1
# Make R orthogonal (to allow quaternion representation)
# The orthogonal representation enforces orthogonal axes
# (a subtle requirement of the NIFTI format qform transform)
# Transform below is polar decomposition, returning the closest
# orthogonal matrix PR, to input R
P, S, Qs = npl.svd(R)
PR = np.dot(P, Qs)
# Convert to quaternion
quat = mat2quat(PR)
# Set into header
hdr['qoffset_x'], hdr['qoffset_y'], hdr['qoffset_z'] = trans
hdr['pixdim'][0] = qfac
hdr['pixdim'][1:4] = zooms
hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d'] = quat[1:]
def get_sform(self):
''' Return sform 4x4 affine matrix from header '''
hdr = self._header_data
out = np.eye(4)
out[0,:] = hdr['srow_x'][:]
out[1,:] = hdr['srow_y'][:]
out[2,:] = hdr['srow_z'][:]
return out
def set_sform(self, affine, code=None):
''' Set sform transform from 4x4 affine
Parameters
----------
hdr : nifti1 header
affine : 4x4 array
affine transform to write into sform
code : None, string or integer
String or integer giving meaning of transform in *affine*.
The default is None. If code is None, then {if current
sform code is not 0, leave code as it is in the header; else
set to 1 ('scanner')}.
Examples
--------
>>> hdr = Nifti1Header()
>>> int(hdr['sform_code']) # gives 0 - unknown
0
>>> affine = np.diag([1,2,3,1])
>>> np.all(hdr.get_sform() == affine)
False
>>> hdr.set_sform(affine)
>>> np.all(hdr.get_sform() == affine)
True
>>> int(hdr['sform_code']) # gives 1 - scanner
1
>>> hdr.set_sform(affine, code='talairach')
>>> int(hdr['sform_code'])
3
>>> hdr.set_sform(affine, code=None)
>>> int(hdr['sform_code'])
3
>>> hdr.set_sform(affine, code='scanner')
>>> int(hdr['sform_code'])
1
'''
hdr = self._header_data
if code is None:
code = hdr['sform_code']
if code == 0:
hdr['sform_code'] = 1
else:
code = self._xform_codes[code]
hdr['sform_code'] = code
hdr['srow_x'][:] = affine[0,:]
hdr['srow_y'][:] = affine[1,:]
hdr['srow_z'][:] = affine[2,:]
def get_qform_code(self, code_repr='label'):
''' Return representation of qform code
Parameters
----------
code_repr : string
string giving output form of intent code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
qform_code : string or integer
string label for qform code or code
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr['qform_code'] = 3
>>> hdr.get_qform_code()
'talairach'
'''
return self._get_code_field(
code_repr,
'qform_code',
self._xform_codes)
def get_sform_code(self, code_repr='label'):
''' Return representation of sform code
Parameters
----------
code_repr : string
string giving output form of intent code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
sform_code : string or integer
string label for sform code or code
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr['sform_code'] = 3
>>> hdr.get_sform_code()
'talairach'
'''
return self._get_code_field(
code_repr,
'sform_code',
self._xform_codes)
def get_slope_inter(self):
''' Get data scaling (slope) and DC offset (intercept) from header data
Parameters
----------
self : header object
Should have fields (keys)
* scl_slope - slope
* scl_inter - intercept
Returns
-------
slope : None or float
scaling (slope). None if there is no valid scaling from
these fields
inter : None or float
offset (intercept). Also None if there is no valid scaling, offset
Examples
--------
>>> fields = {'scl_slope':1,'scl_inter':0}
>>> hdr = Nifti1Header()
>>> hdr.get_slope_inter()
(1.0, 0.0)
>>> hdr['scl_slope'] = 0
>>> hdr.get_slope_inter()
(None, None)
>>> hdr['scl_slope'] = np.nan
>>> hdr.get_slope_inter()
(None, None)
>>> hdr['scl_slope'] = 1
>>> hdr['scl_inter'] = 1
>>> hdr.get_slope_inter()
(1.0, 1.0)
>>> hdr['scl_inter'] = np.inf
>>> hdr.get_slope_inter()
(1.0, 0.0)
'''
scale = float(self['scl_slope'])
dc_offset = float(self['scl_inter'])
if not scale or not np.isfinite(scale):
return None, None
if not np.isfinite(dc_offset):
dc_offset = 0.0
return scale, dc_offset
def set_slope_inter(self, slope, inter):
self._header_data['scl_slope'] = slope
self._header_data['scl_inter'] = inter
def get_dim_info(self):
''' Gets nifti MRI slice etc dimension information
Returns
-------
freq : {None,0,1,2}
Which data array axis is freqency encode direction
phase : {None,0,1,2}
Which data array axis is phase encode direction
slice : {None,0,1,2}
Which data array axis is slice encode direction
where ``data array`` is the array returned by ``get_data``
Because nifti1 files are natively Fortran indexed:
0 is fastest changing in file
1 is medium changing in file
2 is slowest changing in file
``None`` means the axis appears not to be specified.
Examples
--------
See set_dim_info function
'''
hdr = self._header_data
info = int(hdr['dim_info'])
freq = info & 3
phase = (info >> 2) & 3
slice = (info >> 4) & 3
return (freq-1 if freq else None,
phase-1 if phase else None,
slice-1 if slice else None)
def set_dim_info(self, freq=None, phase=None, slice=None):
''' Sets nifti MRI slice etc dimension information
Parameters
----------
hdr : nifti1 header
freq : {None, 0, 1, 2}
axis of data array refering to freqency encoding
phase : {None, 0, 1, 2}
axis of data array refering to phase encoding
slice : {None, 0, 1, 2}
axis of data array refering to slice encoding
``None`` means the axis is not specified.
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(1, 2, 0)
>>> hdr.get_dim_info()
(1, 2, 0)
>>> hdr.set_dim_info(freq=1, phase=2, slice=0)
>>> hdr.get_dim_info()
(1, 2, 0)
>>> hdr.set_dim_info()
>>> hdr.get_dim_info()
(None, None, None)
>>> hdr.set_dim_info(freq=1, phase=None, slice=0)
>>> hdr.get_dim_info()
(1, None, 0)
Notes
-----
This is stored in one byte in the header
'''
for inp in (freq, phase, slice):
if inp not in (None, 0, 1, 2):
raise HeaderDataError('Inputs must be in [None, 0, 1, 2]')
info = 0
if not freq is None:
info = info | ((freq+1) & 3)
if not phase is None:
info = info | (((phase+1) & 3) << 2)
if not slice is None:
info = info | (((slice+1) & 3) << 4)
self._header_data['dim_info'] = info
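        # Bit layout packed above into the single dim_info byte (each field is
        # stored as axis index + 1, with 0 meaning "unspecified"):
        #   bits 0-1: frequency-encoding axis
        #   bits 2-3: phase-encoding axis
        #   bits 4-5: slice-encoding axis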
def get_intent_code(self, code_repr='label'):
''' Return representation of intent code
Parameters
----------
code_repr : string
string giving output form of intent code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
intent_code : string or integer
string label for intent code or code
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_intent('t test', (10,), name='some score')
>>> hdr.get_intent_code()
't test'
'''
return self._get_code_field(
code_repr,
'intent_code',
self._intent_codes)
def get_intent(self, code_repr='label'):
''' Get intent code, parameters and name
Parameters
----------
code_repr : string
string giving output form of intent code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
code : string or integer
intent code, or string describing code
parameters : tuple
parameters for the intent
name : string
intent name
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_intent('t test', (10,), name='some score')
>>> hdr.get_intent()
('t test', (10.0,), 'some score')
>>> hdr.get_intent('code')
(3, (10.0,), 'some score')
'''
hdr = self._header_data
code = int(hdr['intent_code'])
recode = self.get_intent_code(code_repr)
n_params = len(self._intent_codes.parameters[code])
params = (float(hdr['intent_p%d' % (i+1)]) for i in range(n_params))
return recode, tuple(params), str(hdr['intent_name'])
def set_intent(self, code, params=(), name=''):
''' Set the intent code, parameters and name
If parameters are not specified, assumed to be all zero. Each
intent code has a set number of parameters associated. If you
specify any parameters, then it will need to be the correct number
(e.g. the "f test" intent requires 2). However, parameters can
also be set in the file data, so we also allow not setting any
parameters (empty parameter tuple).
Parameters
----------
code : integer or string
code specifying nifti intent
params : list, tuple of scalars
parameters relating to intent (see intent_codes)
defaults to (). Unspecified parameters are set to 0.0
name : string
intent name (description). Defaults to ''
Returns
-------
None
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_intent(0) # unknown code
>>> hdr.set_intent('z score')
>>> hdr.get_intent()
('z score', (), '')
>>> hdr.get_intent('code')
(5, (), '')
>>> hdr.set_intent('t test', (10,), name='some score')
>>> hdr.get_intent()
('t test', (10.0,), 'some score')
>>> hdr.set_intent('f test', (2, 10), name='another score')
>>> hdr.get_intent()
('f test', (2.0, 10.0), 'another score')
>>> hdr.set_intent('f test')
>>> hdr.get_intent()
('f test', (0.0, 0.0), '')
'''
hdr = self._header_data
icode = intent_codes.code[code]
p_descr = intent_codes.parameters[code]
if len(params) and len(params) != len(p_descr):
raise HeaderDataError('Need params of form %s, or empty' % (p_descr,))
all_params = [0] * 3
all_params[:len(params)] = params[:]
for i, param in enumerate(all_params):
hdr['intent_p%d' % (i+1)] = param
hdr['intent_code'] = icode
hdr['intent_name'] = name
def get_slice_duration(self):
''' Get slice duration
Returns
-------
slice_duration : float
time to acquire one slice
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(slice=2)
>>> hdr.set_slice_duration(0.3)
>>> print "%0.1f" % hdr.get_slice_duration()
0.3
Notes
-----
The Nifti1 spec appears to require the slice dimension to be
defined for slice_duration to have meaning.
'''
_, _, slice_dim = self.get_dim_info()
if slice_dim is None:
raise HeaderDataError('Slice dimension must be set '
'for duration to be valid')
return float(self._header_data['slice_duration'])
def set_slice_duration(self, duration):
''' Set slice duration
Parameters
----------
duration : scalar
time to acquire one slice
Examples
--------
See ``get_slice_duration``
'''
_, _, slice_dim = self.get_dim_info()
if slice_dim is None:
raise HeaderDataError('Slice dimension must be set '
'for duration to be valid')
self._header_data['slice_duration'] = duration
def get_slice_code(self, code_repr='label'):
''' Return representation of slice order code
Parameters
----------
code_repr : string
string giving output form of slice order code representation.
Default is 'label'; use 'code' for integer representation.
Returns
-------
slice_code : string or integer
string label for slice ordering code or code
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr['slice_code'] = 4 # alternating decreasing
>>> hdr.get_slice_code()
'alternating decreasing'
'''
return self._get_code_field(
code_repr,
'slice_code',
self._slice_order_codes)
def get_slice_times(self):
''' Get slice times from slice timing information
Returns
-------
slice_times : tuple
Times of acquisition of slices, where 0 is the beginning of
the acquisition, ordered by position in file. nifti allows
slices at the top and bottom of the volume to be excluded from
the standard slice timing specification, and calls these
"padding slices". We give padding slices ``None`` as a time
of acquisition
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(slice=2)
>>> hdr.set_data_shape((1, 1, 7))
>>> hdr.set_slice_duration(0.1)
We need a function to print out the Nones and floating point
values in a predictable way, for the tests below.
>>> _stringer = lambda val: val is not None and '%2.1f' % val or None
>>> _print_me = lambda s: map(_stringer, s)
The following examples are from the nifti1.h documentation.
>>> hdr['slice_code'] = slice_order_codes['sequential increasing']
>>> _print_me(hdr.get_slice_times())
['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']
>>> hdr['slice_start'] = 1
>>> hdr['slice_end'] = 5
>>> _print_me(hdr.get_slice_times())
[None, '0.0', '0.1', '0.2', '0.3', '0.4', None]
>>> hdr['slice_code'] = slice_order_codes['sequential decreasing']
>>> _print_me(hdr.get_slice_times())
[None, '0.4', '0.3', '0.2', '0.1', '0.0', None]
>>> hdr['slice_code'] = slice_order_codes['alternating increasing']
>>> _print_me(hdr.get_slice_times())
[None, '0.0', '0.3', '0.1', '0.4', '0.2', None]
>>> hdr['slice_code'] = slice_order_codes['alternating decreasing']
>>> _print_me(hdr.get_slice_times())
[None, '0.2', '0.4', '0.1', '0.3', '0.0', None]
>>> hdr['slice_code'] = slice_order_codes['alternating increasing 2']
>>> _print_me(hdr.get_slice_times())
[None, '0.2', '0.0', '0.3', '0.1', '0.4', None]
>>> hdr['slice_code'] = slice_order_codes['alternating decreasing 2']
>>> _print_me(hdr.get_slice_times())
[None, '0.4', '0.1', '0.3', '0.0', '0.2', None]
'''
hdr = self._header_data
_, _, slice_dim = self.get_dim_info()
shape = self.get_data_shape()
slice_len = shape[slice_dim]
duration = self.get_slice_duration()
slabel = self.get_slice_code()
if slabel == 'unknown':
raise HeaderDataError('Cannot get slice times when '
'slice code is "unknown"')
slice_start, slice_end = (int(hdr['slice_start']),
int(hdr['slice_end']))
if slice_start < 0:
raise HeaderDataError('slice_start should be >= 0')
if slice_end == 0:
slice_end = slice_len-1
n_timed = slice_end - slice_start + 1
if n_timed < 1:
raise HeaderDataError('slice_end should be > slice_start')
st_order = self._slice_time_order(slabel, n_timed)
times = st_order * duration
return ((None,)*slice_start +
tuple(times) +
(None,)*(slice_len-slice_end-1))
def set_slice_times(self, slice_times):
''' Set slice times into *hdr*
Parameters
----------
slice_times : tuple
tuple of slice times, one value per slice
tuple can include None to indicate no slice time for that slice
Examples
--------
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(slice=2)
>>> hdr.set_data_shape([1, 1, 7])
>>> hdr.set_slice_duration(0.1)
>>> times = [None, 0.2, 0.4, 0.1, 0.3, 0.0, None]
>>> hdr.set_slice_times(times)
>>> hdr.get_slice_code()
'alternating decreasing'
>>> int(hdr['slice_start'])
1
>>> int(hdr['slice_end'])
5
'''
# Check if number of slices matches header
hdr = self._header_data
_, _, slice_dim = self.get_dim_info()
shape = self.get_data_shape()
slice_len = shape[slice_dim]
if slice_len != len(slice_times):
raise HeaderDataError('Number of slice times does not '
'match number of slices')
# Extract Nones at beginning and end. Check for others
for ind, time in enumerate(slice_times):
if time is not None:
slice_start = ind
break
else:
raise HeaderDataError('Not all slice times can be None')
for ind, time in enumerate(slice_times[::-1]):
if time is not None:
slice_end = slice_len-ind-1
break
timed = slice_times[slice_start:slice_end+1]
for time in timed:
if time is None:
raise HeaderDataError('Cannot have None in middle '
'of slice time vector')
# Find slice duration, check times are compatible with single
# duration
tdiffs = np.diff(np.sort(timed))
if not np.allclose(np.diff(tdiffs), 0):
raise HeaderDataError('Slice times not compatible with '
'single slice duration')
duration = np.mean(tdiffs)
# To slice time order
st_order = np.round(np.array(timed) / duration)
# Check if slice times fit known schemes
n_timed = len(timed)
labels = self._slice_order_codes.value_set('label')
labels.remove('unknown')
for label in labels:
if np.all(st_order == self._slice_time_order(
label,
n_timed)):
break
else:
raise HeaderDataError('slice ordering of %s fits '
'with no known scheme' % st_order)
# Set values into header
hdr['slice_start'] = slice_start
hdr['slice_end'] = slice_end
hdr['slice_duration'] = duration
hdr['slice_code'] = slice_order_codes.code[label]
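# Tracing the doctest above (added comment, not original code): the times
# [None, 0.2, 0.4, 0.1, 0.3, 0.0, None] give slice_start=1, slice_end=5 and
# timed=[0.2, 0.4, 0.1, 0.3, 0.0]; the sorted differences are all 0.1, so
# duration=0.1 and st_order=[2, 4, 1, 3, 0], which matches
# _slice_time_order('alternating decreasing', 5).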
def for_file_pair(self, is_pair=True):
''' Adapt header to separate or same image and header file
Parameters
----------
is_pair : bool, optional
True if adapting header to file pair state, False for single
Returns
-------
hdr : Nifti1Header
copied and possibly modified header
Examples
--------
The header starts off as being for a single file
>>> hdr = Nifti1Header()
>>> str(hdr['magic'])
'n+1'
>>> hdr.get_data_offset()
352
But we can switch it to be for two files (a pair)
>>> pair_hdr = hdr.for_file_pair()
>>> str(pair_hdr['magic'])
'ni1'
>>> pair_hdr.get_data_offset()
0
The original header is not affected (a copy is returned)
>>> hdr.get_data_offset()
352
Back to single again
>>> unpair_hdr = pair_hdr.for_file_pair(False)
>>> str(unpair_hdr['magic'])
'n+1'
>>> unpair_hdr.get_data_offset()
352
'''
hdr = self.copy()
if not is_pair:
# one file version
if hdr['magic'] == 'n+1':
if hdr['vox_offset'] < 352:
hdr['vox_offset'] = 352
return hdr
hdr['magic'] = 'n+1'
hdr['vox_offset'] = 352
return hdr
# two file version
if hdr['magic'] == 'ni1':
return hdr
hdr['magic'] = 'ni1'
hdr['vox_offset'] = 0
return hdr
def _slice_time_order(self, slabel, n_slices):
''' Supporting function to give time order of slices from label '''
if slabel == 'sequential increasing':
sp_ind_time_order = range(n_slices)
elif slabel == 'sequential decreasing':
sp_ind_time_order = range(n_slices)[::-1]
elif slabel == 'alternating increasing':
sp_ind_time_order = range(0,n_slices,2) + range(1, n_slices, 2)
elif slabel == 'alternating decreasing':
sp_ind_time_order = range(n_slices-1,-1,-2) + range(n_slices-2,-1,-2)
elif slabel == 'alternating increasing 2':
sp_ind_time_order = range(1,n_slices,2) + range(0, n_slices, 2)
elif slabel == 'alternating decreasing 2':
sp_ind_time_order = range(n_slices-2,-1,-2) + range(n_slices-1,-1,-2)
else:
raise HeaderDataError('We do not handle slice ordering "%s"'
% slabel)
return np.argsort(sp_ind_time_order)
''' Checks only below here '''
@classmethod
def _get_checks(klass):
# We need to return our own versions of - e.g. chk_datatype, to
# pick up the Nifti datatypes from our class
return (klass._chk_sizeof_hdr,
klass._chk_datatype,
klass._chk_bitpix,
klass._chk_pixdims,
klass._chk_scale_slope,
klass._chk_scale_inter,
klass._chk_qfac,
klass._chk_magic_offset,
klass._chk_qform_code,
klass._chk_sform_code)
@staticmethod
def _chk_scale_slope(hdr, fix=True):
ret = Report(hdr, HeaderDataError)
scale = hdr['scl_slope']
if scale and np.isfinite(scale):
return ret
ret.problem_msg = '"scl_slope" is %s; should !=0 and be finite' % scale
if fix:
hdr['scl_slope'] = 1
ret.fix_msg = 'setting "scl_slope" to 1'
else:
ret.level = 30
return ret
@staticmethod
def _chk_scale_inter(hdr, fix=True):
ret = Report(hdr, HeaderDataError)
scale = hdr['scl_inter']
if np.isfinite(scale):
return ret
ret.problem_msg = '"scl_inter" is %s; should be finite' % scale
if fix:
hdr['scl_inter'] = 0
ret.fix_msg = 'setting "scl_inter" to 0'
else:
ret.level = 30
return ret
@staticmethod
def _chk_qfac(hdr, fix=True):
ret = Report(hdr, HeaderDataError)
if hdr['pixdim'][0] in (-1, 1):
return ret
ret.problem_msg = 'pixdim[0] (qfac) should be 1 (default) or -1'
if fix:
hdr['pixdim'][0] = 1
ret.fix_msg = 'setting qfac to 1'
else:
ret.level = 20
return ret
@staticmethod
def _chk_magic_offset(hdr, fix=True):
ret = Report(hdr, HeaderDataError)
magic = hdr['magic']
offset = hdr['vox_offset']
if magic == 'ni1': # two files
if offset == 0:
return ret
ret.problem_msg = ('vox offset should be 0 (is %s) '
'with two-file nifti images' % offset)
ret.level = 40
if fix:
ret.fix_msg = 'leaving at current value'
elif magic == 'n+1': # one file
if offset >= 352:
if not offset % 16:
return ret
else:
# XXX Michael wonders if this warning is really valid. NIfTI
# says that each extension's length has to be a multiple of
# 16, therefore the test should be (offset-352) % 16 and
# not offset % 16, or does SPM have additional artificial
# limitations?
ret.problem_msg = ('vox offset (=%s) not divisible '
'by 16, not SPM compatible' % offset)
ret.level = 30
if fix:
ret.fix_msg = 'leaving at current value'
return ret
ret.problem_msg = ('vox offset %d too low for '
'single file nifti1' % offset)
if fix:
hdr['vox_offset'] = 352
ret.fix_msg = 'setting to minimum value of 352'
else:
ret.level = 50
else: # unrecognized nii magic string, oh dear
ret.problem_msg = 'magic string %s is not valid' % magic
ret.level = 50
if fix:
ret.fix_msg = 'leaving as is, but future errors are likely'
return ret
@classmethod
def _chk_qform_code(klass, hdr, fix=True):
ret = Report(hdr, HeaderDataError)
code = int(hdr['qform_code'])
if code in klass._xform_codes.value_set():
return ret
ret.problem_msg = 'qform code %d not valid' % code
if fix:
hdr['qform_code'] = 0
ret.fix_msg = 'setting to 0'
else:
ret.level = 30
return ret
@classmethod
def _chk_sform_code(klass, hdr, fix=True):
ret = Report(hdr, HeaderDataError)
code = int(hdr['sform_code'])
if code in klass._xform_codes.value_set():
return ret
ret.problem_msg = 'sform code %d not valid' % code
if fix:
hdr['sform_code'] = 0
ret.fix_msg = 'setting to 0'
else:
ret.level = 30
return ret
class Nifti1Image(analyze.AnalyzeImage):
_header_maker = Nifti1Header
def _set_header(self, header=None):
SpatialImage._set_header(self, header)
@staticmethod
def filespec_to_files(filespec):
ft1 = filetuples.FileTuples(
(('header', '.nii'), ('image', '.nii')),
ignored_suffixes=('.gz', '.bz2')
)
ft2 = filetuples.FileTuples(
(('header', '.hdr'), ('image', '.img')),
ignored_suffixes=('.gz', '.bz2')
)
for ftups in (ft1, ft2):
try:
ftups.set_filenames(filespec)
except filetuples.FileTuplesError:
continue
break
else:
raise ValueError('Filespec "%s" does not '
'look like Nifti1' % filespec)
files = dict(zip(('header', 'image'), ftups.get_filenames()))
return files
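# For example (added comment; 'scan' is an illustrative name, not a shipped
# file): filespec_to_files('scan.nii') maps both 'header' and 'image' to
# 'scan.nii', while filespec_to_files('scan.hdr') maps 'header' to 'scan.hdr'
# and 'image' to 'scan.img'.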
@classmethod
def from_files(klass, files):
fname = files['header']
fileobj = allopen(fname)
header = klass._header_maker.from_fileobj(fileobj)
extra = None
# handle extensions
# assume the fileptr is just after header (magic field)
# determine how much to read when parsing the extensions
if header['vox_offset'] == 0:
# read till the end of the header
extsize = -1
else:
extsize = header['vox_offset'] - fileobj.tell()
extensions = Nifti1Extensions.from_fileobj(fileobj, extsize)
# XXX maybe always do that?
if len(extensions):
extra = {'extensions': extensions}
affine = header.get_best_affine()
ret = klass(None, affine, header=header, extra=extra)
ret._files = files
return ret
def to_files(self, files=None):
''' Write image to files passed, or self._files
'''
# XXX the whole method is a candidate for refactoring, since it started as
# verbatim copy of AnalyzeImage.to_files()
if files is None:
files = self._files
if files is None:
raise ValueError('Need files to write data')
data = self.get_data()
# Adapt header to possible two<->one file difference
is_pair = files['header'] != files['image']
hdr = self.get_header().for_file_pair(is_pair)
# if any extensions, figure out necessary vox_offset for extensions to
# fit
if 'extensions' in self.extra and len(self.extra['extensions']):
hdr['vox_offset'] = len(hdr.binaryblock) \
+ self.extra['extensions'].get_sizeondisk()
slope, inter, mn, mx = adapt_header(hdr, data)
hdrf = allopen(files['header'], 'wb')
hdr.write_to(hdrf)
# write all extensions to file
# assumes that the file ptr is right after the magic string
if 'extensions' not in self.extra:
# no extensions: be nice and write appropriate flag
hdrf.write(np.array((0,0,0,0), dtype=np.int8).tostring())
else:
self.extra['extensions'].write_to(hdrf)
if is_pair:
imgf = allopen(files['image'], 'wb')
else: # single file for header and image
imgf = hdrf
# streams like bz2 do not allow seeks, even forward. We
# check where to go, and write zeros up until the data part
# of the file
offset = hdr.get_data_offset()
diff = offset-hdrf.tell()
if diff > 0:
hdrf.write('\x00' * diff)
write_data(hdr, data, imgf, inter, slope, mn, mx)
self._header = hdr
self._files = files
def _update_header(self):
''' Harmonize header with image data and affine
See AnalyzeImage._update_header for more examples
Examples
--------
>>> data = np.zeros((2,3,4))
>>> affine = np.diag([1.0,2.0,3.0,1.0])
>>> img = Nifti1Image(data, affine)
>>> hdr = img.get_header()
>>> np.all(hdr.get_qform() == affine)
True
>>> np.all(hdr.get_sform() == affine)
True
'''
super(Nifti1Image, self)._update_header()
hdr = self._header
if self._affine is not None:
hdr.set_sform(self._affine)
hdr.set_qform(self._affine)
load = Nifti1Image.load
save = Nifti1Image.save
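# Minimal usage sketch (added comments only; the array shape and the
# 'example.nii' file name are illustrative assumptions, not fixtures of this
# module):
#
#   data = np.zeros((2, 3, 4))
#   affine = np.diag([1.0, 2.0, 3.0, 1.0])
#   img = Nifti1Image(data, affine)
#   files = Nifti1Image.filespec_to_files('example.nii')  # single-file spec
#   img.to_files(files)                                    # write header + data
#   img2 = Nifti1Image.from_files(files)                   # round-trip read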
the-stack_0_3319
"""functions for evaluating embeddings and neighbor networks"""
import scanpy as sc
import numpy as np
import pandas as pd
def fidelity(adata, name, groupby='label'):
"""look in .obsp[name_connectivities] for connectivity data and see how well it preserves the given labelling"""
labels = adata.obs[groupby].values
adj = (adata.obsp[name+'_'*(len(name)>0)+'connectivities']>0)
edgelist = zip(*adj.nonzero())
good_edges=0
bad_edges=0
for i,j in edgelist:
if labels[i] == labels[j]:
good_edges += 1
else:
bad_edges += 1
return float(good_edges)/float(good_edges + bad_edges)
def group_fidelity(adata, name, groupby='label', pairwise=False):
"""matrix containing the fraction of edges that stay within each group
if pairwise=True, returns the fraction of edges between each pair of groups
i.e. position (i,j) is the proportion of edges that land in group j, among
all edges originating in group i"""
classes = adata.obs[groupby].values
G = (adata.obsp[name+'_'*(len(name)>0)+'connectivities']>0)
class_list = np.unique(classes)
result = np.zeros((len(class_list), len(class_list)))
for i, c in enumerate(class_list):
# print(i)
# print(c)
inds = np.where(np.array(classes) == c)[0]
G_sub = G[inds, :] # only look at vertices in c
class_freqs = [0] * len(class_list)
for j in range(len(inds)):
row = G_sub[j, :].todense().tolist()[0]
row = np.array(row).astype(int)
# print(G_sub[j,:].tolist()[0])
nbr_inds = np.where(row > 0)[0]
# print(nbr_inds)
nbr_classes = np.array([classes[x] for x in nbr_inds])
for k, c2 in enumerate(class_list):
class_freqs[k] += np.sum(nbr_classes == c2)
# print(class_freqs)
result[i, :] = class_freqs
result = result / result.sum(axis=1)[:, None]
result = pd.DataFrame(result)
result.columns = class_list
result.index = class_list
if pairwise:
return (result)
else:
diags = np.diag(result.values)
return pd.DataFrame({'cluster':class_list, 'fidelity':diags, 'method':name})
def plot_umap(adata, name, n_neighbors=10, rerun=False, **kwargs):
"""looks in .uns[name] for neighbors info, and uses that to store and plot a UMAP"""
if name not in adata.uns or rerun:
sc.pp.neighbors(adata, use_rep='X_'+name, key_added=name, n_neighbors=n_neighbors)
if 'X_'+name+'_umap' not in adata.obsm or rerun:
sc.tl.umap(adata, neighbors_key=name)
adata.obsm['X_{}_umap'.format(name)] = adata.obsm['X_umap']
sc.pl.embedding(adata, basis='{}_umap'.format(name), neighbors_key=name, **kwargs)
def good_bad_edges(adata, name, groupby='label'):
labels = adata.obs[groupby].values
adj = (adata.obsp[name + '_' * (len(name) > 0) + 'connectivities'] > 0)
edgelist = zip(*adj.nonzero())
good_edges = []
bad_edges = []
for i, j in edgelist:
if labels[i] == labels[j]:
good_edges.append((i,j))
else:
bad_edges.append((i,j))
return (good_edges, bad_edges)
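# Minimal usage sketch (added comments only; 'adata.h5ad', the 'pca' embedding
# name and the 'label' column are assumptions for illustration):
#
#   import scanpy as sc
#   adata = sc.read_h5ad('adata.h5ad')
#   sc.pp.neighbors(adata, use_rep='X_pca', key_added='pca')
#   print(fidelity(adata, 'pca', groupby='label'))        # fraction of within-label edges
#   print(group_fidelity(adata, 'pca', groupby='label'))  # per-cluster breakdown
#   plot_umap(adata, 'pca', color='label')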
the-stack_0_3320
import os
import time
import yaml
from platform import python_version
from unittest import skipIf
import bzt
from bzt.engine import EXEC
from bzt.modules import ConsolidatingAggregator
from bzt.modules._selenium import GeckoDriver
from bzt.modules.functional import FuncSamplesReader, LoadSamplesReader, FunctionalAggregator
from bzt.modules._apiritif import ApiritifNoseExecutor
from bzt.modules._pytest import PyTestExecutor
from bzt.modules.robot import RobotExecutor
from tests.unit import RESOURCES_DIR, ExecutorTestCase
from tests.unit.modules._selenium import SeleniumTestCase, MockPythonTool, MockDriver
from bzt.utils import EXE_SUFFIX, is_windows
class TestSeleniumApiritifRunner(SeleniumTestCase):
def obj_prepare(self):
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
self.obj.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
def test_selenium_prepare_python_single(self):
"""
Check if script exists in working dir
:return:
"""
self.obj.execution.merge({"scenario": {
"script": RESOURCES_DIR + "selenium/python/test_blazemeter_fail.py"
}})
self.obj_prepare()
def test_selenium_prepare_python_folder(self):
"""
Check if scripts exist in working dir
:return:
"""
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}})
self.obj_prepare()
def test_selenium_startup_shutdown_python_single(self):
"""
run tests from .py file
:return:
"""
self.configure({
'execution': {
"iterations": 1,
'scenario': {'script': RESOURCES_DIR + 'selenium/python/'},
'executor': 'selenium'
},
'reporting': [{'module': 'junit-xml'}]
})
self.obj.execution.merge({"scenario": {
"script": RESOURCES_DIR + "selenium/python/test_blazemeter_fail.py"
}})
self.obj_prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "apiritif.0.csv")))
@skipIf(python_version() >= '3.8' and is_windows(), "Temporarily disabled")
def test_selenium_startup_shutdown_python_folder(self):
"""
run tests from .py files
:return:
"""
self.configure({
'execution': {
'iterations': 1,
'scenario': {'script': RESOURCES_DIR + 'selenium/python/'},
'executor': 'selenium'
},
'reporting': [{'module': 'junit-xml'}]
})
self.obj_prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
api_log = os.path.join(self.obj.engine.artifacts_dir, "apiritif.0.csv")
nose_log = os.path.join(self.obj.engine.artifacts_dir, "apiritif.out")
self.assertTrue(os.path.exists(api_log))
with open(nose_log) as fds:
content = fds.read()
self.assertIn("Transaction started::", content)
self.assertIn("Transaction ended::", content)
def test_runner_fail_no_test_found(self):
"""
Check that Python Apiritif runner fails if no tests were found
:return:
"""
self.configure({
EXEC: {
"iterations": 1,
"executor": "selenium",
"scenario": {"script": RESOURCES_DIR + "selenium/invalid/dummy.py"}
}
})
self.obj_prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
diagnostics = "\n".join(self.obj.get_error_diagnostics())
self.assertIn("Nothing to test.", diagnostics)
def test_resource_files_collection_remote_apiritif(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}})
self.assertEqual(len(self.obj.resource_files()), 1)
def test_long_iterations_value(self):
self.engine.aggregator = ConsolidatingAggregator()
self.engine.aggregator.engine = self.engine
self.obj.execution.merge({
"iterations": 2 ** 64,
"scenario": {
"requests": [
"http://blazedemo.com/",
],
}
})
self.obj_prepare()
try:
self.obj.startup()
for _ in range(3):
self.assertFalse(self.obj.check())
self.engine.aggregator.check()
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
def test_check_tools_installed_conf(self):
self.obj.execution.merge({"scenario": {"requests": ["http://blazedemo.com/"]}})
self.obj_prepare()
self.assertTrue(self.obj.selenium.called)
self.assertTrue(self.obj.runner.selenium.called)
self.assertTrue(self.obj.runner.apiritif.called)
def test_check_tools_installed_script(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "selenium/python/"}})
self.obj_prepare()
self.assertTrue(self.obj.selenium.called)
self.assertTrue(self.obj.runner.selenium.called)
self.assertTrue(self.obj.runner.apiritif.called)
class TestApiritifRunner(ExecutorTestCase):
EXECUTOR = ApiritifNoseExecutor
def obj_prepare(self):
tmp_tool = bzt.modules._apiritif.executor.Apiritif
try:
bzt.modules._apiritif.executor.Apiritif = MockPythonTool
self.obj.prepare()
finally:
bzt.modules._apiritif.executor.Apiritif = tmp_tool
def test_new_flow(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/",
{"set-variables": {"name1": "val1"}},
{
"transaction": "second",
"do": [
"/other.html",
"/reserve.php",
{
"transaction": "third",
"do": [
"/${name1}"
]
}
]}]}}]})
self.obj_prepare()
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "test_requests.py")))
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEqual(self.obj.process, None)
def test_apiritif_generated_requests(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/",
"/reserve.php"]}}]})
self.obj_prepare()
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, "test_requests.py")))
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEqual(self.obj.process, None)
def test_apiritif_transactions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "apiritif/test_transactions.py"
}
}]
})
self.obj_prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEqual(self.obj.process, None)
def test_report_reading(self):
reader = FuncSamplesReader(RESOURCES_DIR + "apiritif/transactions.ldjson", self.obj.engine, self.obj.log)
items = list(reader.read(last_pass=True))
self.assertEqual(9, len(items))
self.assertEqual(items[0].get_short_name(), 'TestRequests.test_1_single_request')
self.assertEqual(items[1].get_short_name(), 'TestRequests.test_2_multiple_requests')
self.assertEqual(items[2].get_short_name(), 'test_3_toplevel_transaction.Transaction')
self.assertEqual(items[3].get_short_name(), 'test_4_mixed_transaction.Transaction')
self.assertEqual(items[4].get_short_name(), 'test_5_multiple_transactions.Transaction 1')
self.assertEqual(items[5].get_short_name(), 'test_5_multiple_transactions.Transaction 2')
self.assertEqual(items[6].get_short_name(), 'test_6_transaction_obj.Label')
self.assertEqual(items[7].get_short_name(), 'test_7_transaction_fail.Label')
self.assertEqual(items[8].get_short_name(), 'test_8_transaction_attach.Label')
def test_report_transactions_as_failed(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"iterations": 1,
"scenario": {
"default-address": "http://httpbin.org",
"requests": [{
"label": "failure by 404",
"url": "/status/404",
}]
}
}]
})
self.obj.engine.aggregator = FunctionalAggregator()
self.obj_prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEqual(self.obj.process, None)
reader = LoadSamplesReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif.0.ldjson"), self.obj.log)
samples = list(reader._read(last_pass=True))
self.assertEqual(len(samples), 1)
tstmp, label, concur, rtm, cnn, ltc, rcd, error, trname, byte_count = samples[0]
self.assertIsNotNone(error)
def test_status_skipped(self):
self.configure({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "functional/test_all.py"
}
}]
})
self.obj.engine.aggregator = FunctionalAggregator()
self.obj_prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
reader = FuncSamplesReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif.0.ldjson"),
self.obj.engine, self.obj.log)
samples = list(reader.read(last_pass=True))
self.assertEqual(len(samples), 4)
self.assertIsNotNone(samples[-1].status)
class TestPyTestExecutor(ExecutorTestCase):
EXECUTOR = PyTestExecutor
CMD_LINE = None
def setUp(self):
super().setUp()
bzt.modules._selenium.ChromeDriver = MockDriver
bzt.modules._selenium.GeckoDriver = MockDriver
def start_subprocess(self, args, **kwargs):
self.CMD_LINE = args
def obj_prepare(self):
tmp_tool = bzt.modules._pytest.PyTest
try:
bzt.modules._pytest.PyTest = MockPythonTool
self.obj.prepare()
finally:
bzt.modules._pytest.PyTest = tmp_tool
def full_run(self, config):
self.obj.execution.merge(config)
self.obj_prepare()
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_report_file(self):
self.full_run({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue('--report-file' in self.CMD_LINE)
val = self.CMD_LINE[self.CMD_LINE.index('--report-file') + 1]
self.assertTrue(val.endswith("PyTestExecutor.ldjson"))
def test_iterations(self):
self.full_run({
"iterations": 10,
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue('-i 10' in ' '.join(self.CMD_LINE))
def test_hold(self):
self.full_run({
"hold-for": "3s",
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue('-d 3.0' in ' '.join(self.CMD_LINE))
def test_script(self):
self.full_run({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue(self.CMD_LINE[-1].endswith("test_single.py"))
def test_blazedemo(self):
self.obj.engine.check_interval = 0.1
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/test_blazedemo.py"
}
})
self.obj_prepare()
driver = self.obj._get_tool(MockDriver, tool_path=self.obj.settings.get('geckodriver').get('path'))
if not driver.check_if_installed():
driver.install()
self.obj.env.add_path({"PATH": driver.get_driver_dir()})
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_package(self):
self.obj.engine.check_interval = 0.1
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "selenium/pytest/"
}
})
self.obj_prepare()
driver = self.obj._get_tool(MockDriver, tool_path=self.obj.settings.get('geckodriver').get('path'))
if not driver.check_if_installed():
driver.install()
self.obj.env.add_path({"PATH": driver.get_driver_dir()})
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_additional_args(self):
additional_args = "--foo --bar"
self.obj.runner_path = RESOURCES_DIR + "selenium/pytest/bin/runner.py"
self.full_run({
"scenario": {
"additional-args": additional_args,
"script": RESOURCES_DIR + "selenium/pytest/test_single.py"
}
})
self.assertTrue(additional_args in " ".join(self.CMD_LINE))
class TestRobotExecutor(ExecutorTestCase):
EXECUTOR = RobotExecutor
CMD_LINE = None
def start_subprocess(self, args, **kwargs):
self.CMD_LINE = args
def test_full_single_script(self):
self.configure({
"execution": [{
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
tmp_tool = bzt.modules.robot.Robot
try:
bzt.modules.robot.Robot = MockPythonTool
self.obj.prepare()
self.obj.settings["interpreter"] = RESOURCES_DIR + "selenium/robot/robot-mock" + EXE_SUFFIX
self.obj.startup()
finally:
bzt.modules.robot.Robot = tmp_tool
self.obj.shutdown()
self.obj.post_process()
self.assertFalse(self.obj.has_results())
self.assertNotEqual(self.obj.process, None)
lines = open(self.obj.report_file).readlines()
self.assertEqual(1, len(lines))
def full_run(self, config):
self.configure(config)
tmp_tool = bzt.modules.robot.Robot
try:
bzt.modules.robot.Robot = MockPythonTool
self.obj.prepare()
finally:
bzt.modules.robot.Robot = tmp_tool
self.obj.engine.start_subprocess = self.start_subprocess
self.obj.startup()
self.obj.post_process()
def test_hold(self):
self.full_run({
"execution": [{
"hold-for": "5s",
"iterations": 3,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--duration' in self.CMD_LINE)
dur_val = self.CMD_LINE[self.CMD_LINE.index('--duration') + 1]
self.assertEqual(dur_val, '5.0')
def test_report_file(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--report-file' in self.CMD_LINE)
report_file = self.CMD_LINE[self.CMD_LINE.index('--report-file') + 1]
self.assertTrue(report_file.endswith("RobotExecutor.ldjson"))
def test_iterations(self):
self.full_run({
"execution": [{
"iterations": 3,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--iterations' in self.CMD_LINE)
iters_val = self.CMD_LINE[self.CMD_LINE.index('--iterations') + 1]
self.assertEqual(iters_val, '3')
def test_variables(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"variables": {
"USERNAME": "janedoe",
},
"script": RESOURCES_DIR + "selenium/robot/simple/test_novar.robot",
}
}]
})
self.assertTrue('--variablefile' in self.CMD_LINE)
var_file = self.CMD_LINE[self.CMD_LINE.index('--variablefile') + 1]
self.assertTrue(var_file.endswith("robot-vars.yaml"))
self.assertEqual('janedoe', yaml.full_load(open(var_file).read())['USERNAME'])
def test_variables_file(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"variables": RESOURCES_DIR + "selenium/robot/simple/vars.yaml",
"script": RESOURCES_DIR + "selenium/robot/simple/test_novar.robot",
}
}]
})
self.assertTrue('--variablefile' in self.CMD_LINE)
var_file = self.CMD_LINE[self.CMD_LINE.index('--variablefile') + 1]
self.assertEqual(var_file, os.path.normpath(RESOURCES_DIR + "selenium/robot/simple/vars.yaml"))
def test_output_file(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--outputfile' in self.CMD_LINE)
out_file = self.CMD_LINE[self.CMD_LINE.index('--outputfile') + 1]
self.assertTrue(out_file.endswith("output.xml"))
def test_log_file(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot"
}
}]
})
self.assertTrue('--logfile' in self.CMD_LINE)
log_file = self.CMD_LINE[self.CMD_LINE.index('--logfile') + 1]
self.assertTrue(log_file.endswith("log.html"))
def test_single_tag(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"tags": "create",
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot",
}
}]
})
self.assertTrue('--include' in self.CMD_LINE)
tags = self.CMD_LINE[self.CMD_LINE.index('--include') + 1]
self.assertEqual(tags, 'create')
def test_multiple_tags(self):
self.full_run({
"execution": [{
"iterations": 1,
"scenario": {
"tags": "create,database",
"script": RESOURCES_DIR + "selenium/robot/simple/test.robot",
}
}]
})
self.assertTrue('--include' in self.CMD_LINE)
tags = self.CMD_LINE[self.CMD_LINE.index('--include') + 1]
self.assertEqual(tags, 'create,database')
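# For reference (added comment): the dict-based config used in
# TestSeleniumApiritifRunner.test_selenium_startup_shutdown_python_folder above
# corresponds roughly to a standalone Taurus YAML file like the sketch below;
# the 'my-test.yml' name, the relative script path and the `bzt my-test.yml`
# invocation are assumptions, not taken from this module.
#
#   execution:
#   - iterations: 1
#     executor: selenium
#     scenario:
#       script: <resources>/selenium/python/
#   reporting:
#   - module: junit-xml
#
# Run with: bzt my-test.yml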