| content | sha1 | id |
| --- | --- | --- |
| stringlengths 35 to 762k | stringlengths 40 | int64 0 to 3.66M |
import os
def get_graph_names(test_dir):
"""Parse test_dir/*GRAPHFILES and return basenames for all .graph files"""
graph_list = []
GRAPHFILES_files = [f for f in os.listdir(test_dir) if f.endswith("GRAPHFILES")]
for GRAPHFILE in GRAPHFILES_files:
with open(os.path.join(test_dir, GRAPHFILE), 'r') as f:
for l in f.readlines():
l = l.strip()
if not l or l.startswith('#'):
continue
graph_list.append(os.path.basename(l).replace('.graph', ''))
return graph_list | b60f6e5a1b3654e6e7a982902356c18db0e740ae | 3,650,500 |
import logging
import botocore.exceptions
def get_sagemaker_feature_group(feature_group_name: str):
"""Used to check if there is an existing feature group with a given feature_group_name."""
try:
return sagemaker_client().describe_feature_group(FeatureGroupName=feature_group_name)
except botocore.exceptions.ClientError as error:
logging.error(
f"SageMaker could not find a feature group with the name {feature_group_name}. Error {error}"
)
return None | 1e94e894b1686a6833df51f1006f3f845a9e63b4 | 3,650,501 |
def check_system(command, message, exit=0, user=None, stdin=None, shell=False, timeout=None, timeout_signal='TERM'):
"""Runs the command and checks its exit status code.
Handles all of the common steps associated with running a system command:
runs the command, checks its exit status code against the expected result,
and raises an exception if there is an obvious problem.
Returns a tuple of the standard output, standard error, and the failure
message generated by diagnose(). See the system() function for more details
about the command-line options.
"""
status, stdout, stderr = system(command, user, stdin, shell=shell,
timeout=timeout, timeout_signal=timeout_signal,
quiet=False)
fail = diagnose(message, command, status, stdout, stderr)
if timeout and status == -1:
raise osgunittest.TimeoutException(fail)
else:
assert status == exit, fail
return stdout, stderr, fail | 31d83941d5198d0786a6a67a4b1bcd320c26218a | 3,650,502 |
import requests
import json
import pandas as pd
from common_functions import read_orgs
import sys
def get_repo_data(api_token):
"""Executes the GraphQL query to get repository data from one or more GitHub orgs.
Parameters
----------
api_token : str
The GH API token retrieved from the gh_key file.
Returns
-------
repo_info_df : pandas.core.frame.DataFrame
"""
url = 'https://api.github.com/graphql'
headers = {'Authorization': 'token %s' % api_token}
repo_info_df = pd.DataFrame()
# Read list of orgs from a file
try:
org_list = read_orgs('orgs.txt')
except:
print("Error reading orgs. This script depends on the existance of a file called orgs.txt containing one org per line. Exiting")
sys.exit()
for org_name in org_list:
has_next_page = True
after_cursor = None
print("Processing", org_name)
while has_next_page:
try:
query = make_query(after_cursor)
variables = {"org_name": org_name}
r = requests.post(url=url, json={'query': query, 'variables': variables}, headers=headers)
json_data = json.loads(r.text)
df_temp = pd.DataFrame(json_data['data']['organization']['repositories']['nodes'])
repo_info_df = pd.concat([repo_info_df, df_temp])
has_next_page = json_data["data"]["organization"]["repositories"]["pageInfo"]["hasNextPage"]
after_cursor = json_data["data"]["organization"]["repositories"]["pageInfo"]["endCursor"]
except:
has_next_page = False
print("ERROR Cannot process", org_name)
return repo_info_df | 0fcb024aa2f68d6687aebbce93f7ea5a0004afa5 | 3,650,503 |
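# Illustrative sketch (not part of the original row above): get_repo_data() calls a
# make_query() helper that is not shown. One plausible paginated GraphQL query builder
# could look like this; the selected repository fields are assumptions.
def make_query(after_cursor=None):
    after = '"{}"'.format(after_cursor) if after_cursor else "null"
    return """
    query ($org_name: String!) {
      organization(login: $org_name) {
        repositories(first: 100, after: AFTER) {
          nodes { name url isArchived }
          pageInfo { hasNextPage endCursor }
        }
      }
    }
    """.replace("AFTER", after)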
import json
def getJson(file, filters={}):
"""Given a specific JSON file (string) and a set of filters (dictionary
key-values pairs), will return a JSON-formatted tree of the matching data
entries from that file (starting as a null-key list of objects).
"""
with open(file, 'r') as f:
j = json.loads(f.read())
all = j['']
dicts = basic.filter(all, filters)
if len(dicts) > 0:
return formatJson(dicts)
else:
raise Exception('No matching data entries found') | 7b6832eae476eef48584690d993c2dee301bb565 | 3,650,504 |
def underline_node_formatter(nodetext, optionstext, caller=None):
"""
Draws a node with underlines '_____' around it.
"""
nodetext_width_max = max(m_len(line) for line in nodetext.split("\n"))
options_width_max = max(m_len(line) for line in optionstext.split("\n"))
total_width = max(options_width_max, nodetext_width_max)
separator1 = "_" * total_width + "\n\n" if nodetext_width_max else ""
separator2 = "\n" + "_" * total_width + "\n\n" if total_width else ""
return separator1 + "|n" + nodetext + "|n" + separator2 + "|n" + optionstext | 598e3aaf875b2539b93ec03d8665cc8011872015 | 3,650,505 |
from pywps.dependencies import ogr
import jsonschema
import json
import mimetypes
import os
def validategeojson(data_input, mode):
"""GeoJSON validation example
>>> import StringIO
>>> class FakeInput(object):
... json = open('point.geojson','w')
... json.write('''{"type":"Feature", "properties":{}, "geometry":{"type":"Point", "coordinates":[8.5781228542328, 22.87500500679]}, "crs":{"type":"name", "properties":{"name":"urn:ogc:def:crs:OGC:1.3:CRS84"}}}''') # noqa
... json.close()
... file = 'point.geojson'
>>> class fake_data_format(object):
... mimetype = 'application/geojson'
>>> fake_input = FakeInput()
>>> fake_input.data_format = fake_data_format()
>>> validategeojson(fake_input, MODE.SIMPLE)
True
"""
LOGGER.info('validating GeoJSON; Mode: {}'.format(mode))
passed = False
if mode >= MODE.NONE:
passed = True
if mode >= MODE.SIMPLE:
name = data_input.file
(mtype, encoding) = mimetypes.guess_type(name, strict=False)
passed = data_input.data_format.mime_type in {mtype, FORMATS.GEOJSON.mime_type}
if mode >= MODE.STRICT:
data_source = ogr.Open(data_input.file)
if data_source:
passed = (data_source.GetDriver().GetName() == "GeoJSON")
else:
passed = False
if mode >= MODE.VERYSTRICT:
# this code comes from
# https://github.com/om-henners/GeoJSON_Validation/blob/master/geojsonvalidation/geojson_validation.py
schema_home = os.path.join(_get_schemas_home(), "geojson")
base_schema = os.path.join(schema_home, "geojson.json")
with open(base_schema) as fh:
geojson_base = json.load(fh)
with open(os.path.join(schema_home, "crs.json")) as fh:
crs_json = json.load(fh)
with open(os.path.join(schema_home, "bbox.json")) as fh:
bbox_json = json.load(fh)
with open(os.path.join(schema_home, "geometry.json")) as fh:
geometry_json = json.load(fh)
cached_json = {
"http://json-schema.org/geojson/crs.json": crs_json,
"http://json-schema.org/geojson/bbox.json": bbox_json,
"http://json-schema.org/geojson/geometry.json": geometry_json
}
resolver = jsonschema.RefResolver(
"http://json-schema.org/geojson/geojson.json",
geojson_base, store=cached_json)
validator = jsonschema.Draft4Validator(geojson_base, resolver=resolver)
try:
validator.validate(json.loads(data_input.stream.read()))
passed = True
except jsonschema.ValidationError:
passed = False
return passed | 5b9364ecc0c8f92f82448bd5541eb606df776c12 | 3,650,506 |
import tensorflow as tf
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
fcn8 = tf.layers.conv2d(vgg_layer7_out, filters=num_classes, kernel_size=1, padding="SAME", name='fcn8')
fcn9 = tf.layers.conv2d_transpose(fcn8, filters=vgg_layer4_out.get_shape().as_list()[-1], kernel_size=4, strides=(2,2), padding="SAME", name='fcn9')
fcn9_skip = tf.add(fcn9, vgg_layer4_out, name='fcn9_plus_layer4')
fcn10 = tf.layers.conv2d_transpose(fcn9_skip, filters=vgg_layer3_out.get_shape().as_list()[-1], kernel_size=4, strides=(2,2), padding="SAME", name='fcn10')
fcn10_skip = tf.add(fcn10, vgg_layer3_out, name='fcn10_plus_layer3')
fcn11 = tf.layers.conv2d_transpose(fcn10_skip, filters=num_classes, kernel_size=16, strides=(8,8), padding="SAME", name='fcn11')
return fcn11 | cf907d29555fbb7e9a11a1b2f6981637a977bf48 | 3,650,507 |
def determine_issues(project):
"""
Get the list of issues of a project.
:rtype: list
"""
issues = project["Issue"]
if not isinstance(issues, list):
return [issues]
return issues | 7b8b670e4ad5a7ae49f3541c87026dd603406c9f | 3,650,508 |
import os
def find_image_files(path=None):
"""
Used to find image files.
Argument:
path - path to directory of 'img.image' files
"""
if path is None:
path = os.getcwd()
folders = []
for folder in os.listdir(path):
if folder.endswith("img.image"):
folders.append(os.path.join(path, folder))
folders.sort()
return folders | f6813672c1619204caa45d21df47289227ab4d5f | 3,650,509 |
def get_media_after_date(mountpoint: str, date: str):
    """
    Date format is the EXIF style yyyy:mm:dd; the filter below checks File:FileModifyDate.
    """
metadata = get_media_list(mountpoint)
filtered_meta = list()
for m in metadata:
if 'File:FileModifyDate' in m:
if is_after(m['File:FileModifyDate'].split(' ')[0],date):
filtered_meta.append(m)
return filtered_meta | 950c937540bd44cd1f577f1ee763262dad51d353 | 3,650,510 |
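# Hypothetical helper assumed by get_media_after_date() above (the real is_after() is not
# shown in the row). EXIF-style "YYYY:MM:DD" dates are zero-padded and ordered from most
# to least significant, so plain string comparison gives the chronological order.
def is_after(file_date: str, cutoff_date: str) -> bool:
    return file_date >= cutoff_date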
import random
def run_normal_game():
"""Run a complex game, like the real thing."""
stage = create_stage()
contestant_first_pick = random.randrange(3)
montys_pick_algorithm(stage, contestant_first_pick)
contestant_second_pick = contestants_second_pick_algorithm(stage, contestant_first_pick)
wins = contestant_wins(stage, contestant_second_pick)
#print (stage, contestant_first_pick, contestant_second_pick, wins)
return wins | 04f3e8805b3b7d7d9e9f631eee635c4b9af75fdf | 3,650,511 |
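# Hypothetical sketch of the helpers run_normal_game() relies on (they are not shown in
# the row above); this is one plausible Monty Hall implementation, not the original code.
import random

def create_stage():
    # Three doors; the prize is behind one of them. Monty's opened door is recorded later.
    return {"prize": random.randrange(3), "opened": None}

def montys_pick_algorithm(stage, contestant_first_pick):
    # Monty opens a door that hides no prize and was not picked by the contestant.
    choices = [d for d in range(3) if d != contestant_first_pick and d != stage["prize"]]
    stage["opened"] = random.choice(choices)

def contestants_second_pick_algorithm(stage, contestant_first_pick):
    # Always-switch strategy: take the remaining unopened, unpicked door.
    return next(d for d in range(3) if d not in (contestant_first_pick, stage["opened"]))

def contestant_wins(stage, pick):
    return pick == stage["prize"]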
def reformat_wolfram_entries(titles, entries):
"""Reformat Wolfram entries."""
output_list = []
for title, entry in zip(titles, entries):
try:
if ' |' in entry:
entry = '\n\t{0}'.format(entry.replace(' |', ':')
.replace('\n', '\n\t'))
if title == 'Result':
new_entry = entry.encode('utf-8') if PY2 else entry
else:
raw_entry = (title + ': ' + entry)
new_entry = raw_entry.encode('utf-8') if PY2 else raw_entry
output_list.append(new_entry)
except (AttributeError, UnicodeEncodeError):
pass
return output_list | ba236671187ba4ab80fb013b3ee40c6ae58cc1c8 | 3,650,512 |
import os
def project_root() -> str:
"""Returns path to root directory of a project"""
return os.path.join(_file_directory_path(__file__), '..') | 175090bf05b5bc79f78e8fbbbb491d62696e8d35 | 3,650,513 |
import six
def GetAndValidateRowId(row_dict):
"""Returns the integer ID for a new Row.
This method is also responsible for validating the input fields related
to making the new row ID.
Args:
row_dict: A dictionary obtained from the input JSON.
Returns:
An integer row ID.
Raises:
BadRequestError: The input wasn't formatted properly.
"""
if 'revision' not in row_dict:
raise BadRequestError('Required field "revision" missing.')
try:
return int(row_dict['revision'])
except (ValueError, TypeError) as e:
six.raise_from(
BadRequestError('Bad value for "revision", should be numerical.'), e) | be9f096ddb8bba036d1fa06cdd3565296a949762 | 3,650,514 |
def generate_test_cases(ukernel, channel_tile, pixel_tile, isa):
"""Generates all tests cases for a BILINEAR micro-kernel.
Args:
ukernel: C name of the micro-kernel function.
channel_tile: Number of channels processed per one iteration of the inner
loop of the micro-kernel.
pixel_tile: Number of pixels processed per one iteration of the outer loop
of the micro-kernel.
isa: instruction set required to run the micro-kernel. Generated unit test
will skip execution if the host processor doesn't support this ISA.
Returns:
Code for the test case.
"""
_, test_name = ukernel.split("_", 1)
_, datatype, ukernel_type, _ = ukernel.split("_", 3)
test_args = [ukernel]
return xngen.preprocess(IBILINEAR_TEST_TEMPLATE, {
"TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
"TEST_FUNC": ukernel,
"UKERNEL_TYPE": ukernel_type.upper(),
"DATATYPE": datatype,
"CHANNEL_TILE": channel_tile,
"PIXEL_TILE": pixel_tile,
"ISA_CHECK": xnncommon.generate_isa_check_macro(isa),
"next_prime": next_prime,
}) | 4fe3243c3f8d2ab3ce7861b46aa96ee79ef1014a | 3,650,515 |
def get_file_range(ase, offsets, timeout=None):
# type: (blobxfer.models.azure.StorageEntity,
# blobxfer.models.download.Offsets, int) -> bytes
"""Retrieve file range
:param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
:param blobxfer.models.download.Offsets offsets: download offsets
:param int timeout: timeout
:rtype: bytes
:return: content for file range
"""
dir, fpath, _ = parse_file_path(ase.name)
return ase.client._get_file(
share_name=ase.container,
directory_name=dir,
file_name=fpath,
start_range=offsets.range_start,
end_range=offsets.range_end,
validate_content=False, # HTTPS takes care of integrity during xfer
timeout=timeout,
snapshot=ase.snapshot,
).content | be4f2f06c64ee457152fe582128b36db1a1baae4 | 3,650,516 |
def parse_cli_args():
"""Return parsed command-line arguments."""
parser = ArgumentParser(description='parse and summarize a GLSL file')
parser.add_argument('path')
shader_type_names = [member.name for member in ShaderType]
parser.add_argument('shader_type', nargs='?',
choices=shader_type_names,
default=ShaderType.Fragment.name)
return parser.parse_args() | a044e20ea91e05c09cccc118641dac68ce748142 | 3,650,517 |
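# Hypothetical sketch of the ShaderType enum that parse_cli_args() above assumes (the real
# enum is not shown in the row); member names are guesses apart from Fragment, which the
# default argument requires.
from enum import Enum

class ShaderType(Enum):
    Vertex = "vertex"
    Fragment = "fragment"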
def genus_species_name(genus, species):
"""Return name, genus with species if present.
Copes with species being None (or empty string).
"""
# This is a simple function, centralising it for consistency
assert genus and genus == genus.strip(), repr(genus)
if species:
assert species == species.strip(), repr(species)
return f"{genus} {species}"
else:
return genus | 1fed57c5c87dfd9362262a69429830c7103b7fca | 3,650,518 |
def _native_set_to_python_list(typ, payload, c):
"""
Create a Python list from a native set's items.
"""
nitems = payload.used
listobj = c.pyapi.list_new(nitems)
ok = cgutils.is_not_null(c.builder, listobj)
with c.builder.if_then(ok, likely=True):
index = cgutils.alloca_once_value(c.builder,
ir.Constant(nitems.type, 0))
with payload._iterate() as loop:
i = c.builder.load(index)
item = loop.entry.key
itemobj = c.box(typ.dtype, item)
c.pyapi.list_setitem(listobj, i, itemobj)
i = c.builder.add(i, ir.Constant(i.type, 1))
c.builder.store(i, index)
return ok, listobj | 808a10d85cc19c0b1c31b3e01afc9bbb402e1e90 | 3,650,519 |
def JointAngleCalc(frame,vsk):
""" Joint Angle Calculation function.
Calculates the Joint angles of plugingait and stores the data in array
Stores:
RPel_angle = []
LPel_angle = []
RHip_angle = []
LHip_angle = []
RKnee_angle = []
LKnee_angle = []
RAnkle_angle = []
LAnkle_angle = []
Joint Axis store like below form
The axis is in the form [[origin], [axis]]
Origin defines the position of axis and axis is the direction vector of
x, y, z axis attached to the origin
    If it is just a single one (Pelvis, Hip, Head, Thorax)
Axis = [[origin_x, origin_y, origin_z],[[Xaxis_x,Xaxis_y,Xaxis_z],
[Yaxis_x,Yaxis_y,Yaxis_z],
[Zaxis_x,Zaxis_y,Zaxis_z]]]
    If it has both Right and Left (knee, ankle, foot, clavicle, humerus, radius, hand)
Axis = [[[R_origin_x,R_origin_y,R_origin_z],
[L_origin_x,L_origin_y,L_origin_z]],[[[R_Xaxis_x,R_Xaxis_y,R_Xaxis_z],
[R_Yaxis_x,R_Yaxis_y,R_Yaxis_z],
[R_Zaxis_x,R_Zaxis_y,R_Zaxis_z]],
[[L_Xaxis_x,L_Xaxis_y,L_Xaxis_z],
[L_Yaxis_x,L_Yaxis_y,L_Yaxis_z],
[L_Zaxis_x,L_Zaxis_y,L_Zaxis_z]]]]
Parameters
----------
frame : dict
Dictionaries of marker lists.
vsk : dict
A dictionary containing subject measurements.
Returns
-------
r, jc : tuple
Returns a tuple containing an array that holds the result of all the joint calculations,
followed by a dictionary for joint center marker positions.
Examples
--------
>>> import numpy as np
>>> from .pyCGM import JointAngleCalc
>>> from .pycgmIO import loadC3D, loadVSK
>>> from .pycgmStatic import getStatic
>>> from .pyCGM_Helpers import getfilenames
>>> import os
>>> fileNames=getfilenames(2)
>>> c3dFile = fileNames[1]
>>> vskFile = fileNames[2]
>>> result = loadC3D(c3dFile)
>>> data = result[0]
>>> frame = result[0][0]
>>> vskData = loadVSK(vskFile, False)
>>> vsk = getStatic(data,vskData,flat_foot=False)
>>> results = JointAngleCalc(frame, vsk)[1]
>>> np.around(results['Pelvis'], 2)
array([ 246.15, 353.26, 1031.71])
>>> np.around(results['Thorax'], 2)
array([ 250.56, 303.23, 1461.17])
>>> np.around(results['Head'], 2)
array([ 244.9 , 325.06, 1730.16])
>>> np.around(results['RHand'], 2)
array([ 770.93, 591.05, 1079.05])
"""
# THIS IS FOOT PROGRESS ANGLE
rfoot_prox,rfoot_proy,rfoot_proz,lfoot_prox,lfoot_proy,lfoot_proz = [None]*6
#First Calculate Pelvis
pelvis_axis = pelvisJointCenter(frame)
kin_Pelvis_axis = pelvis_axis
kin_Pelvis_JC = pelvis_axis[0] #quick fix for storing JC
#change to same format
Pelvis_vectors = pelvis_axis[1]
Pelvis_origin = pelvis_axis[0]
#need to update this based on the file
global_Axis = vsk['GCS']
#make the array which will be the input of findangle function
pelvis_Axis_mod = np.vstack([np.subtract(Pelvis_vectors[0],Pelvis_origin),
np.subtract(Pelvis_vectors[1],Pelvis_origin),
np.subtract(Pelvis_vectors[2],Pelvis_origin)])
global_pelvis_angle = getangle(global_Axis,pelvis_Axis_mod)
pelx=global_pelvis_angle[0]
pely=global_pelvis_angle[1]
pelz=global_pelvis_angle[2]
# and then find hip JC
hip_JC = hipJointCenter(frame,pelvis_axis[0],pelvis_axis[1][0],pelvis_axis[1][1],pelvis_axis[1][2],vsk=vsk)
kin_L_Hip_JC = hip_JC[0] #quick fix for storing JC
kin_R_Hip_JC = hip_JC[1] #quick fix for storing JC
hip_axis = hipAxisCenter(hip_JC[0],hip_JC[1],pelvis_axis)
knee_JC = kneeJointCenter(frame,hip_JC,0,vsk=vsk)
kin_R_Knee_JC = knee_JC[0] #quick fix for storing JC
kin_L_Knee_JC = knee_JC[1] #quick fix for storing JC
#change to same format
Hip_axis_form = hip_axis[1]
Hip_center_form = hip_axis[0]
R_Knee_axis_form = knee_JC[2][0]
R_Knee_center_form = knee_JC[0]
L_Knee_axis_form = knee_JC[2][1]
L_Knee_center_form = knee_JC[1]
#make the array which will be the input of findangle function
hip_Axis = np.vstack([np.subtract(Hip_axis_form[0],Hip_center_form),
np.subtract(Hip_axis_form[1],Hip_center_form),
np.subtract(Hip_axis_form[2],Hip_center_form)])
R_knee_Axis = np.vstack([np.subtract(R_Knee_axis_form[0],R_Knee_center_form),
np.subtract(R_Knee_axis_form[1],R_Knee_center_form),
np.subtract(R_Knee_axis_form[2],R_Knee_center_form)])
L_knee_Axis = np.vstack([np.subtract(L_Knee_axis_form[0],L_Knee_center_form),
np.subtract(L_Knee_axis_form[1],L_Knee_center_form),
np.subtract(L_Knee_axis_form[2],L_Knee_center_form)])
R_pelvis_knee_angle = getangle(hip_Axis,R_knee_Axis)
L_pelvis_knee_angle = getangle(hip_Axis,L_knee_Axis)
rhipx=R_pelvis_knee_angle[0]*-1
rhipy=R_pelvis_knee_angle[1]
rhipz=R_pelvis_knee_angle[2]*-1+90
lhipx=L_pelvis_knee_angle[0]*-1
lhipy=L_pelvis_knee_angle[1]*-1
lhipz=L_pelvis_knee_angle[2]-90
ankle_JC = ankleJointCenter(frame,knee_JC,0,vsk=vsk)
kin_R_Ankle_JC = ankle_JC[0] #quick fix for storing JC
kin_L_Ankle_JC = ankle_JC[1] #quick fix for storing JC
#change to same format
R_Ankle_axis_form = ankle_JC[2][0]
R_Ankle_center_form = ankle_JC[0]
L_Ankle_axis_form = ankle_JC[2][1]
L_Ankle_center_form = ankle_JC[1]
#make the array which will be the input of findangle function
# In case of knee axis I mentioned it before as R_knee_Axis and L_knee_Axis
R_ankle_Axis = np.vstack([np.subtract(R_Ankle_axis_form[0],R_Ankle_center_form),
np.subtract(R_Ankle_axis_form[1],R_Ankle_center_form),
np.subtract(R_Ankle_axis_form[2],R_Ankle_center_form)])
L_ankle_Axis = np.vstack([np.subtract(L_Ankle_axis_form[0],L_Ankle_center_form),
np.subtract(L_Ankle_axis_form[1],L_Ankle_center_form),
np.subtract(L_Ankle_axis_form[2],L_Ankle_center_form)])
R_knee_ankle_angle = getangle(R_knee_Axis,R_ankle_Axis)
L_knee_ankle_angle = getangle(L_knee_Axis,L_ankle_Axis)
rkneex=R_knee_ankle_angle[0]
rkneey=R_knee_ankle_angle[1]
rkneez=R_knee_ankle_angle[2]*-1+90
lkneex=L_knee_ankle_angle[0]
lkneey=L_knee_ankle_angle[1]*-1
lkneez=L_knee_ankle_angle[2] - 90
# ANKLE ANGLE
offset = 0
foot_JC = footJointCenter(frame,vsk,ankle_JC,knee_JC,offset)
kin_R_Foot_JC = foot_JC[0] #quick fix for storing JC
kin_L_Foot_JC = foot_JC[1] #quick fix for storing JC
kin_RHEE = frame['RHEE']
kin_LHEE = frame['LHEE']
# Change to same format
R_Foot_axis_form = foot_JC[2][0]
R_Foot_center_form = foot_JC[0]
L_Foot_axis_form = foot_JC[2][1]
L_Foot_center_form = foot_JC[1]
R_foot_Axis = np.vstack([np.subtract(R_Foot_axis_form[0],R_Foot_center_form),
np.subtract(R_Foot_axis_form[1],R_Foot_center_form),
np.subtract(R_Foot_axis_form[2],R_Foot_center_form)])
L_foot_Axis = np.vstack([np.subtract(L_Foot_axis_form[0],L_Foot_center_form),
np.subtract(L_Foot_axis_form[1],L_Foot_center_form),
np.subtract(L_Foot_axis_form[2],L_Foot_center_form)])
R_ankle_foot_angle = getangle(R_ankle_Axis,R_foot_Axis)
L_ankle_foot_angle = getangle(L_ankle_Axis,L_foot_Axis)
ranklex=R_ankle_foot_angle[0]*(-1)-90
rankley=R_ankle_foot_angle[2]*(-1)+90
ranklez=R_ankle_foot_angle[1]
lanklex=L_ankle_foot_angle[0]*(-1)-90
lankley=L_ankle_foot_angle[2]-90
lanklez=L_ankle_foot_angle[1]*(-1)
# ABSOLUTE FOOT ANGLE
R_global_foot_angle = getangle(global_Axis,R_foot_Axis)
L_global_foot_angle = getangle(global_Axis,L_foot_Axis)
rfootx=R_global_foot_angle[0]
rfooty=R_global_foot_angle[2]-90
rfootz=R_global_foot_angle[1]
lfootx=L_global_foot_angle[0]
lfooty=(L_global_foot_angle[2]-90)*-1
lfootz=L_global_foot_angle[1]*-1
#First Calculate HEAD
head_axis = headJC(frame,vsk=vsk)
kin_Head_JC = head_axis[1] #quick fix for storing JC
LFHD = frame['LFHD'] #as above
RFHD = frame['RFHD']
LBHD = frame['LBHD']
RBHD = frame['RBHD']
kin_Head_Front = np.array((LFHD+RFHD)/2)
kin_Head_Back = np.array((LBHD+RBHD)/2)
#change to same format
Head_axis_form = head_axis[0]
Head_center_form = head_axis[1]
#Global_axis_form = [[0,1,0],[-1,0,0],[0,0,1]]
Global_center_form = [0,0,0]
#***********************************************************
Global_axis_form = vsk['GCS']
#Global_axis_form = rotmat(x=0,y=0,z=180) #this is some weird fix to global axis
#make the array which will be the input of findangle function
head_Axis_mod = np.vstack([np.subtract(Head_axis_form[0],Head_center_form),
np.subtract(Head_axis_form[1],Head_center_form),
np.subtract(Head_axis_form[2],Head_center_form)])
global_Axis = np.vstack([np.subtract(Global_axis_form[0],Global_center_form),
np.subtract(Global_axis_form[1],Global_center_form),
np.subtract(Global_axis_form[2],Global_center_form)])
global_head_angle = getHeadangle(global_Axis,head_Axis_mod)
headx=(global_head_angle[0]*-1)# + 24.8
if headx <-180:
headx = headx+360
heady=global_head_angle[1]*-1
headz=global_head_angle[2]#+180
if headz <-180:
headz = headz-360
# Calculate THORAX
thorax_axis = thoraxJC(frame)
kin_Thorax_JC = thorax_axis[1] #quick fix for storing JC
kin_Thorax_axis = thorax_axis
# Change to same format
Thorax_axis_form = thorax_axis[0]
Thorax_center_form = thorax_axis[1]
Global_axis_form = [[0,1,0],[-1,0,0],[0,0,1]]
Global_center_form = [0,0,0]
#*******************************************************
Global_axis_form = rotmat(x=0,y=0,z=180) #this needs to be fixed for the global rotation
#make the array which will be the input of findangle function
thorax_Axis_mod = np.vstack([np.subtract(Thorax_axis_form[0],Thorax_center_form),
np.subtract(Thorax_axis_form[1],Thorax_center_form),
np.subtract(Thorax_axis_form[2],Thorax_center_form)])
global_Axis = np.vstack([np.subtract(Global_axis_form[0],Global_center_form),
np.subtract(Global_axis_form[1],Global_center_form),
np.subtract(Global_axis_form[2],Global_center_form)])
global_thorax_angle = getangle(global_Axis,thorax_Axis_mod)
if global_thorax_angle[0] > 0:
global_thorax_angle[0] = global_thorax_angle[0] - 180
elif global_thorax_angle[0] < 0:
global_thorax_angle[0] = global_thorax_angle[0] + 180
thox=global_thorax_angle[0]
thoy=global_thorax_angle[1]
thoz=global_thorax_angle[2]+90
# Calculate NECK
head_thorax_angle = getHeadangle(head_Axis_mod,thorax_Axis_mod)
neckx=(head_thorax_angle[0]-180)*-1# - 24.9
necky=head_thorax_angle[1]
neckz=head_thorax_angle[2]*-1
kin_C7 = frame['C7']#quick fix to calculate CoM
kin_CLAV = frame['CLAV']
kin_STRN = frame['STRN']
kin_T10 = frame['T10']
# Calculate SPINE
pel_tho_angle = getangle_spi(pelvis_Axis_mod,thorax_Axis_mod)
spix=pel_tho_angle[0]
spiy=pel_tho_angle[2]*-1
spiz=pel_tho_angle[1]
# Calculate SHOULDER
wand = findwandmarker(frame,thorax_axis)
shoulder_JC = findshoulderJC(frame,thorax_axis,wand,vsk=vsk)
kin_R_Shoulder_JC = shoulder_JC[0] #quick fix for storing JC
kin_L_Shoulder_JC = shoulder_JC[1] #quick fix for storing JC
shoulder_axis = shoulderAxisCalc(frame,thorax_axis,shoulder_JC,wand)
humerus_JC = elbowJointCenter(frame,thorax_axis,shoulder_JC,wand,vsk=vsk)
kin_R_Humerus_JC = humerus_JC[0][0] #quick fix for storing JC
kin_L_Humerus_JC = humerus_JC[0][1] #quick fix for storing JC
# Change to same format
R_Clavicle_axis_form = shoulder_axis[1][0]
L_Clavicle_axis_form = shoulder_axis[1][1]
R_Clavicle_center_form = shoulder_axis[0][0]
L_Clavicle_center_form = shoulder_axis[0][1]
# Change to same format
R_Humerus_axis_form = humerus_JC[1][0]
L_Humerus_axis_form = humerus_JC[1][1]
R_Humerus_center_form = humerus_JC[0][0]
L_Humerus_center_form = humerus_JC[0][1]
# make the array which will be the input of findangle function
R_humerus_Axis_mod = np.vstack([np.subtract(R_Humerus_axis_form[0],R_Humerus_center_form),
np.subtract(R_Humerus_axis_form[1],R_Humerus_center_form),
np.subtract(R_Humerus_axis_form[2],R_Humerus_center_form)])
L_humerus_Axis_mod = np.vstack([np.subtract(L_Humerus_axis_form[0],L_Humerus_center_form),
np.subtract(L_Humerus_axis_form[1],L_Humerus_center_form),
np.subtract(L_Humerus_axis_form[2],L_Humerus_center_form)])
R_thorax_shoulder_angle = getangle_sho(thorax_Axis_mod,R_humerus_Axis_mod)
L_thorax_shoulder_angle = getangle_sho(thorax_Axis_mod,L_humerus_Axis_mod)
if R_thorax_shoulder_angle[2] < 0:
R_thorax_shoulder_angle[2]=R_thorax_shoulder_angle[2]+180
elif R_thorax_shoulder_angle[2] >0:
R_thorax_shoulder_angle[2] = R_thorax_shoulder_angle[2]-180
if R_thorax_shoulder_angle[1] > 0:
R_thorax_shoulder_angle[1] = R_thorax_shoulder_angle[1]-180
elif R_thorax_shoulder_angle[1] <0:
R_thorax_shoulder_angle[1] = R_thorax_shoulder_angle[1]*-1-180
if L_thorax_shoulder_angle[1] < 0:
L_thorax_shoulder_angle[1]=L_thorax_shoulder_angle[1]+180
elif L_thorax_shoulder_angle[1] >0:
L_thorax_shoulder_angle[1] = L_thorax_shoulder_angle[1]-180
rshox=R_thorax_shoulder_angle[0]*-1
rshoy=R_thorax_shoulder_angle[1]*-1
rshoz=R_thorax_shoulder_angle[2]
lshox=L_thorax_shoulder_angle[0]*-1
lshoy=L_thorax_shoulder_angle[1]
lshoz=(L_thorax_shoulder_angle[2]-180)*-1
if lshoz >180:
lshoz = lshoz - 360
# Calculate ELBOW
radius_JC = wristJointCenter(frame,shoulder_JC,wand,humerus_JC)
kin_R_Radius_JC = radius_JC[0][0] #quick fix for storing JC
kin_L_Radius_JC = radius_JC[0][1] #quick fix for storing JC
# Change to same format
R_Radius_axis_form = radius_JC[1][0]
L_Radius_axis_form = radius_JC[1][1]
R_Radius_center_form = radius_JC[0][0]
L_Radius_center_form = radius_JC[0][1]
# make the array which will be the input of findangle function
R_radius_Axis_mod = np.vstack([np.subtract(R_Radius_axis_form[0],R_Radius_center_form),
np.subtract(R_Radius_axis_form[1],R_Radius_center_form),
np.subtract(R_Radius_axis_form[2],R_Radius_center_form)])
L_radius_Axis_mod = np.vstack([np.subtract(L_Radius_axis_form[0],L_Radius_center_form),
np.subtract(L_Radius_axis_form[1],L_Radius_center_form),
np.subtract(L_Radius_axis_form[2],L_Radius_center_form)])
R_humerus_radius_angle = getangle(R_humerus_Axis_mod,R_radius_Axis_mod)
L_humerus_radius_angle = getangle(L_humerus_Axis_mod,L_radius_Axis_mod)
relbx=R_humerus_radius_angle[0]
relby=R_humerus_radius_angle[1]
relbz=R_humerus_radius_angle[2]-90.0
lelbx=L_humerus_radius_angle[0]
lelby=L_humerus_radius_angle[1]
lelbz=L_humerus_radius_angle[2]-90.0
# Calculate WRIST
hand_JC = handJointCenter(frame,humerus_JC,radius_JC,vsk=vsk)
kin_R_Hand_JC = hand_JC[0][0] #quick fix for storing JC
kin_L_Hand_JC = hand_JC[0][1] #quick fix for storing JC
# Change to same format
R_Hand_axis_form = hand_JC[1][0]
L_Hand_axis_form = hand_JC[1][1]
R_Hand_center_form = hand_JC[0][0]
L_Hand_center_form = hand_JC[0][1]
# make the array which will be the input of findangle function
R_hand_Axis_mod = np.vstack([np.subtract(R_Hand_axis_form[0],R_Hand_center_form),
np.subtract(R_Hand_axis_form[1],R_Hand_center_form),
np.subtract(R_Hand_axis_form[2],R_Hand_center_form)])
L_hand_Axis_mod = np.vstack([np.subtract(L_Hand_axis_form[0],L_Hand_center_form),
np.subtract(L_Hand_axis_form[1],L_Hand_center_form),
np.subtract(L_Hand_axis_form[2],L_Hand_center_form)])
R_radius_hand_angle = getangle(R_radius_Axis_mod,R_hand_Axis_mod)
L_radius_hand_angle = getangle(L_radius_Axis_mod,L_hand_Axis_mod)
rwrtx=R_radius_hand_angle[0]
rwrty=R_radius_hand_angle[1]
rwrtz=R_radius_hand_angle[2]*-1 + 90
lwrtx=L_radius_hand_angle[0]
lwrty=L_radius_hand_angle[1]*-1
lwrtz=L_radius_hand_angle[2]-90
if lwrtz < -180:
lwrtz = lwrtz + 360
# make each axis as same format to store
# Pelvis
# origin
pel_origin = Pelvis_origin
pel_ox=pel_origin[0]
pel_oy=pel_origin[1]
pel_oz=pel_origin[2]
# xaxis
pel_x_axis = Pelvis_vectors[0]
pel_xx=pel_x_axis[0]
pel_xy=pel_x_axis[1]
pel_xz=pel_x_axis[2]
# yaxis
pel_y_axis = Pelvis_vectors[1]
pel_yx=pel_y_axis[0]
pel_yy=pel_y_axis[1]
pel_yz=pel_y_axis[2]
# zaxis
pel_z_axis = Pelvis_vectors[2]
pel_zx=pel_z_axis[0]
pel_zy=pel_z_axis[1]
pel_zz=pel_z_axis[2]
# Hip
# origin
hip_origin = Hip_center_form
hip_ox=hip_origin[0]
hip_oy=hip_origin[1]
hip_oz=hip_origin[2]
# xaxis
hip_x_axis = Hip_axis_form[0]
hip_xx=hip_x_axis[0]
hip_xy=hip_x_axis[1]
hip_xz=hip_x_axis[2]
# yaxis
hip_y_axis = Hip_axis_form[1]
hip_yx=hip_y_axis[0]
hip_yy=hip_y_axis[1]
hip_yz=hip_y_axis[2]
# zaxis
hip_z_axis = Hip_axis_form[2]
hip_zx=hip_z_axis[0]
hip_zy=hip_z_axis[1]
hip_zz=hip_z_axis[2]
# R KNEE
# origin
rknee_origin = R_Knee_center_form
rknee_ox=rknee_origin[0]
rknee_oy=rknee_origin[1]
rknee_oz=rknee_origin[2]
# xaxis
rknee_x_axis = R_Knee_axis_form[0]
rknee_xx=rknee_x_axis[0]
rknee_xy=rknee_x_axis[1]
rknee_xz=rknee_x_axis[2]
# yaxis
rknee_y_axis = R_Knee_axis_form[1]
rknee_yx=rknee_y_axis[0]
rknee_yy=rknee_y_axis[1]
rknee_yz=rknee_y_axis[2]
# zaxis
rknee_z_axis = R_Knee_axis_form[2]
rknee_zx=rknee_z_axis[0]
rknee_zy=rknee_z_axis[1]
rknee_zz=rknee_z_axis[2]
# L KNEE
# origin
lknee_origin = L_Knee_center_form
lknee_ox=lknee_origin[0]
lknee_oy=lknee_origin[1]
lknee_oz=lknee_origin[2]
# xaxis
lknee_x_axis = L_Knee_axis_form[0]
lknee_xx=lknee_x_axis[0]
lknee_xy=lknee_x_axis[1]
lknee_xz=lknee_x_axis[2]
# yaxis
lknee_y_axis = L_Knee_axis_form[1]
lknee_yx=lknee_y_axis[0]
lknee_yy=lknee_y_axis[1]
lknee_yz=lknee_y_axis[2]
# zaxis
lknee_z_axis = L_Knee_axis_form[2]
lknee_zx=lknee_z_axis[0]
lknee_zy=lknee_z_axis[1]
lknee_zz=lknee_z_axis[2]
# R ANKLE
# origin
rank_origin = R_Ankle_center_form
rank_ox=rank_origin[0]
rank_oy=rank_origin[1]
rank_oz=rank_origin[2]
# xaxis
rank_x_axis = R_Ankle_axis_form[0]
rank_xx=rank_x_axis[0]
rank_xy=rank_x_axis[1]
rank_xz=rank_x_axis[2]
# yaxis
rank_y_axis = R_Ankle_axis_form[1]
rank_yx=rank_y_axis[0]
rank_yy=rank_y_axis[1]
rank_yz=rank_y_axis[2]
# zaxis
rank_z_axis = R_Ankle_axis_form[2]
rank_zx=rank_z_axis[0]
rank_zy=rank_z_axis[1]
rank_zz=rank_z_axis[2]
# L ANKLE
# origin
lank_origin = L_Ankle_center_form
lank_ox=lank_origin[0]
lank_oy=lank_origin[1]
lank_oz=lank_origin[2]
# xaxis
lank_x_axis = L_Ankle_axis_form[0]
lank_xx=lank_x_axis[0]
lank_xy=lank_x_axis[1]
lank_xz=lank_x_axis[2]
# yaxis
lank_y_axis = L_Ankle_axis_form[1]
lank_yx=lank_y_axis[0]
lank_yy=lank_y_axis[1]
lank_yz=lank_y_axis[2]
# zaxis
lank_z_axis = L_Ankle_axis_form[2]
lank_zx=lank_z_axis[0]
lank_zy=lank_z_axis[1]
lank_zz=lank_z_axis[2]
# R FOOT
# origin
rfoot_origin = R_Foot_center_form
rfoot_ox=rfoot_origin[0]
rfoot_oy=rfoot_origin[1]
rfoot_oz=rfoot_origin[2]
# xaxis
rfoot_x_axis = R_Foot_axis_form[0]
rfoot_xx=rfoot_x_axis[0]
rfoot_xy=rfoot_x_axis[1]
rfoot_xz=rfoot_x_axis[2]
# yaxis
rfoot_y_axis = R_Foot_axis_form[1]
rfoot_yx=rfoot_y_axis[0]
rfoot_yy=rfoot_y_axis[1]
rfoot_yz=rfoot_y_axis[2]
# zaxis
rfoot_z_axis = R_Foot_axis_form[2]
rfoot_zx=rfoot_z_axis[0]
rfoot_zy=rfoot_z_axis[1]
rfoot_zz=rfoot_z_axis[2]
# L FOOT
# origin
lfoot_origin = L_Foot_center_form
lfoot_ox=lfoot_origin[0]
lfoot_oy=lfoot_origin[1]
lfoot_oz=lfoot_origin[2]
# xaxis
lfoot_x_axis = L_Foot_axis_form[0]
lfoot_xx=lfoot_x_axis[0]
lfoot_xy=lfoot_x_axis[1]
lfoot_xz=lfoot_x_axis[2]
# yaxis
lfoot_y_axis = L_Foot_axis_form[1]
lfoot_yx=lfoot_y_axis[0]
lfoot_yy=lfoot_y_axis[1]
lfoot_yz=lfoot_y_axis[2]
# zaxis
lfoot_z_axis = L_Foot_axis_form[2]
lfoot_zx=lfoot_z_axis[0]
lfoot_zy=lfoot_z_axis[1]
lfoot_zz=lfoot_z_axis[2]
# HEAD
# origin
head_origin = Head_center_form
head_ox=head_origin[0]
head_oy=head_origin[1]
head_oz=head_origin[2]
# xaxis
head_x_axis = Head_axis_form[0]
head_xx=head_x_axis[0]
head_xy=head_x_axis[1]
head_xz=head_x_axis[2]
# yaxis
head_y_axis = Head_axis_form[1]
head_yx=head_y_axis[0]
head_yy=head_y_axis[1]
head_yz=head_y_axis[2]
# zaxis
head_z_axis = Head_axis_form[2]
head_zx=head_z_axis[0]
head_zy=head_z_axis[1]
head_zz=head_z_axis[2]
# THORAX
# origin
tho_origin = Thorax_center_form
tho_ox=tho_origin[0]
tho_oy=tho_origin[1]
tho_oz=tho_origin[2]
# xaxis
tho_x_axis = Thorax_axis_form[0]
tho_xx=tho_x_axis[0]
tho_xy=tho_x_axis[1]
tho_xz=tho_x_axis[2]
# yaxis
tho_y_axis = Thorax_axis_form[1]
tho_yx=tho_y_axis[0]
tho_yy=tho_y_axis[1]
tho_yz=tho_y_axis[2]
# zaxis
tho_z_axis = Thorax_axis_form[2]
tho_zx=tho_z_axis[0]
tho_zy=tho_z_axis[1]
tho_zz=tho_z_axis[2]
# R CLAVICLE
# origin
rclav_origin = R_Clavicle_center_form
rclav_ox=rclav_origin[0]
rclav_oy=rclav_origin[1]
rclav_oz=rclav_origin[2]
# xaxis
rclav_x_axis = R_Clavicle_axis_form[0]
rclav_xx=rclav_x_axis[0]
rclav_xy=rclav_x_axis[1]
rclav_xz=rclav_x_axis[2]
# yaxis
rclav_y_axis = R_Clavicle_axis_form[1]
rclav_yx=rclav_y_axis[0]
rclav_yy=rclav_y_axis[1]
rclav_yz=rclav_y_axis[2]
# zaxis
rclav_z_axis = R_Clavicle_axis_form[2]
rclav_zx=rclav_z_axis[0]
rclav_zy=rclav_z_axis[1]
rclav_zz=rclav_z_axis[2]
# L CLAVICLE
# origin
lclav_origin = L_Clavicle_center_form
lclav_ox=lclav_origin[0]
lclav_oy=lclav_origin[1]
lclav_oz=lclav_origin[2]
# xaxis
lclav_x_axis = L_Clavicle_axis_form[0]
lclav_xx=lclav_x_axis[0]
lclav_xy=lclav_x_axis[1]
lclav_xz=lclav_x_axis[2]
# yaxis
lclav_y_axis = L_Clavicle_axis_form[1]
lclav_yx=lclav_y_axis[0]
lclav_yy=lclav_y_axis[1]
lclav_yz=lclav_y_axis[2]
# zaxis
lclav_z_axis = L_Clavicle_axis_form[2]
lclav_zx=lclav_z_axis[0]
lclav_zy=lclav_z_axis[1]
lclav_zz=lclav_z_axis[2]
# R HUMERUS
# origin
rhum_origin = R_Humerus_center_form
rhum_ox=rhum_origin[0]
rhum_oy=rhum_origin[1]
rhum_oz=rhum_origin[2]
# xaxis
rhum_x_axis = R_Humerus_axis_form[0]
rhum_xx=rhum_x_axis[0]
rhum_xy=rhum_x_axis[1]
rhum_xz=rhum_x_axis[2]
# yaxis
rhum_y_axis = R_Humerus_axis_form[1]
rhum_yx=rhum_y_axis[0]
rhum_yy=rhum_y_axis[1]
rhum_yz=rhum_y_axis[2]
# zaxis
rhum_z_axis = R_Humerus_axis_form[2]
rhum_zx=rhum_z_axis[0]
rhum_zy=rhum_z_axis[1]
rhum_zz=rhum_z_axis[2]
# L HUMERUS
# origin
lhum_origin = L_Humerus_center_form
lhum_ox=lhum_origin[0]
lhum_oy=lhum_origin[1]
lhum_oz=lhum_origin[2]
# xaxis
lhum_x_axis = L_Humerus_axis_form[0]
lhum_xx=lhum_x_axis[0]
lhum_xy=lhum_x_axis[1]
lhum_xz=lhum_x_axis[2]
# yaxis
lhum_y_axis = L_Humerus_axis_form[1]
lhum_yx=lhum_y_axis[0]
lhum_yy=lhum_y_axis[1]
lhum_yz=lhum_y_axis[2]
# zaxis
lhum_z_axis = L_Humerus_axis_form[2]
lhum_zx=lhum_z_axis[0]
lhum_zy=lhum_z_axis[1]
lhum_zz=lhum_z_axis[2]
# R RADIUS
# origin
rrad_origin = R_Radius_center_form
rrad_ox=rrad_origin[0]
rrad_oy=rrad_origin[1]
rrad_oz=rrad_origin[2]
# xaxis
rrad_x_axis = R_Radius_axis_form[0]
rrad_xx=rrad_x_axis[0]
rrad_xy=rrad_x_axis[1]
rrad_xz=rrad_x_axis[2]
# yaxis
rrad_y_axis = R_Radius_axis_form[1]
rrad_yx=rrad_y_axis[0]
rrad_yy=rrad_y_axis[1]
rrad_yz=rrad_y_axis[2]
# zaxis
rrad_z_axis = R_Radius_axis_form[2]
rrad_zx=rrad_z_axis[0]
rrad_zy=rrad_z_axis[1]
rrad_zz=rrad_z_axis[2]
# L RADIUS
# origin
lrad_origin = L_Radius_center_form
lrad_ox=lrad_origin[0]
lrad_oy=lrad_origin[1]
lrad_oz=lrad_origin[2]
# xaxis
lrad_x_axis = L_Radius_axis_form[0]
lrad_xx=lrad_x_axis[0]
lrad_xy=lrad_x_axis[1]
lrad_xz=lrad_x_axis[2]
# yaxis
lrad_y_axis = L_Radius_axis_form[1]
lrad_yx=lrad_y_axis[0]
lrad_yy=lrad_y_axis[1]
lrad_yz=lrad_y_axis[2]
# zaxis
lrad_z_axis = L_Radius_axis_form[2]
lrad_zx=lrad_z_axis[0]
lrad_zy=lrad_z_axis[1]
lrad_zz=lrad_z_axis[2]
# R HAND
# origin
rhand_origin = R_Hand_center_form
rhand_ox=rhand_origin[0]
rhand_oy=rhand_origin[1]
rhand_oz=rhand_origin[2]
# xaxis
rhand_x_axis= R_Hand_axis_form[0]
rhand_xx=rhand_x_axis[0]
rhand_xy=rhand_x_axis[1]
rhand_xz=rhand_x_axis[2]
# yaxis
rhand_y_axis= R_Hand_axis_form[1]
rhand_yx=rhand_y_axis[0]
rhand_yy=rhand_y_axis[1]
rhand_yz=rhand_y_axis[2]
# zaxis
rhand_z_axis= R_Hand_axis_form[2]
rhand_zx=rhand_z_axis[0]
rhand_zy=rhand_z_axis[1]
rhand_zz=rhand_z_axis[2]
# L HAND
# origin
lhand_origin = L_Hand_center_form
lhand_ox=lhand_origin[0]
lhand_oy=lhand_origin[1]
lhand_oz=lhand_origin[2]
# xaxis
lhand_x_axis = L_Hand_axis_form[0]
lhand_xx=lhand_x_axis[0]
lhand_xy=lhand_x_axis[1]
lhand_xz=lhand_x_axis[2]
# yaxis
lhand_y_axis = L_Hand_axis_form[1]
lhand_yx=lhand_y_axis[0]
lhand_yy=lhand_y_axis[1]
lhand_yz=lhand_y_axis[2]
# zaxis
lhand_z_axis = L_Hand_axis_form[2]
lhand_zx=lhand_z_axis[0]
lhand_zy=lhand_z_axis[1]
lhand_zz=lhand_z_axis[2]
#-----------------------------------------------------
#Store everything in an array to send back to results of process
r=[
pelx,pely,pelz,
rhipx,rhipy,rhipz,
lhipx,lhipy,lhipz,
rkneex,rkneey,rkneez,
lkneex,lkneey,lkneez,
ranklex,rankley,ranklez,
lanklex,lankley,lanklez,
rfootx,rfooty,rfootz,
lfootx,lfooty,lfootz,
headx,heady,headz,
thox,thoy,thoz,
neckx,necky,neckz,
spix,spiy,spiz,
rshox,rshoy,rshoz,
lshox,lshoy,lshoz,
relbx,relby,relbz,
lelbx,lelby,lelbz,
rwrtx,rwrty,rwrtz,
lwrtx,lwrty,lwrtz,
pel_ox,pel_oy,pel_oz,pel_xx,pel_xy,pel_xz,pel_yx,pel_yy,pel_yz,pel_zx,pel_zy,pel_zz,
hip_ox,hip_oy,hip_oz,hip_xx,hip_xy,hip_xz,hip_yx,hip_yy,hip_yz,hip_zx,hip_zy,hip_zz,
rknee_ox,rknee_oy,rknee_oz,rknee_xx,rknee_xy,rknee_xz,rknee_yx,rknee_yy,rknee_yz,rknee_zx,rknee_zy,rknee_zz,
lknee_ox,lknee_oy,lknee_oz,lknee_xx,lknee_xy,lknee_xz,lknee_yx,lknee_yy,lknee_yz,lknee_zx,lknee_zy,lknee_zz,
rank_ox,rank_oy,rank_oz,rank_xx,rank_xy,rank_xz,rank_yx,rank_yy,rank_yz,rank_zx,rank_zy,rank_zz,
lank_ox,lank_oy,lank_oz,lank_xx,lank_xy,lank_xz,lank_yx,lank_yy,lank_yz,lank_zx,lank_zy,lank_zz,
rfoot_ox,rfoot_oy,rfoot_oz,rfoot_xx,rfoot_xy,rfoot_xz,rfoot_yx,rfoot_yy,rfoot_yz,rfoot_zx,rfoot_zy,rfoot_zz,
lfoot_ox,lfoot_oy,lfoot_oz,lfoot_xx,lfoot_xy,lfoot_xz,lfoot_yx,lfoot_yy,lfoot_yz,lfoot_zx,lfoot_zy,lfoot_zz,
head_ox,head_oy,head_oz,head_xx,head_xy,head_xz,head_yx,head_yy,head_yz,head_zx,head_zy,head_zz,
tho_ox,tho_oy,tho_oz,tho_xx,tho_xy,tho_xz,tho_yx,tho_yy,tho_yz,tho_zx,tho_zy,tho_zz,
rclav_ox,rclav_oy,rclav_oz,rclav_xx,rclav_xy,rclav_xz,rclav_yx,rclav_yy,rclav_yz,rclav_zx,rclav_zy,rclav_zz,
lclav_ox,lclav_oy,lclav_oz,lclav_xx,lclav_xy,lclav_xz,lclav_yx,lclav_yy,lclav_yz,lclav_zx,lclav_zy,lclav_zz,
rhum_ox,rhum_oy,rhum_oz,rhum_xx,rhum_xy,rhum_xz,rhum_yx,rhum_yy,rhum_yz,rhum_zx,rhum_zy,rhum_zz,
lhum_ox,lhum_oy,lhum_oz,lhum_xx,lhum_xy,lhum_xz,lhum_yx,lhum_yy,lhum_yz,lhum_zx,lhum_zy,lhum_zz,
rrad_ox,rrad_oy,rrad_oz,rrad_xx,rrad_xy,rrad_xz,rrad_yx,rrad_yy,rrad_yz,rrad_zx,rrad_zy,rrad_zz,
lrad_ox,lrad_oy,lrad_oz,lrad_xx,lrad_xy,lrad_xz,lrad_yx,lrad_yy,lrad_yz,lrad_zx,lrad_zy,lrad_zz,
rhand_ox,rhand_oy,rhand_oz,rhand_xx,rhand_xy,rhand_xz,rhand_yx,rhand_yy,rhand_yz,rhand_zx,rhand_zy,rhand_zz,
lhand_ox,lhand_oy,lhand_oz,lhand_xx,lhand_xy,lhand_xz,lhand_yx,lhand_yy,lhand_yz,lhand_zx,lhand_zy,lhand_zz
]
r=np.array(r,dtype=np.float64)
#Put temporary dictionary for joint centers to return for now, then modify later
jc = {}
jc['Pelvis_axis'] = kin_Pelvis_axis
jc['Thorax_axis'] = kin_Thorax_axis
jc['Pelvis'] = kin_Pelvis_JC
jc['RHip'] = kin_R_Hip_JC
jc['LHip'] = kin_L_Hip_JC
jc['RKnee'] = kin_R_Knee_JC
jc['LKnee'] = kin_L_Knee_JC
jc['RAnkle'] = kin_R_Ankle_JC
jc['LAnkle'] = kin_L_Ankle_JC
jc['RFoot'] = kin_R_Foot_JC
jc['LFoot'] = kin_L_Foot_JC
jc['RHEE'] = kin_RHEE
jc['LHEE'] = kin_LHEE
jc['C7'] = kin_C7
jc['CLAV'] = kin_CLAV
jc['STRN'] = kin_STRN
jc['T10'] = kin_T10
jc['Front_Head'] = kin_Head_Front
jc['Back_Head'] = kin_Head_Back
jc['Head'] = kin_Head_JC
jc['Thorax'] = kin_Thorax_JC
jc['RShoulder'] = kin_R_Shoulder_JC
jc['LShoulder'] = kin_L_Shoulder_JC
jc['RHumerus'] = kin_R_Humerus_JC
jc['LHumerus'] = kin_L_Humerus_JC
jc['RRadius'] = kin_R_Radius_JC
jc['LRadius'] = kin_L_Radius_JC
jc['RHand'] = kin_R_Hand_JC
jc['LHand'] = kin_L_Hand_JC
return r,jc | 0b3c7df9e514a1a08f23d487f909952df9a168b5 | 3,650,520 |
def var_to_str(var):
"""Returns a string representation of the variable of a Jax expression."""
if isinstance(var, jax.core.Literal):
return str(var)
elif isinstance(var, jax.core.UnitVar):
return "*"
elif not isinstance(var, jax.core.Var):
raise ValueError(f"Idk what to do with this {type(var)}?")
c = int(var.count)
if c == -1:
return "_"
str_rep = ""
while c > 25:
str_rep += chr(c % 26 + ord("a"))
c = c // 26
str_rep += chr(c + ord("a"))
return str_rep[::-1] | 820645057359f8704cbd28d2545b9bb3c6e2f4d3 | 3,650,521 |
def lal_binary_neutron_star(
frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,
phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, lambda_1, lambda_2,
**kwargs):
""" A Binary Neutron Star waveform model using lalsimulation
Parameters
----------
frequency_array: array_like
The frequencies at which we want to calculate the strain
mass_1: float
The mass of the heavier object in solar masses
mass_2: float
The mass of the lighter object in solar masses
luminosity_distance: float
The luminosity distance in megaparsec
a_1: float
Dimensionless primary spin magnitude
tilt_1: float
Primary tilt angle
phi_12: float
Azimuthal angle between the two component spins
a_2: float
Dimensionless secondary spin magnitude
tilt_2: float
Secondary tilt angle
phi_jl: float
Azimuthal angle between the total binary angular momentum and the
orbital angular momentum
theta_jn: float
Orbital inclination
phase: float
The phase at coalescence
lambda_1: float
Dimensionless tidal deformability of mass_1
lambda_2: float
Dimensionless tidal deformability of mass_2
kwargs: dict
Optional keyword arguments
Supported arguments:
waveform_approximant
reference_frequency
minimum_frequency
maximum_frequency
catch_waveform_errors
pn_spin_order
pn_tidal_order
pn_phase_order
pn_amplitude_order
mode_array:
Activate a specific mode array and evaluate the model using those
modes only. e.g. waveform_arguments =
dict(waveform_approximant='IMRPhenomHM', modearray=[[2,2],[2,-2])
returns the 22 and 2-2 modes only of IMRPhenomHM. You can only
specify modes that are included in that particular model. e.g.
waveform_arguments = dict(waveform_approximant='IMRPhenomHM',
modearray=[[2,2],[2,-2],[5,5],[5,-5]]) is not allowed because the
55 modes are not included in this model. Be aware that some models
only take positive modes and return the positive and the negative
mode together, while others need to call both. e.g.
waveform_arguments = dict(waveform_approximant='IMRPhenomHM',
            modearray=[[2,2],[4,-4]]) returns the 22 and 2-2 of IMRPhenomHM.
However, waveform_arguments =
dict(waveform_approximant='IMRPhenomXHM', modearray=[[2,2],[4,-4]])
returns the 22 and 4-4 of IMRPhenomXHM.
Returns
-------
dict: A dictionary with the plus and cross polarisation strain modes
"""
waveform_kwargs = dict(
waveform_approximant='IMRPhenomPv2_NRTidal', reference_frequency=50.0,
minimum_frequency=20.0, maximum_frequency=frequency_array[-1],
catch_waveform_errors=False, pn_spin_order=-1, pn_tidal_order=-1,
pn_phase_order=-1, pn_amplitude_order=0)
waveform_kwargs.update(kwargs)
return _base_lal_cbc_fd_waveform(
frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,
luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,
a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_12=phi_12,
phi_jl=phi_jl, lambda_1=lambda_1, lambda_2=lambda_2, **waveform_kwargs) | 4641e65e9f422bb9be9a90f2f849ab58f1cdea51 | 3,650,522 |
def handledisc(tree):
"""Binarize discontinuous substitution sites.
>>> print(handledisc(Tree('(S (X 0 2 4))')))
(S (X 0 (X|<> 2 (X|<> 4))))
>>> print(handledisc(Tree('(S (X 0 2))')))
(S (X 0 (X|<> 2)))
"""
for a in tree.postorder(lambda n: len(n) > 1 and isinstance(n[0], int)):
binarize(a, rightmostunary=True, threshold=1)
return tree | 1e164d0174a4b31462369a10e56f9d69d936d18b | 3,650,523 |
def check_bounds(shape: Shape, point: Coord) -> bool:
"""Return ``True`` if ``point`` is valid index in ``shape``.
Args:
shape: Shape of two-dimensional array.
point: Two-dimensional coordinate.
Return:
True if ``point`` is within ``shape`` else ``False``.
"""
return (0 <= point[0] < shape[0]) and (0 <= point[1] < shape[1]) | 88ab89fddf3f85fc38f3404ed90f384f50337905 | 3,650,524 |
def logout(home=None):
"""
Logs out current session and redirects to home
:param str home: URL to redirect to after logout success
"""
flask_login.logout_user()
return redirect(request.args.get('redirect',
home or url_for('public.home'))) | bded682e6807532aa6382ea0855ee4d335da550f | 3,650,525 |
def N(u,i,p,knots):
"""
u: point for which a spline should be evaluated
i: spline knot
p: spline order
knots: all knots
Evaluates the spline basis of order p defined by knots
at knot i and point u.
"""
if p == 0:
if knots[int(i)] < u and u <=knots[int(i+1)]:
return 1.0
else:
return 0.0
else:
try:
k = ((float((u-knots[int(i)])) / float((knots[int(i+p)] - knots[int(i)]) ))
* N(u,i,p-1,knots))
except ZeroDivisionError:
k = 0.0
try:
q = ((float((knots[int(i+p+1)] - u)) / float((knots[int(i+p+1)] - knots[int(i+1)])))
* N(u,i+1,p-1,knots))
except ZeroDivisionError:
q = 0.0
return float(k + q) | 0cd0756d558ee99b0ed32350860bc27f023fa88b | 3,650,526 |
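# Illustrative use of the Cox-de Boor recursion N() above (not part of the original row):
# cubic basis functions on a clamped knot vector. Away from the knots, the basis values
# at a point should sum to 1 (partition of unity).
knots = [0, 0, 0, 0, 1, 2, 3, 4, 4, 4, 4]
p = 3
u = 1.7
values = [N(u, i, p, knots) for i in range(len(knots) - p - 1)]
print(values)       # individual basis values at u
print(sum(values))  # approximately 1.0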
import numpy as np
import pandas as pd
import scipy.optimize
import statsmodels.tools.numdiff as smnd
import tqdm
def infer_growth_rate(data,
od_bounds=None,
convert_time=True,
groupby=None,
cols={'time':'clock_time', 'od':'od_600nm'},
return_opts=True,
print_params=True,
**kwargs):
"""
Infers the maximal a posteriori (MAP) parameter set for the steady state growth
rate given measurements of optical density. This is performed via optimization
by minimization.
Parameters
----------
data : pandas DataFrame
A tidy long-form pandas DataFrame with columns corresponding to the
measurement time and optical density.
od_bounds : list of floats
The lower and upper bounds of the optical density range to be considered.
The default bounds assumed are [0.04, 0.41] inclusive.
convert_time : bool
If `True`, the provided time column needs to be converted to elapsed
time. In this case, the provided time column is assumed to be
the clock time of the measurement and is converted to minutes elapsed.
groupby : list of str, optional
The column names for the groupby operation to operate upon. For example,
if there are multiple strains measured in the data set, a groupby of
`['strain']` will yield a growth rate estimate for each strain in
the data. A groupby of `['strain', 'replicate']` will return a growth
rate estimate for each strain and biological replicate.
cols : dict, keys 'time', and 'od'
The column names of the time and optical density measurements in the
DataFrame.
return_opts : bool
If `True`, the approximated covariance matrix, optimal parameters, and
approximate hessian matrix for each grouping is returned as a dictionary.
print_params : bool
If `True`, the estimated parameters will be printed to the screen when
the estimation is finished.
Returns
-------
data_df : pandas DataFrame
A pandas DataFrame with the converted time measurements cropped to the
provided optical density bounds.
param_df : pandas DataFrame
A pandas DataFrame containing the parameters, values, and their 95% credible
        intervals for each object in the provided groupby.
opts : dict
If `return_opts = True`, the estimated covariance matrix, optimal parameters,
and approximate Hessian matrix is returned.
Notes
-----
This function infers the "maximal a posteriori" parameter set using a
Bayesian definition of probability. This function calls the posterior
defined by `cremerlab.bayes.steady_state_log_posterior` which contains
more information about the construction of the statistical model.
"""
# TODO: Include type checks
if (groupby is not None) & (type(groupby) is not list):
groupby = [groupby]
# Unpack the time col
time_col = cols['time']
od_col = cols['od']
# Determine the OD bounds to consider
if od_bounds is not None:
data = data[(data[od_col] >= od_bounds[0]) & (data[od_col] <= od_bounds[1])]
faux_groupby = False
if groupby is None:
faux_groupby = True
data['__group_idx'] = 1
groupby=['__group_idx']
iterator = data.groupby(groupby)
else:
iterator = tqdm.tqdm(data.groupby(groupby), desc='Estimating parameters...')
# Iterate through each grouping
data_dfs = []
param_dfs = []
opts = {'groupby':groupby}
iter = 0 # Iterator for opts
output = """\n
============================================================
Parameter Estimate Summary
============================================================
\n
"""
for g, d in iterator:
# Convert time if necessary
if convert_time:
d[time_col] = pd.to_datetime(d[time_col])
d.sort_values(by=time_col, inplace=True)
d['elapsed_time_hr'] = d[time_col].values - d[time_col].values[0]
d['elapsed_time_hr'] = (d['elapsed_time_hr'].astype('timedelta64[m]')
)/60
_time = d['elapsed_time_hr'].values
_od = d[od_col].values
else:
_time = d[time_col].values
_od = d[od_col].values
# Define the arguments and initial guesses of the parameters
# lam_guess = np.mean(np.diff(np.log(_od)) / np.diff(_time))
params = [1, _od.min(), 0.1]
args = (_time, _od)
# Compute the MAP
res = scipy.optimize.minimize(steady_state_growth_rate_log_posterior,
params, args=args, method="powell")
# Get the optimal parameters
popt = res.x
# Compute the Hessian and covariance matrix
hes = smnd.approx_hess(popt, steady_state_growth_rate_log_posterior, args=args)
cov = np.linalg.inv(hes)
# Extract the MAP parameters and CI
lam_MAP, od_init_MAP, sigma_MAP = popt
lam_CI = 1.96 * np.sqrt(cov[0, 0])
od_init_CI = 1.96 * np.sqrt(cov[1, 1])
sigma_CI = 1.96 * np.sqrt(cov[2, 2])
if print_params:
if faux_groupby == False:
header = f"""Parameter Estimates for grouping {groupby}: {g}
------------------------------------------------------------
"""
else:
header = """Parameter Estimates
------------------------------------------------------------
"""
output += header + f"""growth rate, λ = {lam_MAP:0.2f} ± {lam_CI:0.3f} [per unit time]
initial OD, OD_0 = {od_init_MAP:0.2f} ± {od_init_CI:0.3f} [a.u.]
homoscedastic error, σ = {sigma_MAP:0.2f} ± {sigma_CI:0.3f} [a.u.]
\n
"""
# Assemble the data dataframe
_data_df = pd.DataFrame([])
if convert_time:
_data_df['elapsed_time_hr'] = _time
else:
_data_df[time_col] = _time
_data_df[od_col] = _od
# Include other columns that were not carried through
colnames = [k for k in d.keys() if k not in [time_col, od_col]]
for c in colnames:
_data_df[c] = d[c].values
if '__group_idx' in _data_df.keys():
_data_df.drop(columns=['__group_idx'], inplace=True)
_data_df.rename(columns={'od':od_col})
# Assemble the parameter dataframe
_param_df = pd.DataFrame([])
for title, MAP, CI in zip(['growth_rate', 'od_init', 'sigma'],
[lam_MAP, od_init_MAP, sigma_MAP],
[lam_CI, od_init_CI, sigma_CI]):
_param_df = _param_df.append({'parameter':title,
'map_val': MAP,
'cred_int': CI},
ignore_index=True)
# Add grouping identifier if provided
if groupby is not None:
# if type(g) is not list:
# _g = [g]
_g = g
for title, value in zip(groupby, _g):
_data_df[title] = value
_param_df[title] = value
# Append the dataframes to the storage lists
param_dfs.append(_param_df)
data_dfs.append(_data_df)
opts[iter] = {'groupby': g, 'cov':cov, 'popt':popt, 'hessian':hes}
iter += 1
# Concatenate the dataframes for return
if len(data_dfs) == 1:
data_df = data_dfs[0]
param_df = param_dfs[0]
else:
data_df = pd.concat(data_dfs, sort=False)
param_df = pd.concat(param_dfs, sort=False)
if print_params:
print(output)
if return_opts:
return_obj = [data_df, param_df, opts]
else:
return_obj = [data_df, param_df]
return return_obj | cef58ddc864bc683708170289439c318363b6561 | 3,650,527 |
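# Sketch only (an assumption, not the original cremerlab posterior): infer_growth_rate()
# above minimizes steady_state_growth_rate_log_posterior, which is not shown. A minimal
# stand-in is the negative Gaussian log-likelihood of exponential growth
# OD(t) = od_init * exp(lam * t) with homoscedastic noise sigma and flat priors.
import numpy as np

def steady_state_growth_rate_log_posterior(params, time, od):
    lam, od_init, sigma = params
    if lam <= 0 or od_init <= 0 or sigma <= 0:
        return np.inf  # reject non-physical parameter values
    resid = od - od_init * np.exp(lam * time)
    return 0.5 * np.sum(resid ** 2) / sigma ** 2 + len(od) * np.log(sigma)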
def check_merge(s, idn) -> bool:
"""
Check whether a set of nodes is valid to merge
"""
found = False
in_size = None
out_size = None
stride = None
act = None
nds = [idn[i] for i in state2iset(s)]
if len(nds) == 1:
return True
for nd in nds:
if not isinstance(nd, Conv): # current only merge conv
return False
if not found:
in_size = nd.input_shape[1], nd.input_shape[2]
out_size = nd.output_shape[1], nd.output_shape[2]
stride = nd.stride[0], nd.stride[1]
act = nd.act
found = True
else:
# all input resolution, output resolution and stride must be the same
if in_size[0] != nd.input_shape[1] or in_size[1] != nd.input_shape[2]:
return False
if out_size[0] != nd.output_shape[1] or out_size[1] != nd.output_shape[2]:
return False
if stride[0] != nd.stride[0] or stride[1] != nd.stride[1]:
return False
if nd.groups != 1 or act != nd.act:
return False
if len(nd.inputs) > 1 or len(nd.inputs[0]) > 1 or not (nd.inputs[0][0] == nds[0].inputs[0][0]):
return False
return True | d0edfee6150d7814c926fb59d413b61a989c9808 | 3,650,528 |
def typeMap(name, package=None):
""" typeMap(name: str) -> Module
Convert from C/C++ types into VisTrails Module type
"""
if package is None:
package = identifier
if isinstance(name, tuple):
return [typeMap(x, package) for x in name]
if name in typeMapDict:
return typeMapDict[name]
else:
registry = get_module_registry()
if not registry.has_descriptor_with_name(package, name):
return None
else:
return registry.get_descriptor_by_name(package,
name).module | 6f8ed31cfe1eb88201d0131d43c0fb0da2295405 | 3,650,529 |
def _rm_from_diclist(diclist, key_to_check, value_to_check):
"""Function that removes an entry form a list of dictionaries if a key of
an entry matches a given value. If no value of the key_to_check matches the
value_to_check for all of the entries in the diclist, the same diclist will
be returned that was passed to the function.
Parameters:
diclist - A list of dictionaries.
key_to_check - A key of a dictionary whose value should be checked
to determine if a dictionary should be removed from
the diclist.
value_to_check - The value that should be compared to the value of the
key_to_check to determine if a dictionary should be
removed from the diclist.
Returns the diclist passed to the function with an entry removed if its
value of the key_to_check matched the value_to_check.
"""
for i in xrange(len(diclist)):
if diclist[i][key_to_check] == value_to_check:
diclist.pop(i)
break
return diclist | 89806ec5029923709bd44d794d75a84f440c5aa7 | 3,650,530 |
import scipy
def odr_linear(x, y, intercept=None, beta0=None):
"""
Performs orthogonal linear regression on x, y data.
Parameters
----------
x: array_like
x-data, 1D array. Must be the same lengths as `y`.
y: array_like
y-data, 1D array. Must be the same lengths as `x`.
intercept: float, default None
If not None, fixes the intercept.
beta0: array_like, shape (2,)
Guess at the slope and intercept, respectively.
Returns
-------
output: ndarray, shape (2,)
Array containing slope and intercept of ODR line.
"""
def linear_fun(p, x):
return p[0] * x + p[1]
def linear_fun_fixed(p, x):
return p[0] * x + intercept
# Set the model to be used for the ODR fitting
if intercept is None:
model = scipy.odr.Model(linear_fun)
if beta0 is None:
beta0 = (0.0, 1.0)
else:
model = scipy.odr.Model(linear_fun_fixed)
if beta0 is None:
beta0 = (1.0,)
# Make a Data instance
data = scipy.odr.Data(x, y)
# Instantiate ODR
odr = scipy.odr.ODR(data, model, beta0=beta0)
# Perform ODR fit
try:
result = odr.run()
except scipy.odr.odr_error:
raise scipy.odr.odr_error('ORD failed.')
return result.beta | 51fc464cb60e5b05645907d5ed3ec40d1b9cdb54 | 3,650,531 |
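# Illustrative use of odr_linear() above on synthetic data (values are made up for the
# example): recover the slope and intercept of y = 2x + 1 with noise in both coordinates.
import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 50) + rng.normal(0.0, 0.1, 50)
y = 2.0 * x + 1.0 + rng.normal(0.0, 0.1, 50)

slope, intercept = odr_linear(x, y)
print(slope, intercept)                          # approximately 2.0 and 1.0
slope_only = odr_linear(x, y, intercept=1.0)[0]  # slope with the intercept held fixed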
def get_centroid_world_coordinates(geo_trans, raster_x_size, raster_y_size, x_pixel_size, y_pixel_size):
"""Return the raster centroid in world coordinates
:param geo_trans: geo transformation
:type geo_trans: tuple with six values
:param raster_x_size: number of columns
:type raster_x_size: int
:param raster_y_size: number of rows
:param x_pixel_size: pixel size in x direction
:type: x_pixel_size: float
:param y_pixel_size: pixel size in y direction
:type y_pixel_size: float
:return:
"""
x0, y0 = pixel_to_world(geo_trans, 0, 0)
x1, y1 = pixel_to_world(geo_trans, raster_x_size-1, raster_y_size-1)
x1 += x_pixel_size
y1 -= y_pixel_size
return (x0 + x1) * 0.5, (y0 + y1) * 0.5 | e0dd1d57cb020a85d9784f2c9bf22b4b8035ffae | 3,650,532 |
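# Hypothetical sketch of the pixel_to_world() helper that get_centroid_world_coordinates()
# above relies on (the real helper is not shown): the standard GDAL affine geotransform
#   X = gt[0] + col * gt[1] + row * gt[2]
#   Y = gt[3] + col * gt[4] + row * gt[5]
def pixel_to_world(geo_trans, col, row):
    x = geo_trans[0] + col * geo_trans[1] + row * geo_trans[2]
    y = geo_trans[3] + col * geo_trans[4] + row * geo_trans[5]
    return x, y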
import json
def save_change_item(request):
"""
    Save a changed item.
    Algorithm: look up the matching uuid in request_list and, once found, update its data in place.
:param request:
:return:
"""
if request.method != 'POST':
return HttpResponse("数据异常.")
str_data = request.POST.get('jsons')
logger.info("change_item: " + str_data)
jsons = json.loads(str_data)
id = jsons['id']
name = jsons['name']
url = jsons['url']
raw_mode_data = jsons['rawModeData']
method = jsons['method']
logger.info("打印send: {}".format(url))
request_list = JsonConf.json_data['requests']
for item in request_list:
if id == item["id"]:
item["method"] = method
item["rawModeData"] = raw_mode_data
item["name"] = name
item["url"] = url
break
JsonConf.store(settings.INIT_DATA)
return HttpResponse("保存成功.") | 434bd1e77690cd60dd163a39fd1cb90dd0cb4952 | 3,650,533 |
import pandas as pd
import pytdx.hq
import pytdx.util.best_ip
def get_lastest_stocklist():
"""
    Use pytdx to fetch the latest security list from the network.
    :return: DataFrame with the stock list
    """
    print(f"Selecting the best TDX quote server; it can also be hard-coded to a known-good one such as {{'ip': '123.125.108.24', 'port': 7709}}")
# ipinfo = pytdx.util.best_ip.select_best_ip()
api = pytdx.hq.TdxHq_API()
# with api.connect(ipinfo['ip'], ipinfo['port']):
with api.connect('123.125.108.24', 7709):
data = pd.concat([pd.concat(
[api.to_df(api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0) for j in range(2)], axis=0)
data = data.reindex(columns=['sse', 'code', 'name', 'pre_close', 'volunit', 'decimal_point'])
data.sort_values(by=['sse', 'code'], ascending=True, inplace=True)
data.reset_index(drop=True, inplace=True)
        # This approach does not work: the codes are strings and cannot be compared
        # with >/<, and converting them to int is even more cumbersome.
# df = data.loc[((data['sse'] == 'sh') & ((data['code'] >= '600000') | (data['code'] < '700000'))) | \
# ((data['sse'] == 'sz') & ((data['code'] >= '000001') | (data['code'] < '100000'))) | \
# ((data['sse'] == 'sz') & ((data['code'] >= '300000') | (data['code'] < '309999')))]
sh_start_num = data[(data['sse'] == 'sh') & (data['code'] == '600000')].index.tolist()[0]
sh_end_num = data[(data['sse'] == 'sh') & (data['code'] == '706070')].index.tolist()[0]
sz00_start_num = data[(data['sse'] == 'sz') & (data['code'] == '000001')].index.tolist()[0]
sz00_end_num = data[(data['sse'] == 'sz') & (data['code'] == '100303')].index.tolist()[0]
sz30_start_num = data[(data['sse'] == 'sz') & (data['code'] == '300001')].index.tolist()[0]
sz30_end_num = data[(data['sse'] == 'sz') & (data['code'] == '395001')].index.tolist()[0]
df_sh = data.iloc[sh_start_num:sh_end_num]
df_sz00 = data.iloc[sz00_start_num:sz00_end_num]
df_sz30 = data.iloc[sz30_start_num:sz30_end_num]
df = pd.concat([df_sh, df_sz00, df_sz30])
df.reset_index(drop=True, inplace=True)
return df | 2953cbd800ad2e2b6bc6122ec225f34d165773ea | 3,650,534 |
from datetime import datetime
from time import sleep

from grpc import RpcError
def grpc_detect_ledger_id(connection: "GRPCv1Connection") -> str:
"""
Return the ledger ID from the remote server when it becomes available. This method blocks until
a ledger ID has been successfully retrieved, or the timeout is reached (in which case an
exception is thrown).
"""
LOG.debug("Starting a monitor thread for connection: %s", connection)
start_time = datetime.utcnow()
connect_timeout = connection.options.connect_timeout
while connect_timeout is None or (datetime.utcnow() - start_time) < connect_timeout:
if connection.invoker.level >= RunLevel.TERMINATE_GRACEFULLY:
raise UserTerminateRequest()
if connection.closed:
raise Exception("connection closed")
try:
response = connection.ledger_identity_service.GetLedgerIdentity(
lapipb.GetLedgerIdentityRequest()
)
except RpcError as ex:
details_str = ex.details()
# suppress some warning strings because they're not particularly useful and just clutter
# up the logs
if details_str not in GRPC_KNOWN_RETRYABLE_ERRORS:
LOG.exception(
"An unexpected error occurred when trying to fetch the "
"ledger identity; this will be retried."
)
sleep(1)
continue
return response.ledger_id
raise ConnectionTimeoutError(
f"connection timeout exceeded: {connect_timeout.total_seconds()} seconds"
) | a60b84db2b8274d71b601920eb8325123191109b | 3,650,535 |
import logging
def cross_mcs(input_vectors, value_fields, verbose=False, logger=None):
""" Compute map comparison statistics between input vector features.
MCS (Map Comparison Statistic) indicates the average difference between any
pair of feature polygon values, expressed as a fraction of the highest
value. MCS is calculated between each polygon in the input vector features
and it is required (and checked) that all the inputs are based on the
same vector feature.
For another application of MCS, see:
Schulp, C. J. E., Burkhard, B., Maes, J., Van Vliet, J., & Verburg, P. H.
(2014). Uncertainties in Ecosystem Service Maps: A Comparison on the
European Scale. PLoS ONE, 9(10), e109643.
http://doi.org/10.1371/journal.pone.0109643
    :param input_vectors: list of input vector paths.
    :param value_fields: list of string names indicating which fields contain
                         the values to be compared.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:return list of GeoPandas Dataframe with MCS between all rasters in field
"mcs".
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_mcs')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_vectors) > 1, "More than one input vector needed"
assert len(value_fields) == len(input_vectors), "One value field per vector feature needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING MCS SCORES **]")
all_mcs = pd.DataFrame({"feature1": [], "feature2": [],
"mcs": []})
n_vectors = len(input_vectors)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_vectors * n_vectors - n_vectors) / 2)
no_computation = 1
for i in range(0, n_vectors):
# Read in the data as a GeoPandas dataframe
vector1_path = input_vectors[i]
vector1 = gpd.read_file(vector1_path)
for j in range(i+1, n_vectors):
vector2_path = input_vectors[j]
vector2 = gpd.read_file(vector2_path)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating MCS ".format(prefix) +
"between {} ".format(vector1_path) +
"and {}".format(vector2_path)))
mcs_value = compute_mcs(vector1[value_fields[i]],
vector2[value_fields[j]])
mcs = pd.DataFrame({"feature1": [vector1_path],
"feature2": [vector2_path],
"mcs": [mcs_value]})
all_mcs = pd.concat([all_mcs, mcs])
no_computation += 1
all_mcs.index = np.arange(0, len(all_mcs.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_mcs | cc10bb30489c13a2ce0243e1f7ab13037aa23986 | 3,650,536 |
def extrapolate_trace(traces_in, spec_min_max_in, fit_frac=0.2, npoly=1, method='poly'):
"""
Extrapolates trace to fill in pixels that lie outside of the range spec_min, spec_max). This
routine is useful for echelle spectrographs where the orders are shorter than the image by a signfiicant
amount, since the polynomial trace fits often go wild.
Args:
        traces_in (np.ndarray): shape = (nspec,) or (nspec, ntrace)
            Array containing object or slit boundary traces
        spec_min_max_in (np.ndarray): shape = (2,) or (2, ntrace)
            Array containing the minimum and maximum spectral region covered by each trace. If this is a
            1d array, the same numbers will be used for all traces in traces_in. If a 2d array, then this must be
            an ndarray of shape (2, ntrace), where spec_min_max[0,:] are the minima and spec_min_max[1,:] are the maxima.
fit_frac (float):
fraction of the good pixels to be used to fit when extrapolating traces. The upper fit_frac
pixels are used to extrapolate to larger spectral position, and vice versa for lower spectral
positions.
npoly (int):
Order of polynomial fit used for extrapolation
method (str):
Method used for extrapolation. Options are 'poly' or 'edge'. If 'poly' the code does a polynomial fit. If
'edge' it just attaches the last good pixel everywhere. 'edge' is not currently used.
Returns:
trace_extrap (np.ndarray):
Array with same size as trace containing the linearly extrapolated values for the bad spectral pixels.
"""
#embed()
# This little bit of code allows the input traces to either be (nspec, nslit) arrays or a single
# vectors of size (nspec)
spec_min_max_tmp = np.array(spec_min_max_in)
if traces_in.ndim == 2:
traces = traces_in
nslits = traces.shape[1]
if np.array(spec_min_max_in).ndim == 1:
spec_min_max = np.outer(spec_min_max_tmp, np.ones(nslits))
elif spec_min_max_tmp.ndim == 2:
if (spec_min_max_tmp.shape[1] != nslits):
msgs.error('If input as any arrays, spec_min_max needs to have dimensions (2,nslits)')
spec_min_max = spec_min_max_tmp
else:
msgs.error('Invalid shapes for traces_min and traces_max')
else:
nslits = 1
traces = traces_in.reshape(traces_in.size, 1)
spec_min_max = spec_min_max_tmp
nspec = traces.shape[0]
spec_vec = np.arange(nspec,dtype=float)
xnspecmin1 = spec_vec[-1]
traces_extrap = traces.copy()
# TODO should we be doing a more careful extrapolation here rather than just linearly using the nearest pixel
# values??
for islit in range(nslits):
ibad_max = spec_vec > spec_min_max[1,islit]
ibad_min = spec_vec < spec_min_max[0,islit]
igood = (spec_vec >= spec_min_max[0,islit]) & (spec_vec <= spec_min_max[1,islit])
nfit = int(np.round(fit_frac*np.sum(igood)))
good_ind = np.where(igood)[0]
igood_min = good_ind[0:nfit]
igood_max = good_ind[-nfit:]
if np.any(ibad_min):
if 'poly' in method:
coeff_min = utils.func_fit(spec_vec[igood_min], traces[igood_min, islit], 'legendre', npoly, minx=0.0, maxx=xnspecmin1)
traces_extrap[ibad_min, islit] = utils.func_val(coeff_min, spec_vec[ibad_min], 'legendre', minx=0.0, maxx=xnspecmin1)
elif 'edge' in method:
traces_extrap[ibad_min, islit] = traces[good_ind[0], islit]
if np.any(ibad_max):
if 'poly' in method:
coeff_max = utils.func_fit(spec_vec[igood_max], traces[igood_max, islit], 'legendre', npoly, minx=0.0,maxx=xnspecmin1)
traces_extrap[ibad_max, islit] = utils.func_val(coeff_max, spec_vec[ibad_max], 'legendre', minx=0.0,maxx=xnspecmin1)
elif 'edge' in method:
traces_extrap[ibad_max, islit] = traces[good_ind[-1], islit]
#ibad = np.invert(igood)
#traces_extrap[ibad, islit] = interpolate.interp1d(spec_vec[igood], traces[igood, islit], kind='linear',
#bounds_error=False, fill_value='extrapolate')(spec_vec[ibad])
return traces_extrap | d2da076badb70147fd124bbf8ceaba24f26d4c0f | 3,650,537 |
def getStopWords(stopWordFileName):
"""Reads stop-words text file which is assumed to have one word per line.
Returns stopWordDict.
"""
stopWordDict = {}
    with open(stopWordFileName, 'r') as stopWordFile:
        for line in stopWordFile:
            word = line.strip().lower()
            stopWordDict[word] = None
    return stopWordDict | 8bb85683f257c35de9d04e4993b42cd758a802e6 | 3,650,538
def metade(valor, view=False):
    """
    -> Calculates half of a monetary value
    :param valor: the monetary value
    :param view: whether to return the raw number instead of the formatted currency string
    :return: half of the value
    """
    if not view:
        return moeda(valor / 2)
    else:
        return valor / 2 | fb1bbb605b8a0f1b8623ca70940377bd3c6a440a | 3,650,539
import numpy as np
from scipy.special import gamma, hyp0f1

def monopole(uvecs: [float, np.ndarray], order: int=3) -> [float, np.ndarray]:
"""
Solution for I(r) = 1.
Also handles nonzero-w case.
Parameters
----------
uvecs: float or ndarray of float
The cartesian baselines in units of wavelengths. If a float, assumed to be the magnitude of
the baseline. If an array of one dimension, each entry is assumed to be a magnitude.
If a 2D array, may have shape (Nbls, 2) or (Nbls, 3). In the first case, w is
assumed to be zero.
order: int
Expansion order to use for non-flat array case (w != 0).
Returns
-------
ndarray of complex
Visibilities, shape (Nbls,)
"""
if np.isscalar(uvecs) or uvecs.ndim == 1 or uvecs.shape[1] == 2 or np.allclose(uvecs[:, 2], 0):
# w is zero.
uamps = vec_to_amp(uvecs)
return 2 * np.pi * np.sinc(2 * uamps)
uvecs = uvecs[..., None]
ks = np.arange(order)[None, :]
fac0 = (2 * np.pi * 1j * uvecs[:, 2, :])**ks / (gamma(ks + 2))
fac1 = hyp0f1((3 + ks) / 2, -np.pi**2 * (uvecs[:, 0, :]**2 + uvecs[:, 1, :]**2))
return 2 * np.pi * np.sum(fac0 * fac1, axis=-1) | 6828b4014fc7970a4d85b6d04b6d3e16249d3dae | 3,650,540 |
def get_question(
numbers: OneOrManyOf(NUMBERS_AVAILABLE),
cases: OneOrManyOf(CASES_AVAILABLE),
num: hug.types.in_range(1, MAX_NUM + 1) = 10):
"""When queried for one or multiple numbers and cases, this endpoint returns a random question."""
questions = []
bag = NounCaseQuestionBag(
noun_bag,
adjective_bag,
numbers,
cases)
while len(questions) < num:
question = bag.get_question()
questions.append(
{
'question_elements': question.get_question_elements(),
'answer_elements': question.get_correct_answer_elements()
})
return questions | b32d76f6ee7519935292743f6d7d8b8ad7357d3a | 3,650,541 |
from textwrap import dedent
def _print_attrs(attr, html=False):
"""
    Given an Attr class, will print out each registered attribute.
Parameters
----------
attr : `sunpy.net.attr.Attr`
The attr class/type to print for.
html : bool
Will return a html table instead.
Returns
-------
`str`
String with the registered attributes.
"""
attrs = attr._attr_registry[attr]
# Only sort the attrs if any have been registered
sorted_attrs = _ATTR_TUPLE(*zip(*sorted(zip(*attrs)))) if attrs.name else make_tuple()
*other_row_data, descs = sorted_attrs
descs = [(dsc[:77] + '...') if len(dsc) > 80 else dsc for dsc in descs]
table = Table(names=["Attribute Name", "Client", "Full Name", "Description"],
dtype=["U80", "U80", "U80", "U80"],
data=[*other_row_data, descs])
class_name = f"{(attr.__module__ + '.') or ''}{attr.__name__}"
lines = [class_name]
# If the attr lacks a __doc__ this will error and prevent this from returning anything.
try:
lines.append(dedent(attr.__doc__.partition("\n\n")[0]) + "\n")
except AttributeError:
pass
format_line = "<p>{}</p>" if html else "{}"
width = -1 if html else get_width()
lines = [*[format_line.format(line) for line in lines],
*table.pformat_all(show_dtype=False, max_width=width, align="<", html=html)]
return '\n'.join(lines) | 5044764b8799eed66d3e11fe9423922d79fd9981 | 3,650,542 |
import struct
from io import BytesIO
def create_wave_header(samplerate=44100, channels=2, bitspersample=16, duration=3600):
"""Generate a wave header from given params."""
# pylint: disable=no-member
file = BytesIO()
numsamples = samplerate * duration
# Generate format chunk
format_chunk_spec = b"<4sLHHLLHH"
format_chunk = struct.pack(
format_chunk_spec,
b"fmt ", # Chunk id
16, # Size of this chunk (excluding chunk id and this field)
1, # Audio format, 1 for PCM
channels, # Number of channels
int(samplerate), # Samplerate, 44100, 48000, etc.
int(samplerate * channels * (bitspersample / 8)), # Byterate
int(channels * (bitspersample / 8)), # Blockalign
bitspersample, # 16 bits for two byte samples, etc.
)
# Generate data chunk
data_chunk_spec = b"<4sL"
datasize = int(numsamples * channels * (bitspersample / 8))
data_chunk = struct.pack(
data_chunk_spec,
b"data", # Chunk id
int(datasize), # Chunk size (excluding chunk id and this field)
)
sum_items = [
# "WAVE" string following size field
4,
# "fmt " + chunk size field + chunk size
struct.calcsize(format_chunk_spec),
# Size of data chunk spec + data size
struct.calcsize(data_chunk_spec) + datasize,
]
# Generate main header
all_chunks_size = int(sum(sum_items))
main_header_spec = b"<4sL4s"
main_header = struct.pack(main_header_spec, b"RIFF", all_chunks_size, b"WAVE")
# Write all the contents in
file.write(main_header)
file.write(format_chunk)
file.write(data_chunk)
# return file.getvalue(), all_chunks_size + 8
return file.getvalue() | b0b53b33733e5456e321cd7c276ad95754140f8a | 3,650,543 |
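# Minimal usage sketch for create_wave_header (illustrative): write a header for a
# 2-channel, 16-bit, 44.1 kHz stream, then append raw PCM frames as they are produced.
header = create_wave_header(samplerate=44100, channels=2, bitspersample=16, duration=10)
with open("stream.wav", "wb") as f:
    f.write(header)
    # ...append raw PCM frames here...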
import unicodedata
import re

from django.utils.safestring import mark_safe
def xslugify(value):
"""
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, slash, or hyphens. Converts to
lowercase. Also strips leading and trailing whitespace.
(I.e., does the same as slugify, but also converts slashes to dashes.)
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s/-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s/]+', '-', value)) | 7a8a3f00011a46465ccafdcaf1ac797577511b2b | 3,650,544 |
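# Minimal usage sketch for xslugify (illustrative):
xslugify("Projects/2021 Report (final)")   # -> 'projects-2021-report-final'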
def firstcond(list1, list2):
"""this is a fixture for testing conditions when
the list is a four node list """
ll = LinkedList()
ll.insert(1, 5)
ll.insert(3, 9)
ll.insert(2, 4)
return ll | 6e50038285e84e986304de5d2b28bef0db32b63d | 3,650,545 |
import os
def read_bert_vocab(bert_model_path):
"""读取bert词典"""
dict_path = os.path.join(bert_model_path, 'vocab.txt')
token2idx = {}
with open(dict_path, 'r', encoding='utf-8') as f:
tokens = f.read().splitlines()
for word in tokens:
token2idx[word] = len(token2idx)
return token2idx | f38c82a1a2b8f69b6c10e8d0bfcf8bdf4f63e123 | 3,650,546 |
import textwrap
def _template_message(desc, descriptor_registry):
# type: (Descriptor, DescriptorRegistry) -> str
"""
Returns cls_def string, list of fields, list of repeated fields
"""
this_file = desc.file
desc = SimpleDescriptor(desc)
if desc.full_name in WKTBASES:
desc.bases.append(WKTBASES[desc.full_name])
descriptor_registry[desc.identifier] = desc
slots = desc.field_names
# TODO: refactor field partitioning and iskeyword checks
# NOTE: the "pass" statement is a hack to provide a body when args is empty
initialisers = ['pass']
initialisers += [
'self.{} = self.{}() # inner_nonrepeated_fields'.format(field_name, field_type)
for field_name, field_type in desc.inner_nonrepeated_fields
if not iskeyword(field_name)
]
repeated_scalar_fields = [fd.name for fd in desc.fields if is_repeated(fd) and not is_composite(fd)]
initialisers += [
'self.{} = [] # repeated_fields'.format(field_name)
for field_name in repeated_scalar_fields
if not iskeyword(field_name)
]
rcfields = {
fd for fd in desc.fields
if is_repeated(fd) and is_composite(fd) and not is_map_field(fd)
}
repeated_composite_fields = [
(fd.name, fd.message_type.name, desc.is_nested(fd))
for fd in rcfields
]
initialisers += [
_template_composite_field(desc.name, field_name, field_type, is_nested)
for field_name, field_type, is_nested in repeated_composite_fields
if not iskeyword(field_name)
]
# TODO: refactor this
external_fields = [
(f, f.message_type) for f in desc.message_fields
if not desc.is_nested(f)
if f not in rcfields # don't want to double up above
]
siblings = [
(f, f.name, full_name(msg_type))
for f, msg_type in external_fields
if msg_type.file is this_file
]
initialisers += [
'self.{} = {}() # external_fields (siblings)'.format(field_name, field_type)
for _, field_name, field_type in siblings
if not iskeyword(field_name)
]
externals = [
(f, f.name, _to_module_name(msg_type.file.name), full_name(msg_type)) # TODO: look up name instead of heuristic?
for f, msg_type in external_fields
if msg_type.file is not this_file
]
initialisers += [
'self.{} = {}.{}() # external_fields (imports)'.format(field_name, qualifier, field_type)
for _, field_name, qualifier, field_type in externals
if not iskeyword(field_name)
]
# Extensions should show up as attributes on message instances but not
# as keyword arguments in message constructors
initialisers += [
'self.{} = object() # extensions'.format(ext_name)
for ext_name in desc.extensions_by_name
if not iskeyword(ext_name)
]
args = ['self'] + ['{}=None'.format(f) for f in slots if not iskeyword(f)]
init_str = 'def __init__({argspec}):\n{initialisers}\n'.format(
argspec=', '.join(args),
initialisers=textwrap.indent('\n'.join(initialisers), ' '),
)
helpers = ""
if desc.options.map_entry:
# for map <key, value> fields
# This mirrors the _IsMessageMapField check
value_type = desc.fields_by_name['value']
if value_type.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
base_class = MessageMap
else:
base_class = ScalarMap
# Rather than (key, value), use the attributes of the correct
# MutableMapping type as the "slots"
slots = tuple(m for m in dir(base_class) if not m.startswith("_"))
helpers = 'def __getitem__(self, idx):\n pass\n'
helpers += 'def __delitem__(self, idx):\n pass\n'
body = ''.join([
_template_enum(d, descriptor_registry) for d in desc.enum_types
] + [
_template_message(d, descriptor_registry) for d in desc.nested_types
])
cls_str = (
'class {name}(object):\n'
' {docstring!r}\n'
' __slots__ = {slots}\n'
'{helpers}{body}{init}\n'
).format(
name=desc.name,
docstring="descriptor={}".format(desc.identifier),
slots=slots,
body=textwrap.indent(body, ' '),
helpers=textwrap.indent(helpers, ' '),
init=textwrap.indent(init_str, ' '),
)
return cls_str | d56f79317d4599db2f722f9a665eb912a60aa5b8 | 3,650,547 |
import numpy as np
import pandas as pd

def random_portfolio(n, k, mu=0., sd=0.01, corr=None, dt=1., nan_pct=0.):
""" Generate asset prices assuming multivariate geometric Brownian motion.
:param n: Number of time steps.
:param k: Number of assets.
:param mu: Drift parameter. Can be scalar or vector. Default is 0.
:param sd: Volatility of single assets. Default is 0.01.
:param corr: Correlation matrix of assets. Default is identity.
:param dt: Time step.
:param nan_pct: Add given percentage of NaN values. Useful for testing
"""
# default values
corr = corr if corr is not None else np.eye(k)
sd = sd * np.ones(k)
mu = mu * np.ones(k)
# drift
nu = mu - sd**2 / 2.
# do a Cholesky factorization on the correlation matrix
R = np.linalg.cholesky(corr).T
# generate uncorrelated random sequence
x = np.matrix(np.random.normal(size=(n - 1,k)))
# correlate the sequences
ep = x * R
# multivariate brownian
W = nu * dt + ep * np.diag(sd) * np.sqrt(dt)
# generate potential path
S = np.vstack([np.ones((1, k)), np.cumprod(np.exp(W), 0)])
# add nan values
if nan_pct > 0:
r = S * 0 + np.random.random(S.shape)
S[r < nan_pct] = np.nan
return pd.DataFrame(S) | 86801609a44619565188cd58b1d519c2e326086b | 3,650,548 |
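# Minimal usage sketch for random_portfolio (illustrative): simulate 250 price steps
# for 5 uncorrelated assets and compute simple returns.
prices = random_portfolio(n=250, k=5, mu=0.0, sd=0.01)
returns = prices.pct_change().dropna()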
import time
def vulnerabilities_for_image(image_obj):
"""
Return the list of vulnerabilities for the specified image id by recalculating the matches for the image. Ignores
any persisted matches. Query only, does not update the data. Caller must add returned results to a db session and commit
in order to persist.
:param image_obj: the image
:return: list of ImagePackageVulnerability records for the packages in the given image
"""
# Recompute. Session and persistence in the session is up to the caller
try:
ts = time.time()
computed_vulnerabilties = []
for package in image_obj.packages:
pkg_vulnerabilities = package.vulnerabilities_for_package()
for v in pkg_vulnerabilities:
img_v = ImagePackageVulnerability()
img_v.pkg_image_id = image_obj.id
img_v.pkg_user_id = image_obj.user_id
img_v.pkg_name = package.name
img_v.pkg_type = package.pkg_type
img_v.pkg_arch = package.arch
img_v.pkg_version = package.version
img_v.pkg_path = package.pkg_path
img_v.vulnerability_id = v.vulnerability_id
img_v.vulnerability_namespace_name = v.namespace_name
computed_vulnerabilties.append(img_v)
#log.debug("TIMER VULNERABILITIES: {}".format(time.time() - ts))
return computed_vulnerabilties
except Exception as e:
log.exception('Error computing full vulnerability set for image {}/{}'.format(image_obj.user_id, image_obj.id))
raise | 3017ebfdeb2965760df7bb4db426fea175a3bf39 | 3,650,549 |
def superpose_images(obj, metadata, skip_overlaps=False,
num_frames_for_bkgd=100, every=1,
color_objs=False, disp_R=False, b=1.7, d=2,
false_color=False, cmap='jet', remove_positive_noise=True):
"""
Superposes images of an object onto one frame.
Parameters
----------
    metadata : dict
        Video metadata; must contain 'vid_path' (path to the video in which the
        object was tracked, source folder `src/`) and 'highlight_kwargs'.
obj : TrackedObject
Object that has been tracked. Must have 'image', 'local centroid',
'frame_dim', 'bbox', and 'centroid' parameters.
skip_overlaps : bool, optional
If True, will skip superposing images that overlap with each other
to produce a cleaner, though incomplete, image. Default False.
every : int, optional
Superposes every `every` image (so if every = 1, superposes every image;
if every = 2, superposes every *other* image; if every = n, superposes every
nth image). Default = 1
color_objs : bool, optional
If True, will use image processing to highlight objects in each frame
before superposing. Default False
disp_R : bool, optional
If True, will display the radius measured by image-processing in um
above each object.
b : float, optional
Factor by which to scale brightness of superposed images to match background.
Not sure why they don't automatically appear with the same brightness.
Default is 1.7.
d : int, optional
Number of pixels to around the bounding box of the object to transfer to
the superposed image. Helps ensure the outer edge of the image is bkgd. Default is 2.
Returns
-------
im : (M x N) numpy array of uint8
Image of object over time; each snapshot is superposed
(likely black-and-white)
"""
### initializes image as background ###
# loads parameters
highlight_kwargs = metadata['highlight_kwargs']
mask_data = highlight_kwargs['mask_data']
row_lo, _, row_hi, _ = mask.get_bbox(mask_data)
# computes background
bkgd = improc.compute_bkgd_med_thread(metadata['vid_path'],
vid_is_grayscale=True, #assumes video is already grayscale
num_frames=num_frames_for_bkgd,
crop_y=row_lo,
crop_height=row_hi-row_lo)
# copies background to superpose object images on
im = np.copy(bkgd)
# converts image to 3-channel if highlighting objects (needs color)
if color_objs:
im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
# initializes previous bounding box
bbox_prev = (0,0,0,0)
# loads video capture object
cap = cv2.VideoCapture(metadata['vid_path'])
# gets list of frames with object
frame_list = obj.get_props('frame')
### Superposes image from each frame ###
ct = 0
for i, f in enumerate(frame_list):
# only superposes every "every"th image
if (ct % every) != 0:
ct += 1
continue
# loads bounding box and image within it
bbox = obj.get_prop('bbox', f)
# skips images that overlap if requested
if skip_overlaps:
if basic.is_overlapping(bbox_prev, bbox):
continue
else:
bbox_prev = bbox
# highlights objects if requested
if color_objs:
# extracts radius of object
R = obj.get_prop('radius [um]', f)
# not sure why, but brightness must be 1.5 to match rest of image
# selects offset that pushes label out of image
offset = bbox[3]-bbox[1]+5
# reads frame and converts to color
frame = basic.read_frame(cap, f)
# highlights the object in the image
im_obj = highlight.highlight_image(frame,
f, cfg.highlight_method,
metadata, {R : obj}, [R],
brightness=b, offset=offset)
# shows number ID of object in image
centroid = obj.get_prop('centroid', f)
# converts centroid from (row, col) to (x, y) for open-cv
x = int(centroid[1])
y = int(centroid[0])
# superposes object image on overall image (3-channel images)
row_min, col_min, row_max, col_max = bbox
d = 2
im[row_min-d:row_max+d, col_min-d:col_max+d, :] = im_obj[row_min-d:row_max+d,
col_min-d:col_max+d, :]
if disp_R:
# prints label on image (radius [um])
im = cv2.putText(img=im, text='{0:.1f}'.format(R), org=(x-10, y-7),
fontFace=0, fontScale=0.5, color=cfg.white,
thickness=2)
else:
# loads image
im_raw = basic.read_frame(cap, f)
im_obj = cv2.cvtColor(basic.adjust_brightness(im_raw, b), cv2.COLOR_BGR2GRAY)[row_lo:row_hi, :]
# superposes object image on overall image
row_min, col_min, row_max, col_max = bbox
im[row_min:row_max, col_min:col_max] = im_obj[row_min:row_max,
col_min:col_max]
# increments counter
ct += 1
# false-colors objects by taking signed difference with background
if false_color:
signed_diff = im.astype(int) - bkgd.astype(int)
# remove noise above 0 (*assumes object is darker than background)
if remove_positive_noise:
signed_diff[signed_diff > 0] = 0
# defines false-color mapping to range to max difference
max_diff = max(np.max(np.abs(signed_diff)), 1) # ensures >= 1
# normalizes image so -max_diff -> 0 and +max_diff -> 1
im_norm = (signed_diff + max_diff) / (2*max_diff)
# maps normalized image to color image (still as floats from 0 to 1)
color_mapped = cm.get_cmap(cmap)(im_norm)
# converts to OpenCV format (uint8 0 to 255)
im_false_color = basic.cvify(color_mapped)
# converts from RGBA to RGB
im = cv2.cvtColor(im_false_color, cv2.COLOR_RGBA2RGB)
return im | 10767c9d10e5d32af51a31f1f8eb85d8989bb5d4 | 3,650,550 |
def gen_s_linear(computed_data, param ):
"""Generate sensitivity matrix for wavelength dependent sensitivity modeled as line"""
mat=np.zeros((computed_data.shape[0],computed_data.shape[0]))
#print(mat.shape)
for i in range(computed_data.shape[0]):
for j in range(computed_data.shape[0]):
v1 = computed_data[i, 0] - scenter # col 0 has position
v2 = computed_data[j, 0] - scenter # col 0 has position
#print(v1, v2)
c1 = param[0]
mat [i,j]=(1+ (c1/scale1)*v1 )/ \
(1+ (c1/scale1)*v2)
return mat | c18c31e65804d65ac7419a2037f889a0f9de2f96 | 3,650,551 |
import numpy
def upsample2(x):
"""
Up-sample a 2D array by a factor of 2 by interpolation.
Result is scaled by a factor of 4.
"""
n = [x.shape[0] * 2 - 1, x.shape[1] * 2 - 1] + list(x.shape[2:])
y = numpy.empty(n, x.dtype)
y[0::2, 0::2] = 4 * x
y[0::2, 1::2] = 2 * (x[:, :-1] + x[:, 1:])
y[1::2, 0::2] = 2 * (x[:-1, :] + x[1:, :])
y[1::2, 1::2] = x[:-1, :-1] + x[1:, 1:] + x[:-1, 1:] + x[1:, :-1]
return y | 4eb23d668154ac12755c0e65eeff485ac5e5dd23 | 3,650,552 |
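# Minimal usage sketch for upsample2 (illustrative): a (3, 4) array becomes (5, 7),
# with values scaled by a factor of 4 as noted in the docstring.
a = numpy.arange(12.0).reshape(3, 4)
b = upsample2(a)
assert b.shape == (5, 7)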
import six
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string or unicode object is appropriate.
Can be called multiple times on a single string.
"""
if isinstance(s, SafeData):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return SafeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return SafeText(s)
return SafeString(str(s)) | dab8c0dfb78fd22fb35b5abc3680f74de8a1089a | 3,650,553 |
def _unravel_plug(node, attr):
"""Convert Maya node/attribute combination into an MPlug.
Note:
Tries to break up a parent attribute into its child attributes:
.t -> [tx, ty, tz]
Args:
node (str): Name of the Maya node
attr (str): Name of the attribute on the Maya node
Returns:
MPlug or list: MPlug of the Maya attribute, list of MPlugs
if a parent attribute was unravelled to its child attributes.
"""
LOG.debug("_unravel_plug (%s, %s)", node, attr)
return_value = om_util.get_mplug_of_node_and_attr(node, attr)
# Try to unravel the found MPlug into child attributes
child_plugs = om_util.get_child_mplugs(return_value)
if child_plugs:
return_value = [child_plug for child_plug in child_plugs]
return return_value | 6513af00896a19e316e55beb5424fc15b2748b55 | 3,650,554 |
def index():
""" Root URL response """
return jsonify(name='Payment Demo REST API Service', version='1.0'), status.HTTP_200_OK | 2d370a9fdf1878f60af6de264d99193d06ff96d2 | 3,650,555 |
import numpy as np

def unvoigt(A):
"""
Converts from 6x1 to 3x3
:param A: 6x1 Voigt vector (strain or stress)
:return: 3x3 symmetric tensor (strain or stress)
"""
a=np.zeros(shape=(3,3))
a[0,0]=A[0]
a[0,1]=A[5]
a[0,2]=A[4]
a[1,0]=A[5]
a[1,1]=A[1]
a[1,2]=A[3]
a[2,0]=A[4]
a[2,1]=A[3]
a[2,2]=A[2]
return (a) | 72b28fceedb5ae2d34c768d5c29b5924310ff2b3 | 3,650,556 |
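# Minimal usage sketch for unvoigt (illustrative): a Voigt vector
# [xx, yy, zz, yz, xz, xy] mapped back to the symmetric 3x3 tensor.
import numpy as np
strain_voigt = np.array([1.0, 2.0, 3.0, 0.4, 0.5, 0.6])
strain_tensor = unvoigt(strain_voigt)
# strain_tensor[0, 1] == strain_tensor[1, 0] == 0.6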
import argparse
def get_parser():
"""
Creates a new argument parser.
"""
parser = argparse.ArgumentParser('niget_yyyymm.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
help description
"""
)
version = '%(prog)s ' + __version__
parser.add_argument('--version', '-v', action='version', version=version,
help='show version of this command')
parser.add_argument('--csvfile', '-c', type=str, default="nicer_target_segment_table.csv",
help='csvfile')
parser.add_argument('--obsid', '-o', type=str, default=None,
help='target ObsID (default=None)')
return parser | 27be28a2a1e2d6a90a4485d27c22ce33998886c6 | 3,650,557 |
import numpy as np
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment

def _calculate_rmsd(P, Q):
"""Calculates the root-mean-square distance between the points of P and Q.
The distance is taken as the minimum over all possible matchings. It is
zero if P and Q are identical and non-zero if not.
"""
distance_matrix = cdist(P, Q, metric='sqeuclidean')
matching = linear_sum_assignment(distance_matrix)
return np.sqrt(distance_matrix[matching].sum()) | 22261e75edf3edf378fa30daa5c33abc68ff93cd | 3,650,558 |
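# Minimal usage sketch for _calculate_rmsd (illustrative): identical point sets give 0,
# regardless of the ordering of the points.
import numpy as np
P = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
Q = P[::-1]
assert np.isclose(_calculate_rmsd(P, Q), 0.0)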
import tensorflow as tf  # requires TensorFlow 1.x (uses tf.contrib and tf.set_random_seed)

def initialize_parameters():
"""
Initializes weight parameters to build a neural network with tensorflow. The shapes are:
W1 : [4, 4, 3, 8]
W2 : [2, 2, 8, 16]
Note that we will hard code the shape values in the function to make the grading simpler.
Normally, functions should take values as inputs rather than hard coding.
Returns:
parameters -- a dictionary of tensors containing W1, W2
"""
tf.set_random_seed(1) # so that your "random" numbers match ours
### START CODE HERE ### (approx. 2 lines of code)
W1 = tf.get_variable("W1",[4,4,3,8],initializer=tf.contrib.layers.xavier_initializer(seed = 0))
W2 = tf.get_variable("W2",[2,2,8,16],initializer=tf.contrib.layers.xavier_initializer(seed = 0))
### END CODE HERE ###
parameters = {"W1": W1,
"W2": W2}
return parameters | 43481172a70ea88bcf5cfbc95792365c5af2ea52 | 3,650,559 |
import time
import random
import string
import hashlib
def generate_dynamic_secret(salt: str) -> str:
"""Creates a new overseas dynamic secret
:param salt: A ds salt
"""
t = int(time.time())
r = "".join(random.choices(string.ascii_letters, k=6))
h = hashlib.md5(f"salt={salt}&t={t}&r={r}".encode()).hexdigest()
return f"{t},{r},{h}" | 2a9bdf00daea91f13f34724d1c744c17e9b4d6cf | 3,650,560 |
import os
import shutil
def upload(slot_id):
"""
Upload a file
"""
form = request.form
# Is the upload using Ajax, or a direct POST by the form?
is_ajax = False
if form.get("__ajax", None) == "true":
is_ajax = True
# Target folder for these uploads.
target = os.path.join(APP_ROOT, UPLOAD_ROOT, slot_id)
if os.path.isdir(target):
shutil.rmtree(target)
os.mkdir(target)
for upload in request.files.getlist("file"):
filename = upload.filename.rsplit("/")[0]
destination = "/".join([target, filename])
upload.save(destination)
# convert the file to syro format
syroconvert(os.path.join(APP_ROOT, destination), slot_id)
return ajax_response(True, slot_id) | c0a67ed62c581cbe8fdb644f5240d7c3d0bf2016 | 3,650,561 |
def is_sim_f(ts_kname):
""" Returns True if the TSDist is actually a similarity and not a distance
"""
return ts_kname in ('linear_allpairs',
'linear_crosscor',
'cross_correlation',
'hsdotprod_autocor_truncated',
'hsdotprod_autocor_cyclic') | 11c18983d8d411714ba3147d4734ad77c40ceedf | 3,650,562 |
import pika
from typing import Text, Union
def initialise_pika_connection(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: float = 5,
) -> "BlockingConnection":
"""Create a Pika `BlockingConnection`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
connection_attempts: number of channel attempts before giving up
retry_delay_in_seconds: delay in seconds between channel attempts
Returns:
Pika `BlockingConnection` with provided parameters
"""
parameters = _get_pika_parameters(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return pika.BlockingConnection(parameters) | 7364547a4836aea0b277098bed75b8c5ec874522 | 3,650,563 |
def units(arg_name, unit):
"""Decorator to define units for an input.
Associates a unit of measurement with an input.
Parameters
----------
arg_name : str
Name of the input to attach a unit to.
unit : str
Unit of measurement descriptor to use (e.g. "mm").
Example
--------
Create an operation where its `x` parameter has its units defined in microns.
>>> @OperationPlugin
>>> @units('x', '\u03BC'+'m')
>>> def op(x: float = -1) -> float:
>>> return x *= -1.0
"""
def decorator(func):
_quick_set(func, 'units', arg_name, unit, {})
return func
return decorator | 45bd1695cada5612e2ce9e39632ed1357556535f | 3,650,564 |
from asyncio import Task, create_task, to_thread
from typing import Callable
async def to_thread_task(func: Callable, *args, **kwargs) -> Task:
"""Assign task to thread"""
coro = to_thread(func, *args, **kwargs)
return create_task(coro) | ad666a91588a670be7babf84294f338f0148b8e1 | 3,650,565 |
import logging
import sys

from six import string_types
def setup_logging(stream_or_file=None, debug=False, name=None):
"""
Create a logger for communicating with the user or writing to log files.
By default, creates a root logger that prints to stdout.
:param stream_or_file:
The destination of the log messages. If None, stdout will be used.
:type stream_or_file:
`unicode` or `file` or None
:param debug:
Whether or not the logger will be at the DEBUG level (if False, the logger will be at the INFO level).
:type debug:
`bool` or None
:param name:
The logging channel. If None, a root logger will be created.
:type name:
`unicode` or None
:return:
A logger that's been set up according to the specified parameters.
:rtype:
:class:`Logger`
"""
logger = logging.getLogger(name)
if isinstance(stream_or_file, string_types):
handler = logging.FileHandler(stream_or_file, mode='w')
else:
handler = logging.StreamHandler(stream_or_file or sys.stdout)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG if debug else logging.INFO)
return logger | 95f31df5f468261d0d0a0b85c8ffc8b6c5b7d1b7 | 3,650,566 |
from typing import Dict
from typing import Any
from typing import Optional
from typing import Union
from pathlib import Path
def combo2fname(
combo: Dict[str, Any],
folder: Optional[Union[str, Path]] = None,
ext: Optional[str] = ".pickle",
sig_figs: int = 8,
) -> str:
"""Converts a dict into a human readable filename.
Improved version of `combo_to_fname`."""
name_parts = [f"{k}_{maybe_round(v, sig_figs)}" for k, v in sorted(combo.items())]
fname = Path("__".join(name_parts) + ext)
if folder is None:
return fname
return str(folder / fname) | 9951171647167e39753546645f8e1f185d9fa55a | 3,650,567 |
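# Minimal usage sketch for combo2fname (illustrative; assumes maybe_round leaves these
# values unchanged): hyper-parameter combos become sorted, human-readable file names.
combo2fname({"lr": 0.001, "epochs": 10})            # Path('epochs_10__lr_0.001.pickle')
combo2fname({"lr": 0.001}, folder=Path("results"))  # 'results/lr_0.001.pickle' on POSIX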
def cls_from_str(name_str):
"""
Gets class of unit type from a string
Helper function for end-users entering the name of a unit type
and retrieving the class that contains stats for that unit type.
Args:
name_str: str
Returns:
UnitStats
"""
name_str = name_str.lower()
for cls in _UNIT_TYPES.values():
if cls.name.lower() == name_str:
return cls | 4dc26f8586065319a25f8965a5267308bd8dbfea | 3,650,568 |
def convert_to_github_url_with_token(url, token):
"""
Convert a Github URL to a git+https url that identifies via an Oauth token. This allows for installation of
private packages.
:param url: The url to convert into a Github access token oauth url.
:param token: The Github access token to use for the oauth url.
:return: A git+https url with Oauth identification.
"""
for prefix in [GIT_SSH_PREFIX, GIT_GIT_PREFIX, GIT_HTTPS_PREFIX]:
if url.startswith(prefix):
return 'git+https://{}:[email protected]/{}'.format(token, url[len(prefix):])
return url | 9b9c5e17cb389eb938af1221518a6838e65712bc | 3,650,569 |
from datetime import datetime
import numpy
def get_numbers_of_papers(metrics):
"""
Convert the metrics into a format that is easier to work with. Year-ordered
numpy arrays.
"""
publications = metrics['histograms']['publications']
    year, total, refereed = [], [], []
y = list(publications['all publications'].keys())
y.sort()
for i in range(len(y)):
k = y[i]
year.append(datetime.strptime(k, '%Y'))
total.append(publications['all publications'][k])
refereed.append(publications['refereed publications'][k])
year, total, refereed = \
numpy.array(year), numpy.array(total), numpy.array(refereed)
return year, total, refereed | ce8b079ea416ff01b4974ea7ae7aa82080321cbb | 3,650,570 |
import json
from typing import Dict
def test_base_provider_get_transform_json_exception(mock_name, mock_value):
"""
Test BaseProvider.get() with a json transform that raises an exception
"""
mock_data = json.dumps({mock_name: mock_value}) + "{"
class TestProvider(BaseProvider):
def _get(self, name: str, **kwargs) -> str:
assert name == mock_name
return mock_data
def _get_multiple(self, path: str, **kwargs) -> Dict[str, str]:
raise NotImplementedError()
provider = TestProvider()
with pytest.raises(parameters.TransformParameterError) as excinfo:
provider.get(mock_name, transform="json")
assert "Extra data" in str(excinfo) | abb81b142a34f264466b808867bc3a7cc4460fcf | 3,650,571 |
def load_check_definitions(lang):
"""
Retrieve Trust Advisor check definitions
"""
retval = {}
resp = TA_C.describe_trusted_advisor_checks(language=lang)
if resp:
try:
checks = resp['checks']
retval = {a['id']:a for a in checks}
except ValueError:
LOGGER.error('Received invalid check definitions: %s', str(resp))
else:
LOGGER.error('No response from check definitions')
return retval | 43bca091506d33270a7e0fa3ec6ca84e4c342bf6 | 3,650,572 |
def filter_phrase(comments, phrase):
"""Returns list of comments and replies filtered by substring."""
results = []
for comment in comments:
if phrase.lower() in comment.message.lower():
results.append(comment)
for reply in comment.replies:
if phrase.lower() in reply.message.lower():
results.append(reply)
if not results:
return None
return results | 0865163f117550e36b2c21608739649b7b99f825 | 3,650,573 |
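# Minimal usage sketch for filter_phrase (illustrative; SimpleNamespace stands in for real
# comment objects exposing .message and .replies):
from types import SimpleNamespace
comments = [SimpleNamespace(message="Please refund my order", replies=[]),
            SimpleNamespace(message="Great product!", replies=[])]
matching = filter_phrase(comments, "refund")   # -> [first comment]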
def asses_completeness(language_code: str, sw: ServiceWorker = Depends(get_sw)):
"""
    Run a completeness check for the given language: frontend, backend, domains and entries.
@param language_code:
@param sw:
@return:
"""
if language_code not in sw.messages.get_added_languages():
raise ApplicationException(HTTP_404_NOT_FOUND, "Language not yet added")
return sw.translation.asses_completion(language_code) | 9de6a9130ec34e47782679ac63d80707de5b98ce | 3,650,574 |
def create_intrinsic_node_class(cls):
"""
Create dynamic sub class
"""
class intrinsic_class(cls):
"""Node class created based on the input class"""
def is_valid(self):
raise TemplateAttributeError('intrisnic class shouldn\'t be directly used')
intrinsic_class.__name__ = '%s_intrinsic' % cls.__name__
return intrinsic_class | ddcb0ba5f36981288fd9748f1f533f02f1eb1604 | 3,650,575 |
import os
def load(provider, config_location=DEFAULT_CONFIG_DIR):
"""Load provider specific auth info from file """
auth = None
auth_file = None
try:
config_dir = os.path.join(config_location, NOIPY_CONFIG)
print("Loading stored auth info [%s]... " % config_dir, end="")
auth_file = os.path.join(config_dir, provider)
with open(auth_file) as f:
auth_key = f.read()
auth = ApiAuth.get_instance(auth_key.encode('utf-8'))
print("OK.")
except IOError as e:
print('{0}: "{1}"'.format(e.strerror, auth_file))
raise e
return auth | b9dfb27bcef9216ff5fddd94decbd6bf3b9dc297 | 3,650,576 |
import cv2

def segment_fish(image):
"""Attempts to segment the clown fish out of the provided image."""
hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
light_orange = (1, 190, 200)
dark_orange = (18, 255, 255)
mask = cv2.inRange(hsv_image, light_orange, dark_orange)
light_white = (0, 0, 200)
dark_white = (145, 60, 255)
mask_white = cv2.inRange(hsv_image, light_white, dark_white)
final_mask = mask + mask_white
result = cv2.bitwise_and(image, image, mask=final_mask)
result = cv2.GaussianBlur(result, (7, 7), 0)
return result | c9ee166f12e9c344143f677939a82dd1a00a5fb5 | 3,650,577 |
def get_bugzilla_url(bug_id):
"""Return bugzilla url for bug_id."""
return u'https://bugzilla.mozilla.org/show_bug.cgi?id=%d' % bug_id | 051f37ab1eeb096d353317bfd9514b30d13ddd8a | 3,650,578 |
def enable_faster_encoder(self, need_build=True, use_fp16=False):
"""
Compiles fusion encoder operator intergrated FasterTransformer using the
method of JIT(Just-In-Time) and replaces the `forward` function of
`paddle.nn.TransformerEncoder` and `paddle.nn.TransformerEncoderLayer`
objects inherited from `self` to support inference using FasterTransformer.
Examples:
.. code-block:: python
from paddlenlp.ops import enable_faster_encoder, disable_faster_encoder
model.eval()
model = enable_faster_encoder(model)
enc_out = model(src, src_mask)
model = disable_faster_encoder(model)
"""
def init_func(layer):
if isinstance(layer, TransformerEncoderLayer):
is_usable = True
if layer._config['bias_attr'] == False:
logger.warning("`False` for paddle.nn.TransformerEncoder's" \
" parameter `bias_attr` is not supported in " \
"FasterTransformer by now. The original forward" \
" will be involved.")
is_usable = False
if layer._config['activation'] not in ('relu', 'gelu'):
logger.warning("Only 'relu' or 'gelu' is supported by now. " \
"The original forward will be involved.")
is_usable = False
if is_usable:
layer.forward = layer._ft_forward
elif isinstance(layer, TransformerEncoder):
layer.forward = layer._ft_forward
if use_fp16:
convert_to_fp16(layer)
if not self.training:
if need_build:
try:
load("FasterTransformer", verbose=True)
except Exception:
logger.warning(
"Exception occurs when using FasterTransformer. " \
"The original forward will be involved. ")
return self
for layer in self.children():
layer.apply(init_func)
return self | 4da1f669cefd291df4bc790dfc68fcbe5ce93f86 | 3,650,579 |
import numpy as np

def func(*x):
""" Compute the function to minimise.
Vector reshaped for more readability.
"""
res = 0
x = np.array(x)
x = x.reshape((n, 2))
for i in range(n):
for j in range(i+1, n):
(x1, y1), (x2, y2) = x[i, :], x[j, :]
delta = (x2 - x1)**2 + (y2 - y1)**2 - distances[i, j]**2
res += delta**2
return res | 775d4330ca77e04662f1920dd2160631deb30430 | 3,650,580 |
import torch
def transform_target(target, classes=None):
"""
Accepts target value either single dimensional torch.Tensor or (int, float)
:param target:
:param classes:
:return:
"""
if isinstance(target, torch.Tensor):
if target.ndim == 1:
target = target.item() if target.shape[0] == 1 else target
if target.ndim == 0 and classes is None:
return round(target.item(), 2)
if target.shape[0] == 1 and type(classes) in (list, tuple) and classes:
return classes[target]
# Multi-label
if target.shape[0] > 1 and type(classes) in (list, tuple) and classes:
return ",".join([classes[index] for index, value in enumerate(target) if value])
elif isinstance(target, int) and classes:
target = classes[target]
return target | 5e1423b4beac4385fa4f328bfdfeed2859c28f7b | 3,650,581 |
from typing import List
from typing import Tuple
def merge_all_regions(out_path: str, id_regions: List[Tuple[int, File]]) -> Tuple[int, int, File]:
"""
Recursively merge a list of region files.
"""
if len(id_regions) == 1:
# Base case 1.
[(sample_id, region_file)] = id_regions
return (sample_id, sample_id, region_file)
elif len(id_regions) == 2:
# Base case 2.
[(sample1_id, region1_file), (sample2_id, region2_file)] = id_regions
else:
# Recursive case.
k = find_midpoint(len(id_regions))
sample1_id, _, region1_file = merge_all_regions(out_path, id_regions[:k])
_, sample2_id, region2_file = merge_all_regions(out_path, id_regions[k:])
return (
sample1_id,
sample2_id,
merge_regions(out_path, sample1_id, region1_file, sample2_id, region2_file),
) | d9ebbdfec49b6e5702e4c16476a20440185e39ef | 3,650,582 |
import os
def write_champ_file_geometry(filename, nucleus_num, nucleus_label, nucleus_coord):
"""Writes the geometry data from the quantum
chemistry calculation to a champ v2.0 format file.
Returns:
None as a function value
"""
if filename is not None:
if isinstance(filename, str):
## Write down a geometry file in the new champ v2.0 format
filename_geometry = os.path.splitext("champ_v2_" + filename)[0]+'_geom.xyz'
with open(filename_geometry, 'w') as file:
file.write("{} \n".format(nucleus_num))
# header line printed below
file.write("# Converted from the trexio file using trex2champ converter https://github.com/TREX-CoE/trexio_tools \n")
for element in range(nucleus_num):
file.write("{:5s} {: 0.6f} {: 0.6f} {: 0.6f} \n".format(nucleus_label[element], nucleus_coord[element][0], nucleus_coord[element][1], nucleus_coord[element][2]))
file.write("\n")
file.close()
else:
raise ValueError
# If filename is None, return a string representation of the output.
else:
return None | dfaaddb754e50c4343b60ae3f19f6f7b3af8ee73 | 3,650,583 |
def check_for_collision(sprite1: arcade.Sprite,
sprite2: arcade.Sprite) -> bool:
"""Check for collision between two sprites.
Used instead of Arcade's default implementation as we need a hack to
return False if there is just a one pixel overlap, if it's not
multiplayer...
"""
allowed_overlap = 0
if isinstance(sprite1, player.Player):
if isinstance(sprite1.game, game.Game):
allowed_overlap = 1
x_collision = (
sprite1.right - allowed_overlap > sprite2.left + allowed_overlap
and sprite1.left + allowed_overlap < sprite2.right - allowed_overlap
)
if not x_collision:
return False
return (
sprite1.top - allowed_overlap > sprite2.bottom + allowed_overlap
and sprite1.bottom + allowed_overlap < sprite2.top - allowed_overlap
) | 679de76d880c2e2e9ac34e0d87cc5cdd0211daa9 | 3,650,584 |
def modify_color(hsbk, **kwargs):
"""
Helper function to make new colors from an existing color by modifying it.
:param hsbk: The base color
:param hue: The new Hue value (optional)
:param saturation: The new Saturation value (optional)
:param brightness: The new Brightness value (optional)
:param kelvin: The new Kelvin value (optional)
"""
return hsbk._replace(**kwargs) | ecc5118873aaf0e4f63bad512ea61d2eae0f7ead | 3,650,585 |
import numpy as np
from sklearn.model_selection import train_test_split

def train_val_test_split(df, train_p=0.8, val_p=0.1, state=1, shuffle=True):
"""Wrapper to split data into train, validation, and test sets.
Parameters
-----------
df: pd.DataFrame, np.ndarray
Dataframe containing features (X) and labels (y).
train_p: float
Percent of data to assign to train set.
val_p: float
Percent of data to assign to validation set.
state: int or None
Int will make the split repeatable. None will give a different random
split each time.
shuffle: bool
If True, randomly shuffle the data before splitting.
"""
test_p = 1 - val_p / (1 - train_p)
train, val = train_test_split(df, train_size=train_p, shuffle=shuffle,
random_state=state)
test = None
if not np.isclose(test_p, 0):
val, test = train_test_split(val, test_size=test_p, random_state=state)
return train, val, test | 67b50b172f94ee65981ab124f03e192c7631c49c | 3,650,586 |
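# Minimal usage sketch for train_val_test_split (illustrative): an 80/10/10 split of a
# small frame; with train_p=0.8 and val_p=0.2 the test set would instead be empty (None).
import pandas as pd
df = pd.DataFrame({"x": range(100), "y": range(100)})
train, val, test = train_val_test_split(df, train_p=0.8, val_p=0.1)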
def add_logs_to_table_heads(max_logs):
"""Adds log headers to table data depending on the maximum number of logs from trees within the stand"""
master = []
for i in range(2, max_logs + 1):
for name in ['Length', 'Grade', 'Defect']:
master.append(f'Log {i} {name}')
if i < max_logs:
master.append('Between Logs Feet')
return master | 5db494650901bfbb114135da9596b9b453d47568 | 3,650,587 |
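# Minimal usage sketch for add_logs_to_table_heads (illustrative): with up to 3 logs per
# tree, the extra headers cover logs 2 and 3.
add_logs_to_table_heads(3)
# -> ['Log 2 Length', 'Log 2 Grade', 'Log 2 Defect', 'Between Logs Feet',
#     'Log 3 Length', 'Log 3 Grade', 'Log 3 Defect']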
def stations_at_risk(stations, level):
"""Returns a list of tuples, (station, risk_level) for all stations with risk above level"""
level = risk_level(level)
stations = [(i, station_flood_risk(i)) for i in stations]
return [i for i in stations if risk_level(i[1]) >= level] | c18ef9af1ac02633f2daed9b88dfe6d72e83481a | 3,650,588 |
from pathlib import Path
from tempfile import NamedTemporaryFile

import fsspec
def try_to_acquire_archive_contents(pull_from: str, extract_to: Path) -> bool:
"""Try to acquire the contents of the archive.
Priority:
1. (already extracted) local contents
    2. address-specified (local|remote) archive through fsspec
Returns:
True if success_acquisition else False
"""
# validation
if extract_to.is_file():
msg = f"contents ({str(extract_to)}) should be directory or empty, but it is file."
raise RuntimeError(msg)
# contents directory already exists.
if extract_to.exists():
return True
else:
file_system: fsspec.AbstractFileSystem = fsspec.filesystem(get_protocol(pull_from))
# todo: get_protocol with cache
archive_exists = file_system.exists(pull_from)
archive_is_file = file_system.isfile(pull_from)
# No corresponding archive. Failed to acquire.
if not archive_exists:
return False
else:
# validation
if not archive_is_file:
msg = f"Archive ({pull_from}) should be file or empty, but is directory."
raise RuntimeError(msg)
# A dataset file exists, so pull and extract.
pull_from_with_cache = f"simplecache::{pull_from}"
extract_to.mkdir(parents=True, exist_ok=True)
print("Accessing the archive in the adress...")
with fsspec.open(pull_from_with_cache, "rb") as archive:
with NamedTemporaryFile("ab") as tmp:
print("Reading the archive in the adress...")
while True:
# Read every 100 MB for large corpus.
d = archive.read(100*1000*1000)
if d:
tmp.write(d)
else:
break
tmp.seek(0)
print("Read.")
print("Extracting...")
extract_archive(tmp.name, str(extract_to))
print("Extracted.")
return True | e510e165db9d57357b5c1e588db5563b08bf407f | 3,650,589 |
def unproxy(proxy):
"""Return a new copy of the original function of method behind a proxy.
The result behaves like the original function in that calling it
does not trigger compilation nor execution of any compiled code."""
if isinstance(proxy, types.FunctionType):
return _psyco.unproxycode(proxy.func_code)
if isinstance(proxy, types.MethodType):
f = unproxy(proxy.im_func)
return new.instancemethod(f, proxy.im_self, proxy.im_class)
raise TypeError, "%s objects cannot be proxies" % type(proxy).__name__ | 7fad2339a8e012fd95117b73b79a371d4488e439 | 3,650,590 |
from typing import Optional
def get_measured_attribute(data_model, metric_type: str, source_type: str) -> Optional[str]:
"""Return the attribute of the entities of a source that are measured in the context of a metric.
For example, when using Jira as source for user story points, the points of user stories (the source entities) are
summed to arrive at the total number of user story points.
"""
attribute = (
data_model["sources"].get(source_type, {}).get("entities", {}).get(metric_type, {}).get("measured_attribute")
)
return str(attribute) if attribute else attribute | f15379e528b135ca5d9d36f50f06cb95a145b477 | 3,650,591 |
def get_one_frame_stack_dynamic(sp, idx):
"""
for a given sp and index number in a dynamic caller operand_stack data, return its
data type and value.
Note, at runtime, caller.operand_stack is dynamic, sp, idx, types and values are all
changing during the run time.
"""
if idx > sp:
return None, None
# get the caller.operand_stack length
length = None
try:
buffer = m_util.gdb_exec_to_str('p func.operand_stack.size()')
except:
return None, None
if buffer[0] != '$':
return None, None
if ' = ' in buffer:
try:
length = int(buffer.split(' = ')[1])
except:
return None, None
else:
return None, None
if m_debug.Debug: m_debug.dbg_print("length=", length)
if length <= sp or length < idx:
return None, None
try:
maple_type = m_util.gdb_exec_to_str('p func.operand_stack[' + str(idx) + '].ptyp')
except:
return None, None
maple_type = maple_type.split(' = ')[1].split('maple::PTY_')[1].rstrip()
try:
buffer = m_util.gdb_exec_to_str('p func.operand_stack[' + str(idx) + ']')
except:
return None, None
value = buffer.split('x = ')[1][1:].split('}')[0]
if maple_type in value:
v = value.split(maple_type + ' = ')[1].split(',')[0]
else:
return None, None
if m_debug.Debug: m_debug.dbg_print("return maple_type=", maple_type, "v=", v)
return maple_type, v | b621c25294e3d4aac720a35e73c3969b02ba5e31 | 3,650,592 |
def getIntArg(arg, optional=False):
"""
Similar to "getArg" but return the integer value of the arg.
Args:
arg (str): arg to get
optional (bool): argument to get
Returns:
int: arg value
"""
return(int(getArg(arg, optional))) | a30e39b5a90bd6df996bdd8a43faf787aed7128f | 3,650,593 |
from typing import Iterable
def get_in_with_default(keys: Iterable, default):
"""`get_in` function, returning `default` if a key is not there.
>>> get_in_with_default(["a", "b", 1], 0)({"a": {"b": [0, 1, 2]}})
1
>>> get_in_with_default(["a", "c", 1], 0)({"a": {"b": [0, 1, 2]}})
0
"""
getter = get_in(keys)
def get_in_with_default(x):
try:
return getter(x)
except (KeyError, IndexError, TypeError):
return default
return get_in_with_default | dbb5a9753bad224245ffea884e33802930bb8ded | 3,650,594 |
import numpy as np

def conv_HSV2BGR(hsv_img):
    """Converts an HSV image to a BGR image.
    Arguments:
        hsv_img {numpy.ndarray} -- HSV image (3ch)
    Returns:
        numpy.ndarray -- BGR image (3ch)
    """
V = hsv_img[:, :, 2]
C = hsv_img[:, :, 1]
H_p = hsv_img[:, :, 0] / 60
X = C * (1 - np.abs(H_p % 2 - 1))
Z = np.zeros_like(C)
vals = [[Z, X, C], [Z, C, X], [X, C, Z], [C, X, Z], [C, Z, X], [X, Z, C]]
bgr_img = np.zeros_like(hsv_img)
for i in range(6):
idx = (i <= H_p) * (H_p < (i + 1))
bgr_img[:, :, 0][idx] = (V - C)[idx] + vals[i][0][idx]
bgr_img[:, :, 1][idx] = (V - C)[idx] + vals[i][1][idx]
bgr_img[:, :, 2][idx] = (V - C)[idx] + vals[i][2][idx]
return (bgr_img * 255).astype(np.uint8) | f748c88e9f4b2a3da2ee7d7703b0d3c9615e564b | 3,650,595 |
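# Minimal usage sketch for conv_HSV2BGR (illustrative): pure red in HSV
# (H=0, S=1, V=1) converts to BGR (0, 0, 255).
hsv = np.zeros((1, 1, 3), dtype=float)
hsv[0, 0] = [0.0, 1.0, 1.0]
bgr = conv_HSV2BGR(hsv)
# bgr[0, 0] == [0, 0, 255]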
import torch
import torch.nn.functional as F
def remap(tensor, map_x, map_y, align_corners=False):
"""
Applies a generic geometrical transformation to a tensor.
"""
if not tensor.shape[-2:] == map_x.shape[-2:] == map_y.shape[-2:]:
raise ValueError("Inputs last two dimensions must match.")
batch_size, _, height, width = tensor.shape
# grid_sample need the grid between -1/1
map_xy = torch.stack([map_x, map_y], dim=-1)
map_xy_norm = normalize_pixel_coordinates(map_xy, height, width)
# simulate broadcasting since grid_sample does not support it
map_xy_norm = map_xy_norm.expand(batch_size, -1, -1, -1)
# warp ans return
tensor_warped = F.grid_sample(tensor, map_xy_norm, align_corners=align_corners)
return tensor_warped | ff88d66b6692548979e45d2a00f6905e2d973c2a | 3,650,596 |
import numpy as np
from statsmodels.tsa.ar_model import AutoReg, ar_select_order

def AutoRegression(df_input,
target_column,
time_column,
epochs_to_forecast=1,
epochs_to_test=1,
hyper_params_ar={}):
"""
    This function fits an autoregressive (AR) model with automatic lag-order selection and
    forecasts both the held-out test period and a number of future steps.
    Parameters:
    - df_input (pandas.DataFrame): Input Time Series.
    - target_column (str): name of the column containing the target feature
    - time_column (str): name of the column containing the pandas Timestamps
    - epochs_to_forecast (int): number of steps for predicting future data
    - epochs_to_test (int): number of steps corresponding to most recent records to test on
    - hyper_params_ar (dict): parameters forwarded to ar_select_order / AutoReg
    Returns:
    - df_output (pandas.DataFrame): Output DataFrame with forecast, forecast_low and forecast_up columns
"""
# create and evaluate an updated autoregressive model
    # build the training series: drop the test and forecast windows and index by time
    input_series = df_input[:-(epochs_to_forecast+epochs_to_test)].set_index(time_column)[target_column]
    # select the AR lag order automatically
    model = ar_select_order(input_series, **hyper_params_ar)
    # drop selection-only options before refitting; work on a copy so the caller's dict is not mutated
    hyper_params_ar = dict(hyper_params_ar)
    for hyp_param in ["maxlag", "glob", "ic"]:
        if hyp_param in hyper_params_ar:
            del hyper_params_ar[hyp_param]
model = AutoReg(input_series, lags=model.ar_lags, **hyper_params_ar)
res = model.fit()
print(res.summary())
#start_idx = df_input[:-(epochs_to_forecast+epochs_to_test)][time_column].max()
start_idx = df_input[-(epochs_to_forecast+epochs_to_test):][time_column].min()
end_idx = df_input[-(epochs_to_forecast+epochs_to_test):][time_column].max()
# =============================================================================
# ### for statsmodels< 0.12.0
# #forecast_steps = model.predict(res.params, start=start_idx, end=end_idx, dynamic=True)
# forecast = df_input[target_column] * np.nan
# forecast[-(epochs_to_forecast+epochs_to_test):] = forecast_steps
# df_output = df_input.copy()
# df_output["forecast"] = forecast
# df_output["forecast_up"] = forecast * 1.1
# df_output["forecast_low"] = forecast * 0.9
# =============================================================================
### for statsmodels>= 0.12.0
forecast_steps = res.get_prediction(start=start_idx, end=end_idx)
forecast_steps_mean = forecast_steps.predicted_mean
forecast_steps_low = forecast_steps.conf_int()["lower"]
forecast_steps_up = forecast_steps.conf_int()["upper"]
forecast = df_input[target_column] * np.nan
forecast_low = df_input[target_column] * np.nan
forecast_up = df_input[target_column] * np.nan
forecast[-(epochs_to_forecast+epochs_to_test):] = forecast_steps_mean
forecast_low[-(epochs_to_forecast+epochs_to_test):] = forecast_steps_low
forecast_up[-(epochs_to_forecast+epochs_to_test):] = forecast_steps_up
df_output = df_input.copy()
df_output["forecast"] = forecast
df_output["forecast_low"] = forecast_low
df_output["forecast_up"] = forecast_up
return df_output | 704daf914897b7a43971b22d721ec0f1bb919d3e | 3,650,597 |
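# Hypothetical usage sketch for AutoRegression (not part of the original snippet): a minimal
# run on synthetic daily data, assuming pandas, numpy and statsmodels >= 0.12 are installed.
# The column names "ds"/"y" and maxlag=10 are illustrative choices, not part of the original API.
import numpy as np
import pandas as pd

rng = pd.date_range("2020-01-01", periods=60, freq="D")
df = pd.DataFrame({"ds": rng,
                   "y": np.sin(np.arange(60) / 5.0) + np.random.normal(0, 0.05, 60)})
out = AutoRegression(df, target_column="y", time_column="ds",
                     epochs_to_forecast=5, epochs_to_test=5,
                     hyper_params_ar={"maxlag": 10})
print(out[["ds", "y", "forecast", "forecast_low", "forecast_up"]].tail(10))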
# `ta` (providing ta.EMA, e.g. TA-Lib's Python wrapper) and _assert_greater_or_equal
# are module-level dependencies assumed to be imported/defined elsewhere.
def VMACD(prices, timeperiod1=12, timeperiod2=26, timeperiod3=9):
"""
39. VMACD量指数平滑异同移动平均线
(Vol Moving Average Convergence and Divergence,VMACD)
说明:
量平滑异同移动平均线(VMACD)用于衡量量能的发展趋势,属于量能引趋向指标。
MACD称为指数平滑异同平均线。分析的数学公式都是一样的,只是分析的物理量不同。
VMACD对成交量VOL进行分析计算,而MACD对收盘价CLOSE进行分析计算。
计算方法:
SHORT=EMA(VOL,N1)
LONG=EMA(VOL,N2)
DIFF=SHORT-LONG
DEA=EMA(DIFF,M)
VMACD=DIFF-DEA
通常N1=12,N2=26,M=9
:param prices:
:param timeperiod1: N1
:param timeperiod2: N2
:param timeperiod3: N3
:return:
"""
assert prices is not None
timeperiod = max(timeperiod1, timeperiod2, timeperiod3)
_assert_greater_or_equal(len(prices), timeperiod)
assert isinstance(timeperiod1, int)
assert isinstance(timeperiod2, int)
assert isinstance(timeperiod3, int)
df_price = prices.copy()
df_price = df_price.sort_index(ascending=True)
EMA = ta.EMA
short = EMA(df_price['volume'].values.astype(float), timeperiod1)
long = EMA(df_price['volume'].values.astype(float), timeperiod2)
diff = short - long
dea = EMA(diff, timeperiod3)
vmacd = diff - dea
df_price['VMACD'] = vmacd
return df_price['VMACD'] | 5de5f372cb7ef6762b82f30d16465469b2cb6afc | 3,650,598 |
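# Hypothetical usage sketch for VMACD (not part of the original snippet). It assumes the
# module-level `ta` (e.g. TA-Lib's Python wrapper) and _assert_greater_or_equal are available.
import numpy as np
import pandas as pd

idx = pd.date_range("2021-01-01", periods=120, freq="D")
prices = pd.DataFrame({"volume": np.random.randint(1_000, 5_000, size=120)}, index=idx)
vmacd = VMACD(prices)   # default 12/26/9 periods; returns a pandas Series named 'VMACD'
print(vmacd.tail())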
import numpy as np


# MIN_RANGE, MAX_RANGE and LOGGER are module-level globals assumed to be defined elsewhere.
def try_compress_numbers_to_range(elements):
"""
Map the "number" attribute of any element in `elements` to the most compact
range possible (starting from 1). If the resulting numbers are within
[MIN_RANGE, MAX_RANGE], return True, otherwise, return False. If it is not
possible to obtain a mapping within [MIN_RANGE, MAX_RANGE], the number
attributes not modified.
"""
numbers = set(int(e.get('number')) for e in elements)
if len(numbers) <= ((MAX_RANGE - MIN_RANGE) + 1):
actual_nrs = sorted(numbers)
ideal_nrs = range(MIN_RANGE, MIN_RANGE + len(numbers))
if np.any(np.array(actual_nrs) != np.array(ideal_nrs)):
nr_map = dict(zip(actual_nrs, ideal_nrs))
LOGGER.debug(u'compressing number range {}'
.format(', '.join(u'{} → {}'.format(k, v) for k, v in nr_map.items())))
for e in elements:
old_nr = int(e.get('number'))
new_nr = nr_map[old_nr]
e.set('number', str(new_nr))
all_within_range = True
else:
all_within_range = False
return all_within_range | 2927046f5bb4217a5267db764fd58275f3fe65ce | 3,650,599 |
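# Hypothetical usage sketch for try_compress_numbers_to_range (not part of the original
# snippet). It assumes MIN_RANGE = 1, MAX_RANGE = 16 and a configured LOGGER in the module;
# the "channel" tag is an illustrative element name.
import xml.etree.ElementTree as ET

elements = [ET.Element("channel", number=str(n)) for n in (3, 7, 12)]
ok = try_compress_numbers_to_range(elements)
print(ok, [e.get("number") for e in elements])   # True ['1', '2', '3'] under those assumptions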