content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
def create_tomography_circuits(circuit, qreg, creg, tomoset):
"""
Add tomography measurement circuits to a QuantumProgram.
    The quantum program must contain a circuit 'name', which is treated as a
    state preparation circuit for state tomography, or as the circuit being
    measured for process tomography. This function appends the circuit with a
    set of measurements specified by the input `tomography_set`, and optionally
    prepends it with state preparation circuits if they are specified in the
    `tomography_set`.
For n-qubit tomography with a tomographically complete set of preparations
and measurements this results in $4^n 3^n$ circuits being added to the
quantum program.
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
tomoset (tomography_set): the dict of tomography configurations.
Returns:
list: A list of quantum tomography circuits for the input circuit.
Raises:
QISKitError: if circuit is not a valid QuantumCircuit
Example:
        For a tomography set specifying state tomography of qubit-0 prepared
by a circuit 'circ' this would return:
```
['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']
```
For process tomography of the same circuit with preparation in the
SIC-POVM basis it would return:
```
[
'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)',
'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)',
'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)',
'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)',
'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)',
'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)'
]
```
"""
if not isinstance(circuit, QuantumCircuit):
raise QISKitError('Input circuit must be a QuantumCircuit object')
dics = tomoset['circuits']
labels = tomography_circuit_names(tomoset, circuit.name)
tomography_circuits = []
for label, conf in zip(labels, dics):
tmp = circuit
# Add prep circuits
if 'prep' in conf:
prep = QuantumCircuit(qreg, creg, name='tmp_prep')
for qubit, op in conf['prep'].items():
tomoset['prep_basis'].prep_gate(prep, qreg[qubit], op)
prep.barrier(qreg[qubit])
tmp = prep + tmp
# Add measurement circuits
meas = QuantumCircuit(qreg, creg, name='tmp_meas')
for qubit, op in conf['meas'].items():
meas.barrier(qreg[qubit])
tomoset['meas_basis'].meas_gate(meas, qreg[qubit], op)
meas.measure(qreg[qubit], creg[qubit])
tmp = tmp + meas
# Add label to the circuit
tmp.name = label
tomography_circuits.append(tmp)
logger.info('>> created tomography circuits for "%s"', circuit.name)
return tomography_circuits | ab42a0b57ccd94f6ffbb64425473c3a90dd10888 | 3,653,100 |
def filter_background(bbox, bg_data):
"""
    Takes a bounding box and a background geojson-like dict, assumed to be the US states, and outputs a geojson-like
    dictionary containing only those features with at least one point within the bounding box, plus any state that
    completely contains the bounding box.
This tests if a feature contains the bounding box by drawing the box that contains the feature and checking if that
box also contains the bounding box. Because features are odd shapes, this may find that more than one feature
completely contains the bounding box. E.g., if you draw a box around Maryland it will also contain a chunk of West
Virginia. To deal with this, we are allowed to find that multiple states contain the bounding box.
:param bbox: The coordinates of the bounding box as [lon, lat, lon, lat]
:param bg_data: a geojson-like dict describing the background
    :return: the features from bg_data whose borders intersect bbox OR the feature(s) which completely contain bbox
"""
box_lon = [bbox[0], bbox[2]]
box_lat = [bbox[1], bbox[3]]
features = bg_data['features']
in_box = []
for f in features:
starting_len = len(in_box)
# Define points for bounding box around the feature.
feature_max_lat = -90
feature_max_lon = -180
feature_min_lat = 90
feature_min_lon = 180
coordinates = f['geometry']['coordinates']
for group in coordinates:
if len(in_box) > starting_len:
# This feature has already been added
break
# actual points for MultiPolygons are nested one layer deeper than those for polygons
if f['geometry']['type'] == 'MultiPolygon':
geom = group[0]
else:
geom = group
for lon, lat in geom:
# check if any point along the state's borders falls within the bounding box.
if min(box_lon) <= lon <= max(box_lon) and min(box_lat) <= lat <= max(box_lat):
in_box.append(f)
break
# If any point of a feature falls within the bounding box, then the feature cannot contain the box,
# so this only needs to be run if the above if statement is not executed
feature_min_lon = min(feature_min_lon, lon)
feature_min_lat = min(feature_min_lat, lat)
feature_max_lon = max(feature_max_lon, lon)
feature_max_lat = max(feature_max_lat, lat)
# If the box containing a feature also contains the bounding box, keep this feature
# Allow adding more than one because otherwise MD contains boxes in WV, and CA would contain most of NV.
if feature_min_lat < min(box_lat) and feature_max_lat > max(box_lat) and \
feature_min_lon < min(box_lon) and feature_max_lon > max(box_lon):
in_box.append(f)
keepers = {
'type': 'FeatureCollection',
'features': in_box
}
return keepers | f06fe5efe1e3920d8b1092601a121e313da4eec4 | 3,653,101 |
def rename_columns(table, mapper):
""" Renames the table headings to conform with the ketos naming convention.
Args:
table: pandas DataFrame
Annotation table.
mapper: dict
Dictionary mapping the headings of the input table to the
standard ketos headings.
Returns:
: pandas DataFrame
Table with new headings
"""
return table.rename(columns=mapper) | c9c9228f4f477b8d5ade234964c2540fd20ddd09 | 3,653,102 |
def get_round(year, match):
"""Get event number by year and (partial) event name
A fuzzy match is performed to find the most likely event for the provided name.
Args:
year (int): Year of the event
match (string): Name of the race or gp (e.g. 'Bahrain')
Returns:
The round number. (2019, 'Bahrain') -> 2
"""
def build_string(d):
        r = len('https://en.wikipedia.org/wiki/')  # length of the wiki URL prefix, stripped from the urls below
c, l = d['Circuit'], d['Circuit']['Location'] # noqa: E741 (for now...)
return (f"{d['url'][r:]} {d['raceName']} {c['circuitId']} "
+ f"{c['url'][r:]} {c['circuitName']} {l['locality']} "
+ f"{l['country']}")
races = ergast.fetch_season(year)
to_match = [build_string(block) for block in races]
ratios = np.array([fuzz.partial_ratio(match, ref) for ref in to_match])
return int(races[np.argmax(ratios)]['round']) | e5f4d724eb2d453251c107287450b1f0166152dc | 3,653,103 |
from typing import Union
import json
def parse_tuple(s: Union[str, tuple]) -> tuple:
"""Helper for load_detections_csv, to parse string column into column of Tuples."""
if isinstance(s, str):
result = s.replace("(", "[").replace(")", "]")
result = result.replace("'", '"').strip()
result = result.replace(",]", "]")
if result:
# print(result)
return tuple(sorted((json.loads(result))))
else:
return tuple()
else:
return s | ad568bfc8ccdf8440378e852daccaf2f24a7e2d0 | 3,653,104 |
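# Usage sketch, not part of the original snippet (it assumes the parse_tuple above is
# in scope): stringified tuples from a CSV column are normalised into sorted tuples,
# blanks become empty tuples, and values that are already tuples pass through.
# parse_tuple("('person', 'car',)")   # -> ('car', 'person')
# parse_tuple("")                     # -> ()
# parse_tuple(('car',))               # -> ('car',)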
import re
def clean(tweet):
"""
clean tweet text by removing links, special characters
using simple regex statements
Parameters
----------
tweet : String
Single Twitter message
Returns
-------
tokenized_tweet : List
List of cleaned tokens derived from the input Twitter message
"""
# convert to lower
tweet = tweet.lower()
    # get the stop-words available from the nltk.corpus lib;
    # because the full corpus would also delete a lot of negations from the
    # tweets, only a subset is used here
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', "you're", "you've", "you'll",
"you'd", 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', "she's", 'her',
'hers', 'herself', 'it', "it's", 'its', 'itself',
'they', 'them', 'their', 'theirs', 'themselves', 'what',
'which', 'who', 'whom', 'this', 'that', "that'll",
'these', 'those', 'am', 'is', 'are', 'was', 'were',
'be', 'been', 'being', 'have', 'has', 'had', 'having',
'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'if', 'or', 'because', 'as', 'until', 'while', 'of',
'at', 'by', 'for', 'with', 'about', 'against', 'between',
'into', 'through', 'during', 'before', 'after', 'above',
'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on',
'off', 'over', 'under', 'again', 'further', 'then', 'once',
'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such',
'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't',
'will', 'just', 'should', "should've", 'now', 'd', 'll',
'm', 'o', 're', 've', 'y', 'ain', 'ma', '.', ',', ';', '!', '?',
'@...', '@', '@…']
    # rebuild a single string (re expects a string-like object, not a list),
    # dropping the stop-words, bare numbers and some punctuation characters
tmp = ""
tmp_c = [tmp +
item.replace(",","").replace(";","").replace("?","").replace("!","").replace("#","")
for item in tweet.split() if item not in stop_words
and not item.isdigit()]
tmp_c = " ".join(item for item in tmp_c)
# remove other special characters including @, URLs, Usernames and other
# special characters
return ' '.join(re.sub("(@[A-Za-z0-9]+)| M^|(\w+:\/\/\S+)",
" ",
tmp_c).split()) | fbfacb49f88638610fb071cb6b14d02dadf665e1 | 3,653,105 |
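# Usage sketch, not part of the original snippet (assumes the clean() above is in
# scope): stop-words, bare numbers, @-mentions and URLs are stripped and the text is
# lower-cased.
# clean("I love the new phone! http://t.co/xyz @user 42")   # -> 'love new phone'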
def predict(x, u):
"""
:param x: Particle state (x,y,theta) [size 3 array]
:param u: Robot inputs (u1,u2) [size 2 array]
:return: Particle's updated state sampled from the motion model
"""
x = x + motionModel(x, u) + np.random.multivariate_normal(np.zeros(3), Q)
return x | 7fe3e9fa42e1e74ac448a0139ca6dae8ff5388ad | 3,653,106 |
def plot_multiple(datasets, method='scatter', pen=True, labels=None, **kwargs):
"""
Plot a series of 1D datasets as a scatter plot
with optional lines between markers.
Parameters
----------
datasets : a list of ndatasets
method : str among [scatter, pen]
pen : bool, optional, default: True
        if method is scatter, this flag tells whether to also draw the lines
        between the markers.
labels : a list of str, optional
labels used for the legend.
**kwargs : other parameters that will be passed to the plot1D function
"""
if not is_sequence(datasets):
# we need a sequence. Else it is a single plot.
return datasets.plot(**kwargs)
if not is_sequence(labels) or len(labels) != len(datasets):
        # we need a sequence of labels of the same length as datasets
raise ValueError('the list of labels must be of same length '
'as the datasets list')
for dataset in datasets:
if dataset._squeeze_ndim > 1:
            raise NotImplementedError('plot multiple is designed to work on '
                                      '1D datasets only. You may achieve '
                                      'several plots with the `clear=False` '
                                      'parameter as a workaround solution')
# do not save during this plots, nor apply any commands
# we will make this when all plots will be done
output = kwargs.get('output', None)
kwargs['output'] = None
commands = kwargs.get('commands', [])
kwargs['commands'] = []
clear = kwargs.pop('clear', True)
legend = kwargs.pop('legend', None) # remove 'legend' from kwargs before calling plot
# else it will generate a conflict
for s in datasets: # , colors, markers):
ax = s.plot(method=method, pen=pen, marker='AUTO', color='AUTO', ls='AUTO', clear=clear, **kwargs)
        clear = False  # clear=False is necessary so that the next plot is drawn on the same figure
# scale all plots
if legend is not None:
_ = ax.legend(ax.lines, labels, shadow=True, loc=legend, frameon=True, facecolor='lightyellow')
# now we can output the final figure
kw = {'output': output, 'commands': commands}
datasets[0]._plot_resume(datasets[-1], **kw)
return ax | 85b41b719cb8d33884dd7364b3e0937100167c6a | 3,653,107 |
def _scale_enum(anchor, scales):
""" 列举关于一个anchor的三种尺度 128*128,256*256,512*512
Enumerate a set of anchors for each scale wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor) #返回宽高和中心坐标,w:16,h:16,x_ctr:7.5,y_ctr:7.5
ws = w * scales #[128 256 512]
hs = h * scales #[128 256 512]
anchors = _mkanchors(ws, hs, x_ctr, y_ctr) #[[-56 -56 71 71] [-120 -120 135 135] [-248 -248 263 263]]
return anchors | b0b8e9418b935daf2961fbd690a4ccf2b6bd6d7b | 3,653,108 |
import codecs
def metamodel_from_file(file_name, **kwargs):
"""
Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
lang_desc = f.read()
metamodel = metamodel_from_str(lang_desc=lang_desc,
file_name=file_name,
**kwargs)
return metamodel | 526a1876ed11aff2341133d061573ae9f3bfb1fe | 3,653,109 |
import sirepo.template
import copy
def _python(data):
"""Generate python in current directory
Args:
data (dict): simulation
Returns:
py.path.Local: file to append
"""
template = sirepo.template.import_module(data)
res = pkio.py_path('run.py')
res.write(template.python_source_for_model(copy.deepcopy(data), None))
return res | df0d2eae8155f1093dde02db73fd5185983d6847 | 3,653,110 |
def load_hosts_conf(path='/etc/hosts'):
"""parse hosts file"""
hosts = {}
try:
with open(path, 'r') as f:
for line in f.readlines():
parts = line.strip().split()
if len(parts) < 2:
continue
addr = ip_address(parts[0])
if addr:
for hostname in parts[1:]:
if hostname:
hosts[hostname] = addr
except IOError as e:
hosts['localhost'] = '127.0.0.1'
return hosts | c6e2d1f34f5aa140a3bccfbd4d9791641cc75fff | 3,653,111 |
import os
def get_first_env(*args):
"""
Return the first env var encountered from list
PLEASE NOTE: Always prefer using get_env, this helper is for app
transitioning to a new config structure.
Example:
get_first_env('DB_NAME', 'DATABASE_NAME')
"""
for name in args:
if name in os.environ:
return os.environ[name]
error_msg = "Missing any of these env vars {}".format(args)
raise ImproperlyConfigured(error_msg) | 70438ee5b991e2f6c90905e89fb16fb0fd6f5de2 | 3,653,112 |
import pandas
def pick(df, isnotnull=None, **kwargs):
"""Function to pick row indices from DataFrame.
Copied from kkpandas
This method provides a nicer interface to choose rows from a DataFrame
that satisfy specified constraints on the columns.
isnotnull : column name, or list of column names, that should not be null.
See pandas.isnull for a defintion of null
All additional kwargs are interpreted as {column_name: acceptable_values}.
For each column_name, acceptable_values in kwargs.items():
The returned indices into column_name must contain one of the items
in acceptable_values.
If acceptable_values is None, then that test is skipped.
Note that this means there is currently no way to select rows that
ARE none in some column.
If acceptable_values is a single string or value (instead of a list),
then the returned rows must contain that single string or value.
TODO:
add flags for string behavior, AND/OR behavior, error if item not found,
return unique, ....
"""
    msk = np.ones(len(df), dtype=bool)
for key, val in list(kwargs.items()):
if val is None:
continue
elif is_nonstring_iter(val):
msk &= df[key].isin(val)
else:
msk &= (df[key] == val)
if isnotnull is not None:
# Edge case
if not is_nonstring_iter(isnotnull):
isnotnull = [isnotnull]
# Filter by not null
for key in isnotnull:
            msk &= ~pandas.isnull(df[key])
return df.index[msk] | e8cdf1fb88ef92748c73095c62ca80c29bd40fec | 3,653,113 |
import random
def _waveform_distortion(waveform, distortion_methods_conf):
""" Apply distortion on waveform
This distortion will not change the length of the waveform.
Args:
waveform: numpy float tensor, (length,)
distortion_methods_conf: a list of config for ditortion method.
a method will be randomly selected by 'method_rate' and
apply on the waveform.
Returns:
distorted waveform.
"""
r = random.uniform(0, 1)
acc = 0.0
for distortion_method in distortion_methods_conf:
method_rate = distortion_method['method_rate']
acc += method_rate
if r < acc:
distortion_type = distortion_method['name']
distortion_conf = distortion_method['params']
point_rate = distortion_method['point_rate']
return distort_wav_conf(waveform, distortion_type,
distortion_conf , point_rate)
return waveform | cca32854f3d72f381d40a5ca8802c29996413149 | 3,653,114 |
def _pad_returns(returns):
"""
Pads a returns Series or DataFrame with business days, in case the
existing Date index is sparse (as with PNL csvs). Sparse indexes if not
padded will affect the Sharpe ratio because the 0 return days will not be
included in the mean and std.
"""
bdays = pd.date_range(start=returns.index.min(), end=returns.index.max(),freq="B")
idx = returns.index.union(bdays)
return returns.reindex(index=idx).fillna(0) | fde27dd928d3f0510f98fa4eba8f89f7d6b81922 | 3,653,115 |
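# Usage sketch, not part of the original snippet (assumes pandas as pd and the
# _pad_returns above): a sparse PNL index is padded with the intermediate business
# days so that 0-return days count towards the mean and std.
# returns = pd.Series([0.01, -0.02], index=pd.to_datetime(["2020-01-02", "2020-01-08"]))
# _pad_returns(returns).index.size   # -> 5 (business days Jan 2, 3, 6, 7 and 8)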
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
#if r.headers["Content-Type"] !="application/json" and r.status_code!=304:
# print(str(r.status_code)+" -",end="")
return r | 3a758340d1c13cc29f0013f3d2fec77c47099c02 | 3,653,116 |
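# Usage sketch, not part of the original snippet (assumes a Flask application object
# named `app`): registering the function as an after-request hook applies the
# no-cache headers to every response.
# app.after_request(add_header)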
def pair_sorter(aln):
"""Get the alignment name and attributes for sorting."""
return (
aln.name,
not aln.first_in_pair,
aln.unmapped,
aln.supplementary_alignment,
aln.secondary_alignment) | 217eac7c89a12f68f4c9fe324c4feb6c2a955d58 | 3,653,117 |
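# Usage sketch, not part of the original snippet (hypothetical `alignments` list of
# objects exposing the attributes read above): sorting with this key groups records
# by read name and puts first-in-pair, mapped, primary alignments first.
# alignments.sort(key=pair_sorter)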
import requests
import yaml
import json
def _read_input_from(input_from):
""" Reads the labels from the input from. """
inputs = []
for input_from_line in input_from.splitlines():
# Skip if line is empty.
if input_from_line.strip() == '':
continue
# Load file content
print(f"::debug::Loading labels from '{input_from_line}'.")
input_from_content = None
if input_from_line.startswith('http://') or input_from_line.startswith('https://'):
            requests_url_response = requests.get(input_from_line)
if requests_url_response.ok:
input_from_content = requests_url_response.text
else:
                raise Exception(f'Unable to read file from {input_from_line}: {requests_url_response.reason}')
else:
with open(input_from_line, 'r') as input_from_file:
input_from_content = input_from_file.read()
if input_from_line.endswith('.yaml') or input_from_line.endswith('.yml'):
inputs.extend(yaml.load(input_from_content, Loader=yaml.FullLoader))
if input_from_line.endswith('.json'):
inputs.extend(json.loads(input_from_content))
return inputs | 2477771fecf99e1f6ef59a0ebeceb3a650b9f414 | 3,653,118 |
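# Usage sketch, not part of the original snippet (hypothetical file name and URL):
# label definitions can be aggregated from a mix of local YAML/JSON files and remote
# URLs, one path or URL per line of the input string.
# labels = _read_input_from("labels.yml\nhttps://example.com/more-labels.json")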
def project_to2d(
pts: np.ndarray, K: np.ndarray, R: np.ndarray, t: np.ndarray
) -> np.ndarray:
"""Project 3d points to 2d.
Projects a set of 3-D points, pts, into 2-D using the camera intrinsic
matrix (K), and the extrinsic rotation matric (R), and extrinsic
translation vector (t). Note that this uses the matlab
convention, such that
M = [R;t] * K, and pts2d = pts3d * M
"""
M = np.concatenate((R, t), axis=0) @ K
projPts = np.concatenate((pts, np.ones((pts.shape[0], 1))), axis=1) @ M
projPts[:, :2] = projPts[:, :2] / projPts[:, 2:]
return projPts | 5f9bc03ae0086649746651da4e5e8e1d870db6bd | 3,653,119 |
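# Shape sketch, not part of the original snippet (assumes numpy as np): with N
# points, a 3x3 K, a 3x3 R and a 1x3 t (matlab row-vector convention), the result is
# N x 3, with the first two columns holding the 2-D projections.
# pts = np.ones((10, 3)); K = np.eye(3); R = np.eye(3); t = np.zeros((1, 3))
# project_to2d(pts, K, R, t).shape   # -> (10, 3)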
import copy
def uncontract_general(basis, use_copy=True):
"""
Removes the general contractions from a basis set
The input basis set is not modified. The returned basis
may have functions with coefficients of zero and may have duplicate
shells.
If use_copy is True, the input basis set is not modified.
"""
if use_copy:
basis = copy.deepcopy(basis)
for k, el in basis['elements'].items():
if not 'electron_shells' in el:
continue
newshells = []
for sh in el['electron_shells']:
# See if we actually have to uncontract
# Also, don't uncontract sp, spd,.... orbitals
# (leave that to uncontract_spdf)
if len(sh['coefficients']) == 1 or len(sh['angular_momentum']) > 1:
newshells.append(sh)
else:
if len(sh['angular_momentum']) == 1:
for c in sh['coefficients']:
                        # copy, then replace 'coefficients'
newsh = sh.copy()
newsh['coefficients'] = [c]
newshells.append(newsh)
el['electron_shells'] = newshells
    # If use_copy is True, we already made our deep copy
return prune_basis(basis, False) | de6638ae34b4d6f0b3a467048683ac363f71f9c1 | 3,653,120 |
def read_sd15ch1_images(root_dir,
image_relative_path_seq,
resize=None,
color=False):
"""
WARNING
-------
- All images must have the same shape (this is the case for the frames, and all models but the
ones of the "01-original" category).
    - Loading many images at once can quickly fill up your RAM.
Returns
-------
- np.array((number_of_images, images_height, images_width)) if `color` is `False`
- np.array((number_of_images, images_height, images_width, image_channels)) otherwise.
"""
# Read first image, if any, to get image shape
# Note: all images must have the same shape
if len(image_relative_path_seq) == 0:
return np.array([])
    # We have at least 1 element
img0 = read_sd15ch1_image(root_dir, image_relative_path_seq[0], resize, color)
# allocate some contiguous memory to host the decoded images
dim_axis0 = (len(image_relative_path_seq), ) # make it a tuple
dim_axis_others = img0.shape
imgs_shape = dim_axis0 + dim_axis_others
__info("About to allocate %d bytes for an array of shape %s." % (np.prod(imgs_shape) * 4, imgs_shape))
imgs = np.zeros(imgs_shape, dtype=np.float32)
# Handle first image
imgs[0, ...] = img0
# Loop over other images
for ii, rel_path in enumerate(image_relative_path_seq[1:], start=1):
imgi = read_sd15ch1_image(root_dir, rel_path, resize, color)
if imgi.shape != dim_axis_others:
__err("All images must have the same shape. Inconsistent dataset. Aborting loading.", RuntimeError)
imgs[ii, ...] = imgi
return imgs | 0d6efd2eac2762440ae532564e4f680a1f056d30 | 3,653,121 |
def is_private_bool(script_dict):
""" Returns is_private boolean value from user dictionary object """
return script_dict['entry_data']['ProfilePage'][0]['graphql']['user']['is_private'] | 1e8b30a38dc527dc5e2ea73e75c253d8f1a59550 | 3,653,122 |
def manage_greylist(request):
"""
View for managing greylist.
"""
message = None
if request.method == 'POST':
form = GreylistForm(request.POST)
if form.is_valid():
            # attach the requesting user before saving
new_greylisted_guest = form.save(commit=False)
new_greylisted_guest.addedBy = request.user
new_greylisted_guest.save()
message = 'Successfully added entry to greylist'
else:
message = 'Error adding entry to greylist'
else:
form = GreylistForm()
context = {
'greylist': [
(
greylisting,
user_can_delete_greylisting(request.user, greylisting),
)
for greylisting in GreylistedGuest.objects.all().order_by('name')
],
'message': message,
'form': form,
}
return render(request, 'parties/greylist/manage.html', context) | eafbbf10b6150189d25c7d863cb00f6565648925 | 3,653,123 |
def get_regions():
"""Summary
Returns:
TYPE: Description
"""
client = boto3.client('ec2')
region_response = client.describe_regions()
regions = [region['RegionName'] for region in region_response['Regions']]
return regions | 700119f1c852ad9475823170388c062f62291637 | 3,653,124 |
def _is_ignored_read_event(request):
"""Return True if this read event was generated by an automated process, as
indicated by the user configurable LOG_IGNORE* settings.
See settings_site.py for description and rationale for the settings.
"""
if (
django.conf.settings.LOG_IGNORE_TRUSTED_SUBJECT
and d1_gmn.app.auth.is_trusted_subject(request)
):
return True
if (
django.conf.settings.LOG_IGNORE_NODE_SUBJECT
and d1_gmn.app.auth.is_client_side_cert_subject(request)
):
return True
if _has_regex_match(
request.META["REMOTE_ADDR"], django.conf.settings.LOG_IGNORE_IP_ADDRESS
):
return True
if _has_regex_match(
request.META.get("HTTP_USER_AGENT", "<not provided>"),
django.conf.settings.LOG_IGNORE_USER_AGENT,
):
return True
if _has_regex_match(
request.primary_subject_str, django.conf.settings.LOG_IGNORE_SUBJECT
):
return True
return False | f6f7417fe923ef6bd56a6d649ef302ed811185e8 | 3,653,125 |
def aten_embedding(mapper, graph, node):
""" 构造embedding的PaddleLayer。
TorchScript示例:
%inputs_embeds.1 : Tensor = aten::embedding(%57, %input_ids.1, %45, %46, %46)
参数含义:
%inputs_embeds.1 (Tensor): 输出,embedding后的结果。
%57 (Tensor): weights。
%input_ids.1 (Tensor): 需要进行embedding的特征层。
%45 (int): padding_idx。
%46 (bool): scale_grad_by_freq。
%46 (bool): sparse。
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("embedding", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # get the list of outputs of the current node
current_outputs = [output_name]
    # handle input 0, i.e. %57
weights = mapper.pytorch_params[inputs_name[0]]
mapper.paddle_params[op_name + ".weight"] = weights
layer_attrs["num_embeddings"] = weights.shape[0]
layer_attrs["embedding_dim"] = weights.shape[1]
    # handle input 1, i.e. %input_ids.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
layer_inputs["input"] = inputs_name[1]
    # get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
    # handle input 2, i.e. %45
if mapper.attrs[inputs_name[2]] == -1:
layer_attrs["padding_idx"] = None
else:
layer_attrs["padding_idx"] = mapper.attrs[inputs_name[2]]
    # handle input 4, i.e. %46
layer_attrs["sparse"] = mapper.attrs[inputs_name[4]]
graph.add_layer(
"paddle.nn.Embedding",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs | d174c7e551bb3db7e7dc5d9014de9edd48ee4032 | 3,653,126 |
import os
def build_drivers(compilation_commands, linker_commands, kernel_src_dir,
target_arch, clang_path, llvm_link_path, llvm_bit_code_out, is_clang_build):
"""
The main method that performs the building and linking of the driver files.
:param compilation_commands: Parsed compilation commands from the json.
:param linker_commands: Parsed linker commands from the json.
:param kernel_src_dir: Path to the kernel source directory.
:param target_arch: Number representing target architecture.
:param clang_path: Path to clang.
:param llvm_link_path: Path to llvm-link
:param llvm_bit_code_out: Folder where all the linked bitcode files should be stored.
:param is_clang_build: Flag to indicate that this is a clang build.
:return: True
"""
output_llvm_sh_file = os.path.join(llvm_bit_code_out, 'llvm_build.sh')
fp_out = open(output_llvm_sh_file, 'w')
fp_out.write("#!/bin/bash\n")
log_info("Writing all compilation commands to", output_llvm_sh_file)
all_compilation_commands = []
obj_bc_map = {}
for curr_compilation_command in compilation_commands:
if is_clang_build:
wd, obj_file, bc_file, build_str = _get_llvm_build_str_from_llvm(clang_path, curr_compilation_command.curr_args,
kernel_src_dir, target_arch,
curr_compilation_command.work_dir,
curr_compilation_command.src_file,
curr_compilation_command.output_file,
llvm_bit_code_out)
else:
wd, obj_file, bc_file, build_str = _get_llvm_build_str(clang_path, curr_compilation_command.curr_args,
kernel_src_dir, target_arch,
curr_compilation_command.work_dir,
curr_compilation_command.src_file,
curr_compilation_command.output_file, llvm_bit_code_out)
all_compilation_commands.append((wd, build_str))
obj_bc_map[obj_file] = bc_file
fp_out.write("cd " + wd + ";" + build_str + "\n")
fp_out.close()
log_info("Got", len(all_compilation_commands), "compilation commands.")
log_info("Running compilation commands in multiprocessing modea.")
p = Pool(cpu_count())
return_vals = p.map(run_program_with_wd, all_compilation_commands)
log_success("Finished running compilation commands.")
output_llvm_sh_file = os.path.join(llvm_bit_code_out, 'llvm_link_cmds.sh')
fp_out = open(output_llvm_sh_file, 'w')
fp_out.write("#!/bin/bash\n")
log_info("Writing all linker commands to", output_llvm_sh_file)
all_linker_commands = []
recursive_linker_commands = []
for curr_linked_command in linker_commands:
curr_ret_val = _get_llvm_link_str(llvm_link_path, kernel_src_dir,
curr_linked_command.input_files, obj_bc_map,
curr_linked_command.output_file,
curr_linked_command.work_dir, llvm_bit_code_out)
if curr_ret_val is not None:
wd, obj_file, bc_file, build_str = curr_ret_val
all_linker_commands.append((wd, build_str))
obj_bc_map[obj_file] = bc_file
fp_out.write("cd " + wd + ";" + build_str + "\n")
else:
# these are recursive linker commands.
recursive_linker_commands.append(curr_linked_command)
log_info("Got", len(all_linker_commands), "regular linker commands.")
log_info("Running linker commands in multiprocessing mode.")
p = Pool(cpu_count())
return_vals = p.map(run_program_with_wd, all_linker_commands)
log_success("Finished running linker commands.")
if len(recursive_linker_commands) > 0:
log_info("Got", len(recursive_linker_commands), " recursive linker commands.")
_process_recursive_linker_commands(recursive_linker_commands, kernel_src_dir, llvm_link_path,
llvm_bit_code_out, obj_bc_map, fp_out)
fp_out.close()
return True | ca6dcd258d118c8114204303e5993ee376780d70 | 3,653,127 |
def _validate_opts(opts):
"""
Check that all of the types of values passed into the config are
of the right types
"""
def format_multi_opt(valid_type):
try:
num_types = len(valid_type)
except TypeError:
# Bare type name won't have a length, return the name of the type
# passed.
return valid_type.__name__
else:
def get_types(types, type_tuple):
for item in type_tuple:
if isinstance(item, tuple):
get_types(types, item)
else:
try:
types.append(item.__name__)
except AttributeError:
log.warning(
"Unable to interpret type %s while validating "
"configuration",
item,
)
types = []
get_types(types, valid_type)
ret = ", ".join(types[:-1])
ret += " or " + types[-1]
return ret
errors = []
err = (
"Config option '{0}' with value {1} has an invalid type of {2}, a "
"{3} is required for this option"
)
for key, val in opts.items():
if key in VALID_OPTS:
if val is None:
if VALID_OPTS[key] is None:
continue
else:
try:
if None in VALID_OPTS[key]:
continue
except TypeError:
# VALID_OPTS[key] is not iterable and not None
pass
if isinstance(val, VALID_OPTS[key]):
continue
if hasattr(VALID_OPTS[key], "__call__"):
try:
VALID_OPTS[key](val)
if isinstance(val, (list, dict)):
# We'll only get here if VALID_OPTS[key] is str or
# bool, and the passed value is a list/dict. Attempting
# to run int() or float() on a list/dict will raise an
# exception, but running str() or bool() on it will
# pass despite not being the correct type.
errors.append(
err.format(
key, val, type(val).__name__, VALID_OPTS[key].__name__
)
)
except (TypeError, ValueError):
errors.append(
err.format(
key, val, type(val).__name__, VALID_OPTS[key].__name__
)
)
continue
errors.append(
err.format(
key, val, type(val).__name__, format_multi_opt(VALID_OPTS[key])
)
)
# Convert list to comma-delimited string for 'return' config option
if isinstance(opts.get("return"), list):
opts["return"] = ",".join(opts["return"])
for error in errors:
log.warning(error)
if errors:
return False
return True | cafd1048a7496728715a192a4f70c7d50ade3622 | 3,653,128 |
async def from_string(input, output_path=None, options=None):
"""
Convert given string or strings to PDF document
:param input: string with a desired text. Could be a raw text or a html file
:param output_path: (optional) path to output PDF file. If not provided,
PDF will be returned as string
:param options: (optional) dict to configure pyppeteer page.pdf action
Returns: output_path if provided else PDF Binary
"""
sources = Source(input, 'string')
r = PDFMate(sources, options=options)
return await r.to_pdf(output_path) | 2b3b6d9523d516fd3d258a3f722655720f49d91b | 3,653,129 |
def parse_tuple(tuple_string):
"""
strip any whitespace then outter characters.
"""
return tuple_string.strip().strip("\"[]") | d0052dce0582ca04d70455f1833d98545792c8ac | 3,653,130 |
def create_size():
"""Create a new size."""
in_out_schema = SizeSchema()
try:
new_size = in_out_schema.load(request.json)
except ValidationError as err:
abort(400, {'message': err.messages})
try:
db.session.add(new_size)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
return in_out_schema.jsonify(new_size) | f85b339c5ec5c38b8778de25456caa6fb0680d76 | 3,653,131 |
import click
from typing import Optional
def inject_snakefmt_config(
ctx: click.Context, param: click.Parameter, config_file: Optional[str] = None
) -> Optional[str]:
"""
If no config file argument provided, parses "pyproject.toml" if one exists.
Injects any parsed configuration into the relevant parameters to the click `ctx`.
"""
if config_file is None:
config_file = find_pyproject_toml(ctx.params.get("src", ()))
config = read_snakefmt_config(config_file)
if ctx.default_map is None:
ctx.default_map = {}
ctx.default_map.update(config) # type: ignore # bad types in .pyi
return config_file | 4d1fc2996db4c63070f67ef6b19387b2b30ac5cd | 3,653,132 |
def sort_by_ctime(paths):
"""Sorts list of file paths by ctime in ascending order.
Arg:
paths: iterable of filepaths.
Returns:
list: filepaths sorted by ctime or empty list if ctime is unavailable.
"""
ctimes = list(map(safe_ctime, paths))
if not all(ctimes) or len(set(ctimes)) <= 1:
return []
else:
return sorted(paths, key=lambda fp: safe_ctime(fp)) | 551b7bc1d2cdc416588cbd783c9b1ac3e5914077 | 3,653,133 |
def get_ospf_metric(device,
destination_address):
"""Get OSPF metric
Args:
device (obj): Device object
destination_address (str): Destination address
"""
out = device.parse('show route')
# Example dictionary
# "route-table": [
# {
# "active-route-count": "0",
# "destination-count": "0",
# "hidden-route-count": "0",
# "holddown-route-count": "0",
# "rt": [
# {
# "metric": "101",
# }
# },
rt_list = Dq(out).get_values('rt')
for rt_dict in rt_list:
rt_destination_ = Dq(rt_dict).get_values("rt-destination", 0)
if not isinstance(rt_destination_, list):
if rt_destination_.startswith(str(destination_address)):
metric_ = Dq(rt_dict).get_values('metric', 0)
if not metric_:
continue
return metric_
return None | f5cd44794389a28db647e815baac4e954d59757b | 3,653,134 |
def get_episode_url():
"""エピソードの配信URLを追加
Returns:
[type]: [description]
"""
# フォームの値を取得
episode_num = "#"+request.form['episode_num'][0]
print(episode_num)
    # get the list of distribution services
podcasts = Podcast.query.all()
broadcasts = Broadcast.query.all()
    # distribution-service URLs
broadcast_urls = {}
for br in broadcasts:
broadcast_urls[br.broadcast_service] = br.broadcast_url
    # get the episode's URLs
episode_urls = get_episode_url_all(broadcast_urls, episode_num)
return render_template(
'podcasts.html',
podcasts=podcasts,
broadcasts=broadcasts,
episode_num=episode_num,
episode_urls=episode_urls
) | e27f0324fd8332aa0648d35630cbb88b2b36c721 | 3,653,135 |
def autofs():
"""Fixture data from /proc/mounts."""
data = "flux-support -rw,tcp,hard,intr,noacl,nosuid,vers=3,retrans=5 flux-support.locker.arc-ts.umich.edu:/gpfs/locker0/ces/g/nfs/f/flux-support\numms-remills -rw,tcp,hard,intr,noacl,nosuid,vers=3,retrans=5 umms-remills.locker.arc-ts.umich.edu:/gpfs/locker0/ces/g/nfs/u/umms-remills"
return data | ea53c34d863de69c15f1e1247b98599c5f365ab7 | 3,653,136 |
def flag_dims(flags):
"""Return flag names, dims, and initials for flags.
Only flag value that correspond to searchable dimensions are
returned. Scalars and non-function string values are not included
in the result.
"""
dims = {}
initials = {}
for name, val in flags.items():
try:
flag_dim, initial = _flag_dim(val, name)
except ValueError:
pass
else:
dims[name] = flag_dim
initials[name] = initial
names = sorted(dims)
return (names, [dims[name] for name in names], [initials[name] for name in names]) | 4cafd991e21facacf36423028288e4c5bb10c8d9 | 3,653,137 |
import shutil
import os
def where(cmd, path=None):
"""
A function to wrap shutil.which for universal usage
"""
raw_result = shutil.which(cmd, os.X_OK, path)
if raw_result:
return os.path.abspath(raw_result)
else:
raise ValueError("Could not find '{}' in the path".format(cmd)) | bbac46386ef955190898e52ee3efa57aac3fa264 | 3,653,138 |
def to_stack(df, col, by, transform=None, get_cats=False):
""" Convert columns of a dataframe to a list of lists by 'by'
Args:
df:
col:
by:
transform:
Returns:
"""
g = df.groupby(by)
transform = _notransform if transform is None else transform
x_data = []
for gr in g.groups:
x_data.append(transform(g.get_group(gr)[col].values))
cats = np.array([gg for gg in g.groups])
x_len = np.array([len(x) for x in x_data])
inds = x_len.argsort()
# print(cats)
# print(inds)
if get_cats:
return [x_data[i] for i in inds], cats[inds]
return [x_data[i] for i in inds] | 7bbf0ff609aaf2a6f5b49f80128ad06c04f93b5c | 3,653,139 |
from typing import List
def entries_repr(entries: List[Metadata]) -> str:
"""
Generates a nicely formatted string repr from a list of Dropbox metadata.
:param entries: List of Dropbox metadata.
:returns: String representation of the list.
"""
str_reps = [
f"<{e.__class__.__name__}(path_display={e.path_display})>" for e in entries
]
return "[" + ",\n ".join(str_reps) + "]" | cc768a662ac6440ef7d5ca0eaddff5205a7c0a8c | 3,653,140 |
def frequency_encode(dftrain, dftest, columnlist, output_type="include"):
"""
Frequency encode columns in columnlist.
Parameters:
dftrain: [DataFrame] train set
dftest: [DataFrame] test set
columnlist: [list] columns to encode.
output_type: [str], default="include" will include the columns in the same dataframes.
If "separate", returns separate dataframes.
Returns:
dftrain_freq: [DataFrame] train
dftest_freq: [DataFrame] test
Author: kmp
"""
if output_type is "include":
for col in columnlist:
col_freqs = dftrain.fillna({col:'NA'})[col].value_counts(normalize=True)
dftrain[col+'_freq'] = dftrain.fillna({col:'NA'})[col].map(col_freqs)
dftest[col+'_freq'] = dftest.fillna({col:'NA'})[col].map(col_freqs).fillna(0)
dftrain_freq = dftrain
dftest_freq = dftest
else:
dftrain_freq = pd.DataFrame(index=dftrain.index)
dftest_freq = pd.DataFrame(index=dftest.index)
for col in columnlist:
col_freqs = dftrain.fillna({col:'NA'})[col].value_counts(normalize=True)
dftrain_freq[col+'_freq'] = dftrain.fillna({col:'NA'})[col].map(col_freqs)
dftest_freq[col+'_freq'] = dftest.fillna({col:'NA'})[col].map(col_freqs).fillna(0)
return dftrain_freq, dftest_freq | 3380853f0b5f88a6b2392a657424c4fc326876e2 | 3,653,141 |
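# Usage sketch, not part of the original snippet (assumes pandas as pd; the column
# name 'city' is made up): categories are mapped to their relative frequency in the
# train split, and categories unseen in train fall back to 0 in the test split.
# train = pd.DataFrame({'city': ['a', 'a', 'b']})
# test = pd.DataFrame({'city': ['b', 'c']})
# frequency_encode(train, test, ['city'])
# train['city_freq'].tolist()   # -> [0.666..., 0.666..., 0.333...]
# test['city_freq'].tolist()    # -> [0.333..., 0.0]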
def get_ranked_results(completed_rounds):
"""
For the rounds given in completed_rounds, calculate the total score for each team.
Then all teams are sorted on total score and are given a ranking to allow for ex aequo scores.
"""
results = []
for team in QTeam.objects.all():
teamtotal = 0
for a in team.qanswer_set.all():
# Only add results for complete rounds
if a.rnd in completed_rounds:
teamtotal += a.score
results.append((team.team_name, teamtotal))
# Sort the results
sorted_results = sorted(results, reverse=True, key=lambda tup: tup[1])
rank, count, previous, ranking = 0, 0, None, []
for key, num in sorted_results:
count += 1
if num != previous:
rank += count
previous = num
count = 0
ranking.append((rank, key, num))
return ranking | cea2afa2bb8de1db82450f323274af94ad3b633f | 3,653,142 |
def get_subgraphs():
"""
Returns a list of lists. Each list is a subgraph (represented as a list of dictionaries).
:return: A list of lists of dictionaries.
"""
subgraph_list = [c.get("color") for c in classes if c.get("color") is not None]
subgraphs = []
# Add to subgraphs all the lists of actual subgraphs
for c in subgraph_list:
sub = [cl for cl in classes if cl.get("color") == c and cl]
if sub not in subgraphs:
subgraphs.append(sub)
# Now add to subgraphs all the items (as lists) that don't belong to a subsystem
for c in classes:
if c.get("color") is None:
sub = [c]
subgraphs.append(sub)
return subgraphs | 5e9b766b2c7f58d71eac62d88be64096272d2511 | 3,653,143 |
def score(self, features):
""" return score from ML models"""
assert len(self._models) > 0, 'No valid prediction model'
scores = list()
for feature in features:
# when feature list extraction fails
if not feature:
scores.append(-float('inf'))
continue
item = list()
for ins in self._models:
item.append(ins.inference(feature))
pred = [i for i in item if i]
scores.append(float(sum(pred)/len(pred)))
return scores | 413eb4a0ecdcf0ac4b8f9cf9643b08a839c78b9a | 3,653,144 |
def fromRGB(rgb):
"""Convert tuple or list to red, green and blue values that can be accessed as follows:
a = fromRGB((255, 255, 255))
a["red"]
a["green"]
a["blue"]
"""
return {"red":rgb[0], "green":rgb[1], "blue":rgb[2]} | 205a8f189d177e7af5cdc686e7c52fd2053a3c87 | 3,653,145 |
import math
def computeTelescopeTransmission(pars, offAxis):
"""
Compute tel. transmission (0 < T < 1) for a given set of parameters
as defined by the MC model and for a given off-axis angle.
Parameters
----------
pars: list of float
Parameters of the telescope transmission. Len(pars) should be 4.
offAxis: float
Off-axis angle in deg.
Returns
-------
float
Telescope transmission.
"""
_degToRad = math.pi / 180.0
if pars[1] == 0:
return pars[0]
else:
t = math.sin(offAxis * _degToRad) / (pars[3] * _degToRad)
return pars[0] / (1.0 + pars[2] * t ** pars[4]) | 50b2e2908726b8a77bc83a2821cf760b7475300b | 3,653,146 |
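# Usage sketch, not part of the original snippet: with pars[1] == 0 the transmission
# is a constant and the off-axis angle is ignored.
# computeTelescopeTransmission([0.89, 0, 0, 0, 0], offAxis=2.5)   # -> 0.89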
def mean_iou(
results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False,
):
"""Calculate Mean Intersection and Union (mIoU)
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category IoU, shape (num_classes, ).
"""
all_acc, acc, iou = eval_seg_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=["mIoU"],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label,
)
return all_acc, acc, iou | a6d90cb4028c831db82b4dddb6a4c52a8fa4e1f0 | 3,653,147 |
def as_date_or_none(date_str):
"""
Casts a date string as a datetime.date, or None if it is blank.
>>> as_date_or_none('2020-11-04')
datetime.date(2020, 11, 4)
>>> as_date_or_none('')
None
>>> as_date_or_none(None)
None
"""
if not date_str:
return None
return dateutil_parser.parse(date_str).date() | bf01bd280526e7962e1b08aa0400d6ebadf8053f | 3,653,148 |
def guarantee_trailing_slash(directory_name: str) -> str:
"""Adds a trailling slash when missing
Params:
:directory_name: str, required
A directory name to add trailling slash if missing
Returns:
A post processed directory name with trailling slash
"""
if not directory_name.endswith('/'):
return directory_name + '/'
return directory_name | 38cfdf971262fceb4888277522b22ba7276fa9b7 | 3,653,149 |
def bc32encode(data: bytes) -> str:
"""
bc32 encoding
see https://github.com/BlockchainCommons/Research/blob/master/papers/bcr-2020-004-bc32.md
"""
dd = convertbits(data, 8, 5)
polymod = bech32_polymod([0] + dd + [0, 0, 0, 0, 0, 0]) ^ 0x3FFFFFFF
chk = [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
return "".join([BECH32_ALPHABET[d] for d in dd + chk]) | 46feb2b744089f5f4bf84cae6ff9d29623b3bba5 | 3,653,150 |
def read_all_reviews(current_user):
"""Reads all Reviews"""
reviews = Review.query.all()
if reviews:
return jsonify({'Reviews': [
{
'id': review.id,
'title': review.title,
'desc': review.desc,
'reviewer': review.reviewer.username,
'business': review.business.name,
'created_at': review.created_at,
'updated_at': review.updated_at
} for review in reviews
]}), 200
return jsonify({'warning': 'No Review, create one first'}), 200 | 78642f38dab8328c11445e67848b7f6d9583d892 | 3,653,151 |
def matches_filters(row, field_to_index, transformed_filters):
"""
Validate field name in transformed filter_expressions, return TRUE for rows matching all filters
Parameters
------------
row : str
row in `list` registry table (manager.show())
field_to_index : dict
key = column names, val = column index, in registry table (or manager.show())
transformed_filters : list
transformed/formatted fields for filtering rows
Returns
--------
bool
return TRUE for rows matching all filters
"""
field_to_index_lower = dict(
(k.lower(), v) for k, v in field_to_index.items()
) # to accept case-insensitive comparison
for tfilter in transformed_filters:
[field, op, value] = tfilter
if field not in field_to_index_lower:
raise DSGInvalidParameter(
f"field='{field}' is not a valid column name, valid fields: {list(field_to_index.keys())}"
)
obj_val = row[field_to_index_lower[field]].lower() # to accept case-insensitive comparison
if not matches_filter(val=obj_val, op=op, required_value=value):
return False
return True | 119b5e7d7f7dfb72e1a66525d5bf84665cbbced0 | 3,653,152 |
def div(f, other):
"""Element-wise division applied to the `Functional` objects.
# Arguments
f: Functional object.
other: A python number or a tensor or a functional object.
# Returns
A Functional.
"""
validate_functional(f)
inputs = f.inputs.copy()
if is_functional(other):
inputs += to_list(other.inputs)
lmbd = [Lambda(lambda x: x[0]/x[1], name=graph_unique_name("div")) for X in f.outputs]
else:
_warn_for_ndarray(other)
lmbd = [Lambda(lambda x: x/other, name=graph_unique_name("div")) for X in f.outputs]
Functional = f.get_class()
res = Functional(
inputs = unique_tensors(inputs),
outputs = _apply_operation(lmbd, f, other),
layers = lmbd
)
return res | abfc7df85946cfcd5196dff58bec22ee237b590b | 3,653,153 |
import itertools
def select_latest_versions(files):
"""Select only the latest version of files."""
result = []
def same_file(file):
"""Return a versionless identifier for a file."""
# Dataset without the version number
dataset = file.dataset.rsplit('.', 1)[0]
return (dataset, file.name)
files = sorted(files, key=same_file)
for _, versions in itertools.groupby(files, key=same_file):
versions = sorted(versions, reverse=True)
latest_version = versions[0]
result.append(latest_version)
if len(versions) > 1:
logger.debug("Only using the latest version %s, not %s",
latest_version, versions[1:])
return result | 81862d64ea44d0b0d9621bb198e5590c7f7ef0c7 | 3,653,154 |
def _gen_input(storyline, nsims, mode, site, chunks, current_c, nperc, simlen, swg_dir, fix_leap):
"""
    :param storyline: loaded storyline
    :param nsims: number of sims to run
    :param mode: one of ['irrigated', 'dryland']
    :param site: one of ['eyrewell', 'oxford']
    :param chunks: the number of chunks
    :param current_c: the current chunk (from range(chunks))
    :param nperc: number of simulations that can be run per chunk
    :param simlen: simulation length
    :param swg_dir: path to the directory containing the files from the SWG
    :param fix_leap: whether to apply the leap-year fix
    :return:
"""
# manage chunks
if chunks == 1:
num_to_pull = nsims
elif chunks > 1:
num_to_pull = nperc
if current_c + 1 == chunks:
# manage last chunk
num_to_pull = nsims - (current_c * nperc)
else:
        raise ValueError("shouldn't get here")
params, doy_irr = get_params_doy_irr(mode, site)
matrix_weathers = []
days_harvests = []
# get restriction data
if mode == 'dryland':
rest_data = np.repeat([None], num_to_pull)
elif mode == 'irrigated':
rest_data = get_irr_data(num_to_pull, storyline, simlen)
else:
raise ValueError('weird arg for mode: {}'.format(mode))
# get weather data
weather_data = _get_weather_data(storyline=storyline, nsims=num_to_pull, simlen=simlen, swg_dir=swg_dir, site=site,
fix_leap=fix_leap)
# make all the other data
for rest, weather in zip(rest_data, weather_data):
if rest is None:
rest_temp = None
else:
rest_temp = pd.DataFrame(data=rest, index=weather.index, columns=['frest'])
matrix_weather = create_matrix_weather(mode, weather_data=weather, restriction_data=rest_temp,
rest_key='frest', fix_leap=fix_leap)
matrix_weathers.append(matrix_weather)
days_harvests.append(create_days_harvest(mode, matrix_weather, site, fix_leap=fix_leap))
return params, doy_irr, matrix_weathers, days_harvests | d0594a3b986c1415202db5f894101537464355a8 | 3,653,155 |
def guess_mime_mimedb (filename):
"""Guess MIME type from given filename.
@return: tuple (mime, encoding)
"""
mime, encoding = None, None
if mimedb is not None:
mime, encoding = mimedb.guess_type(filename, strict=False)
if mime not in ArchiveMimetypes and encoding in ArchiveCompressions:
# Files like 't.txt.gz' are recognized with encoding as format, and
# an unsupported mime-type like 'text/plain'. Fix this.
mime = Encoding2Mime[encoding]
encoding = None
return mime, encoding | 8202551c81b25e9bb104ec82114a750a16556b23 | 3,653,156 |
def get_paths(uast_file, max_length, max_width, token_extractor, split_leaves=True):
"""
Creates a list of all the paths given the max_length and max_width restrictions.
:param uast_file: file containing a bblfsh UAST as string and binary-coded
:param max_length:
:param max_width:
:param token_extractor: function to transform a node into a single string token
:param split_leaves: get leaves token as a different node
:return: list(list(str)) list of paths (which are list of strings)
"""
print("Processing file: {}".format(uast_file))
uast = Node.FromString(open(uast_file, 'rb').read())
tree, leaves = extend_tree(uast)
paths = []
if len(leaves) > 1:
for i in range(len(leaves)):
for j in range(i + 1, min(i + max_width, len(leaves))):
u, v = leaves[i], leaves[j]
# TODO decide where to filter comments and decouple bblfsh
if not is_noop_line(u) and not is_noop_line(v):
ancestor = lca(u, v)
d = distance(u, v, ancestor)
if d <= max_length:
node_path = get_path(u, v, ancestor, split_leaves=split_leaves)
# convert nodes to its desired representation
paths.append([token_extractor(p) for p in node_path])
return paths | cd2ec58f6d960317ac56a6992373f6aa311f9672 | 3,653,157 |
def get_members():
"""
Get a list of all members in FreeIPA
"""
members = []
ldap_conn = ldap.get_con()
res = ldap_conn.search_s(
"cn=users,cn=accounts,dc=csh,dc=rit,dc=edu",
pyldap.SCOPE_SUBTREE,
"(uid=*)",
["uid", "displayName"],
)
for member in res:
members.append(
{
"value": member[1]["uid"][0].decode("utf-8"),
"display": member[1]
.get("displayName", member[1]["uid"])[0]
.decode("utf-8"),
}
)
return members | 2714bddf7554884fa638066f91aa489b497f6c15 | 3,653,158 |
def _unicode_decode_extracted_tb(extracted_tb):
"""Return a traceback with the string elements translated into Unicode."""
return [(_decode(file), line_number, _decode(function), _decode(text))
for file, line_number, function, text in extracted_tb] | bbe020daecf6dc7021ff38dfac6869646120be5d | 3,653,159 |
def load_table(source, version):
"""Load synth table from file
"""
filepath = get_table_filepath(source, version=version)
return pd.read_table(filepath, delim_whitespace=True) | b95d35a6f297e0f73fee3652a0c9c6942b792451 | 3,653,160 |
def single_spaces(string: str) -> str:
"""Replaces all instances of whitespace-like chars with single spaces
Args:
string (str): The string to modify
Returns:
str: The cleaned string
"""
return UGLY_SPACES_RE.sub(" ", string) | eb37ae691f7fb54b6a23a5fd6d2cdd3edf8ebf57 | 3,653,161 |
import socket
def snmp_count(
address,
oid,
port=161,
community="public",
version=SNMP_v2c,
timeout=10,
bulk=False,
filter=None,
max_repetitions=BULK_MAX_REPETITIONS,
tos=None,
ioloop=None,
udp_socket=None,
):
"""
    Walk the given OID with GETNEXT/GETBULK requests and count the matching
    entries. Returns a Future to be used inside @tornado.gen.coroutine.
"""
def true(x, y):
        return True
logger.debug("[%s] SNMP COUNT %s", address, oid)
if not filter:
filter = true
poid = oid + "."
result = 0
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
while True:
# Get PDU
if bulk:
pdu = getbulk_pdu(community, oid, max_repetitions=max_repetitions, version=version)
else:
pdu = getnext_pdu(community, oid, version=version)
# Send request and wait for response
try:
yield sock.sendto(pdu, (address, port))
data, addr = yield sock.recvfrom(4096)
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=oid)
except socket.gaierror as e:
logger.debug("[%s] Cannot resolve address: %s", address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
except socket.error as e:
logger.debug("[%s] Socket error: %s", address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
finally:
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
# Parse response
try:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oid)
if resp.error_status == NO_SUCH_NAME:
# NULL result
break
elif resp.error_status != NO_ERROR:
# Error
raise SNMPError(code=resp.error_status, oid=oid)
else:
# Success value
for oid, v in resp.varbinds:
if oid.startswith(poid):
# Next value
if filter(oid, v):
result += 1
else:
logger.debug("[%s] COUNT result: %s", address, result)
sock.close()
raise Return(result) | 0ddf236abeb1bca4d9a8584b7714225b1bc93166 | 3,653,162 |
def create_group(api_key: str, board_id: str, group_name: str, *args, **kwargs):
"""Creates a new group in a specific board.
__________
Parameters
api_key : `str`
The monday.com v2 API user key.
board_id : `str`
The board's unique identifier.
group_name : `str`
The name of the new group.
args : `tuple`
The list of group return fields.
kwargs : `dict`
Optional arguments for querying assets.
_______
Returns
data : `dict`
A monday.com group in item form.
_____________
Return Fields
archived : `bool`
Is the group archived or not.
color : `str`
The group's color.
deleted : `bool`
Is the group deleted or not.
id : `str`
The group's unique identifier.
items : `list[moncli.entities.Item]`
The items in the group.
position : `str`
The group's position in the board.
title : `str`
The group's title.
"""
args = get_field_list(constants.DEFAULT_GROUP_QUERY_FIELDS, *args)
kwargs = get_method_arguments(constants.CREATE_GROUP_OPTIONAL_PARAMS, **kwargs)
kwargs['board_id'] = util.IntValue(board_id)
kwargs['group_name'] = util.StringValue(group_name)
return execute_mutation(api_key, constants.CREATE_GROUP, *args, **kwargs) | b591fe000718615f44954e488d4e3c46b9cf0123 | 3,653,163 |
import cvxopt
def _solve_qp_ik_vel(vel, jac, joint_pos, joint_lims=None, duration=None, margin=0.2):
"""
Solves the IK for a given pusher velocity using a QP solver, imposing joint limits.
If the solution is optimal, it is guaranteed that the resulting joint velocities will not
cause the joints to reach their limits (minus the margin) in the specified duration of time
:param vel: desired EE velocity (6 values)
:param jac: jacobian
:param joint_pos: current joint positions
:param joint_lims: matrix of joint limits; if None, limits are not imposed
:param duration: how long the specified velocity will be kept (in seconds); if None, 2.0 is used
:param margin: maximum absolute distance to be kept from the joint limits
:return: tuple with the solution (as a numpy array) and with a boolean indincating if the result is optimal or not
:type vel: np.ndarray
:type jac: np.ndarray
:type joint_pos: np.ndarray
:type joint_lims: np.ndarray
:type duration: float
:type margin: float
:rtype: (np.ndarray, bool)
"""
x_len = len(joint_pos)
P = cvxopt.matrix(np.identity(x_len))
A = cvxopt.matrix(jac)
b = cvxopt.matrix(vel)
q = cvxopt.matrix(np.zeros(x_len))
if duration is None:
duration = 2.
if joint_lims is None:
G, h = None, None
else:
G = duration * np.identity(x_len)
h = np.zeros(x_len)
for i in range(x_len):
dist_up = abs(joint_lims[i, 1] - joint_pos[i])
dist_lo = abs(joint_lims[i, 0] - joint_pos[i])
if dist_up > dist_lo:
# we are closer to the lower limit
# => must bound negative angular velocity, i.e. G_ii < 0
h[i] = dist_lo
G[i, i] *= -1
else:
# we are closer to the upper limit
# => must bound positive angular velocity, i.e. G_ii > 0
h[i] = dist_up
h = cvxopt.matrix(h - margin)
G = cvxopt.matrix(G)
# sol = cvxopt.solvers.qp(P, q, A=A, b=b, G=G, h=h, options={'show_progress': False, 'kktreg': 1e-9}, kktsolver='ldl')
sol = cvxopt.solvers.qp(P, q, A=A, b=b, G=G, h=h, options={'show_progress': False, 'refinement': 5})
x = np.array(sol['x']).reshape(-1)
optimal = sol['status'] == 'optimal'
return x, optimal | 25bd82403421f936d81d1a5c3090c1fbb1a964c1 | 3,653,164 |
def channel_will_be_next(crontab: str):
"""Checks if the given notification channel will be activated on the
next channel, in an hour."""
return pycron.is_now(crontab, now + timedelta(hours=1)) | b5505d7e27d70377cfb58acab8a38d9bd12d9351 | 3,653,165 |
def hospital_resident(residents, hospitals, optimal="resident"):
"""Solve an instance of HR using an adapted Gale-Shapley algorithm
:cite:`Rot84`. A unique, stable and optimal matching is found for the given
set of residents and hospitals. The optimality of the matching is found with
respect to one party and is subsequently the worst stable matching for the
other.
Parameters
----------
residents : list of Player
The residents in the game. Each resident must rank a non-empty subset
of the elements of ``hospitals``.
hospitals : list of Hospital
The hospitals in the game. Each hospital must rank all the residents
that have ranked them.
optimal : str, optional
Which party the matching should be optimised for. Must be one of
``"resident"`` and ``"hospital"``. Defaults to the former.
Returns
-------
matching : Matching
A dictionary-like object where the keys are the members of
``hospitals``, and the values are their matches ranked by preference.
"""
if optimal == "resident":
return resident_optimal(residents, hospitals)
if optimal == "hospital":
return hospital_optimal(hospitals) | e666b502a2e74f5c4628108397a82977b7da5b7f | 3,653,166 |
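A hedged usage sketch; it assumes `Player` and `Hospital` classes with a `set_prefs` method as in the `matching` package, which are not shown in the snippet.

from matching.players import Hospital, Player

a, b = Player("A"), Player("B")
mercy = Hospital("Mercy", capacity=2)
a.set_prefs([mercy])
b.set_prefs([mercy])
mercy.set_prefs([a, b])
print(hospital_resident([a, b], [mercy], optimal="resident"))  # e.g. {Mercy: [A, B]}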
from flask import request
# `app` is assumed to be the module-level Flask application instance.
def log_request(response):
"""Log request.
:param response:
:return:
"""
ip = request.headers.get('X-Forwarded-For', request.remote_addr)
host = request.host.split(':', 1)[0]
app.logger.info(f"method={request.method}, path={request.path}, "
f"status={response.status_code}, "
f"ip={ip}, host={host}, params={dict(request.args)},"
f"headers={request.headers}, "
f"body={request.data}")
return response | 838df023329b8b49c2349e58d02b44ef51ef7213 | 3,653,167 |
def reduce(path, n_procs, column, function):
""" Calculate an aggregate value from IMB output.
Args:
path: str, path to file
n_procs: int, number of processes
column: str, column name
function: callable to apply to specified `column` of table for `n_procs` in `path`
"""
tables = read_imb_out(path)
table = tables[n_procs] # separate lines here for more useful KeyError if missing:
col = table[column]
result = function(col)
return result | e2892b862f02ca11acaa180e24d390804441f0db | 3,653,168 |
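A hedged example call; the file name, process count, and column label below are assumptions about a typical IMB PingPong result file.

# Largest bandwidth reported in the 2-process table of a hypothetical output file.
max_bw = reduce("pingpong.out", n_procs=2, column="Mbytes/sec", function=max)
print(max_bw)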
from pathlib import Path
def output_file_path(status_id, phase):
    """Return the path of the stage log file for the given status id and phase."""
    BASE_DIR = Path(__file__).resolve().parent.parent
    return f"{BASE_DIR}/logs/stage/{status_id}-{phase}.txt" | 3bcbd80ad95389b9cf37fa66923bacb819ede710 | 3,653,169
def clean(some_string, uppercase=False):
"""
helper to clean up an input string
"""
if uppercase:
return some_string.strip().upper()
else:
return some_string.strip().lower() | cdc4587b762625e00c91189950bd45840861c93f | 3,653,170 |
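Two quick illustrative calls:

print(clean("  Hello World  "))                  # "hello world"
print(clean("  Hello World  ", uppercase=True))  # "HELLO WORLD"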
import re
def to_title(value):
    """Converts a string into titlecase."""
    t = re.sub(r"\s+", ".", value)
    t = "".join(filter(LETTER_SET.__contains__, t))  # keep only characters in LETTER_SET
    t = re.sub(r"([a-z])'\W([A-Z])", lambda m: m.group(0).lower(), t.title())
    return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t) | a88c9559abeab7426fa874e66c9e81a75138c0cd | 3,653,171
import yaml
def parse_config_or_kwargs(config_file, **kwargs):
"""parse_config_or_kwargs
:param config_file: Config file that has parameters, yaml format
:param **kwargs: Other alternative parameters or overwrites for config
"""
with open(config_file) as con_read:
yaml_config = yaml.load(con_read, Loader=yaml.FullLoader)
# values from config file are all possible params
arguments = dict(yaml_config, **kwargs)
# In case some arguments were not passed, replace with default ones
for key, value in DEFAULT_ARGS.items():
arguments.setdefault(key, value)
return arguments | f36946ed3a05f32057786ddf8e4194b935b4c129 | 3,653,172 |
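A hedged example; the config file name and the batch_size key are placeholders, and DEFAULT_ARGS is assumed to be defined at module level as in the snippet.

# Values in config.yaml are loaded first, then overridden by keyword arguments.
arguments = parse_config_or_kwargs("config.yaml", batch_size=64)
print(arguments["batch_size"])  # 64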
def sig_generacion(m):
    """Returns the matrix obtained by applying the game rules to every cell"""
    FILAS = len(m)
    COLUMNAS = len(m[0]) if len(m) else 0
    new_m = [] # result matrix
    for i in range(FILAS):
        l = [] # list used to build up one row
        for j in range(COLUMNAS):
            vec = num_vecinos(m, j, i)
            if vec < 2 or vec > 3:
                l.append(0) # dies
            elif vec == 3:
                l.append(1) # is born
            else:
                l.append(m[i][j]) # survives if it was alive
        new_m.append(l)
    return new_m | 09da2baede2eef22179218f267bc2325d72822ee | 3,653,173
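A quick check with the standard Game of Life "blinker" oscillator, assuming the num_vecinos helper (not shown) counts live Moore neighbours without wraparound; after one generation the vertical bar becomes horizontal.

blinker = [
    [0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
    [0, 0, 0, 0, 0],
]
for row in sig_generacion(blinker):
    print(row)  # middle row becomes [0, 1, 1, 1, 0]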
import hmac
import hashlib
import base64
def calc_file_signature(data: str, password: str = None) -> str:
    """
    Function that computes the signature hash of a file
    @param data: signed string
    @param password: signature password
    @return: signature hash
    """
    if password:
        digest = hmac.new(password.encode(), msg=data.encode(), digestmod=hashlib.sha256).digest()
        res_hash = base64.b64encode(digest).decode()
    else:
        hash = hashlib.sha256()
        hash.update(data.encode())
        res_hash = hash.hexdigest()
    return res_hash | 1422b8058a6eb7995558b3e0a7fa5f33f6cfd134 | 3,653,174
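Illustrative calls with and without a password (the values are arbitrary):

print(calc_file_signature("hello world"))            # SHA-256 hex digest
print(calc_file_signature("hello world", "s3cret"))  # base64-encoded HMAC-SHA256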
import numpy as np
def get_angle_from_coordinate(lat1, long1, lat2, long2):
"""https://stackoverflow.com/questions/3932502/calculate-angle-between-two-latitude-longitude-points"""
dLon = (long2 - long1)
y = np.sin(dLon) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(dLon)
brng = np.arctan2(y, x)
brng = np.degrees(brng)
brng = (brng + 360) % 360
brng = 360 - brng
return brng | a1ad7ffe1e63197cc5f70b2ce2f343078fd9b5e7 | 3,653,175 |
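A small sanity check; as written the trigonometry expects radians, so the illustrative coordinates (roughly Paris and London) are converted first.

import numpy as np

lat1, lon1 = np.radians([48.86, 2.35])   # ~Paris
lat2, lon2 = np.radians([51.51, -0.13])  # ~London
print(get_angle_from_coordinate(lat1, lon1, lat2, lon2))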
import json
import psycopg2
from flask import Response  # Response assumed to come from Flask
def get_predictions():
"""Return the list of predications as a json object"""
results = []
conn = None
columns = ("pid", "name", "location", "latitude", "longitude", "type", "modtime")
try:
conn = psycopg2.connect(db_conn)
# create a cursor
cur = conn.cursor()
cur.execute(
"SELECT pid, name, location, latitude, longitude, type, modtime FROM predictions"
)
for row in cur.fetchall():
results.append(dict(zip(columns, row)))
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print("Database connection closed.")
json_str = json.dumps(results, indent=2, sort_keys=True, default=json_serial)
return Response(json_str, mimetype="application/json") | 6afb9d703f4dbeff81d4369f9096d577dcafc993 | 3,653,176 |
def parse_packageset(packageset):
"""
Get "input" or "output" packages and their repositories from each PES event.
:return: set of Package tuples
"""
return {parse_package(p) for p in packageset.get('package', packageset.get('packages', []))} | ff8af3423c0fda993cfa88be16142520e29b999e | 3,653,177 |
import urllib.request
import zipfile
import pandas as pd
def download_nyiso_csv(month, data_type, zone = None):
"""Downloads a NYISO csv dataset for a specific data type, month, and zone.
Args:
month: string denoting the first day of the month to be downloaded in
yyyymmdd format
data_type: string denoting the type of NYISO data to retrieve,
examples include "damlbmp" which stands for "day ahead
market location based marginal price" or "outSched" for
"outage schedule"
zone: string denoting the NYISO geographic zone of the data to be
requested. This is required if data_type == "damlbmp"
Returns:
df: pandas dataframe of the NYISO csv file for the entire month requested
"""
# Build the necessary url to access the NYISO data
url = build_nyiso_url(month, data_type, zone)
# Download the zip folder to a temporary file location,
# then open the zip folder into the object zf
zip_folder_path, headers = urllib.request.urlretrieve(url)
zf = zipfile.ZipFile(zip_folder_path)
#TODO: increase efficiency by only reading the files from NYISO that contain the desired days
# For each file contained in zf, read the csv and concatenate it with
# the other csvs for this month to create a month-long csv
df = pd.DataFrame()
for file in zf.filelist:
temp_df = pd.read_csv(zf.open(file.filename))
df = pd.concat([df,temp_df])
return df | cbe99ea482a26d5e59bf553ff143fe9895390f4e | 3,653,178 |
def pretty_print_large_number(number):
"""Given a large number, it returns a string of the sort: '10.5 Thousand' or '12.3 Billion'. """
s = str(number).ljust(12)
if number > 0 and number < 1e3:
pass
elif number >= 1e3 and number < 1e6:
s = s + " (%3.1f Thousand)" % (number * 1.0 / 1e3)
elif number >= 1e6 and number < 1e9:
s = s + " (%3.1f Million)" % (number * 1.0 / 1e6)
elif number >= 1e9 and number < 1e12:
s = s + " (%3.1f Billion)" % (number * 1.0 / 1e9)
elif number >= 1e12 and number < 1e15:
s = s + " (%3.1f Trillion)" % (number * 1.0 / 1e12)
return s | 6762f34744da360b36d4a4fc0659fcf7d3fb0465 | 3,653,179 |
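Two illustrative calls:

print(pretty_print_large_number(1234))       # e.g. "1234         (1.2 Thousand)"
print(pretty_print_large_number(5_600_000))  # e.g. "5600000      (5.6 Million)"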
def find_aligning_transformation(skeleton, euler_frames_a, euler_frames_b):
"""
performs alignment of the point clouds based on the poses at the end of
euler_frames_a and the start of euler_frames_b
Returns the rotation around y axis in radians, x offset and z offset
"""
point_cloud_a = convert_euler_frame_to_cartesian_frame(skeleton, euler_frames_a[-1])
point_cloud_b = convert_euler_frame_to_cartesian_frame(skeleton, euler_frames_b[0])
weights = skeleton.get_joint_weights()
theta, offset_x, offset_z = _align_point_clouds_2D(point_cloud_a, point_cloud_b, weights)
return theta, offset_x, offset_z | 1d323fcb0af73aacbc57e5cf57f0b9875375b98d | 3,653,180 |
def find_all_visit(tx):
"""
Method that queries the database to find all VISIT relationships
:param tx: session
:return: nodes of Person , Location
"""
query = (
"""
MATCH (p:Person)-[r:VISIT]->(l:Location)
RETURN p , ID(p) , r , r.start_hour , r.end_hour , r.date , l , ID(l)
"""
)
result = tx.run(query).data()
return result | 851d790b16f9db285a6d09b5cabc4e12ad364484 | 3,653,181 |
import numpy as np
def read_vectors(filename):
"""Reads measurement vectors from a space or comma delimited file.
:param filename: path of the file
:type filename: str
:return: array of vectors
:rtype: numpy.ndarray
:raises: ValueError
"""
vectors = []
data = read_csv(filename)
expected_size = len(data[0])
if expected_size % 3 != 0:
raise ValueError('Column size of vector data must be a multiple of 3')
for row in data:
if len(row) == expected_size:
vectors.append(row)
else:
raise ValueError('Inconsistent column size of vector data')
result = np.array(vectors, np.float32)
if not np.isfinite(result).all():
raise ValueError('Non-finite value present in vector data')
return result | a772c4185d55543e0c641271a5af699f91e81b95 | 3,653,182 |
from whoosh import scoring  # assumed source of `scoring`; BM25F matches Whoosh's API
def get_scoring_algorithm():
""" Base scoring algorithm for index and search """
return scoring.BM25F() | 78fe59d02071ce000262208f4c228566e0747857 | 3,653,183 |
from sklearn.pipeline import Pipeline
def _make_augmentation_pipeline(augmentation_list):
    """Builds an sklearn pipeline of augmentations from a tuple of strings.
Parameters
----------
augmentation_list: list of strings, A list of strings that determine the
augmentations to apply, and in which order to apply them (the first
string will be applied first). Possible augmentation strings are
['leadlag', 'ir', 'addtime', 'cumsum', 'basepoint']
Returns
-------
sklearn.Pipeline
The transforms, in order, as an sklearn pipeline.
Examples
--------
    augmentations = ('leadlag', 'ir', 'addtime')
_make_augmentation_pipeline(augmentations)
# Will return
Pipeline([
('leadlag', LeadLag()),
('ir', InvisibilityReset()),
('addtime', AddTime())
])
"""
# Dictionary of augmentations
AUGMENTATIONS = {
"leadlag": _LeadLag(),
"ir": _InvisibilityReset(),
"addtime": _AddTime(),
"cumsum": _CumulativeSum(),
"basepoint": _BasePoint(),
}
# Assertions, check we have a tuple/list
if augmentation_list is not None:
if isinstance(augmentation_list, str):
augmentation_list = (augmentation_list,)
assert all(
[x in list(AUGMENTATIONS.keys()) for x in augmentation_list]
), "augmentation_list must only contain string elements from {}. Given: {}.".format(
list(AUGMENTATIONS.keys()), augmentation_list
)
# Setup pipeline
if augmentation_list is not None:
pipeline = Pipeline(
[(tfm_str, AUGMENTATIONS[tfm_str]) for tfm_str in augmentation_list]
)
else:
pipeline = None
return pipeline | e53f4d198e6781c5eaf6ce6c0a453801f4ceb0d7 | 3,653,184 |
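An illustrative call showing how the string list maps to pipeline steps; the private transformer classes are assumed to be defined in the same module as above.

pipe = _make_augmentation_pipeline(["leadlag", "addtime"])
print([name for name, _ in pipe.steps])  # ['leadlag', 'addtime']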
def ctg_path(event_name,sc_reform,path_cache,var_map,model,prev_events):
"""
Recursively computes the controllable and contigent events that influence
the schedule of a given event.
"""
if event_name in path_cache:#If solution has been already computed, use it
return path_cache[event_name]
else:
if event_name in sc_reform: #End point of uncontrollable duration
if event_name in prev_events:
                raise RuntimeError('Contingent duration loop detected!')
else:
prev_events.add(event_name)
path_ref = ctg_path(sc_reform[event_name]['ref'],sc_reform,path_cache,var_map,model,prev_events)
path = [event_name]+path_ref
else: #Controllable event
if not event_name in var_map:#1-to-1 mapping between events and variables
var_map[event_name]=model.addVar(vtype=GRB.CONTINUOUS,lb=0.0)
model.update()
path = [event_name]
path_cache[event_name]=path #Caches solution for future use
return path | 5de8eb6fe3be991da3f4af37b6e81990aa8cb34f | 3,653,185 |
from unittest import mock
def _setup_mock_socket_file(mock_socket_create_conn, resp):
"""Sets up a mock socket file from the mock connection.
Args:
mock_socket_create_conn: The mock method for creating a socket connection.
resp: iterable, the side effect of the `readline` function of the mock
socket file.
Returns:
The mock socket file that will be injected into the code.
"""
fake_file = mock.Mock()
fake_file.readline.side_effect = resp
fake_conn = mock.Mock()
fake_conn.makefile.return_value = fake_file
mock_socket_create_conn.return_value = fake_conn
return fake_file | 5b70c73bb948211919065298a01a48d927e64482 | 3,653,186 |
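A hedged usage sketch inside a test: socket.create_connection is patched and fed one canned response line; the address and payload are placeholders.

import socket
from unittest import mock

with mock.patch("socket.create_connection") as mock_create_conn:
    _setup_mock_socket_file(mock_create_conn, [b'{"status": true}\n'])
    conn = socket.create_connection(("127.0.0.1", 4010))  # returns the mocked connection
    print(conn.makefile().readline())  # b'{"status": true}\n'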
def get_defense_type(action: int, game_config) -> int:
"""
Utility method for getting the defense type of action-id
:param action: action-id
:param game_config: game configuration
:return: action type
"""
defense_type = action % (game_config.num_attack_types+1) # +1 for detection
return defense_type | 68a05cf15bd833fb24aa448b8be2d08c1a949d12 | 3,653,187 |
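A quick illustrative call; the _GameConfig stand-in below only mimics the num_attack_types attribute the function reads.

class _GameConfig:
    num_attack_types = 4  # assumed value

print(get_defense_type(action=7, game_config=_GameConfig()))  # 7 % 5 == 2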
def color_box(
colors, border="#000000ff", border2=None, height=32, width=32,
border_size=1, check_size=4, max_colors=5, alpha=False, border_map=0xF
):
"""Color box."""
return colorbox.color_box(
colors, border, border2, height, width,
border_size, check_size, max_colors, alpha, border_map
) | 6f8a98743c11985529afd5ad0c04a64c1301f85a | 3,653,188 |
def get_performance_of_lstm_classifier(X, y, n_epochs, verbose=1, final_score=False):
"""
Reshapes feature matrix X, applies LSTM and returns the performance of the neural network
:param X: List of non-reshaped/original feature matrices (one per logfile)
:param y: labels
:param n_epochs: Number of epochs the model should be trained
:param verbose: verbose mode of keras_model.fit
:param final_score: If final score should be printed, then don't use a validation set
    :return rocs, recalls, specificities, precisions, f1s
"""
X_list, y_list = _get_splitted_up_feature_matrix_and_labels(X, y)
globals()["_maxlen"] = max(len(fm) for fm in X_list)
if final_score:
X_train_list, y_train_list, X_test_list, y_test_list, X_val, y_val = \
_split_into_train_test_val_data(X_list, y_list, size_test_set=3, size_val_set=0)
X_lstm, y_lstm = _get_reshaped_matrices(X_train_list, y_train_list)
model = _generate_lstm_classifier((X_lstm.shape[1], X_lstm.shape[2]))
trained_model = _fit_lstm(model, X_lstm, y_lstm, n_epochs, verbose)
else:
X_train_list, y_train_list, X_test_list, y_test_list, X_val, y_val = \
_split_into_train_test_val_data(X_list, y_list, size_test_set=3, size_val_set=2)
X_lstm, y_lstm = _get_reshaped_matrices(X_train_list, y_train_list)
X_val, y_val = _get_reshaped_matrices(X_val, y_val)
model = _generate_lstm_classifier((X_lstm.shape[1], X_lstm.shape[2]))
trained_model = _fit_lstm(model, X_lstm, y_lstm, n_epochs, verbose, val_set=(X_val, y_val))
print('Performance training set: ')
_calculate_performance(X_lstm, y_lstm, trained_model)
print('Performance test set: ')
    rocs, recalls, specificities, precisions, f1s = _calculate_performance(X_test_list, y_test_list, trained_model)
    return rocs, recalls, specificities, precisions, f1s | 13a494f9aca643ff23ce6954471ef007df96f9e8 | 3,653,189
def worker(data):
"""Thread function."""
width, column = data
queen = Queen(width)
queen.run(column)
return queen.solutions | ef0f3c6410885ac2e20b28f009085d92b6fca22b | 3,653,190 |
def eitem(self, key, value):
"""Translate included eitems."""
_eitem = self.get("_eitem", {})
urls = []
for v in force_list(value):
urls.append(
{
"description": "E-book by EbookCentral",
"value": clean_val("u", v, str),
}
)
_eitem.update({"urls": urls})
return _eitem | d9a5d3f9dc29baa15d9df6b4fe32c7f20151316c | 3,653,191 |
import numpy as np
import matplotlib.pyplot as plt
def annotate_group(groups, ax=None, label=None, labeloffset=30):
"""Annotates the categories with their parent group and add x-axis label"""
def annotate(ax, name, left, right, y, pad):
"""Draw the group annotation"""
arrow = ax.annotate(name, xy=(left, y), xycoords="data",
xytext=(right, y - pad), textcoords="data",
annotation_clip=False, verticalalignment="top",
horizontalalignment="center", linespacing=2.0,
arrowprops={'arrowstyle': "-", 'shrinkA': 0, 'shrinkB': 0,
'connectionstyle': "angle,angleB=90,angleA=0,rad=5"}
)
return arrow
if ax is None:
ax = plt.gca()
level = 0
for level in range(len(groups)):
grp = groups[level]
for name, coord in list(grp.items()):
ymin = ax.get_ylim()[0] - np.ptp(ax.get_ylim()) * 0.12 - np.ptp(ax.get_ylim()) * 0.05 * (level)
ypad = 0.01 * np.ptp(ax.get_ylim())
xcenter = np.mean(coord)
annotate(ax, name, coord[0], xcenter, ymin, ypad)
annotate(ax, name, coord[1], xcenter, ymin, ypad)
if label is not None:
# Define xlabel and position it according to the number of group levels
ax.annotate(label,
xy=(0.5, 0), xycoords="axes fraction",
xytext=(0, -labeloffset - (level + 1) * 15), textcoords="offset points",
verticalalignment="top", horizontalalignment="center")
return | 33f57ccf96b4b0907ea8c2ea161e19b0e6e536d2 | 3,653,192 |
import numpy as np
import numpy.ma as ma
import scipy.interpolate as scint
def background_schwarzfischer(fluor_chan, bin_chan, div_horiz=7, div_vert=5, mem_lim=None, memmap_dir=None):
"""Perform background correction according to Schwarzfischer et al.
Arguments:
fluor_chan -- (frames x height x width) numpy array; the fluorescence channel to be corrected
bin_chan -- boolean numpy array of same shape as `fluor_chan`; segmentation map (background=False, cell=True)
div_horiz -- int; number of (non-overlapping) tiles in horizontal direction
div_vert -- int; number of (non-overlapping) tiles in vertical direction
mem_lim -- max number of bytes for temporary data before switching to memmap;
if in (0,1], max percentage of free memory to be used;
if non-positive, always use memory; if None, decide automatically
memmap_dir -- str; directory for creating memmap
Returns:
Background-corrected fluorescence channel as numpy array (dtype single) of same shape as `fluor_chan`
"""
n_frames, height, width = fluor_chan.shape
# Allocate arrays
if np.can_cast(fluor_chan, np.float16):
dtype_interp = np.float16
elif np.can_cast(fluor_chan, np.float32):
dtype_interp = np.float32
else:
dtype_interp = np.float64
dtype_interp = np.dtype(dtype_interp)
bg_mean = np.empty((n_frames, 1, 1), dtype=dtype_interp)
# Create large arrays in memory or as memmap
if mem_lim is None or mem_lim > 0:
bg_interp, arr_temp, iter_temp = _get_arr(fluor_chan.shape, dtype_interp, mem_lim, memmap_dir)
else:
bg_interp, arr_temp, iter_temp = np.empty(shape=fluor_chan.shape, dtype=dtype_interp)
# Construct tiles for background interpolation
# Each pair of neighboring tiles is overlapped by a third tile, resulting in a total tile number
# of `2 * div_i - 1` tiles for each direction `i` in {`horiz`, `vert`}.
# Due to integer rounding, the sizes may slightly vary between tiles.
tiles_vert = _make_tiles(height, div_vert)
tiles_horiz = _make_tiles(width, div_horiz)
supp = np.empty((tiles_horiz.size, tiles_vert.size))
# Interpolate background as cubic spline with each tile’s median as support point at the tile center
for t in range(n_frames):
print(f"Interpolating background in frame {t:3d} …")
masked_frame = ma.masked_array(fluor_chan[t, ...], mask=bin_chan[t, ...])
for iy, (y, sy) in enumerate(tiles_vert):
for ix, (x, sx) in enumerate(tiles_horiz):
supp[ix, iy] = ma.median(masked_frame[sy, sx])
bg_spline = scint.RectBivariateSpline(x=tiles_horiz['center'], y=tiles_vert['center'], z=supp)
patch = bg_spline(x=range(width), y=range(height)).T
bg_interp[t, ...] = patch
bg_mean[t, ...] = patch.mean()
# Correct for background using Schwarzfischer’s formula:
# corrected_image = (raw_image - interpolated_background) / gain
# wherein, in opposite to Schwarzfischer, the gain is approximated as
# median(interpolated_background / mean_background)
# This “simple” calculation may consume more memory than available.
# Therefore, a less readable but more memory-efficient command flow is used.
for st, sl in iter_temp:
np.divide(bg_interp[:, sl, :], bg_mean, out=arr_temp[:, :st, :])
np.subtract(fluor_chan[:, sl, :], bg_interp[:, sl, :], out=bg_interp[:, sl, :])
np.divide(bg_interp[:, sl, :], np.median(arr_temp[:, :st, :], axis=0, keepdims=True), out=bg_interp[:, sl, :])
# `bg_interp` now holds the corrected image
return bg_interp | 512d1721dc14a4f7a09843603b8700360f97fd37 | 3,653,193 |
from typing import Optional
import os
def _get_eula_date(extract_path: str) -> Optional[str]:
"""Get any EULA accept date in the install script, if any.
:param extract_path: The path to the extracted archive.
:return: The EULA date, if any.
"""
install_script = os.path.join(extract_path, "houdini.install")
if not os.path.exists(install_script):
return None
with open(install_script) as handle:
for line in handle:
if line.startswith("LICENSE_DATE"):
return line.split("=")[1].strip()
return None | 044ff76dadf7c4bdcd74b1435e02d2ec29c0a877 | 3,653,194 |
import os.path as osp
def get_output_data_path(extension, suffix=None):
"""Return full path for data file with extension, generated by a test script"""
name = get_default_test_name(suffix)
return osp.join(TST_PATH[0], f"{name}.{extension}") | ce5437c23061df490a31ac11f26f72e5935f0fd7 | 3,653,195 |
def _plot(self, **kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot `close` and overlay it with the heatmap of `labels`."""
if self.wrapper.ndim > 1:
raise TypeError("Select a column first. Use indexing.")
return self.close.rename('close').vbt.overlay_with_heatmap(self.labels.rename('labels'), **kwargs) | eaa6df4f29db8d1ab6dc0ffd1b9ecf8804f6aac9 | 3,653,196 |
import collections
import os
def _set_global_vars(metadata):
"""Identify files used multiple times in metadata and replace with global variables
"""
fnames = collections.defaultdict(list)
for sample in metadata.keys():
for k, v in metadata[sample].items():
            print(k, v)
if os.path.isfile(v):
v = _expand_file(v)
metadata[sample][k] = v
fnames[v].append(k)
loc_counts = collections.defaultdict(int)
global_vars = {}
global_var_sub = {}
for fname, locs in fnames.items():
if len(locs) > 1:
loc_counts[locs[0]] += 1
name = "%s%s" % (locs[0], loc_counts[locs[0]])
global_var_sub[fname] = name
global_vars[name] = fname
for sample in metadata.keys():
for k, v in metadata[sample].items():
if v in global_var_sub:
metadata[sample][k] = global_var_sub[v]
return metadata, global_vars | 23caefdf0f999a9b60649c85278edb8498b771b3 | 3,653,197 |
def user_get(context, id):
"""Get user by id."""
return IMPL.user_get(context, id) | b3108b4627751d5dfef1b42b8ccad0295b33cc99 | 3,653,198 |
import numpy as np
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array('2000-01-01', dtype='datetime64[D]')
"""
while isinstance(x, (list, tuple)):
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x | f6f55ff17ba29aab5946c682b825c72eb70324dd | 3,653,199 |