content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
from typing import Optional

def demander_nombre(mini: Optional[int] = None, maxi: Optional[int] = None) -> int:
    """
    Ask the user for a number, between mini and maxi if given.
    :param mini: the minimum value
    :param maxi: the maximum value
    :return: the number entered by the user
    """
    message = 'Please enter a number:'
    if mini is not None and maxi is not None:
        message = f'Please enter a number between {mini} and {maxi}:'
    elif mini is not None:
        message = f'Please enter a number greater than {mini}:'
    elif maxi is not None:
        message = f'Please enter a number smaller than {maxi}:'
    while True:
        nombre = input(message + '\n> ')
        # Make sure the user actually entered a number
        try:
            # Convert to a base-10 integer
            nombre = int(nombre)
        except ValueError:
            print('Invalid value.')
            continue
        # The value is now an integer; check it against the min/max bounds
        if mini is not None and nombre < mini:
            print(f'The number entered is too small. It must be at least {mini}')
        elif maxi is not None and nombre > maxi:
            print(f'The number entered is too big. It must be at most {maxi}')
        else:
            return nombre | ac5b949af1ad4ede2f956c7da5d4c2136dc47b15 | 2,542 |
from datetime import datetime
def temporal_filter(record_date_time, time_or_period, op):
"""
Helper function to perform temporal filters on feature set
:param record_date_time: datetime field value of a feature
:type record_date_time: :class:`datetime.datetime`
:param time_or_period: the time instant or time span to use as a filter
:type time_or_period: :class:`datetime.datetime` or a tuple of two
datetimes or a tuple of one datetime and one
:class:`datetime.timedelta`
:param op: the comparison operation
:type op: str
:return: a comparison expression result
:rtype: bool
"""
d = datetime.strptime(record_date_time, "%Y-%m-%dT%H:%M:%SZ")
result = None
# perform before and after operations
if op in ['BEFORE', 'AFTER']:
query_date_time = datetime.strptime(
time_or_period.value, "%Y-%m-%dT%H:%M:%SZ")
if op == 'BEFORE':
return d <= query_date_time
elif op == 'AFTER':
return d >= query_date_time
# perform during operation
elif 'DURING' in op:
low, high = time_or_period
low = datetime.strptime(low.value, "%Y-%m-%dT%H:%M:%SZ")
high = datetime.strptime(high.value, "%Y-%m-%dT%H:%M:%SZ")
        result = low <= d <= high
if 'BEFORE' in op:
result = d <= high
elif 'AFTER' in op:
result = d >= low
return result | 9f76d6a6eb96da9359c4bbb80f6cfb1dfdcb4159 | 2,544 |
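# A minimal usage sketch of temporal_filter. The `.value` accesses above imply
# the filter operands are literal objects wrapping an ISO-8601 string; the
# namedtuple below is a hypothetical stand-in for that wrapper.
from collections import namedtuple
Literal = namedtuple("Literal", "value")
record = "2020-06-15T12:00:00Z"
assert temporal_filter(record, Literal("2020-12-31T23:59:59Z"), "BEFORE")
assert temporal_filter(record, (Literal("2020-01-01T00:00:00Z"),
                                Literal("2020-12-31T23:59:59Z")), "DURING")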
import numpy as np

def convert_rgb2gray(image, convert_dic):
"""convert rgb image to grayscale
Parameters
----------
image: array
RGB image. Channel order should be RGB.
convert_dic: dict
dictionary key is str(rgb list), value is grayscale value
Returns
-------
image_gray: array
Grayscale image
"""
image_r = image[:, :, 0]
image_g = image[:, :, 1]
image_b = image[:, :, 2]
im_shape = image_r.shape
image_gray = np.zeros(im_shape)
for i in range(im_shape[0]):
for j in range(im_shape[1]):
image_gray[i, j] = convert_dic[str([image_r[i, j], image_g[i, j], image_b[i, j]])]
return image_gray | 0132719ef88d139d1d3da4e52312faef25443efd | 2,548 |
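# A small usage sketch for convert_rgb2gray: the lookup keys must be built
# exactly as the function builds them (str of a list of array scalars), so the
# dictionary below is derived from the image itself; the mean-based mapping is
# just an illustrative choice.
img = np.array([[[255, 0, 0], [0, 255, 0]],
                [[0, 0, 255], [255, 255, 255]]], dtype=np.uint8)
convert_dic = {str([px[0], px[1], px[2]]): int(px.mean())
               for px in img.reshape(-1, 3)}
gray = convert_rgb2gray(img, convert_dic)  # 2x2 array of grayscale values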
def get_external_storage_path():
"""Returns the external storage path for the current app."""
return _external_storage_path | a33704c5b3267a7211c94b5a3a8d8d73b3889d68 | 2,549 |
def blur(old_img):
"""
:param old_img: a original image
:return: a blurred image
"""
blur_img = SimpleImage.blank(old_img.width, old_img.height)
for x in range(old_img.width):
for y in range(old_img.height):
if x == 0 and y == 0: # Upper left corner
old_pixel_00 = old_img.get_pixel(x, y) # Reference point
old_pixel_s = old_img.get_pixel(x, y + 1) # South
old_pixel_e = old_img.get_pixel(x + 1, y) # East
old_pixel_se = old_img.get_pixel(x + 1, y + 1) # Southeast
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_00.red + old_pixel_s.red + old_pixel_e.red + old_pixel_se.red) // 4
blur_pixel.green = (old_pixel_00.green + old_pixel_s.green + old_pixel_e.green + old_pixel_se.green) \
// 4
blur_pixel.blue = (old_pixel_00.blue + old_pixel_s.blue + old_pixel_e.blue + old_pixel_se.blue) // 4
elif x == 0 and y == old_img.height - 1: # Bottom left
old_pixel_0h = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1) # North
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1) # Northeast
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_0h.red + old_pixel_n.red + old_pixel_e.red + old_pixel_ne.red) // 4
blur_pixel.green = (old_pixel_0h.green + old_pixel_n.green + old_pixel_e.green + old_pixel_ne.green) \
// 4
blur_pixel.blue = (old_pixel_0h.blue + old_pixel_n.blue + old_pixel_e.blue + old_pixel_ne.blue) // 4
elif x == old_img.width - 1 and y == 0: # Upper right corner
old_pixel_w0 = old_img.get_pixel(x, y)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_w = old_img.get_pixel(x - 1, y) # West
old_pixel_sw = old_img.get_pixel(x - 1, y + 1) # Southwest
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_w0.red + old_pixel_s.red + old_pixel_w.red + old_pixel_sw.red) // 4
blur_pixel.green = (old_pixel_w0.green + old_pixel_s.green + old_pixel_w.green + old_pixel_sw.green) \
// 4
blur_pixel.blue = (old_pixel_w0.blue + old_pixel_s.blue + old_pixel_w.blue + old_pixel_sw.blue) // 4
elif x == old_img.width - 1 and y == old_img.height - 1: # Bottom right corner
old_pixel_wh = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1) # Northwest
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_wh.red + old_pixel_n.red + old_pixel_w.red + old_pixel_nw.red) // 4
blur_pixel.green = (old_pixel_wh.green + old_pixel_n.green + old_pixel_w.green + old_pixel_nw.green) \
// 4
blur_pixel.blue = (old_pixel_wh.blue + old_pixel_n.blue + old_pixel_w.blue + old_pixel_nw.blue) // 4
elif x == 0 and y != 0 and y != old_img.height - 1: # Left side except for head and tail
old_pixel_0y = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_0y.red + old_pixel_n.red + old_pixel_s.red + old_pixel_ne.red +
old_pixel_e.red + old_pixel_se.red) // 6
blur_pixel.green = (old_pixel_0y.green + old_pixel_n.green + old_pixel_s.green + old_pixel_ne.green +
old_pixel_e.green + old_pixel_se.green) // 6
blur_pixel.blue = (old_pixel_0y.blue + old_pixel_n.blue + old_pixel_s.blue + old_pixel_ne.blue +
old_pixel_e.blue + old_pixel_se.blue) // 6
elif y == 0 and x != 0 and x != old_img.width - 1: # Top except for head and tail
old_pixel_x0 = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_x0.red + old_pixel_w.red + old_pixel_s.red + old_pixel_sw.red +
old_pixel_e.red + old_pixel_se.red) // 6
blur_pixel.green = (old_pixel_x0.green + old_pixel_w.green + old_pixel_s.green + old_pixel_sw.green +
old_pixel_e.green + old_pixel_se.green) // 6
blur_pixel.blue = (old_pixel_x0.blue + old_pixel_w.blue + old_pixel_s.blue + old_pixel_sw.blue +
old_pixel_e.blue + old_pixel_se.blue) // 6
elif x == old_img.width - 1 and y != 0 and y != old_img.height - 1: # right side except for head and tail
old_pixel_wy = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_wy.red + old_pixel_n.red + old_pixel_s.red + old_pixel_nw.red +
old_pixel_w.red + old_pixel_sw.red) // 6
blur_pixel.green = (old_pixel_wy.green + old_pixel_n.green + old_pixel_s.green + old_pixel_nw.green +
old_pixel_w.green + old_pixel_sw.green) // 6
blur_pixel.blue = (old_pixel_wy.blue + old_pixel_n.blue + old_pixel_s.blue + old_pixel_nw.blue +
old_pixel_w.blue + old_pixel_sw.blue) // 6
elif y == old_img.height - 1 and x != 0 and x != old_img.width - 1: # Bottom except for head and tail
old_pixel_xh = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_xh.red + old_pixel_w.red + old_pixel_nw.red + old_pixel_n.red +
old_pixel_e.red + old_pixel_ne.red) // 6
blur_pixel.green = (old_pixel_xh.green + old_pixel_w.green + old_pixel_nw.green + old_pixel_n.green +
old_pixel_e.green + old_pixel_ne.green) // 6
blur_pixel.blue = (old_pixel_xh.blue + old_pixel_w.blue + old_pixel_nw.blue + old_pixel_n.blue +
old_pixel_e.blue + old_pixel_ne.blue) // 6
else: # middle parts having 8 neighbors
old_pixel_xy = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_xy.red + old_pixel_w.red + old_pixel_nw.red + old_pixel_n.red +
old_pixel_e.red + old_pixel_ne.red + old_pixel_s.red + old_pixel_sw.red +
old_pixel_se.red) // 9
blur_pixel.green = (old_pixel_xy.green + old_pixel_w.green + old_pixel_nw.green + old_pixel_n.green +
old_pixel_e.green + old_pixel_ne.green + old_pixel_s.green + old_pixel_sw.green +
old_pixel_se.green) // 9
blur_pixel.blue = (old_pixel_xy.blue + old_pixel_w.blue + old_pixel_nw.blue + old_pixel_n.blue +
old_pixel_e.blue + old_pixel_ne.blue + old_pixel_s.blue + old_pixel_sw.blue +
old_pixel_se.blue) // 9
return blur_img | 771a6e906ea8b485d4166de311c17a441f469158 | 2,550 |
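# The nine-way branching in blur() can be collapsed into one loop over the
# in-bounds 3x3 neighborhood. This sketch is behavior-equivalent (corners
# average 4 pixels, edges 6, interior 9, self included) and assumes the same
# SimpleImage API used above.
def blur_compact(old_img):
    blur_img = SimpleImage.blank(old_img.width, old_img.height)
    for x in range(old_img.width):
        for y in range(old_img.height):
            totals = [0, 0, 0]  # running red/green/blue sums
            count = 0
            for xx in range(max(x - 1, 0), min(x + 2, old_img.width)):
                for yy in range(max(y - 1, 0), min(y + 2, old_img.height)):
                    p = old_img.get_pixel(xx, yy)
                    totals[0] += p.red
                    totals[1] += p.green
                    totals[2] += p.blue
                    count += 1
            pixel = blur_img.get_pixel(x, y)
            pixel.red, pixel.green, pixel.blue = (t // count for t in totals)
    return blur_img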
import re
import pandas as pd

def generate_table_row(log_file, ancestry, official_only, code):
    """ Takes an imported log and ancestry and converts it into a properly formatted pandas table.
    Keyword arguments:
    log_file -- output from import_log()
    ancestry -- a single ancestry code
    official_only -- a boolean indicating if all fields should be imported
    into the table, or only the official ones.
    code -- the phenotype code, used in error messages and the row index.
    Returns:
    a single-row pandas DataFrame mapping fields to values
    """
# verify that ancestry is correct
matches = [l for l in log_file if re.search('Searching for ancestry: ' + \
ancestry, l)]
if len(matches) == 0:
raise ValueError('ALERT: Incorrect ancestry passed in for code ' + code +
'. Passed in value: ' + ancestry)
dict_of_vals = {'ancestry': ancestry, 'phenotype_code': code}
nrow_orig = num_cols = None
for line in log_file:
nrow_orig = _parse_single_term(nrow_orig, 'Original number of rows: ([0-9]*)',
line, int)
num_cols = _parse_single_term(num_cols, 'Found ([0-9]*) ancestry specific columns:',
line, int)
dict_of_vals.update({'original_nrow': nrow_orig,
'ancestry_specific_ncols': num_cols})
if dict_of_vals['ancestry_specific_ncols'] != 0:
tf_boundary = [idx for idx, l in enumerate(log_file) if re.search('Now running LDSC in (vanilla|stratified) mode.',l)]
log_file_official = log_file[(tf_boundary[0]+1):(len(log_file)+1)]
log_file_unofficial = log_file[0:tf_boundary[0]]
if not official_only:
unofficial_dict = _parse_unofficial_log(log_file_unofficial)
dict_of_vals.update(unofficial_dict)
official_dict, error_str = _parse_official_log(log_file_official)
else:
if not official_only:
unofficial_dict = _parse_unofficial_log(log_file)
dict_of_vals.update(unofficial_dict)
official_dict, _ = _parse_official_log(log_file)
error_str = 'No ' + ancestry + '-specific columns found.'
dict_of_vals.update(official_dict)
if error_str is not None:
dict_of_vals.update({'missing_data_note': error_str})
return pd.DataFrame(dict_of_vals, index=[ancestry + ':' + code]) | c72bdef2aafbc15c54b80337c80f03ae8d8f1e00 | 2,551 |
from math import ceil

def read_preflib_file(filename, setsize=1, relative_setsize=None, use_weights=False):
"""Reads a single preflib file (soi, toi, soc or toc).
Parameters:
filename: str
Name of the preflib file.
setsize: int
Number of top-ranked candidates that voters approve.
In case of ties, more than `setsize` candidates are approved.
        Parameter `setsize` is ignored if `relative_setsize` is used.
relative_setsize: float in (0, 1]
Indicates which proportion of candidates of the ranking
are approved (rounded up). In case of ties, more
candidates are approved.
        E.g., if a voter ranks 10 candidates and `relative_setsize` is 0.75,
        then the approval set contains the top 8 candidates.
use_weights: bool
If False, treat vote count in preflib file as the number of duplicate ballots,
i.e., the number of voters that have this approval set.
If True, treat vote count as weight and use this weight in class Voter.
Returns:
profile: abcvoting.preferences.Profile
Preference profile extracted from preflib file,
including names of candidates
"""
if setsize <= 0:
raise ValueError("Parameter setsize must be > 0")
if relative_setsize and (relative_setsize <= 0.0 or relative_setsize > 1.0):
raise ValueError("Parameter relative_setsize not in interval (0, 1]")
with open(filename, "r") as f:
line = f.readline()
num_cand = int(line.strip())
candidate_map = {}
for _ in range(num_cand):
parts = f.readline().strip().split(",")
candidate_map[int(parts[0].strip())] = ",".join(parts[1:]).strip()
parts = f.readline().split(",")
try:
voter_count, _, unique_orders = [int(p.strip()) for p in parts]
except ValueError:
raise PreflibException(
f"Number of voters ill specified ({str(parts)}), should be triple of integers"
)
approval_sets = []
lines = [line.strip() for line in f.readlines() if line.strip()]
if len(lines) != unique_orders:
raise PreflibException(
f"Expected {unique_orders} lines that specify voters in the input, "
f"encountered {len(lines)}"
)
for line in lines:
parts = line.split(",")
if len(parts) < 1:
continue
try:
count = int(parts[0])
except ValueError:
raise PreflibException(f"Each ranking must start with count/weight ({line})")
ranking = parts[1:] # ranking starts after count
if len(ranking) == 0:
raise PreflibException("Empty ranking: " + str(line))
if relative_setsize:
num_appr = int(ceil(len(ranking) * relative_setsize))
else:
num_appr = setsize
approval_set = _approval_set_from_preflib_datastructures(num_appr, ranking, candidate_map)
approval_sets.append((count, approval_set))
# normalize candidates to 0, 1, 2, ...
cand_names = []
normalize_map = {}
for cand in candidate_map.keys():
cand_names.append(candidate_map[cand])
normalize_map[cand] = len(cand_names) - 1
profile = Profile(num_cand, cand_names=cand_names)
for count, approval_set in approval_sets:
normalized_approval_set = []
for cand in approval_set:
normalized_approval_set.append(normalize_map[cand])
if use_weights:
profile.add_voter(Voter(normalized_approval_set, weight=count))
else:
profile.add_voters([normalized_approval_set] * count)
if use_weights:
if len(profile) != unique_orders:
raise PreflibException("Number of voters wrongly specified in preflib file.")
else:
if len(profile) != voter_count:
raise PreflibException("Number of voters wrongly specified in preflib file.")
return profile | 6feec6e786e47cdc11021021ffa91a1f96597567 | 2,552 |
def get_row(client, instance, file_=None):
"""Get one row of a family table.
Args:
client (obj):
creopyson Client.
instance (str):
Instance name.
`file_` (str, optional):
File name. Defaults is currently active model.
Returns:
(dict):
colid (str):
Column ID.
value (depends on datatype):
Cell value.
datatype (str):
Data type.
coltype (str):
Column Type; a string corresponding to the Creo column type.
"""
data = {"instance": instance}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
return client._creoson_post("familytable", "get_row", data, "columns") | c8e8c90a81d553d06ce9f78eb1be582e5b034ac6 | 2,553 |
def hospitalization_to_removed(clip_low=2, clip_high=32.6, mean=8.6, std=6.7):
"""
Returns the time for someone to either get removed after being
hospitalized in days within range(clip_low, clip_high),
of a truncated_norm(mean, std).
"""
return sample_truncated_norm(clip_low, clip_high, mean, std) | e1da5283e32b5734927436af72fdbd002c0844b1 | 2,554 |
def test_inheritance():
"""
test inheritance from different module
"""
# test module
test_data = doc.MatObject.matlabify('test_data')
test_submodule = test_data.getter('test_submodule')
sfdm = test_submodule.getter('super_from_diff_mod')
ok_(isinstance(sfdm, doc.MatClass))
eq_(sfdm.bases,['MyAbstractClass', 'MyHandleClass'])
bases = sfdm.getter('__bases__')
eq_(bases['MyAbstractClass'].module, 'test_data')
eq_(bases['MyHandleClass'].module, 'test_data')
return sfdm | 0f29de2ef67318010feed25ea0ffc08e2dc44162 | 2,555 |
from collections import Counter
def split_mon_unmon(data, labels):
"""
Splits into monitored and unmonitored data
If a data point only happens once, we also consider it unmonitored
@return monitored_data, monitored_label, unmonitored_data
"""
occurence = Counter(labels)
monitored_data, unmonitored_data = [], []
monitored_label = []
for d, l in zip(data, labels):
if l == UNKNOWN_WEBPAGE or occurence[l] == 1:
unmonitored_data.append(d)
else:
monitored_data.append(d)
monitored_label.append(l)
return monitored_data, monitored_label, unmonitored_data | b1d0cac2e12f4386bf04eb355811f230b18f38ca | 2,556 |
def sum_and_count(x, y):
"""A function used for calculating the mean of a list from a reduce.
>>> from operator import truediv
>>> l = [15, 18, 2, 36, 12, 78, 5, 6, 9]
>>> truediv(*reduce(sum_and_count, l)) == 20.11111111111111
True
>>> truediv(*fpartial(sum_and_count)(l)) == 20.11111111111111
True
"""
try:
return (x[0] + y, x[1] + 1)
except TypeError:
return ((x or 0) + (y or 0), len([i for i in [x, y] if i is not None])) | d43cc8dc39fb35afae4f6a4e32d34221d525f5d3 | 2,558 |
def animTempCustom():
"""
Temporarily play a custom animation for a set amount of time.
    API should expect a full `desc` object in json alongside a timelimit, in ms.
"""
colorList = request.form.get('colors').split(',')
colorsString = ""
for colorName in colorList:
c = Color(colorName)
colorsString += "[{},{},{}],".format(
int(255*c.red), int(255*c.green), int(255*c.blue)
)
colorsString = colorsString[0:-1]
print(colorsString)
colors = colorsString
bp.newAnim(
'$bpa.strip.Twinkle',
colors
)
return "Animation animation set to RGB!" | 2d9cea92d7c1c93d73fcf9b325b7b58225b4ba13 | 2,559 |
from unittest.mock import Mock
def mock_stripe_invoice(monkeypatch):
"""Fixture to monkeypatch stripe.Invoice.* methods"""
mock = Mock()
monkeypatch.setattr(stripe, "Invoice", mock)
return mock | a88923ba6d4a6dda9bf3b2fcda3bb717efe36cee | 2,560 |
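# A hypothetical pytest test showing the fixture in use: with stripe.Invoice
# monkeypatched, code under test hits the Mock instead of the Stripe API
# ("cus_123" is a made-up customer ID).
def test_create_invoice(mock_stripe_invoice):
    stripe.Invoice.create(customer="cus_123")
    mock_stripe_invoice.create.assert_called_once_with(customer="cus_123")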
def read_project(output_dir):
"""Read existing project data
"""
try:
yaml = YAML()
with open(project_yaml_file(output_dir), encoding='utf-8') as project:
project_data = yaml.load(project)
for key, value in project_data.items():
                if value is None:
project_data[key] = []
except FileNotFoundError:
project_data = {
'name': "Test Project",
'scenario_sets': [],
'narrative_sets': [],
'region_definitions': [],
'interval_definitions': [],
'units': [],
'scenarios': [],
'narratives': []
}
return project_data | 90cfd7d143176925d8a99f4d577bc7de9eb360e2 | 2,561 |
def bytes_to_int(byte_array: bytes) -> int:
""" Bytes to int """
return int.from_bytes(byte_array, byteorder='big') | 442452faeb1a4e7c346b6f4355095f179701f8f1 | 2,562 |
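# Quick sanity check of the big-endian conversion:
assert bytes_to_int(b"\x01\x00") == 256
assert bytes_to_int(b"\xff") == 255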
def ask(query, default=None):
"""Ask a question."""
if default:
default_q = ' [{0}]'.format(default)
else:
default_q = ''
inp = input("{query}{default_q}: ".format(query=query, default_q=default_q)).strip()
if inp or default is None:
return inp
else:
return default | 54564ee00749ddb2e5c409d781552f3ca5fcd847 | 2,563 |
def _clean_kwargs(keep_name=False, **kwargs):
"""
    Sanitize the arguments for use with shade
"""
if "name" in kwargs and not keep_name:
kwargs["name_or_id"] = kwargs.pop("name")
return __utils__["args.clean_kwargs"](**kwargs) | 326d849b030a11ebc21e364f6a05eef9ab019637 | 2,564 |
def calculate_pnl_per_equity(df_list):
"""Method that calculate the P&L of the strategy per equity and returns a list of P&L"""
pnl_per_equity = [] # initialize the list of P&L per equity
for df in df_list: # iterates over the dataframes of equities
pnl = df['Strategy Equity'].iloc[-1] - df['Buy and Hold Equity'].iloc[-1] # calculating the difference at the last point
pnl_per_equity.append(pnl)
return pnl_per_equity | 4f6ac1b9f6a949215c6b805f05a65897393f3288 | 2,566 |
import http
def search(q_str: str) -> dict:
"""search in genius
Args:
q_str (str): query string
Returns:
dict: search response
"""
data = {'songs': [], 'lyric': []}
response = http.get(
'https://genius.com/api/search/multi?per_page=5', params={'q': q_str}, headers=headers).json()
sections = response['response']['sections']
    if len(sections[1]['hits']) == 0 and len(sections[2]['hits']) == 0:
return False
for section in response['response']['sections'][1:3]:
if section['type'] == 'song':
for song in section['hits']:
music = song['result']
# print(music)
if len(data['songs']) == 0:
data['songs'].append(dict_builder(music))
if data['songs'][-1]['api_path'] != music['api_path']:
data['songs'].append(dict_builder(music))
elif section['type'] == 'lyric':
for lyric in section['hits']:
music = lyric['result']
if len(data['lyric']) == 0:
data['lyric'].append(dict_builder(music))
if data['lyric'][-1]['api_path'] != music['api_path']:
                    data['lyric'].append(dict_builder(music))
return data | 7421220e43415fb17b29db26f1fc6902e88144a4 | 2,567 |
def geocode(level=None, names=None, countries=None, states=None, counties=None, scope=None) -> NamesGeocoder:
"""
Create a `Geocoder`. Allows to refine ambiguous request with `where()` method,
scope that limits area of geocoding or with parents.
Parameters
----------
level : {'country', 'state', 'county', 'city'}
The level of administrative division. Autodetection by default.
names : list or str
Names of objects to be geocoded.
For 'state' level: 'US-48' returns continental part of United States (48 states)
in a compact form.
countries : list
Parent countries. Should have same size as names. Can contain strings or `Geocoder` objects.
states : list
Parent states. Should have same size as names. Can contain strings or `Geocoder` objects.
counties : list
Parent counties. Should have same size as names. Can contain strings or `Geocoder` objects.
scope : str or `Geocoder`
Limits area of geocoding. If parent country is set then error will be generated.
If type is a string - geoobject should have geocoded scope in parents.
If type is a `Geocoder` - geoobject should have geocoded scope in parents.
Scope should contain only one entry.
Returns
-------
`NamesGeocoder`
Geocoder object specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 5
from IPython.display import display
from lets_plot import *
from lets_plot.geo_data import *
LetsPlot.setup_html()
states = geocode('state').scope('Italy').get_boundaries(6)
display(states.head())
ggplot() + geom_map(data=states)
|
.. jupyter-execute::
:linenos:
:emphasize-lines: 5, 8
from IPython.display import display
from lets_plot import *
from lets_plot.geo_data import *
LetsPlot.setup_html()
states = geocode(level='state', scope='US').get_geocodes()
display(states.head())
names = ['York'] * len(states.state)
cities = geocode(names=names, states=states.state).ignore_not_found().get_centroids()
display(cities.head())
ggplot() + \\
geom_livemap() + \\
geom_point(data=cities, tooltips=layer_tooltips().line('@{found name}'))
"""
return NamesGeocoder(level, names) \
.scope(scope) \
.countries(countries) \
.states(states) \
.counties(counties) | 25ab4ff7887d09a41c19b6ec8ee9057151483b2e | 2,569 |
def fpAbs(x):
"""
Returns the absolute value of the floating point `x`. So:
a = FPV(-3.2, FSORT_DOUBLE)
b = fpAbs(a)
b is FPV(3.2, FSORT_DOUBLE)
"""
return abs(x) | d69f5f07b651ed4466ff768601c77f90232b8827 | 2,570 |
from io import StringIO
import json
def volumes(container:str) -> list:
"""
Return list of 'container' volumes (host,cont)
"""
buf = StringIO()
_exec(
docker, 'inspect', '-f', "'{{json .Mounts}}'", container, _out=buf
)
res = buf.getvalue().strip()
vols_list = json.loads(res[1:-1])
# vols = {d['Source']:d['Destination'] for d in vols_list}
vols = [(d['Source'],d['Destination']) for d in vols_list]
return vols | 5191df9ab4aa58a80fba90872da6091bc58f8be2 | 2,571 |
def names():
"""Return stock summary information"""
helper = SQLHelper()
conn = helper.getConnection()
repo = robinhoodRepository(conn)
stockInfo = repo.getAllStocks()
return json_response(stockInfo, 200) | d543ab5254e95e903e8b74db1ab5b0266859b083 | 2,572 |
def get_bot_group_config(bot_id):
"""Returns BotGroupConfig for a bot with given ID.
Returns:
BotGroupConfig or None if not found.
Raises:
BadConfigError if there's no cached config and the current config at HEAD is
not passing validation.
"""
cfg = _fetch_bot_groups()
gr = cfg.direct_matches.get(bot_id)
if gr is not None:
return gr
for prefix, gr in cfg.prefix_matches:
if bot_id.startswith(prefix):
return gr
return cfg.default_group | 025b2a9a91f2a744668fd6c438db0f5c4edd0a98 | 2,573 |
def add_utm(url_, campaign, source='notification', medium='email'):
"""Add the utm_* tracking parameters to a URL."""
return urlparams(
url_, utm_campaign=campaign, utm_source=source, utm_medium=medium) | d428daf58db7b0b5d5dabfd4bac6f70e900bd311 | 2,574 |
import networkx as nx

def is_forest(G):
"""Return True if the input graph is a forest
Parameters
----------
G : NetworkX Graph
An undirected graph.
Returns
-------
True if the input graph is a forest
Notes
-----
For undirected graphs only.
"""
    # connected_component_subgraphs was removed in NetworkX 2.4; iterate over
    # connected components and test each induced subgraph instead.
    for component in nx.connected_components(G):
        if not nx.is_tree(G.subgraph(component)):
            return False
return True | 6aade3d2407b8af1cd8662b9efdc604d304341fe | 2,575 |
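# Quick sanity check: two disjoint paths form a forest; a cycle does not.
forest = nx.Graph([(0, 1), (1, 2), (3, 4)])
assert is_forest(forest)
cyclic = nx.Graph([(0, 1), (1, 2), (2, 0)])
assert not is_forest(cyclic)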
from urllib.parse import parse_qs

def parse_uri(uri):
""" This implies that we are passed a uri that looks something like:
proto://username:password@hostname:port/database
In most cases, you can omit the port and database from the string:
proto://username:password@hostname
Also, in cases with no username, you can omit that:
proto://:password@hostname:port/database
Also supports additional arguments:
proto://hostname:port/database?arg1=val&arg2=vals
:param str uri: URI to parse
:rtype: dict
:returns: Dictionary with parsed URL components
.. note::
This function may move, as the currently location may not
be optimal. Location will be finalized by 1.0.0 stable release.
"""
proto = uri.split('://')[0]
uri = uri.split('://')[1]
_host = uri.split('@')[-1]
_host = _host.split(':')
if len(_host) == 2:
host = _host[0]
if '/' in _host[1]:
port = int(_host[1].split('/')[0])
else:
port = int(_host[1])
else:
host = _host[0]
if '/' in host:
host = host.split('/')[0]
port = None
if "@" in uri:
_cred = uri[0:uri.rfind(':'.join(_host)) - 1]
_cred = _cred.split(':')
if len(_cred) == 2:
_user = _cred[0]
_pass = _cred[1]
else:
_user = _cred[0]
_pass = None
else:
_user = None
_pass = None
database = uri.split('/')
if len(database) >= 2:
database = database[1]
if '?' in database:
_db = database.split('?')
database = _db[0]
args = parse_qs(_db[1], keep_blank_values = True)
else:
args = None
else:
database = None
args = None
return {
"protocol": proto,
"resource": uri,
"host": host,
"port": port,
"username": _user,
"password": _pass,
"database": database,
"args": args,
"uri": "{}://{}".format(proto, uri),
} | 5204d803a5d0f6995c49883a892bc6b22cef9443 | 2,577 |
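# A usage sketch with made-up values:
info = parse_uri("mysql://user:secret@db.example.com:3306/mydb?ssl=true")
assert info["host"] == "db.example.com" and info["port"] == 3306
assert info["username"] == "user" and info["password"] == "secret"
assert info["database"] == "mydb" and info["args"] == {"ssl": ["true"]}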
def pwgen(pw_len=16):
"""Generate a random password with the given length.
Allowed chars does not have "I" or "O" or letters and
digits that look similar -- just to avoid confusion.
"""
return get_random_string(
pw_len, 'abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789'
) | 3c5a07440a6d3eee7c1bc9162089c434cfe6c45d | 2,578 |
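# get_random_string is not defined in the snippet (it matches the signature of
# Django's django.utils.crypto.get_random_string); a standalone stand-in using
# the cryptographically secure `secrets` module:
import secrets
def get_random_string(length, allowed_chars):
    return ''.join(secrets.choice(allowed_chars) for _ in range(length))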
import networkx as nx
import numpy as np

def compute_tree_distances(tree):
"""
Computes the matrix of pairwise distances between leaves of the tree
"""
num_leaves = len(get_leaves(tree)) - 1
distances = np.zeros([num_leaves, num_leaves])
for leaf in range(num_leaves):
distance_dictionary, tmp = nx.multi_source_dijkstra(tree.to_undirected(), [leaf], weight = 'time')
for target_leaf in range(num_leaves):
distances[leaf, target_leaf] = distance_dictionary[target_leaf]
return distances | b4bdd81e0f4c8d5577813f6e68ece9f0a8992e19 | 2,580 |
import numpy as np
import pandas as pd

def create_rndm_backgr_selections(annotations, files, length, num, no_overlap=False, trim_table=False):
""" Create background selections of uniform length, randomly distributed across the
data set and not overlapping with any annotations, including those labelled 0.
The random sampling is performed without regard to already created background
selections. Therefore, it is in principle possible that some of the created
selections will overlap, although in practice this will only occur with very
small probability, unless the number of requested selections (num) is very
large and/or the (annotation-free part of) the data set is small in size.
        To avoid any overlap, set 'no_overlap' to True, but note that this can
        lead to longer execution times.
Args:
annotations: pandas DataFrame
Annotation table.
files: pandas DataFrame
Table with file durations in seconds.
Should contain columns named 'filename' and 'duration'.
length: float
Selection length in seconds.
num: int
Number of selections to be created.
no_overlap: bool
If True, randomly selected segments will have no overlap.
trim_table: bool
Keep only the columns prescribed by the Ketos annotation format.
Returns:
table_backgr: pandas DataFrame
Output selection table.
Example:
>>> import pandas as pd
>>> import numpy as np
>>> from ketos.data_handling.selection_table import select
>>>
>>> #Ensure reproducible results by fixing the random number generator seed.
>>> np.random.seed(3)
>>>
>>> #Load and inspect the annotations.
>>> df = pd.read_csv("ketos/tests/assets/annot_001.csv")
>>> print(df)
filename start end label
0 file1.wav 7.0 8.1 1
1 file1.wav 8.5 12.5 0
2 file1.wav 13.1 14.0 1
3 file2.wav 2.2 3.1 1
4 file2.wav 5.8 6.8 1
5 file2.wav 9.0 13.0 0
>>>
>>> #Standardize annotation table format
>>> df, label_dict = standardize(df, return_label_dict=True)
>>> print(df)
start end label
filename annot_id
file1.wav 0 7.0 8.1 2
1 8.5 12.5 1
2 13.1 14.0 2
file2.wav 0 2.2 3.1 2
1 5.8 6.8 2
2 9.0 13.0 1
>>>
>>> #Enter file durations into a pandas DataFrame
>>> file_dur = pd.DataFrame({'filename':['file1.wav','file2.wav','file3.wav',], 'duration':[18.,20.,15.]})
>>>
>>> #Create randomly sampled background selection with fixed 3.0-s length.
>>> df_bgr = create_rndm_backgr_selections(df, files=file_dur, length=3.0, num=12, trim_table=True)
>>> print(df_bgr.round(2))
start end label
filename sel_id
file1.wav 0 1.06 4.06 0
1 1.31 4.31 0
2 2.26 5.26 0
file2.wav 0 13.56 16.56 0
1 14.76 17.76 0
2 15.50 18.50 0
3 16.16 19.16 0
file3.wav 0 2.33 5.33 0
1 7.29 10.29 0
2 7.44 10.44 0
3 9.20 12.20 0
4 10.94 13.94 0
"""
# compute lengths, and discard segments shorter than requested length
    c = files[['filename','duration']].copy()
    if 'offset' in files.columns: c['offset'] = files['offset']
else: c['offset'] = 0
c.reset_index(drop=True, inplace=True)
c['length'] = c['duration'] - length
c = c[c['length'] >= 0]
# cumulative length
cs = c['length'].cumsum().values.astype(float)
cs = np.concatenate(([0],cs))
    # randomly sample
df = pd.DataFrame()
while (len(df) < num):
times = np.random.random_sample(num) * cs[-1]
for t in times:
idx = np.argmax(t < cs) - 1
row = c.iloc[idx]
fname = row['filename']
start = t - cs[idx] + row['offset']
end = start + length
q = query(annotations, filename=fname, start=start, end=end)
if len(q) > 0: continue
if no_overlap and len(df) > 0:
q = query(df.set_index(df.filename), filename=fname, start=start, end=end)
if len(q) > 0: continue
x = {'start':start, 'end':end}
y = files[files['filename']==fname].iloc[0].to_dict()
z = {**x, **y}
            df = pd.concat([df, pd.DataFrame([z])], ignore_index=True)
if len(df) == num: break
# sort by filename and offset
df = df.sort_values(by=['filename','start'], axis=0, ascending=[True,True]).reset_index(drop=True)
# re-order columns
col_names = ['filename','start','end']
if not trim_table:
names = df.columns.values.tolist()
for name in col_names: names.remove(name)
col_names += names
df = df[col_names]
df['label'] = 0 #add label
# transform to multi-indexing
df = use_multi_indexing(df, 'sel_id')
return df | 01eac8bc0a624b56d419ce3cb75744792af1472f | 2,581 |
def GetPartition(partition_name, target_os):
"""Return the partition to install to.
Args:
partition_name: partition name from command-line
{'primary', 'secondary', 'other'}
target_os: 'fiberos' or 'android'
Returns:
0 or 1
Raises:
Fatal: if no partition could be determined
"""
if partition_name == 'other':
if target_os == GetOs():
boot = GetBootedPartition()
else:
boot = GetActivePartitionFromHNVRAM(target_os)
assert boot in [None, 0, 1]
if boot is None:
# Policy decision: if we're booted from NFS, install to secondary
return 1
else:
return boot ^ 1
elif partition_name in ['primary', 0]:
return 0
elif partition_name in ['secondary', 1]:
return 1
else:
raise Fatal('--partition must be one of: primary, secondary, other') | b3f030779bd29bbe695ba3769372f4af700d7cb7 | 2,583 |
import aiohttp
import json
async def call_dialogflow(message, config, lang=DEFAULT_LANGUAGE):
"""Call the Dialogflow api and return the response."""
async with aiohttp.ClientSession() as session:
payload = {
"v": DIALOGFLOW_API_VERSION,
"lang": lang,
"sessionId": message.connector.name,
"query": message.text,
}
headers = {
"Authorization": "Bearer " + config["access-token"],
"Content-Type": "application/json",
}
resp = await session.post(
DIALOGFLOW_API_ENDPOINT, data=json.dumps(payload), headers=headers
)
result = await resp.json()
_LOGGER.info(_("Dialogflow response - %s"), json.dumps(result))
return result | e670748dc4d0318d047b0f0ded6d857597112d49 | 2,584 |
def gettgd(sat, eph, type=0):
""" get tgd: 0=E5a, 1=E5b """
sys = gn.sat2prn(sat)[0]
if sys == uGNSS.GLO:
return eph.dtaun * rCST.CLIGHT
else:
return eph.tgd[type] * rCST.CLIGHT | c7231769b0e9be5287b2b2f76c8dcdc7bd409a89 | 2,585 |
def random_sign_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) random entries from a "sign Uniform".
Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
Samples from this `Op` are distributed like
```
Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_sign_uniform"):
unsigned_samples = random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
if seed is not None:
seed += 12
signs = math_ops.sign(
random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype) | b942253c14438c72c19d648a0d0358d8cd280bd0 | 2,586 |
def ones_v(n):
"""
Return the column vector of ones of length n.
"""
return matrix(1, (n,1), 'd') | 46936660025c1b5bd533b78143301d1218b568d7 | 2,587 |
def test_encrypt_and_decrypt_two(benchmark: BenchmarkFixture) -> None:
"""Benchmark encryption and decryption run together."""
primitives.decrypt = pysodium.crypto_aead_xchacha20poly1305_ietf_decrypt
primitives.encrypt = pysodium.crypto_aead_xchacha20poly1305_ietf_encrypt
def encrypt_and_decrypt() -> bytes:
token = version2.encrypt(MESSAGE, KEY, FOOTER)
return version2.decrypt(token, KEY, FOOTER)
plain_text = benchmark(encrypt_and_decrypt)
assert plain_text == MESSAGE | 1b632ae28f147fa4d98dcdda982bf3d17b2c17dd | 2,588 |
def get_fy_parent_nucl(fy_lib):
"""Gets the list of fission parents from a fission yield dictionnary.
Parameters
----------
fy_lib: dict
A fission yield dictionnary
"""
fy_nucl = get_fy_nucl(fy_lib)
fy_parent = []
sample_zamid = fy_nucl[0]
sample = fy_lib[sample_zamid]
for fission_parent in sample:
fy_parent.append(fission_parent)
return fy_parent | feb2ec2adfda4d9df4993cc89545564e4c0d1a54 | 2,590 |
from functools import partial
import array
def initialize ( is_test, no_cam ) :
    """Initialize individuals and the population based on the job-machine table."""
    jmTable = getJmTable ( is_test )
    MAX_JOBS = jmTable.getJobsCount()
    MAX_MACHINES = jmTable.getMachinesCount()
    # minimize makespan
    creator.create ( "FitnessMin", base.Fitness, weights=(-1.0,) )
    # an individual is a list of job numbers
    #creator.create ( "Individual", list, fitness=creator.FitnessMin )
    creator.create ( "Individual", array.array, typecode='b', fitness=creator.FitnessMin ) # 'b' is signed char
    toolbox = base.Toolbox()
    # an individual is a random arrangement of values in [0, MAX_MACHINES), MAX_JOBS times
    gen_ind = partial ( initIndividual, MAX_JOBS, MAX_MACHINES )
    toolbox.register ( "individual", tools.initIterate, creator.Individual, gen_ind )
    # register the generator for the initial population, which is a list of Individuals
    toolbox.register ( "population", tools.initRepeat, list, toolbox.individual )
    # register the evaluation function
    toolbox.register ( "evaluate", schedule.eval, jmTable )
    # register the crossover operator
    toolbox.register ( "mate", schedule.crossover )
    # register the mutation operator
    toolbox.register ( "mutate", schedule.mutation )
    # register roulette-wheel selection
    toolbox.register ( "select", tools.selRoulette )
    # register the replacement operation
    if no_cam :
        # standard replacement
        toolbox.register ( "getArgWorst", schedule.getArgWorst )
    else :
        # replacement via the cluster averaging method (CAM)
        toolbox.register ( "getArgWorst", schedule.getArgWorstCAM )
return toolbox, jmTable | f306cf9b5400ea92b92709bc6986d6b87ea909b2 | 2,591 |
def perform_variants_query(job, **kwargs):
"""Query for variants.
:param job: API to interact with the owner of the variants.
:type job: :class:`cibyl.sources.zuul.transactions.JobResponse`
:param kwargs: See :func:`handle_query`.
:return: List of retrieved variants.
:rtype: list[:class:`cibyl.sources.zuul.transactions.VariantResponse`]
"""
return job.variants().get() | c779080e2ef8c1900c293f70996e17bae932b142 | 2,592 |
import torch
import torch.nn as nn

def get_model(share_weights=False, upsample=False):  # pylint: disable=too-many-statements
""" Return a network dict for the model """
block0 = [{'conv1_1': [3, 64, 3, 1, 1]},
{'conv1_2': [64, 64, 3, 1, 1]}, {'pool1_stage1': [2, 2, 0]},
{'conv2_1': [64, 128, 3, 1, 1]},
{'conv2_2': [128, 128, 3, 1, 1]}, {'pool2_stage1': [2, 2, 0]},
{'conv3_1': [128, 256, 3, 1, 1]},
{'conv3_2': [256, 256, 3, 1, 1]},
{'conv3_3': [256, 256, 3, 1, 1]},
{'conv3_4': [256, 256, 3, 1, 1]}, {'pool3_stage1': [2, 2, 0]},
{'conv4_1': [256, 512, 3, 1, 1]},
{'conv4_2': [512, 512, 3, 1, 1]}]
if share_weights:
print("defining network with shared weights")
network_dict = get_shared_network_dict()
else:
network_dict = get_network_dict()
def define_base_layers(block, layer_size):
layers = []
for i in range(layer_size):
one_ = block[i]
for k, v in zip(one_.keys(), one_.values()):
if 'pool' in k:
layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
layers += [conv2d, nn.ReLU(inplace=True)]
return layers
def define_stage_layers(cfg_dict):
layers = define_base_layers(cfg_dict, len(cfg_dict) - 1)
one_ = cfg_dict[-1].keys()
k = list(one_)[0]
v = cfg_dict[-1][k]
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
layers += [conv2d]
return nn.Sequential(*layers)
# create all the layers of the model
base_layers = define_base_layers(block0, len(block0))
pre_stage_layers = define_base_layers(network_dict['block_pre_stage'], len(network_dict['block_pre_stage']))
blocks = {'block0': nn.Sequential(*base_layers),
'block_pre_stage': nn.Sequential(*pre_stage_layers)}
if share_weights:
shared_layers_s1 = define_base_layers(network_dict['block1_shared'], len(network_dict['block1_shared']))
shared_layers_s2 = define_base_layers(network_dict['block2_shared'], len(network_dict['block2_shared']))
blocks['block1_shared'] = nn.Sequential(*shared_layers_s1)
blocks['block2_shared'] = nn.Sequential(*shared_layers_s2)
for k, v in zip(network_dict.keys(), network_dict.values()):
if 'shared' not in k and 'pre_stage' not in k:
blocks[k] = define_stage_layers(v)
class PoseModel(nn.Module):
""" Pose Model class """
def __init__(self, model_dict, upsample=False):
super(PoseModel, self).__init__()
self.upsample = upsample
self.basemodel = model_dict['block0']
self.pre_stage = model_dict['block_pre_stage']
if share_weights:
self.stage1_shared = model_dict['block1_shared']
self.stage1_1 = model_dict['block1_1']
self.stage2_1 = model_dict['block2_1']
# self.stage3_1 = model_dict['block3_1']
# self.stage4_1 = model_dict['block4_1']
# self.stage5_1 = model_dict['block5_1']
# self.stage6_1 = model_dict['block6_1']
if share_weights:
self.stage2_shared = model_dict['block2_shared']
self.stage1_2 = model_dict['block1_2']
self.stage2_2 = model_dict['block2_2']
# self.stage3_2 = model_dict['block3_2']
# self.stage4_2 = model_dict['block4_2']
# self.stage5_2 = model_dict['block5_2']
# self.stage6_2 = model_dict['block6_2']
def forward(self, *inputs):
out1_vgg = self.basemodel(inputs[0])
out1 = self.pre_stage(out1_vgg)
if share_weights:
out1_shared = self.stage1_shared(out1)
else:
out1_shared = out1
out1_1 = self.stage1_1(out1_shared)
out1_2 = self.stage1_2(out1_shared)
out2 = torch.cat([out1_1, out1_2, out1], 1)
if share_weights:
out2_shared = self.stage2_shared(out2)
else:
out2_shared = out2
out2_1 = self.stage2_1(out2_shared)
out2_2 = self.stage2_2(out2_shared)
# out3 = torch.cat([out2_1, out2_2, out1], 1)
# out3_1 = self.stage3_1(out3)
# out3_2 = self.stage3_2(out3)
# out4 = torch.cat([out3_1, out3_2, out1], 1)
#
# out4_1 = self.stage4_1(out4)
# out4_2 = self.stage4_2(out4)
# out5 = torch.cat([out4_1, out4_2, out1], 1)
#
# out5_1 = self.stage5_1(out5)
# out5_2 = self.stage5_2(out5)
# out6 = torch.cat([out5_1, out5_2, out1], 1)
#
# out6_1 = self.stage6_1(out6)
# out6_2 = self.stage6_2(out6)
if self.upsample:
# parameters to check for up-sampling: align_corners = True, mode='nearest'
upsampler = nn.Upsample(scale_factor=2, mode='bilinear')
out2_1_up = upsampler(out2_1)
out2_2_up = upsampler(out2_2)
return out1_1, out1_2, out2_1, out2_2, out2_1_up, out2_2_up
return out1_1, out1_2, out2_1, out2_2
model = PoseModel(blocks, upsample=upsample)
return model | 364050799adc3312e4a46081e4a82338407f177b | 2,593 |
def bootstrap(config_uri, request=None, options=None):
""" Load a WSGI application from the PasteDeploy config file specified
by ``config_uri``. The environment will be configured as if it is
currently serving ``request``, leaving a natural environment in place
to write scripts that can generate URLs and utilize renderers.
This function returns a dictionary with ``app``, ``root``, ``closer``,
``request``, and ``registry`` keys. ``app`` is the WSGI app loaded
(based on the ``config_uri``), ``root`` is the traversal root resource
of the Pyramid application, and ``closer`` is a parameterless callback
that may be called when your script is complete (it pops a threadlocal
stack).
.. note::
Most operations within :app:`Pyramid` expect to be invoked within the
context of a WSGI request, thus it's important when loading your
application to anchor it when executing scripts and other code that is
not normally invoked during active WSGI requests.
.. note::
For a complex config file containing multiple :app:`Pyramid`
applications, this function will setup the environment under the context
of the last-loaded :app:`Pyramid` application. You may load a specific
application yourself by using the lower-level functions
:meth:`pyramid.paster.get_app` and :meth:`pyramid.scripting.prepare` in
conjunction with :attr:`pyramid.config.global_registries`.
``config_uri`` -- specifies the PasteDeploy config file to use for the
interactive shell. The format is ``inifile#name``. If the name is left
off, ``main`` will be assumed.
``request`` -- specified to anchor the script to a given set of WSGI
parameters. For example, most people would want to specify the host,
scheme and port such that their script will generate URLs in relation
to those parameters. A request with default parameters is constructed
for you if none is provided. You can mutate the request's ``environ``
later to setup a specific host/port/scheme/etc.
``options`` Is passed to get_app for use as variable assignments like
{'http_port': 8080} and then use %(http_port)s in the
config file.
See :ref:`writing_a_script` for more information about how to use this
function.
"""
app = get_app(config_uri, options=options)
env = prepare(request)
env['app'] = app
return env | 608629eb380765ebafa4009946a30b9f46de6ff9 | 2,594 |
from numpy import arange, reshape, transpose

def readSegy(filename):
    """
    Data, SH, SegyTraceHeaders = readSegy(filename)
    """
    printverbose("readSegy : Trying to read "+filename,0)
    data = open(filename, 'rb').read()
filesize=len(data)
SH=getSegyHeader(filename)
bps=getBytePerSample(SH)
    ntraces = (filesize-3600)//(SH['ns']*bps+240)
    # ntraces = 100
printverbose("readSegy : Length of data : " + str(filesize),2)
SH["ntraces"]=ntraces;
    ndummy_samples=240//bps
printverbose("readSegy : ndummy_samples="+str(ndummy_samples),6)
printverbose("readSegy : ntraces=" + str(ntraces) + " nsamples="+str(SH['ns']),2)
# GET TRACE
index=3600;
    nd=(filesize-3600)//bps
    # READ ALL SEGY TRACE HEADERS
SegyTraceHeaders = getAllSegyTraceHeaders(SH,data)
printverbose("readSegy : reading segy data",2)
# READ ALL DATA EXCEPT FOR SEGY HEADER
#Data = zeros((SH['ns'],ntraces))
revision=SH["SegyFormatRevisionNumber"]
if (revision==100):
revision=1
dsf=SH["DataSampleFormat"]
DataDescr=SH_def["DataSampleFormat"]["descr"][revision][dsf]
printverbose("readSegy : SEG-Y revision = "+str(revision),1)
printverbose("readSegy : DataSampleFormat="+str(dsf)+"("+DataDescr+")",1)
if (SH["DataSampleFormat"]==1):
printverbose("readSegy : Assuming DSF=1, IBM FLOATS",2)
Data1 = getValue(data,index,'ibm',endian,nd)
elif (SH["DataSampleFormat"]==2):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 32bit INT",2)
Data1 = getValue(data,index,'l',endian,nd)
elif (SH["DataSampleFormat"]==3):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 16bit INT",2)
Data1 = getValue(data,index,'h',endian,nd)
elif (SH["DataSampleFormat"]==5):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", IEEE",2)
Data1 = getValue(data,index,'float',endian,nd)
elif (SH["DataSampleFormat"]==8):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 8bit CHAR",2)
Data1 = getValue(data,index,'B',endian,nd)
else:
printverbose("readSegy : DSF=" + str(SH["DataSampleFormat"]) + ", NOT SUPORTED",2)
Data = Data1[0]
printverbose("readSegy : - reshaping",2)
Data=reshape(Data,(ntraces,SH['ns']+ndummy_samples))
printverbose("readSegy : - stripping header dummy data",2)
Data=Data[:,ndummy_samples:(SH['ns']+ndummy_samples)]
printverbose("readSegy : - transposing",2)
Data=transpose(Data)
    # SOMEONE NEEDS TO IMPLEMENT A NICER WAY TO DEAL WITH DSF=8
if (SH["DataSampleFormat"]==8):
for i in arange(ntraces):
for j in arange(SH['ns']):
if Data[i][j]>128:
Data[i][j]=Data[i][j]-256
printverbose("readSegy : read data",2)
return Data,SH,SegyTraceHeaders | 5e3920255aa49c70e0e898b2d3915c05afc7f869 | 2,595 |
import tensorflow as tf

def planar_transform(imgs, masks, pixel_coords_trg, k_s, k_t, rot, t, n_hat, a):
"""transforms imgs, masks and computes dmaps according to planar transform.
Args:
imgs: are L X [...] X C, typically RGB images per layer
masks: L X [...] X 1, indicating which layer pixels are valid
pixel_coords_trg: [...] X H_t X W_t X 3;
pixel (u,v,1) coordinates of target image pixels.
k_s: intrinsics for source cameras, are [...] X 3 X 3 matrices
k_t: intrinsics for target cameras, are [...] X 3 X 3 matrices
rot: relative rotation, are [...] X 3 X 3 matrices
t: [...] X 3 X 1, translations from source to target camera
n_hat: L X [...] X 1 X 3, plane normal w.r.t source camera frame
a: L X [...] X 1 X 1, plane equation displacement
Returns:
imgs_transformed: L X [...] X C images in trg frame
masks_transformed: L X [...] X 1 masks in trg frame
dmaps_trg: L X [...] X 1, indicating per pixel inverse depth
Assumes the first dimension corresponds to layers.
"""
with tf.name_scope('planar_transform'):
n_layers = imgs.get_shape().as_list()[0]
rot_rep_dims = [n_layers]
rot_rep_dims += [1 for _ in range(len(k_s.get_shape()))]
cds_rep_dims = [n_layers]
cds_rep_dims += [1 for _ in range(len(pixel_coords_trg.get_shape()))]
k_s = tf.tile(tf.expand_dims(k_s, axis=0), rot_rep_dims)
k_t = tf.tile(tf.expand_dims(k_t, axis=0), rot_rep_dims)
t = tf.tile(tf.expand_dims(t, axis=0), rot_rep_dims)
rot = tf.tile(tf.expand_dims(rot, axis=0), rot_rep_dims)
pixel_coords_trg = tf.tile(
tf.expand_dims(pixel_coords_trg, axis=0), cds_rep_dims)
ndims_img = len(imgs.get_shape())
imgs_masks = tf.concat([imgs, masks], axis=ndims_img - 1)
imgs_masks_trg = homography.transform_plane_imgs(
imgs_masks, pixel_coords_trg, k_s, k_t, rot, t, n_hat, a)
imgs_trg, masks_trg = tf.split(imgs_masks_trg, [3, 1], axis=ndims_img - 1)
dmaps_trg = homography.trg_disp_maps(pixel_coords_trg, k_t, rot, t, n_hat,
a)
return imgs_trg, masks_trg, dmaps_trg | 18f90706b996ee9ba81ab7142313dcaa761cf773 | 2,596 |
def convertDynamicRenderStates(data, builder):
"""
Converts dynamic render states. The data map is expected to contain the following elements:
- lineWidth: float width for the line. Defaults to 1.
- depthBiasConstantFactor: float value for the depth bias constant factor. Defaults to 0.
- depthBiasClamp: float value for the depth bias clamp. Defaults to 0.
- depthBiasSlopeFactor: float value for the depth bias slope factor. Defaults to 0.
- blendConstants: array of 4 floats for the blend color. Defaults to [0, 0, 0, 0].
- depthBounds: array of 2 floats for the min and max depth value. Defaults to [0, 1].
- stencilCompareMask: int compare mask for both the front and back stencil. Defaults to
0xFFFFFFFF.
- frontStencilCompareMask: int compare mask for just the front stencil.
- backStencilCompareMask: int compare mask for just the back stencil.
- stencilWriteMask: int write mask for both the front and back stencil. Defaults to 0.
- frontStencilWriteMask: int write mask for just the front stencil.
- backStencilWriteMask: int write mask for just the back stencil.
- stencilReference: int reference for both the front and back stencil. Defaults to 0.
- frontStencilReference: int reference for just the front stencil.
- backStencilReference: int reference for just the back stencil.
"""
def readFloat(value, name):
try:
return float(value)
except:
raise Exception('Invalid ' + name + ' float value "' + str(value) + '".')
def readUInt(value, name):
try:
intVal = int(value)
if intVal < 0:
raise Exception()
return intVal
except:
raise Exception('Invalid ' + name + ' unsigned int value "' + str(value) + '".')
lineWidth = readFloat(data.get('lineWidth', 1.0), 'line width')
depthBiasConstantFactor = readFloat(data.get('depthBiasConstantFactor', 0.0),
'depth bias constant factor')
depthBiasClamp = readFloat(data.get('depthBiasClamp', 0.0), 'depth bias clamp')
depthBiasSlopeFactor = readFloat(data.get('depthBiasSlopeFactor', 0.0),
'depth bias slope factor')
colorValue = data.get('blendConstants', [0.0, 0.0, 0.0, 0.0])
try:
if len(colorValue) != 4:
raise Exception()
except:
raise Exception('Blend constants value must be an array of 4 floats.')
blendConstants = []
for c in colorValue:
blendConstants.append(readFloat(c, 'blend constant'))
depthBoundsValue = data.get('depthBounds', [0.0, 1.0])
try:
if len(depthBoundsValue) != 2:
raise Exception()
except:
raise Exception('Depth bounds value must be an array of 2 floats.')
depthBounds = []
for b in depthBoundsValue:
depthBounds.append(readFloat(b, 'depth bounds'))
stencilCompareMask = data.get('stencilCompareMask', 0xFFFFFFFF)
frontStencilCompareMask = readUInt(data.get('frontStencilCompareMask', stencilCompareMask),
'stencil compare mask')
backStencilCompareMask = readUInt(data.get('backStencilCompareMask', stencilCompareMask),
'stencil compare mask')
stencilWriteMask = data.get('stencilWriteMask', 0)
frontStencilWriteMask = readUInt(data.get('frontStencilWriteMask', stencilWriteMask),
'stencil write mask')
backStencilWriteMask = readUInt(data.get('backStencilWriteMask', stencilWriteMask),
'stencil write mask')
stencilReference = data.get('stencilReference', 0)
frontStencilReference = readUInt(data.get('frontStencilReference', stencilReference),
'stencil reference')
backStencilReference = readUInt(data.get('backStencilReference', stencilReference),
'stencil reference')
DynamicRenderStates.Start(builder)
DynamicRenderStates.AddLineWidth(builder, lineWidth)
DynamicRenderStates.AddDepthBiasConstantFactor(builder, depthBiasConstantFactor)
DynamicRenderStates.AddDepthBiasClamp(builder, depthBiasClamp)
DynamicRenderStates.AddDepthBiasSlopeFactor(builder, depthBiasSlopeFactor)
DynamicRenderStates.AddBlendConstants(builder, CreateColor4f(builder, *blendConstants))
DynamicRenderStates.AddDepthBounds(builder, CreateVector2f(builder, *depthBounds))
DynamicRenderStates.AddFrontStencilCompareMask(builder, frontStencilCompareMask)
DynamicRenderStates.AddBackStencilCompareMask(builder, backStencilCompareMask)
DynamicRenderStates.AddFrontStencilWriteMask(builder, frontStencilWriteMask)
DynamicRenderStates.AddBackStencilWriteMask(builder, backStencilWriteMask)
DynamicRenderStates.AddFrontStencilReference(builder, frontStencilReference)
DynamicRenderStates.AddBackStencilReference(builder, backStencilReference)
return DynamicRenderStates.End(builder) | 5c27ebd4401d8b6c0388bfe6f1973c137404ddf5 | 2,597 |
def binary_search(a, search_value):
"""
@name binary_search
@param a array
"""
N = len(a)
l = 0
r = len(a) - 1
while(True):
try:
result = binary_search_iteration(a, l, r, search_value)
l, r = result
except TypeError:
            return -1 if result is None else result | 5fc2748a76d89c2559cda8bc9dacd16d90b2aa5e | 2,598 |
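# binary_search_iteration is not defined in the snippet; a minimal stand-in
# matching the contract used above (returns a new (l, r) pair to continue,
# the index when found, or None when the range is exhausted):
def binary_search_iteration(a, l, r, search_value):
    if l > r:
        return None
    mid = (l + r) // 2
    if a[mid] == search_value:
        return mid
    return (mid + 1, r) if a[mid] < search_value else (l, mid - 1)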
from typing import Dict
from typing import Any
from typing import cast
def _key_match(d1: Dict[str, Any], d2: Dict[str, Any], key: str) -> bool:
"""
>>> _key_match({"a": 1}, {"a": 2}, "a")
False
>>> _key_match({"a": 1}, {"a": 2}, "b")
True
>>> _key_match({"a": 2}, {"a": 1}, "a")
False
>>> _key_match({"a": 1}, {"a": 1}, "a")
True
>>> _key_match({"a": 2}, {"b": 1}, "a")
False
>>> _key_match({"b": 2}, {"a": 1}, "a")
False
"""
try:
return (key not in d1 and key not in d2) or cast(bool, d1[key] == d2[key])
except KeyError:
return False | 8e76ee70c6209b357b13890a9fcf2b0b7d770c1b | 2,599 |
def calculate(over):
"""Returns the value of the first triangle number to have
over the specified number of divisors"""
triangle = 0
count = sum(range(triangle))
while True:
if num_divisors(count) > over:
answer = count
return answer
triangle += 1
count = sum(range(triangle)) | e7391bea108261bb2b7abc64cbdd6ba6285deaae | 2,600 |
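# num_divisors is not defined in the snippet; a minimal stand-in consistent
# with the usage above, counting divisors in pairs up to sqrt(n):
def num_divisors(n):
    if n < 1:
        return 0
    count, i = 0, 1
    while i * i <= n:
        if n % i == 0:
            count += 1 if i * i == n else 2
        i += 1
    return count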
import numpy
def convert_image_to_kernel(im: Image, oversampling, kernelwidth):
""" Convert an image to a griddata kernel
:param im: Image to be converted
:param oversampling: Oversampling of Image spatially
:param kernelwidth: Kernel width to be extracted
:return: numpy.ndarray[nchan, npol, oversampling, oversampling, kernelwidth, kernelwidth]
"""
naxis = len(im.shape)
assert numpy.max(numpy.abs(im.data)) > 0.0, "Image is empty"
nchan, npol, ny, nx = im.shape
    assert nx % oversampling == 0, "nx must be divisible by oversampling"
    assert ny % oversampling == 0, "ny must be divisible by oversampling"
    assert kernelwidth < nx and kernelwidth < ny, "Specified kernel width %d too large" % kernelwidth
assert im.wcs.wcs.ctype[0] == 'UU', 'Axis type %s inappropriate for construction of kernel' % im.wcs.wcs.ctype[0]
assert im.wcs.wcs.ctype[1] == 'VV', 'Axis type %s inappropriate for construction of kernel' % im.wcs.wcs.ctype[1]
newwcs = WCS(naxis=naxis + 2)
for axis in range(2):
newwcs.wcs.ctype[axis] = im.wcs.wcs.ctype[axis]
newwcs.wcs.crpix[axis] = kernelwidth // 2
newwcs.wcs.crval[axis] = 0.0
newwcs.wcs.cdelt[axis] = im.wcs.wcs.cdelt[axis] * oversampling
newwcs.wcs.ctype[axis + 2] = im.wcs.wcs.ctype[axis]
newwcs.wcs.crpix[axis + 2] = oversampling // 2
newwcs.wcs.crval[axis + 2] = 0.0
newwcs.wcs.cdelt[axis + 2] = im.wcs.wcs.cdelt[axis]
# Now do Stokes and Frequency
newwcs.wcs.ctype[axis + 4] = im.wcs.wcs.ctype[axis + 2]
newwcs.wcs.crpix[axis + 4] = im.wcs.wcs.crpix[axis + 2]
newwcs.wcs.crval[axis + 4] = im.wcs.wcs.crval[axis + 2]
newwcs.wcs.cdelt[axis + 4] = im.wcs.wcs.cdelt[axis + 2]
newdata_shape = [nchan, npol, oversampling, oversampling, kernelwidth, kernelwidth]
newdata = numpy.zeros(newdata_shape, dtype=im.data.dtype)
assert oversampling * kernelwidth < ny
assert oversampling * kernelwidth < nx
ystart = ny // 2 - oversampling * kernelwidth // 2
xstart = nx // 2 - oversampling * kernelwidth // 2
yend = ny // 2 + oversampling * kernelwidth // 2
xend = nx // 2 + oversampling * kernelwidth // 2
for chan in range(nchan):
for pol in range(npol):
for y in range(oversampling):
slicey = slice(yend + y, ystart + y, -oversampling)
for x in range(oversampling):
slicex = slice(xend + x, xstart + x, -oversampling)
newdata[chan, pol, y, x, ...] = im.data[chan, pol, slicey, slicex]
return create_image_from_array(newdata, newwcs, polarisation_frame=im.polarisation_frame) | fe1a2a81421a5f3c09e6c6439aeb7b52e217967f | 2,601 |
import numpy as np
import tensorflow as tf
def prob(X, w):
"""
X: Nxd
w: dx1
---
prob: N x num_classes(2)"""
y = tf.constant(np.array([0.0, 1.0]), dtype=tf.float32)
prob = tf.exp(tf.matmul(X, w) * y) / (1 + tf.exp(tf.matmul(X, w)))
return prob | b916f75bc3596bbbff701b6dbb3b43add0f06373 | 2,602 |
from datetime import date, timedelta
def get_date_strings():
"""
Get date strings for last month and this month in "%Y%m" format, e.g. "202201"
"""
today = date.today()
first = today.replace(day=1)
last_month = first - timedelta(days=1)
this_month_string = today.strftime("%Y%m")
last_month_string = last_month.strftime("%Y%m")
return this_month_string, last_month_string | cc09f710d86efcc73a7e653d30cc2d590ba865e6 | 2,604 |
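# Example usage (illustrative; output depends on the current date):
this_month, last_month = get_date_strings()
print(this_month, last_month)  # e.g. "202201 202112"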
import numpy as np
import torch
from copy import deepcopy
from typing import Tuple
from torchvision.transforms import functional as F  # assumption: F is torchvision's functional API
def rotate(
img: torch.Tensor,
boxes: np.ndarray,
angle: float,
) -> Tuple[torch.Tensor, np.ndarray]:
"""Rotate image around the center, interpolation=NEAREST, pad with 0 (black)
Args:
img: image to rotate
boxes: array of boxes to rotate as well
angle: angle in degrees. +: counter-clockwise, -: clockwise
Returns:
A tuple of rotated img (tensor), rotated boxes (np array)
"""
rotated_img = F.rotate(img, angle=angle, fill=0) # Interpolation NEAREST by default
_boxes = deepcopy(boxes)
if boxes.dtype == int:
# Compute relative boxes
_boxes = _boxes.astype(float)
_boxes[:, [0, 2]] = _boxes[:, [0, 2]] / img.shape[2]
_boxes[:, [1, 3]] = _boxes[:, [1, 3]] / img.shape[1]
# Compute rotated bboxes: xmin, ymin, xmax, ymax --> x, y, w, h, alpha
r_boxes = rotate_boxes(_boxes, angle=angle, min_angle=0)
if boxes.dtype == int:
# Back to absolute boxes
r_boxes[:, [0, 2]] *= img.shape[2]
r_boxes[:, [1, 3]] *= img.shape[1]
return rotated_img, r_boxes | acd5c83a857b1bdb2312a078cfd972f9a1a0df9f | 2,606 |
import numpy as np
def _letterbox_image(img, w_in, h_in):
"""To get the image in boxed format."""
imc, imh, imw = img.shape
if (w_in / imw) < (h_in / imh):
new_w = w_in
new_h = imh * w_in // imw
else:
new_h = h_in
new_w = imw * h_in // imh
resized = _resize_image(img, new_w, new_h)
boxed = np.full((imc, h_in, w_in), 0.5, dtype=float)
_, resizedh, resizedw = resized.shape
    top = int((h_in - new_h) / 2)
    left = int((w_in - new_w) / 2)
    boxed[:, top:top + resizedh, left:left + resizedw] = resized
return boxed | 918e96f3ac7f5b1c8f7177ad759dab0579763e77 | 2,607 |
def to_RRDB(**kwargs):
"""
Residual in Residual Dense Blocks
"""
kwargs["n_filer"] = (" ",) * len(kwargs["n_filer"]) # remove x label
return _Box(fill="{rgb:white,1;black,3}", **kwargs) | 2b1afd5f4a8c65364fcdee18fc8da3da71eade08 | 2,608 |
def continuous_agg_dict_features(n, n_feats, ks):
"""Listdict-like continuous aggregated features.
Parameters
----------
n: int
the number of the elements to create their features.
n_feats: int
the number of features.
ks: int
the number of perturbations.
Returns
-------
features: list
the random features we want to compute.
"""
features = []
for k in range(ks):
features.append(continuous_dict_features(n, n_feats))
return features | ec98930c124553a86ef50db58cf7e13107bf6e52 | 2,609 |
import numpy as np
def counts_matrix(x, quantiles):
"""Count samples in strata
Get eta, the number of samples in ``x`` binned by ``quantiles`` in each
variable, for continuous variables. The shape of eta is the same as the
shape of ``x``, and the shape of ``quantiles`` should be
(``numpy.shape(x)[0] + 1``, ``numpy.shape(x)[1]``) for 2D, or
(``numpy.size(x) + 1``,) for 1D
Parameters
----------
x : :class:`numpy.ndarray` (Nx,) or (Nx, Npredictors)
The sampled predictors, with observations as rows and predictors (if
more than 1) as columns
quantiles : :class:`numpy.ndarray` (Nx + 1,) or (Nx + 1, Npredictors)
The quantiles which mark the edge of strata. The 0th axis must be
one element longer than the 0th axis of ``x``
Returns
-------
eta : :class:`numpy.ndarray`[``int``] (Nx,) or (Nx, Npredictors)
The matrix of counts in strata, with the same shape as ``x``
"""
if np.ndim(quantiles) == 1:
eta = np.histogram(np.squeeze(x), bins=quantiles)[0].astype(int)
else:
eta = np.array([
np.histogram(xj, bins=qj)[0].astype(int) for xj, qj in zip(
np.asarray(x).T, np.asarray(quantiles).T)]).T
return eta | 935cd19913e420ea6713ca74ead19f720bdef782 | 2,610 |
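# Example usage (illustrative sketch):
x = np.array([0.1, 0.4, 0.6, 0.9])
quantiles = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
print(counts_matrix(x, quantiles))  # [1 1 1 1]: one sample per stratum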
import logging
def get_xml_string(stream_pointer):
""" This function checks for valid xml in a stream
and skips bytes until it hits something that looks like
xml. In general, this 'skipping' should never be used, as
we expect to see well-formed XML from the server.
stream_pointer: input stream
returns: string of xml
"""
    # This function avoids stream_pointer.seek() for the vast majority
    # of cases (when xml is formatted correctly) just because I don't
    # like using 'seek' (you never know when you're getting non-rewindable
    # streams)
c = stream_pointer.read(1)
count = 0
while c != '<' and c != '':
count = count + 1
c = stream_pointer.read(1)
if c == '':
stream_pointer.seek(0)
logging.error("Poorly formatted schema - no '<' found", \
extra={'xml':stream_pointer.read()})
return
xml_string = "<" + stream_pointer.read()
if count > 0:
stream_pointer.seek(0)
logging.error("Poorly formatted schema", \
extra={'xml':stream_pointer.read()})
return xml_string | 3fa2e3d05bfc66cee592c4c40cc1e9349e512c3a | 2,611 |
import re
def parse_header(header):
"""Parse header div for pub. title, authors journal, year, and doi."""
# TITLE
title = header.find('h1').text.strip()
# JOURNAL
journal = header.find('button').text.strip()
# PUBLICATION YEAR
pub_date = header.find('span', attrs={'class': "cit"}).text
year = re.search(r"(\d{4}).*?[\.;]", pub_date).group(1)
# DOI
doi_cit = header.find(attrs={'class': "citation-doi"})
    doi = re.sub(r"^doi:\s*", "", doi_cit.text.strip()).rstrip(".") if doi_cit else ""
# AUTHORS
authors = [parse_author(a) for a in header.find_all(
'span', attrs={'class': "authors-list-item"})]
authors = [a for a in authors if a]
return (title, journal, year, doi, authors) | 70dc1defbd9e6098e0754164d0dd23c7c79074d6 | 2,612 |
import pymysql
def put_this_into_the_db(query, param):
    """put this value into the database
    see : find_by_exactly_this_query()
    Arguments:
        query {str} -- parameterized SQL statement to execute
        param {tuple} -- values bound to the statement's placeholders
    Returns:
        bool -- True if the statement committed, False on error
    """
# Connect to the database
connection = pymysql.connect(host='localhost',
user='root',
password='(drElizabeth)',
db='communications',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
# Create a new record
sql = query
cursor.execute(sql, param)
# connection is not autocommit by default. So you must commit to save
# your changes.
connection.commit()
except Exception as e:
print(e)
connection.close()
return False
connection.close()
return True | 08cebe330cea5f10189342c6f3ec4f9f7cc022e1 | 2,614 |
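# Example usage (illustrative; the table and column names are hypothetical):
ok = put_this_into_the_db(
    "INSERT INTO messages (sender, body) VALUES (%s, %s)",
    ("alice", "hello"),
)
print(ok)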
def _gen_new_aux_page(label: str, is_title: bool) -> str:
"""Generate latex for auxillary pages"""
page = []
if is_title:
page.append("\\thispagestyle{empty}")
page.append("\\begin{center}")
page.append("\t\\vfil")
page.append("\t\\vspace*{0.4\\textheight}\n")
page.append("\t\\Huge")
page.append(f"\t\\bf{{{label}}}\n")
page.append("\t\\normalsize")
page.append("\\end{center}")
return "\n".join(page) | 3ff31ae80f007fd5da2dd6153ea605978421c086 | 2,615 |
import numpy as np
def expand_matrix_col(matrix, max_size, actual_size):
"""
add columns of zeros to the right of the matrix
"""
return np.append(matrix,
np.zeros((matrix.shape[0], max_size - actual_size), dtype=matrix.dtype), axis=1) | 23b20b443c880d1658eeec89910f9f3384576e6e | 2,616 |
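# Example usage (illustrative sketch):
m = np.ones((2, 3), dtype=int)
print(expand_matrix_col(m, max_size=5, actual_size=3))  # 2x5 matrix, zero-padded on the right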
import logging
def vms_list(access_token, config_id):
"""List FlexVM Virtual Machines"""
logging.info("--> List FlexVM Virtual Machines...")
uri = FLEXVM_API_BASE_URI + "vms/list"
headers = COMMON_HEADERS.copy()
headers["Authorization"] = f"Bearer {access_token}"
body = {"configId": config_id}
results = requests_post(uri, body, headers)
return results | eed35eefae4e26d743e0e96e791b6f5dd84d0c2f | 2,617 |
from unittest.mock import patch
def method_mock(cls, method_name, request):
"""
Return a mock for method *method_name* on *cls* where the patch is
reversed after pytest uses it.
"""
_patch = patch.object(cls, method_name)
request.addfinalizer(_patch.stop)
return _patch.start() | b14d991c42e0c05a51d9c193c3769b1e1e71dd1f | 2,619 |
import numpy as np
def get_eps_float32():
"""Return the epsilon value for a 32 bit float.
Returns
-------
_ : np.float32
Epsilon value.
"""
return np.finfo(np.float32).eps | e0506637aa3f9c29dc33d1256ce21d7dc686a4cd | 2,620 |
import tensorflow as tf
import tensorflow_probability as tfp
def distributions_to_params(nest):
"""Convert distributions to its parameters, keep Tensors unchanged.
Only returns parameters that have tf.Tensor values.
Args:
nest (nested Distribution and Tensor): Each Distribution will be
converted to dictionary of its Tensor parameters.
Returns:
A nest of Tensor/Distribution parameters. Each leaf is a Tensor or a
dict corresponding to one distribution, with keys as parameter name and
values as tensors containing parameter values.
"""
def _to_params(dist_or_tensor):
if isinstance(dist_or_tensor, tfp.distributions.Distribution):
params = dist_or_tensor.parameters
return {
k: params[k]
for k in params if isinstance(params[k], tf.Tensor)
}
elif isinstance(dist_or_tensor, tf.Tensor):
return dist_or_tensor
else:
raise ValueError(
"Only Tensor or Distribution is allowed in nest, ",
"got %s. nest is %s" % (dist_or_tensor, nest))
return tf.nest.map_structure(_to_params, nest) | bfa1cfd043bd46667de8ed07fd54fef959b272ae | 2,621 |
def _return_xarray_system_ids(xarrs: dict):
"""
Return the system ids for the given xarray object
Parameters
----------
xarrs
        Dataset or DataArray that we want the system identifiers from
Returns
-------
list
system identifiers as string within a list
"""
return list(xarrs.keys()) | 8380d1c2ae9db48eb4b97138dcd910d58085073e | 2,622 |
def sub(a, b):
"""Subtracts b from a and stores the result in a."""
return "{b} {a} ?+1\n".format(a=a, b=b) | dcc0ddfc9dbefe05d79dea441b362f0ddfe82627 | 2,623 |
import numpy as np
import pandas as pd
def metrics_cluster(models = None, ytrain = None, ytest = None,
testlabels = None,
trainlabels = None,
Xtrain = None, Xtest = None):
"""
Calculates Metrics such as accuracy, balanced accuracy,
specificity, sensitivity, precision, True Positives,
True Negatives etc.
These metrics are calculated for each cluster:
models: predictive models trained in each cluster
ytrain: Target labels of training set
ytest: target labels of test set
testlabels: a matrix with numbers from 0 to c-1 number of clusters
indicating in which cluster each data point belongs
in the test set
trainlabels: the same as testlabels but for training data
    Xtrain: training data
Xtest: testing data
"""
# matrix with metrics for each cluster
metricsTrain = []
#metrics for test data in each cluster
metricsTest = []
columns = ['cluster', 'size', 'high_cost%','low_cost%',
'TP', 'TN', 'FP', 'FN',
'FPR', 'specificity', 'sensitivity', 'precision',
'accuracy', 'balanced accuracy', 'f1', 'auc']
#Calculate the Metrics for Each Cluster
for cluster in np.arange( len( models ) ):
#INDEXES OF CLUSTER "cluster"
inC = np.where( trainlabels == cluster )[0]
inCT = np.where( testlabels == cluster )[0]
#predict probabilities of data in cluster "cluster"
#to be 1
probTrain = models[cluster].predict_proba(Xtrain[inC])[:, 1]
probTest = models[cluster].predict_proba(Xtest[inCT])[:, 1]
#calculate optimal tau based on F1
        try:
            tau = optimalTau(probTrain, ytrain[inC])
        except Exception:
            tau = 0.5
            print(" Warning: tau set to 0.5 due to error(s) "
                  "in <<optimalTau>> function")
#CALCULATE METRICS : ACCURACY, RECALL, PRECISION ,
#BALANCED ACCURACY ETC
metTrain , _= calc_metrics( custom_prob = probTrain,
y = ytrain[inC],
cluster = cluster,
tau = tau )
metTest, _ = calc_metrics( custom_prob = probTest,
y = ytest[inCT],
cluster = cluster,
tau = tau)
metricsTrain.append( metTrain )
metricsTest.append( metTest )
#Create a dataframe with metrics for better Visualization
metricsTrain = pd.DataFrame ( metricsTrain, columns = columns )
metricsTest = pd.DataFrame( metricsTest, columns = columns )
return metricsTrain, metricsTest | c9c131385a47df3de511db0e85ece20131647d4e | 2,624 |
import numpy as np
def prune_cloud_borders(numpy_cloud, clearance=1.2):
"""Delete points at the clouds' borders in range of distance, restricting the x-y plane (ground)"""
# get min/max of cloud
cloud_max_x = np.max (numpy_cloud[:, 0])
cloud_min_x = np.min (numpy_cloud[:, 0])
cloud_max_y = np.max (numpy_cloud[:, 1])
cloud_min_y = np.min (numpy_cloud[:, 1])
# define 4 borders
borders = [cloud_max_x - clearance, cloud_min_x + clearance,
cloud_max_y - clearance, cloud_min_y + clearance]
# index all points within borders
numpy_cloud = numpy_cloud[numpy_cloud[:, 0] < borders[0]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 0] > borders[1]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 1] < borders[2]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 1] > borders[3]]
return numpy_cloud | f208c9778343c3240803b52ff3e5f4701a8bb1cb | 2,625 |
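# Example usage (illustrative sketch):
cloud = np.random.rand(100, 3) * 10.0
inner = prune_cloud_borders(cloud, clearance=1.2)
print(cloud.shape, inner.shape)  # the pruned cloud has fewer (or equal) points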
def factory(name, Base, Deriveds):
"""Find the base or derived class by registered name.
Parameters
----------
Base: class
Start the lookup here.
Deriveds: iterable of (name, class)
A list of derived classes with their names.
Returns
-------
class
"""
Derived = Base
for (nm, NmCl) in Deriveds:
if nm == name:
Derived = NmCl
break
return Derived | 1bce29651004cf1f04740fd95a4f62c6c2277a72 | 2,626 |
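# Example usage (illustrative):
class Base: pass
class Special(Base): pass
assert factory("special", Base, [("special", Special)]) is Special
assert factory("other", Base, [("special", Special)]) is Base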
def root_sum_square(values, ax_val, index, Nper, is_aper, is_phys, unit):
"""Returns the root sum square (arithmetic or integral) of values along given axis
Parameters
----------
values: ndarray
array to derivate
ax_val: ndarray
axis values
index: int
index of axis along which to derivate
Nper: int
number of periods to replicate
is_aper: bool
True if values is anti-periodic along axis
is_phys: bool
        True if physical quantity (time/angle/z)
    unit: str
        unit of values, used to handle dB/dBA summation
Returns
-------
values: ndarray
root sum square of values
"""
# To sum dB or dBA
if "dB" in unit:
return my_sum(values, index, Nper, is_aper, unit)
else:
if is_aper and Nper is not None:
# Remove anti-periodicity since values is squared
is_aper = False
if ax_val.size == 1: # Do not use integrate for single point axes
is_phys = False
if is_phys:
values = integrate(values ** 2, ax_val, index, Nper, is_aper, is_phys)
else:
values = my_sum(values ** 2, index, Nper, is_aper, unit)
return np.sqrt(values) | 2af20718dc4d7a6b8d40e939a46d140fda5bf375 | 2,627 |
import json
from bson import ObjectId
from flask import request  # assumption: this route belongs to a Flask app
def comment_on_tweet():
""""
http://127.0.0.1:5000/user/comment_on_tweet
body = {
"id": "5da61dbed78b3b2b10a53582",
"comments" : {
"commenter" : "[email protected]",
"comment" : "comments against tweet : 7"
}
}
"""
data = request.get_json()
tweet_id = data['id']
    record = tweetDB.find_one({'_id': ObjectId(tweet_id)})
    if record is None:
return json.dumps({'error': "No collaborations to update matched id"})
else:
        try:
            if 'comments' in data and isinstance(data['comments'], dict):
                tweetDB.update(
                    {"_id": ObjectId(tweet_id)},
                    {
                        '$push': {
                            "comments": data['comments']
                        }
                    }
                )
                return json.dumps({"success": True})
            return json.dumps({"error": "No comments object supplied"})
        except Exception:
            return json.dumps({"error": "Exception found"}) | 232854a883a4bbd99a46dc3dc46e9a47fb1993dc | 2,628 |
from werkzeug.datastructures import Headers  # assumption: Headers is werkzeug's multi-dict of HTTP headers
def generate_git_api_header(event, sig):
"""
Create header for GitHub API Request, based on header information from https://developer.github.com/webhooks/.
:param event: Name of the event type that triggered the delivery.
:param sig: The HMAC hex digest of the response body. The HMAC hex digest is generated
using the sha1 hash function and the secret as the HMAC key.
"""
return Headers([
('X-GitHub-Event', event),
('X-GitHub-Delivery', "72d3162e-cc78-11e3-81ab-4c9367dc0958"),
('X-Hub-Signature', f"sha1={sig}"),
('User-Agent', "GitHub-Hookshot/044aadd"),
('Content-Type', "application/json"),
('Content-Length', 6615)
]) | 9b60d9eb6a8ea962bb7426970f2c2b82a229ef12 | 2,629 |
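# Example usage (illustrative; computes the sha1 HMAC the way GitHub does,
# with a made-up secret and payload):
import hashlib
import hmac
payload = b'{"action": "opened"}'
sig = hmac.new(b"my-webhook-secret", payload, hashlib.sha1).hexdigest()
headers = generate_git_api_header("pull_request", sig)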
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
"""
Returns the Parametric Gauusian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0)) | 2ff13a6b222663a200b77e526475331bfacd9c07 | 2,630 |
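# Example usage (illustrative sketch; assumes a pandas Series of returns):
import numpy as np
import pandas as pd
r = pd.Series(np.random.normal(0, 0.01, 1000))
print(var_gaussian(r, level=5))  # parametric Gaussian VaR at the 5% level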
import math
def lnglat_to_tile(lon, lat, zoom):
"""Get the tile which contains longitude and latitude.
:param lon: longitude
:param lat: latitude
:param zoom: zoom level
:return: tile tuple
"""
lon, lat = truncate(lon, lat)
n = 1 << zoom
tx = int((lon + 180.0) / 360.0 * n)
ty = int((1.0 - math.asinh(math.tan(math.radians(lat))) / math.pi) / 2.0 * n)
return Tile(tx, ty, zoom) | 84e1c103b03a2ec80a9585c8c852045c5d58cb76 | 2,631 |
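# Example usage (illustrative; Tile and truncate are not defined in this
# snippet, so minimal hypothetical stand-ins are supplied):
from collections import namedtuple
Tile = namedtuple("Tile", ["x", "y", "z"])
def truncate(lon, lat):
    return max(-180.0, min(180.0, lon)), max(-85.0511, min(85.0511, lat))
print(lnglat_to_tile(13.4, 52.5, 10))  # Tile(x=550, y=335, z=10), Berlin at zoom 10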
import numpy as np
from anndata import AnnData
from typing import Any, Callable, Optional, Union
def group_obs_annotation(
adata: AnnData,
gdata: AnnData,
*,
groups: Union[str, ut.Vector],
name: str,
formatter: Optional[Callable[[Any], Any]] = None,
method: str = "majority",
min_value_fraction: float = 0.5,
conflict: Optional[Any] = None,
inplace: bool = True,
) -> Optional[ut.PandasSeries]:
"""
Transfer per-observation data from the per-observation (cell) ``adata`` to the
per-group-of-observations (metacells) ``gdata``.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes, and the
``gdata`` containing the per-metacells summed data.
**Returns**
Observations (Cell) Annotations
``<name>``
The per-group-observation annotation computed based on the per-observation annotation.
If ``inplace`` (default: {inplace}), this is written to the ``gdata``, and the function returns
``None``. Otherwise this is returned as a pandas series (indexed by the group observation
names).
**Computation Parameters**
1. Iterate on all the observations (groups, metacells) in ``gdata``.
2. Consider all the cells whose ``groups`` annotation maps them into this group.
3. Consider all the ``name`` annotation values of these cells.
4. Compute an annotation value for the whole group of cells using the ``method``. Supported
methods are:
``unique``
All the values of all the cells in the group are expected to be the same, use this
unique value for the whole groups.
``majority``
Use the most common value across all cells in the group as the value for the whole
group. If this value doesn't have at least ``min_value_fraction`` (default:
{min_value_fraction}) of the cells, use the ``conflict`` (default: {conflict}) value
instead.
"""
group_of_cells = ut.get_o_numpy(adata, groups, formatter=ut.groups_description)
values_of_cells = ut.get_o_numpy(adata, name, formatter=formatter)
value_of_groups = np.empty(gdata.n_obs, dtype=values_of_cells.dtype)
assert method in ("unique", "majority")
if method == "unique":
with ut.timed_step(".unique"):
value_of_groups[group_of_cells] = values_of_cells
else:
assert method == "majority"
with ut.timed_step(".majority"):
for group_index in range(gdata.n_obs):
cells_mask = group_of_cells == group_index
cells_count = np.sum(cells_mask)
assert cells_count > 0
values_of_cells_of_group = values_of_cells[cells_mask]
unique_values_of_group, unique_counts_of_group = np.unique(values_of_cells_of_group, return_counts=True)
majority_index = np.argmax(unique_counts_of_group)
majority_count = unique_counts_of_group[majority_index]
if majority_count / cells_count < min_value_fraction:
value_of_groups[group_index] = conflict
else:
majority_value = unique_values_of_group[majority_index]
value_of_groups[group_index] = majority_value
if inplace:
ut.set_o_data(gdata, name, value_of_groups)
return None
return ut.to_pandas_series(value_of_groups, index=gdata.obs_names) | fc9abd9a983d24869f46efb71d29cd2db53508da | 2,632 |
def generate_languages(request):
"""
Returns the languages list.
"""
validate_api_secret_key(request.data.get('app_key'))
request_serializer = GenerateLanguagesRequest(data=request.data)
if request_serializer.is_valid():
get_object_or_404(TheUser, auth_token=request.data.get('user_token'))
list_of_languages = Language.objects.all()
return Response({'detail': 'successful',
'data': [language.language for language in list_of_languages]},
status=status.HTTP_200_OK)
else:
return invalid_data_response(request_serializer) | 67856b4bac293e272debb0ac9f2a2e0c863f4cdb | 2,634 |
def all_stocks():
"""
    Query the list of all stocks currently listed and trading normally.
:return:
"""
data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
return data["symbol"].values | 582381319bd0b613758f41de2005e192c802a923 | 2,635 |
import requests
import json
def getBotHash(userID, isCompile=False):
"""Gets the checksum of a user's bot's zipped source code"""
params = {"apiKey": API_KEY, "userID": userID}
if isCompile:
params["compile"] = 1
result = requests.get(MANAGER_URL+"botHash", params=params)
print("Getting bot hash:")
print(result.text)
return json.loads(result.text).get("hash") | 700d5418212836e1ad20a3a336587436cf1e93de | 2,636 |
import numpy as np
remote_buffer_counter = 0  # module-level counter backing the uuid generator
def next_remote_buffer_uuid(number=1):
"""Return the next uuid of a remote buffer."""
global remote_buffer_counter
if number == 1:
ret = remote_buffer_counter
else:
ret = np.arange(remote_buffer_counter, remote_buffer_counter + number)
remote_buffer_counter = (remote_buffer_counter + number) % (1 << 60)
return ret | da31c68dd199ff765ec6eaab17912dd4e3ea8ee4 | 2,637 |
def ball_collide(i):
"""
This function will handle the ball collide interaction between brick and paddle
:param i: (int) The index of the ball to interact
:return: (Bool) If this ball collide with brick or paddle
"""
global score
collide = False
for j in range(2):
for k in range(2):
object_get = graphics.window.get_object_at(graphics.ball[i].x + graphics.ball[i].width * j,
graphics.ball[i].y + graphics.ball[i].height * k)
if object_get in graphics.brick:
# brick lose life when being hit by ball
index = graphics.brick.index(object_get)
graphics.brick_collide(index)
score += 1
collide = True
elif object_get is graphics.paddle:
collide = True
return collide | 33ee97dde1302578067e16b8251e5c3787901697 | 2,638 |
from pathlib import Path
import networkx as nx
import rdflib
from tqdm import tqdm
def gen_sparse_graph(destination_folder: Path,
vertices_number: int,
edge_probability: float) -> Path:
"""
Generates sparse graph
:param destination_folder: directory to save the graph
:type destination_folder: Path
:param vertices_number: number of vertices in the graph
:type vertices_number: int
:param edge_probability: probability of edge existence in the graph
:type edge_probability: float
:return: path to generated graph
:rtype: Path
"""
tmp_graph = nx.generators.fast_gnp_random_graph(vertices_number, edge_probability)
output_graph = rdflib.Graph()
edges = list()
for v, to in tmp_graph.edges():
edges.append((v, 'A', to))
edges.append((v, 'AR', to))
for subj, pred, obj in tqdm(
edges,
desc=f'G{vertices_number}-{edge_probability} generation'
):
add_rdf_edge(subj, pred, obj, output_graph)
target = destination_folder / f'G{vertices_number}-{edge_probability}.xml'
write_to_rdf(target, output_graph)
return target | 79369b7c436ca903e5cbc620b95d6425d5646a55 | 2,639 |
def read_csv(path):
"""Reads the CSV file at the indicated path and returns a list of rows.
Parameters:
path (str): The path to a CSV file.
Returns:
list[row]: A list of rows. Each row is a list of strings and numbers.
"""
with open(path, 'rb') as f:
return decode_csv(f.read()) | 7b979a9e15ae07cbdb2733ec071ea82664df5bab | 2,642 |
from PIL import Image, ImageOps
def obj_mask(im):
"""Computes the mask for an image with transparent background
Keyword arguments:
im -- the input image (must be RGBA)
"""
A = im.split()[-1]
T = ImageOps.invert(A)
return Image.merge("RGBA", (T, T, T, A)) | bfcb6c9c8877dc2507bc9bc658eeb1140fc950bc | 2,643 |
import tensorflow as tf
def rnn(rnn_type, inputs, length, hidden_size, layer_num=1,
dropout_keep_prob=None, concat=True):
"""
Implements (Bi-)LSTM, (Bi-)GRU and (Bi-)RNN
    In this module, rnn is the main interface, so it is placed first.
Args:
rnn_type: the type of rnn, such as lstm
inputs: padded inputs into rnn, usually a d*p or l*p matrix
length: the valid length of the inputs,
usually the length of the sentence
hidden_size: the size of hidden units
layer_num: multiple rnn layer are stacked if layer_num > 1
dropout_keep_prob: dropout in RNN
concat: When the rnn is bidirectional, the forward outputs and backward
outputs are concatenated (such as a 2l*p matrix) if this is True,
else we add them (add two matrices).
Returns:
RNN outputs and final state (such as the state of lstm)
"""
if not rnn_type.startswith('bi'):
cell = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
        # get the cell; it is already expanded along the z and y axes, but not yet along the x axis
outputs, state = tf.nn.dynamic_rnn(cell, inputs,
sequence_length=length,
dtype=tf.float32)
        # use dynamic_rnn to unroll the cell along the x axis and feed it the inputs
        # outputs has shape hidden_size*length, state has shape hidden_size*layer_num*2
if rnn_type.endswith('lstm'):
c, h = state
state = h
            # use the hidden state as the state
else: # bidirectional rnn
cell_fw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
# forward cell
cell_bw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
# backward cell
        outputs, state = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs, sequence_length=length, dtype=tf.float32
        )
        # a bidirectional rnn doubles the hidden_size dimension compared to a
        # unidirectional rnn
        state_fw, state_bw = state
        # first split the state into forward state and backward state
if rnn_type.endswith('lstm'):
c_fw, h_fw = state_fw
c_bw, h_bw = state_bw
state_fw, state_bw = h_fw, h_bw
            # for lstm, the state we want is the hidden state
if concat:
outputs = tf.concat(outputs, 2)
            # concatenate the two tensors along the hidden_size dimension
state = tf.concat([state_fw, state_bw], 1)
            # the states are likewise concatenated along the hidden_size dimension
else:
outputs = outputs[0] + outputs[1]
state = state_fw + state_bw
            # simply add the vectors (tensors) or average them
return outputs, state | 80d06ed499c4668bd398efdf9358c8d72e2e3192 | 2,644 |
def find_expired(bucket_items, now):
"""
If there are no expired items in the bucket returns
empty list
>>> bucket_items = [('k1', 1), ('k2', 2), ('k3', 3)]
>>> find_expired(bucket_items, 0)
[]
>>> bucket_items
[('k1', 1), ('k2', 2), ('k3', 3)]
Expired items are returned in the list and deleted from
the bucket
>>> find_expired(bucket_items, 2)
['k1']
>>> bucket_items
[('k2', 2), ('k3', 3)]
"""
expired_keys = []
for i in range(len(bucket_items) - 1, -1, -1):
key, expires = bucket_items[i]
if expires < now:
expired_keys.append(key)
del bucket_items[i]
return expired_keys | 476fd079616e9f5c9ed56ee8c85171fcb0ddb172 | 2,645 |
from numpy import array, zeros
def find_sprites(image=None, background_color=None):
""" Find sprites
@image: MUST be an Image object
    @background_color: optional, either tuple (RGB/RGBA) or int (grayscale)
"""
def find_sprites_corners(sprite, label_map, numpy_array):
columns = set()
rows = set()
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = label_map[row_index][column_index]
if current_pixel.label == sprite:
columns.add(current_pixel.column)
rows.add(current_pixel.row)
return min(columns), min(rows), max(columns), max(rows)
def collect_sprites(exist_sprites_label, label_map, numpy_array):
""" Return A dictionary with key:the label of a sprite and value:it's Sprite object
"""
sprites = {}
for sprite in exist_sprites_label:
top_left_column, top_left_row, bottom_right_column, bottom_right_row = find_sprites_corners(sprite, label_map, numpy_array)
sprites[sprite] = Sprite(sprite, top_left_column, top_left_row, bottom_right_column, bottom_right_row)
return sprites
def search_exist_sprites_label(pixels_to_sprites):
""" Return a set of exist sprite's label inside the map
"""
exist_sprites = set()
for key in pixels_to_sprites:
exist_sprites.add(pixels_to_sprites[key])
return exist_sprites
def unify_sprites(pixels_to_sprites, unified_matrix, numpy_array):
""" Unify all pixels that are in a same sprite
Return a 2D-array map of sprites
"""
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = pixels_matrix[row_index][column_index]
current_label = current_pixel.label
# Ignore background pixels
if current_label == 0 or current_label not in pixels_to_sprites:
continue
current_pixel.label = pixels_to_sprites[current_label]
return unified_matrix
def analyze_connected_sprites(connected_sprites):
""" Find all pixels that are connected (belong to a same sprite)
Return a dict:
key: pixel'label
value: sprite's label that key belong to
"""
pixels_to_sprites = {}
for key in list(connected_sprites.keys()):
if key not in connected_sprites or len(connected_sprites[key]) == 1:
continue
in_progress = True
old_length = len(connected_sprites[key])
while in_progress:
for value in connected_sprites[key]:
if value not in connected_sprites:
continue
connected_sprites[key] = connected_sprites[key] | connected_sprites[value]
if value in connected_sprites and value != key:
del connected_sprites[value]
if old_length == len(connected_sprites[key]):
in_progress = False
else:
old_length = len(connected_sprites[key])
for key in connected_sprites:
for value in connected_sprites[key]:
pixels_to_sprites[value] = key
return pixels_to_sprites
def is_new_sprite(current_row, current_column, pixels_matrix, background_color):
""" Return False if there is a non-background pixel adjacent to current pixel
Ignores background pixels.
"""
neighbor_coordinates = [(-1, -1), (-1, 0), (-1, 1), (0, -1)]
current_pixel = pixels_matrix[current_row][current_column]
is_new_sprite = True
# Ignore background pixels
if current_pixel.is_background_pixel:
return False
# Check 4 neighbor of current pixels
for coordinate in neighbor_coordinates:
neighbor_row = current_row + coordinate[0]
neighbor_column = current_column + coordinate[1]
if 0 <= neighbor_row < image_height and 0 <= neighbor_column < image_width:
neighbor_pixel = pixels_matrix[neighbor_row][neighbor_column]
if neighbor_pixel.label == 0:
continue
if current_pixel.label != 0 and current_pixel.label != neighbor_pixel.label:
connected_sprites.setdefault(current_pixel.label, set()).add(neighbor_pixel.label)
else:
pixels_matrix[current_row][current_column].label = neighbor_pixel.label
is_new_sprite = False
return is_new_sprite
def is_ignored_pixel(current_pixel, numpy_array):
""" Check if that pixel is considered background pixel
Return False by default
"""
        if ((background_color == (0, 0, 0, 0) and current_pixel[-1] == 0)
                or (current_pixel == array(background_color)).all()
                or (image.mode == "L" and current_pixel == background_color)):
return True
return False
def analyze_numpy_array(background_color):
""" Convert image to numpy array then analyze each pixel
@background_color: RGBA or RGB or grayscale formats
Return Maps of pixels under format matrix and numpy array (multi-dimensional)
"""
numpy_array = array(image)
pixels_matrix = zeros(numpy_array.shape, dtype=int).tolist()
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = numpy_array[row_index, column_index]
pixels_matrix[row_index][column_index] = Pixel(row_index, column_index, is_ignored_pixel(current_pixel, numpy_array))
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
if is_new_sprite(row_index, column_index, pixels_matrix, background_color):
new_label = sprites_label[-1] + 1
pixels_matrix[row_index][column_index].label = new_label
sprites_label.append(new_label)
connected_sprites.setdefault(new_label, set()).add(new_label)
return pixels_matrix, numpy_array
def is_valid_background_color():
""" Check if arg @background_color is valid
Return True by default
"""
# Not int or tuple
if type(background_color) not in (int, tuple):
return False
# Invalid grayscale format
if type(background_color) == int:
if not 255 >= background_color >= 0 or image.mode != "L":
return False
# Invalid RGB/ RGBA format
if type(background_color) == tuple:
if len(background_color) not in (3,4) or image.mode == "L":
return False
for element in background_color:
if type(element) != int or not 255 >= element >= 0:
return False
return True
if background_color:
pass
elif image.mode == "RGBA":
background_color = (0,0,0,0)
else:
background_color = find_most_common_color(image)
# Check validation of arg background_color
if not is_valid_background_color() or not image:
print("Invalid arguments! Please try again!")
return
image_width, image_height = image.size
# Store all connected sprites that can be unified latter
connected_sprites = {}
# List of pixels label exist inside the map
sprites_label = [0]
# Maps of pixels under format matrix and numpy array
pixels_matrix, numpy_array = analyze_numpy_array(background_color)
# Dict of pixels'label corresponding to sprite's label
pixels_to_sprites = analyze_connected_sprites(connected_sprites)
# Map of sprites under format 2D-matrix
label_map = unify_sprites(pixels_to_sprites, pixels_matrix, numpy_array)
# Set of sprite-label that exist inside the map
exist_sprites_label = search_exist_sprites_label(pixels_to_sprites)
# A dictionary with key:the label of a sprite and value:it's Sprite object
sprites = collect_sprites(exist_sprites_label, label_map, numpy_array)
return (sprites, label_map) | 67a544e916ebd01fbddd16f755e386d820507433 | 2,646 |
def get_java_package(path):
"""Extract the java package from path"""
segments = path.split("/")
    # Find different root start indices based on potential java roots
    java_root_start_indices = [_find(segments, root) for root in ["java", "javatests"]]
    # Choose the root that starts earliest
    start_index = min(java_root_start_indices)
if start_index == len(segments):
fail("Cannot find java root: " + path)
return ".".join(segments[start_index + 1:]) | 253e503a146cffe6a8c00786539d8e3a2d6374f7 | 2,647 |
def get_plugin():
"""Return the filter."""
return TextFilter | b0d43cab9c3b887fd9735ecfdc5372a8e2aefb49 | 2,649 |
import time
def caltech256(root):
"""Caltech256 dataset from http://www.vision.caltech.edu/Image_Datasets/Caltech256
Pictures of objects belonging to 256 categories.
About 80 to 800 images per category.
Collected in September 2003 by Fei-Fei Li, Marco Andreetto,
and Marc 'Aurelio Ranzato.
The size of each image is roughly 300 x 200 pixels.
We have carefully clicked outlines of each object in these pictures,
these are included under the 'Annotations.tar'.
There is also a matlab script to view the annotaitons, 'show_annotations.m'.
Attention: if exist dirs `root/caltech256`, api will delete it and create it.
Data storage directory:
root = `/user/.../mydata`
caltech256 data:
`root/caltech256/train/007.bat/xx.jpg`
`root/caltech256/train/010.beer-mug/xx.ipg`
`root/caltech256/train/064.elephant-101/xx.jpg`
Args:
root: str, Store the absolute path of the data directory.
example:if you want data path is `/user/.../mydata/caltech256`,
root should be `/user/.../mydata`.
Returns:
Store the absolute path of the data directory, is `root/caltech256`.
"""
start = time.time()
task_path = assert_dirs(root, 'caltech256', make_root_dir=False)
url = "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar"
rq.files(url, gfile.path_join(root, url.split('/')[-1]))
un_tar(gfile.path_join(root, url.split('/')[-1]), task_path)
gfile.rename(gfile.path_join(task_path, '256_ObjectCategories'), gfile.path_join(task_path, 'train'))
gfile.remove(gfile.path_join(root, '256_ObjectCategories.tar'))
print('caltech256 dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return task_path | 972cec00a3360fe0ace5b1fb8165e45718c137c1 | 2,650 |
import cv2
import numpy as np
def draw__mask_with_edge(cv2_image: np.ndarray, edge_size: int = 10) -> np.ndarray:
"""
From a color image, get a black white image each instance separated by a border.
1. Change a color image to black white image.
2. Get edge image from `cv2_image`, then invert it to separate instance by a border.
3. Merge 1 and 2.
.. image:: https://i.imgur.com/YAHVVSl.png
:width: 2496px
:height: 1018px
:scale: 25%
:alt: mask_with_edge
:align: center
Parameters
----------
cv2_image : np.ndarray
BGR color Image
edge_size : int
Edge size, by default 10
Returns
-------
np.ndarray
Grayscale image each instance separated by a border.
Examples
--------
>>> cv2_img: np.ndarray = cv2.imread("...")
>>> edge_masked_image: np.ndarray = mask_with_edge(cv2_img, edge_size=10)
"""
img_edge = draw__edge_only(cv2_image, edge_size)
not_img_edge = cv2.bitwise_not(img_edge)
bw_image = img_color_to_bw(cv2_image)
return mask_image(bw_image, mask_image=not_img_edge) | 50a25b60fdfa83f8cd1ec707f4c0e63b3c621695 | 2,651 |
import pdbparse
def get_functions(pdb_file):
"""Get the offset for the functions we are interested in"""
methods = {'ssl3_new': 0,
'ssl3_free': 0,
'ssl3_connect': 0,
'ssl3_read_app_data': 0,
'ssl3_write_app_data': 0}
try:
# Do this the hard way to avoid having to load
# the types stream in mammoth PDB files
pdb = pdbparse.parse(pdb_file, fast_load=True)
pdb.STREAM_DBI.load()
pdb._update_names()
pdb.STREAM_GSYM = pdb.STREAM_GSYM.reload()
if pdb.STREAM_GSYM.size:
pdb.STREAM_GSYM.load()
pdb.STREAM_SECT_HDR = pdb.STREAM_SECT_HDR.reload()
pdb.STREAM_SECT_HDR.load()
# These are the dicey ones
pdb.STREAM_OMAP_FROM_SRC = pdb.STREAM_OMAP_FROM_SRC.reload()
pdb.STREAM_OMAP_FROM_SRC.load()
pdb.STREAM_SECT_HDR_ORIG = pdb.STREAM_SECT_HDR_ORIG.reload()
pdb.STREAM_SECT_HDR_ORIG.load()
except AttributeError:
pass
try:
sects = pdb.STREAM_SECT_HDR_ORIG.sections
omap = pdb.STREAM_OMAP_FROM_SRC
except AttributeError:
sects = pdb.STREAM_SECT_HDR.sections
omap = DummyOmap()
gsyms = pdb.STREAM_GSYM
if not hasattr(gsyms, 'globals'):
gsyms.globals = []
#names = []
for sym in gsyms.globals:
try:
name = sym.name.lstrip('_').strip()
if name.startswith('?'):
end = name.find('@')
if end >= 0:
name = name[1:end]
#names.append(name)
if name in methods:
off = sym.offset
virt_base = sects[sym.segment-1].VirtualAddress
addr = omap.remap(off+virt_base)
if methods[name] == 0:
methods[name] = addr
else:
methods[name] = -1
except IndexError:
pass
except AttributeError:
pass
#with open('names.txt', 'wb') as f_out:
# for name in names:
# f_out.write(name + "\n")
return methods | e2a36d3799004c1f96d5bccb3c4f0a8ad3ce2607 | 2,652 |
import typing
def empty_iterable() -> typing.Iterable:
"""
Return an empty iterable, i.e., an empty list.
:return: an iterable
:Example:
>>> from flpy.iterators import empty_iterable
>>> empty_iterable()
[]
"""
return list() | 904fe365abf94f790f962c9a49f275a6068be4f0 | 2,653 |
import math as M
def nearest_pow_2(x):
"""
Finds the nearest integer that is a power of 2.
In contrast to :func:`next_pow_2` also searches for numbers smaller than
the input and returns them if they are closer than the next bigger power
of 2.
"""
a = M.pow(2, M.ceil(M.log(x, 2)))
b = M.pow(2, M.floor(M.log(x, 2)))
if abs(a - x) < abs(b - x):
return int(a)
else:
return int(b) | c9dba6f38badcedee02f7071fc5fcf82519dbdcb | 2,654 |
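# Example usage (illustrative):
assert nearest_pow_2(5) == 4  # 4 is closer to 5 than 8 is
assert nearest_pow_2(7) == 8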
import pandas as pd
def timestamp_preprocess(ds, column, name):
"""This function takes the timestamp in the dataset and create from it features according to the settings above
Args:
ds ([dataframe]): dataset
column ([integer]): column index
name ([string]): column name
Returns:
[dataframe]: dataset after transformation
"""
ts = pd.to_datetime(ds[name])
for feature in TIMESTAMP_FEATURES.keys():
if TIMESTAMP_FEATURES[feature] is not None:
if feature == "timestamp":
ds[feature] = ts
elif feature == "day_of_week":
ds[feature] = ts.apply(lambda X: X.day_of_week)
elif feature == "day_of_month":
ds[feature] = ts.apply(lambda X: X.day)
elif feature == "month":
ds[feature] = ts.apply(lambda X: X.month)
elif feature == "hour":
ds[feature] = ts.apply(lambda X: X.hour)
elif feature == "minute":
ds[feature] = ts.apply(lambda X: X.minute)
elif feature == "year":
ds[feature] = ts.apply(lambda X: X.year)
return ds | 18203f8e9a016d3302d5fe06d498d68403eb5805 | 2,655 |
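# Example usage (illustrative; TIMESTAMP_FEATURES is not defined in this
# snippet, so a hypothetical configuration is supplied; Timestamp.day_of_week
# assumes pandas >= 1.1):
TIMESTAMP_FEATURES = {"timestamp": True, "day_of_week": True, "day_of_month": None,
                      "month": True, "hour": None, "minute": None, "year": True}
df = pd.DataFrame({"ts": ["2022-01-15 10:30:00", "2022-03-02 08:00:00"]})
print(timestamp_preprocess(df, 0, "ts")[["day_of_week", "month", "year"]])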
import numpy as np
def make_taubin_loss_function(x, y):
"""closure around taubin_loss_function to make
surviving pixel positions availaboe inside.
x, y: positions of pixels surviving the cleaning
should not be quantities
"""
def taubin_loss_function(xc, yc, r):
"""taubin fit formula
reference : Barcelona_Muons_TPA_final.pdf (slide 6)
"""
upper_term = (((x - xc) ** 2 + (y - yc) ** 2 - r ** 2) ** 2).sum()
lower_term = (((x - xc) ** 2 + (y - yc) ** 2)).sum()
return np.abs(upper_term) / np.abs(lower_term)
return taubin_loss_function | b11aae3586cb387a6e280f5b0e985dcf6364306e | 2,656 |
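# Example usage (illustrative sketch): points on the unit circle give ~zero loss
theta = np.linspace(0, 2 * np.pi, 50)
loss = make_taubin_loss_function(np.cos(theta), np.sin(theta))
print(loss(0.0, 0.0, 1.0))  # close to 0 for the true circle parameters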
from sklearn.ensemble import RandomForestClassifier
def init_rf_estimator():
"""
Instantiate a Random forest estimator with the optimized hyper-parameters.
:return: The RandomForest estimator instance.
"""
rf = RandomForestClassifier(
criterion=RF_CRIT,
min_samples_leaf=RF_MIN_SAMPLES_LEAF,
max_features='auto',
n_estimators=RF_N_ESTS,
n_jobs=-1)
return rf | 1171b5582869151823da29c61545c857e04ffed6 | 2,657 |
builtin_str = str  # py2/py3 compat alias (assumption: originally from requests.compat)
def dict_filter(d, exclude=()):
"""
Exclude specified keys from a nested dict
"""
def fix_key(k):
return str(k) if isinstance(k, builtin_str) else k
if isinstance(d, list):
return [dict_filter(e, exclude) for e in d]
if isinstance(d, dict):
items = ((fix_key(k), v) for k, v in d.items())
return {
k: dict_filter(v, exclude) for k, v in items if k not in exclude
}
return d | afa87c730fd105741a3bf95601d682fa817b903d | 2,658 |
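# Example usage (illustrative):
data = {"name": "a", "secret": 1, "child": {"secret": 2, "keep": 3}}
print(dict_filter(data, exclude=("secret",)))  # {'name': 'a', 'child': {'keep': 3}}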
async def mongoengine_multiple_objects_exception_handler(request, exc):
"""
Error handler for MultipleObjectsReturned.
Logs the MultipleObjectsReturned error detected and returns the
appropriate message and details of the error.
"""
logger.exception(exc)
return JSONResponse(
Response(success=False, error_code=422, message=str(exc)).dict()
) | c0e3d8d25ee02b9240cbf02f532cb853cbc693ee | 2,659 |
def _get_sample_times(*traces, **kwargs):
"""Get sample times for all the traces."""
# Set the time boundaries for the DataFrame.
max_stop_time = max(
[trace.stop_time() for trace in traces if isinstance(trace, Trace)]
)
stop_time = kwargs.pop("stop_time", max_stop_time)
min_start_time = min(
[trace.start_time() for trace in traces if isinstance(trace, Trace)]
)
start_time = kwargs.pop("start_time", min_start_time)
# Get all the sample times of all the traces between the start and stop times.
times = set([start_time, stop_time])
for trace in traces:
times.update(
set(trace.get_sample_times(start_time=start_time, stop_time=stop_time))
)
# If requested, fill in additional times between sample times.
step = kwargs.pop("step", 0)
if step:
times.update(set(range(start_time, stop_time + 1, step)))
# Sort sample times in increasing order.
times = sorted(list(times))
return times | 3e20bed62017e8306b3489ec41b7f6cd59a4c916 | 2,660 |