content | sha1 | id
---|---|---|
from ctypes import POINTER, addressof, c_uint
def poppler_page_get_text_layout(page):
"""
Wrapper of an underlying c-api function not yet exposed by the
python-poppler API.
Returns a list of text rectangles on the pdf `page`
"""
n = c_uint(0)
rects = CRectangle.ptr()
# From python-poppler internals it is known that hash(page) returns the
# c-pointer to the underlying glib object. See also the repr(page).
page_ptr = hash(page)
_c_text_layout(page_ptr, rects, n)
# Obtain pointer to array of rectangles of the correct length
rectangles = POINTER(CRectangle * n.value).from_address(addressof(rects))
get_text = page.get_selected_text
poppler_rect = poppler.Rectangle()
result = []
for crect in rectangles.contents:
# result.append(Rectangle(
# x1=crect.x1, y1=crect.y1, x2=crect.x2, y2=crect.y2))
_ = (crect.x1, crect.y1, crect.x2, crect.y2)
poppler_rect.x1, poppler_rect.y1, poppler_rect.x2, poppler_rect.y2 = _
text = get_text(GLYPH, poppler_rect).decode("utf8")
if text.endswith(" \n"):
text = text[:-2]
elif text.endswith(" ") and len(text) > 1:
text = text[:-1]
elif text.endswith("\n"):
text = text[:-1]
rect = Box(
rect=Rectangle(x1=crect.x1, y1=crect.y1, x2=crect.x2, y2=crect.y2),
text=text,
)
result.append(rect)
# TODO(pwaller): check that this free is correct
g_free(rectangles)
return result | 2c7313004c9a551d943be741c904cadb4593b892 | 3,653,900 |
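For context, a hedged usage sketch of the wrapper above, assuming the legacy pypoppler bindings and a placeholder PDF path (neither is part of the original snippet):

```python
# Hypothetical usage; poppler.document_new_from_file and the file URI are
# assumptions about the legacy pypoppler API, not taken from the snippet above.
import poppler

document = poppler.document_new_from_file("file:///tmp/example.pdf", None)
page = document.get_page(0)
for box in poppler_page_get_text_layout(page):
    print(box.rect, repr(box.text))
```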
from typing import Optional
def get_volume(name: Optional[str] = None,
namespace: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVolumeResult:
"""
## Example Usage
```python
import pulumi
import pulumi_harvester as harvester
ubuntu20_dev_mount_disk = harvester.get_volume(name="ubuntu20-dev-mount-disk",
namespace="default")
```
:param str name: A unique name
"""
__args__ = dict()
__args__['name'] = name
__args__['namespace'] = namespace
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('harvester:index/getVolume:getVolume', __args__, opts=opts, typ=GetVolumeResult).value
return AwaitableGetVolumeResult(
access_mode=__ret__.access_mode,
attached_vm=__ret__.attached_vm,
description=__ret__.description,
id=__ret__.id,
image=__ret__.image,
name=__ret__.name,
namespace=__ret__.namespace,
phase=__ret__.phase,
size=__ret__.size,
state=__ret__.state,
storage_class_name=__ret__.storage_class_name,
tags=__ret__.tags,
volume_mode=__ret__.volume_mode) | 528dfb0432b30b40037b86a234e83c8327eb5206 | 3,653,901 |
def both_block_num_missing(record):
"""
    Returns True if both block numbers are missing
:param record: dict - The record being evaluated
:return: bool
"""
rpt_block_num = record.get("rpt_block_num", "") or ""
rpt_sec_block_num = record.get("rpt_sec_block_num", "") or ""
# True, if neither address has a block number.
if rpt_block_num == "" and rpt_sec_block_num == "":
return True
return False | 63e2fdaef78dbc3c6560a4b015ed022583f30d05 | 3,653,902 |
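A quick illustration of the check above (the record values are made up):

```python
# Both block numbers empty or None -> True; any non-empty value -> False
assert both_block_num_missing({"rpt_block_num": "", "rpt_sec_block_num": None})
assert not both_block_num_missing({"rpt_block_num": "12", "rpt_sec_block_num": ""})
```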
def map_keys(func,dic):
"""
TODO:
Test against all types
handle python recursion limit
"""
return {func(k):map_keys(func,v)
if isinstance(v,dict) else v
for k,v in dic.items()} | 5dc2c292e769037300d6b3f118e887bcb50752ac | 3,653,903 |
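A short illustration of the recursive key mapping; values are left untouched and nested dicts are recursed into:

```python
# Upper-case every key at every nesting level
nested = {"a": {"b": 1}, "c": [1, 2]}
print(map_keys(str.upper, nested))  # {'A': {'B': 1}, 'C': [1, 2]}
```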
def jsonize(v):
"""
Convert the discount configuration into a state in which it can be
stored inside the JSON field.
Some information is lost here; f.e. we only store the primary key
of model objects, so you have to remember yourself which objects
are meant by the primary key values.
"""
if isinstance(v, dict):
return dict((i1, jsonize(i2)) for i1, i2 in v.items())
if hasattr(v, "__iter__"):
return [jsonize(i) for i in v]
if isinstance(v, Model):
return v.pk
return v | 1aa7954c0089726b7707e0180b35a12d679c286b | 3,653,904 |
def clean_kaggle_movies(movies_df):
"""
Clean the Kaggle movie data with the following steps:
1. Drop duplicate rows
2. Filter out adult videos and drop unnecessary columns
3. Recast columns to appropriate data types
Parameters
----------
movies_df : Pandas dataframe
Kaggle movie data
Returns
-------
Pandas dataframe
Clean Kaggle movie data
"""
# Drop duplicate rows
movies_df = udf_movies.drop_duplicates(movies_df)
# Filter out adult videos and drop unnecessary columns
movies_df = drop_cols(movies_df)
# Recast columns to appropriate data types
movies_df = recast_cols(movies_df)
return movies_df | 05d5a0eb965b26cdc04dcfb9f3a76690d272389c | 3,653,905 |
from typing import List
from typing import Dict
from typing import Any
import torch
import pathlib
import os
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import EarlyStopping
def generate_trainer(
datafiles: List[str],
labelfiles: List[str],
class_label: str,
batch_size: int,
num_workers: int,
optim_params: Dict[str, Any]={
'optimizer': torch.optim.Adam,
'lr': 0.02,
},
weighted_metrics: bool=None,
scheduler_params: Dict[str, float]=None,
wandb_name: str=None,
weights: torch.Tensor=None,
max_epochs=500,
*args,
**kwargs,
):
"""
Generates PyTorch Lightning trainer and datasets for model training.
:param datafiles: List of absolute paths to datafiles
:type datafiles: List[str]
:param labelfiles: List of absolute paths to labelfiles
:type labelfiles: List[str]
:param class_label: Class label to train on
:type class_label: str
:param weighted_metrics: To use weighted metrics in model training
:type weighted_metrics: bool
:param batch_size: Batch size in dataloader
:type batch_size: int
:param num_workers: Number of workers in dataloader
:type num_workers: int
    :param optim_params: Dictionary defining optimizer and any needed/optional arguments for optimizer initialization
:type optim_params: Dict[str, Any]
:param wandb_name: Name of run in Wandb.ai, defaults to ''
:type wandb_name: str, optional
:return: Trainer, model, datamodule
:rtype: Trainer, model, datamodule
"""
device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f'Device is {device}')
here = pathlib.Path(__file__).parent.absolute()
data_path = os.path.join(here, '..', '..', '..', 'data')
wandb_logger = WandbLogger(
project=f"tabnet-classifer-sweep",
name=wandb_name
)
uploadcallback = UploadCallback(
path=os.path.join(here, 'checkpoints'),
desc=wandb_name
)
early_stop_callback = EarlyStopping(
monitor=("weighted_val_accuracy" if weighted_metrics else "val_accuarcy"),
min_delta=0.00,
patience=3,
verbose=False,
mode="max"
)
module = DataModule(
datafiles=datafiles,
labelfiles=labelfiles,
class_label=class_label,
batch_size=batch_size,
num_workers=num_workers,
)
model = TabNetLightning(
input_dim=module.num_features,
output_dim=module.num_labels,
weighted_metrics=weighted_metrics,
optim_params=optim_params,
scheduler_params=scheduler_params,
weights=weights,
)
trainer = pl.Trainer(
gpus=(1 if torch.cuda.is_available() else 0),
auto_lr_find=False,
# gradient_clip_val=0.5,
logger=wandb_logger,
max_epochs=max_epochs,
# callbacks=[
# uploadcallback,
# ],
# val_check_interval=0.25, # Calculate validation every quarter epoch instead of full since dataset is large, and would like to test this
)
return trainer, model, module | e3448901793e2b58befff751b89edfa968a9e0d1 | 3,653,906 |
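A hedged sketch of how the returned objects would typically be driven; the file paths and class label below are illustrative placeholders, not taken from the original code:

```python
# Hypothetical call; paths and class_label are illustrative placeholders.
trainer, model, module = generate_trainer(
    datafiles=["/data/expression.csv"],
    labelfiles=["/data/labels.csv"],
    class_label="cell_type",
    batch_size=32,
    num_workers=4,
)
trainer.fit(model, datamodule=module)
```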
import numpy as np
def make_shift_x0(shift, ndim):
"""
Returns a callable that calculates a shifted origin for each derivative
of an operation derivatives scheme (given by ndim) given a shift object
which can be a None, a float or a tuple with shape equal to ndim
"""
if shift is None:
return lambda s, d, i, j: None
elif isinstance(shift, float):
return lambda s, d, i, j: d + s * d.spacing
elif type(shift) is tuple and np.shape(shift) == ndim:
if len(ndim) == 1:
return lambda s, d, i, j: d + s[j] * d.spacing
elif len(ndim) == 2:
return lambda s, d, i, j: d + s[i][j] * d.spacing
else:
raise ValueError("ndim length must be equal to 1 or 2")
raise ValueError("shift parameter must be one of the following options: "
"None, float or tuple with shape equal to %s" % (ndim,)) | e6b01e43c8bf73ba21a9bdfcd27a93db9ccb7478 | 3,653,907 |
import os
import pandas as pd
def load_pca_tsne(pca, name, tpmmode=True, logmode=True, exclude=[], cache=True, dir='.'):
"""
Run t-sne using pca result
Parameters
----------
pca : array, shape (n_samples, n_pca)
pca matrix.
name: str
name of pca results
Returns
-------
tsne : array, shape (n_samples, 2)
The tsne coordinates for each sample
"""
file = get_tsne_file(dir, name, tpmmode=tpmmode, logmode=logmode)
if not os.path.isfile(file) or not cache:
print('{} was not found, creating it...'.format(file))
# perplexity = 5, n_iter = 5000, learning = 10
tsne = new_tsne()
if isinstance(pca, SparseDataFrame):
tsne_results = SparseDataFrame(tsne.fit_transform(pca.data), pca.index, pca.columns)
else:
tsne_results = tsne.fit_transform(pca)
data = pd.DataFrame({'Barcode':pca.index, 'TSNE-1':tsne_results[:, 0], 'TSNE-2':tsne_results[:, 1]})
data = data[['Barcode', 'TSNE-1', 'TSNE-2']]
data = data.set_index('Barcode')
data.to_csv(file, sep='\t', header=True)
return read_tsne(file) | 43946e73e367d2d5f0e623dff5f5a8d481917e87 | 3,653,908 |
import numpy as np
from scipy.integrate import odeint
def one_zone_numerical(params, ref_coeff, num_molecules=1e-9):
"""Returns one zone reactor exit flow."""
time = np.array(params[0], dtype=float)
gradient = np.array(params[1], dtype=float)
gridpoints = int(params[2])
step_size, area = float(params[3]), float(params[4])
solu = odeint(
_one_zone_fd, np.zeros(int(gradient.size)), time,
args=(ref_coeff, gradient, gridpoints, step_size, area)
)
return solu[:, -2] * ref_coeff * area / (step_size * num_molecules) | 4eb17f9684d1d12175bf85d15bada4178074de8a | 3,653,909 |
import re
def get_all_event_history_links():
    """Finds all completed events on the ufcstats website and returns
    their event-details links (deduplicated).
    """
url = "http://www.ufcstats.com/statistics/events/completed?page=all"
href_collection = get_all_a_tags(url)
#Add all links to list that have event-details in them
links = []
for i in href_collection:
site_regex = re.search('event-details', i)
if site_regex is not None:
links.append(i)
links = list(dict.fromkeys(links))
return links | ab452c66460f18b5d55ce2be2e22877f07e959d5 | 3,653,910 |
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.colors as colors
def profile_plot(x,z,data,ax,context_label = None,add_labels = False,xlabel = None,xmin = None, xmax=None, max_depth=None):
"""
UnTRIM-like profile plot of salinity
xmin,xmax are the bounds (in km) of the profile
max_depth is the maximum depth, data assumed to be
"""
global x_part
global z_part
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
if (not max_depth):
max_depth = data.shape[0]
if (xmin):
min_station = x[0,:].searchsorted(xmin)
else:
min_station = 0
if (xmax):
max_station = x[0,:].searchsorted(xmax)
else:
max_station = x.shape[1]
set_index_bounds(min_station,max_station,max_depth)
print("context label: %s" % context_label)
print("add label: %s" % add_labels)
print("min x dist %s max x dist %s" %(xmin,xmax))
print("min x station %s max x station %s max_depth %s" %(min_station,max_station,max_depth))
x_part = x[0:max_depth,min_station:max_station]
z_part = z[0:max_depth,min_station:max_station]
data_part = data[0:max_depth,min_station:max_station]
data_part = np.ma.masked_where(np.isnan(data_part),data_part)
norml = ThreePointLinearNorm(2,0,20)
cmap=cm.get_cmap("RdBu_r").copy()
cmap.set_bad("white",0.0)
do_image=False
if do_image:
lev = [0.0,0.1,0.2,0.5,1.0,2.0,4.0,8.0,16.0,24.0,32.0]
norml = colors.BoundaryNorm(lev, 256)
im = ax.imshow(vertical_fill(data_part), interpolation='bilinear', origin='upper',
aspect = 'auto', vmin = 0.0, vmax = 32.0,
norm=norml, cmap=cmap,
extent=(x[0,min_station],x[0,max_station-1],max_depth,0))
bad_data = np.ma.masked_where(~data_part.mask, data_part.mask)
ax.imshow(bad_data, interpolation='nearest', aspect = 0.75, cmap=cm.gray,extent=(x[0,min_station],x[0,max_station-1],max_depth,0))
# Colorbar for the image.
cbi = ax.colorbar(im, orientation='vertical', shrink=0.6,ticks = lev)
cbi.set_label("Salinity (psu)", size = 14)
else:
im = None
do_line_contour = True
if do_line_contour:
lev = np.array([2.0, 4.0, 8.0, 16.0])
greys = 1.0-lev/32.
cs = ax.contour(x_part,z_part,data_part,levels = lev,colors=['black','black','black','black'],linewidths=2)
greylev = 1.0
for c in cs.collections:
c.set_linestyle('solid')
#Thicken the zero contour.
zc = cs.collections[0]
#ax.setp(zc, linewidth=3)
#ax.setp(zc, linestyle = 'dotted')
ax.clabel(cs, lev, # label every second level
inline=1,
inline_spacing = 3,
fmt='%1.1f',
fontsize=12)
else:
cs = None
do_filled_contour = True
if do_filled_contour:
lev = [0.0,0.1,0.2,0.5,1.0,2.0,4.0,8.0,16.0,32.0]
norml = colors.BoundaryNorm(lev, 256)
filled_data_part = vertical_fill(data_part)
bad_data = np.ma.masked_where(~data_part.mask, data_part.mask, copy=True)
maxz = np.argmax(bad_data,axis=0)
maxz[maxz == 0] = max_depth
maxz = np.concatenate(([max_depth],maxz,[max_depth]))
xstat = np.concatenate(([x_part[0,0]],x_part[0,:],[x_part[0,-1]]))
ax.set_ylim([max_depth,0])
cs = ax.contourf(x_part,z_part,filled_data_part,levels = lev, cmap = cm.RdBu_r,
norm = norml,extent=(x[0,min_station],x[0,max_station-1],max_depth,0))
ax.fill(xstat,maxz,"darkgray")
#cb = ax.colorbar(cs, orientation='vertical', shrink=0.8,ticks = [32,16,8,4,2,1,0.5,0.2,0.1,0])
#cb.set_label("Salinity (psu)", size = 14)
add_cruise_loc = False
if add_cruise_loc:
xloc = x_part[0]
zloc = np.ones_like(xloc)*19
stops, = ax.plot(xloc,zloc,'o',label="USGS cast")
xloc = np.array([84.86])
yloc = np.ones_like(xloc)*19
dayflow, = ax.plot(xloc,yloc,"*",label="Dayflow X2",markersize=14)
add_labels = True
if (add_labels):
inbound_label_dists = [x for x in location_labels.keys() if (x>xmin and x<xmax)]
bbox_props = dict(boxstyle="rarrow,pad=0.3", fc="white", lw=2)
for dist in inbound_label_dists:
t = ax.text(dist, max_depth-2, location_labels[dist], ha="center", va="bottom", rotation=270,
size=12,
bbox=bbox_props)
if (add_cruise_loc and add_labels):
font= FontProperties(color="white");
leg = ax.legend(("USGS cast","Dayflow X2"),"center left",numpoints=1,frameon=False)
leg_texts = leg.get_texts()
if len(leg_texts) > 0:
leg_texts[0].set_color("white")
leg_texts[1].set_color("white")
    ttxt = None
    if context_label:
        ttxt = ax.text(x_part[0,0]+2,5,context_label,size = 18, color = 'white')
#ax.title('Vertical Salinity Profile', size = 14)
if xlabel:
ax.set_xlabel(xlabel, size = 14)
ax.set_ylabel('Depth (m)', size = 14)
return im, cs, ttxt | 9758ae430a2d45fd14932b8476648c61120c9749 | 3,653,911 |
def plot_book_wordbags(urn, wordbags, window=5000, pr = 100):
"""Generate a diagram of wordbags in book """
return plot_sammen_vekst(urn, wordbags, window=window, pr=pr) | 12a03c70316d3920419f85cd2e4af87c7a16f0f8 | 3,653,912 |
def map_line2citem(decompilation_text):
"""
Map decompilation line numbers to citems.
This function allows us to build a relationship between citems in the
ctree and specific lines in the hexrays decompilation text.
Output:
+- line2citem:
| a map keyed with line numbers, holding sets of citem indexes
|
| eg: { int(line_number): sets(citem_indexes), ... }
'
"""
line2citem = {}
#
# it turns out that citem indexes are actually stored inline with the
# decompilation text output, hidden behind COLOR_ADDR tokens.
#
# here we pass each line of raw decompilation text to our crappy lexer,
# extracting any COLOR_ADDR tokens as citem indexes
#
for line_number in range(decompilation_text.size()):
line_text = decompilation_text[line_number].line
line2citem[line_number] = lex_citem_indexes(line_text)
return line2citem | 86c8a24f769c7404560bb63c34f2b60ff3a097da | 3,653,913 |
def from_dict(params, filter_func=None, excludes=[], seeds=[], order=2,
random_seed=None):
"""Generates pair-wise cases from given parameter dictionary."""
if random_seed is None or isinstance(random_seed, int):
return _from_dict(params, filter_func, excludes, seeds, order, random_seed)
# Find the best (smallest) test suite by trying multiple seeds.
best = None
for rs in random_seed:
case = _from_dict(params, filter_func, excludes, seeds, order, rs)
if best is None or len(case) < len(best):
best = case
return best | d9ecd0528340adbe874afa70d3a9309e53ff87cc | 3,653,914 |
def ensure_min_topology(*args, **kwargs):
"""
verifies if the current testbed topology satifies the
minimum topology required by test script
:param spec: needed topology specification
:type spec: basestring
:return: True if current topology is good enough else False
:rtype: bool
"""
return getwa().ensure_min_topology(*args, **kwargs) | 364e7b3c166b725fd73846e1814bd3b7ab92ad96 | 3,653,915 |
def encode_mode(mode):
"""
JJ2 uses numbers instead of strings, but strings are easier for humans to work with
CANNOT use spaces here, as list server scripts may not expect spaces in modes in port 10057 response
:param mode: Mode number as sent by the client
:return: Mode string
"""
if mode == 16:
return "headhunters"
if mode == 15:
return "domination"
if mode == 14:
return "tlrs"
if mode == 13:
return "flagrun"
if mode == 12:
return "deathctf"
if mode == 11:
return "jailbreak"
if mode == 10:
return "teambattle"
if mode == 9:
return "pestilence"
if mode == 8:
return "xlrs"
if mode == 7:
return "lrs"
if mode == 6:
return "roasttag"
if mode == 5:
return "coop"
if mode == 4:
return "race"
if mode == 3:
return "ctf"
if mode == 2:
return "treasure"
if mode == 1:
return "battle"
return "unknown" | db83c419acb299284b7b5338331efc95051115a5 | 3,653,916 |
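The same mapping can also be written as a single dictionary lookup, which is easier to keep in sync with a reverse mapping; a behaviour-preserving sketch (not the original implementation):

```python
# Table-driven alternative to the if-chain above
_MODES = {
    16: "headhunters", 15: "domination", 14: "tlrs", 13: "flagrun",
    12: "deathctf", 11: "jailbreak", 10: "teambattle", 9: "pestilence",
    8: "xlrs", 7: "lrs", 6: "roasttag", 5: "coop", 4: "race",
    3: "ctf", 2: "treasure", 1: "battle",
}

def encode_mode_alt(mode):
    return _MODES.get(mode, "unknown")
```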
def split_array(arr, num_of_splits):
"""split an array into equal pieces"""
# TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7
size = arr.shape[0]
if size < num_of_splits:
return [arr[i:i + 1] for i in range(size)]
slice_len, rest = divmod(size, num_of_splits)
div_points = [0] + [(slice_len * index + min(index, rest) + slice_len +
(index < rest)) for index in range(num_of_splits)]
slices = [
arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)
]
return slices | f8d4812619725940f9e986d238bc4e0f650e8da6 | 3,653,917 |
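A quick illustration of the splitting behaviour: the first `size % num_of_splits` slices receive one extra element, and an array smaller than the number of splits yields one-element slices:

```python
import numpy as np

print([len(s) for s in split_array(np.arange(10), 3)])  # [4, 3, 3]
print(len(split_array(np.arange(2), 4)))                # 2 slices of one element each
```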
import random
import numpy as np
def randclust(SC, k):
""" cluster using random """
# generate labels.
labels = np.array([random.randint(0,k-1) for x in range(SC.shape[1])])
# compute the average.
S, cats = avg_cat(labels, SC)
# return it.
return S, labels, cats | 42530495959977c1289fa6bdc2089747a246d210 | 3,653,918 |
def get_domains_by_name(kw, c, adgroup=False):
"""Searches for domains by a text fragment that matches the domain name (not the tld)"""
domains = []
existing = set()
if adgroup:
existing = set(c['adgroups'].find_one({'name': adgroup}, {'sites':1})['sites'])
for domain in c['domains'].find({}, {'domain': 1, 'alexa.rank.latest':1}):
try:
rank = domain['alexa']['rank']['latest']
domain_name = domain['domain'].replace('#', '.')
if kw in domain_name:
if domain_name not in existing:
domains.append({
"domain": domain_name,
"rank": rank
})
except KeyError:
pass
return domains[:50] | 6ecaf4ccf1ecac806fb621c02282bf46929459ce | 3,653,919 |
def read_bbgt(filename):
"""
Read ground truth from bbGt file.
See Piotr's Toolbox for details
"""
boxes = []
with open(filename,"r") as f:
signature = f.readline()
if not signature.startswith("% bbGt version=3"):
raise ValueError("Wrong file signature")
rects = []
ignore = []
labels = []
for line in f:
elms = line.strip().split()
assert len(elms) == 12, "Invalid file"
lbl = elms[0]
rect = tuple(map(float, elms[1:5]))
ign = int(elms[10])
rects.append(rect)
ignore.append(ign)
labels.append(lbl)
if not rects:
rects = np.empty((0,4),"f")
ignore = np.empty(0,"i")
labels = np.empty(0,"<U1")
boxes = bbox_list(np.array(rects,"f"),
format=RectFormat.XYWH,
ignore=np.array(ignore,"i"),
labels=np.array(labels))
return boxes | 25cfe28de9ed67ca0888da5bf27d01a803da8690 | 3,653,920 |
def measure(G, wire, get_cb_delay = False, meas_lut_access = False):
"""Calls HSPICE to obtain the delay of the wire.
Parameters
----------
G : nx.MultiDiGraph
The routing-resource graph.
wire : str
Wire type.
get_cb_delay : Optional[bool], default = False
Determines the position of the wire and the connection block and then calls
>>meas_local_wire.py<< to obtain the delay from the wire to a LUT input pin.
Returns
-------
float
Delay.
"""
#------------------------------------------------------------------------#
def run():
"""Runs HSPICE and parses the delay."""
with open(netlist_filename, "w") as outf:
outf.write(conv_nx_to_spice(net, meas_lut_access = meas_lut_access))
hspice_call = os.environ["HSPICE"] + " %s > %s" % (netlist_filename, hspice_dump)
os.system(hspice_call)
scale_dict = {'f' : 1e-15, 'p' : 1e-12, 'n' : 1e-9}
with open(hspice_dump, "r") as inf:
lines = inf.readlines()
#os.system("rm " + hspice_dump)
td_dict = {}
get_td = lambda l : round(float(l.split()[1][:-1]), 1) * scale_dict[l.split()[1][-1]]
get_tap = lambda l : wire + '_' + l.split('=', 1)[0].split('_', 1)[1]
for line in lines:
if "tfall=" in line:
tfall = get_td(line)
elif "trise=" in line:
trise = get_td(line)
elif meas_lut_access:
if "tfall_ble_mux" in line or "trise_ble_mux" in line:
td = get_td(line)
if td < 0:
print "Negative time!"
raise ValueError
try:
td_dict["ble_mux"] = 0.5 * (td_dict["ble_mux"] + td)
except:
td_dict.update({"ble_mux" : td})
elif wire[0] == 'V':
if "tfall_tap" in line or "trise_tap" in line:
tap = get_tap(line)
td = get_td(line)
if td < 0:
print "Negative time!"
raise ValueError
try:
td_dict[tap] = 0.5 * (td_dict[tap] + td)
except:
td_dict.update({tap : td})
if trise < 0 or tfall < 0:
print "Negative time!"
raise ValueError
if wire[0] == 'V':
td_dict.update({"whole" : 0.5 * (trise + tfall)})
if meas_lut_access:
td_dict.update({"lut_access" : 0.5 * (trise + tfall) - td_dict["ble_mux"]})
return td_dict
if wire[0] == 'V':
return td_dict
return 0.5 * (trise + tfall)
#------------------------------------------------------------------------#
netlist_filename = "sim_global_%s_%s.sp" % (args.arc_name, wire)
hspice_dump = "hspice_%s_%s.dump" % (args.arc_name, wire)
if meas_lut_access:
net = meas_lut_access_delay(G)
return run()
else:
pins, all_sizes = stack_muxes(G, get_pins = True)
source_dict = {}
for mux in pins:
if wire in mux and mux.startswith("ble_%d_" % NEUTRAL_BLE):
if ROBUSTNESS_LEVEL == 0:
source = mux
if get_cb_delay:
return get_netlist(G, wire, source, get_cb_delay = True)
net = get_netlist(G, wire, source)
return run()
key = mux.split("_tap")[0]
offset = pins[mux]['o'][0 if wire[0] == 'V' else 1]
deg = 0
for fanout in G:
if fanout.startswith(key):
deg += G.in_degree(fanout) + G.out_degree(fanout)
source_dict.update({key : {"mux" : mux, "deg" : deg, "offset" : offset}})
sorted_keys = sorted(source_dict, key = lambda s : source_dict[s]["deg"]\
* abs(source_dict[s]["offset"]))
if ROBUSTNESS_LEVEL == 1 or get_cb_delay:
#NOTE: Connection-block delays are very robust to changing the multiplexer as they usually
#assume only one or two columns, immediately next to the crossbar. Hence, the x-offset is
#less varialbe. Also, the load is within the cluster itself. If there is any variation in
#multiplexer sizes, that is more of an artifact of parametrized architecture generation.
#Median fanin should be a good representative in this case.
source = source_dict[sorted_keys[len(source_dict) / 2]]["mux"]
if get_cb_delay:
return get_netlist(G, wire, source, get_cb_delay = True)
net = get_netlist(G, wire, source)
return run()
td_dicts = []
for source_key in sorted_keys:
source = source_dict[source_key]["mux"]
net = get_netlist(G, wire, source)
td_dicts.append(run())
if ROBUSTNESS_LEVEL == 3:
potential_targets = [u for u, attrs in net.nodes(data = True) if attrs.get("potential_target", False)]
for i, u in enumerate(potential_targets):
relabeling_dict = {}
if u == 't':
continue
relabeling_dict.update({'t' : "prev_t_%d" % i})
relabeling_dict.update({u : 't'})
net = nx.relabel_nodes(net, relabeling_dict)
td_dicts.append(run())
if (wire[0] == 'H' and not meas_lut_access) or get_cb_delay:
return sum(td_dicts) / len(td_dicts)
for v in td_dicts[0]:
for td_dict in td_dicts[1:]:
td_dicts[0][v] += td_dict[v]
td_dicts[0][v] /= len(td_dicts)
return td_dicts[0] | 7db83ff5084798100a00d79c4df13a226a2e55a8 | 3,653,921 |
def direction_to(front,list_of_others,what="average") :
"""
Compute the direction vector towards *some other entities*.
Parameters
----------
front : :py:class:`front.Front`
Front to be used as the origin (starting point) of the direction \
vector
list_of_others : list
List created by ***get_enity*** LINK
what : string
String options:
- 'nearest': compute the direction vector to the nearest \
point in the list_of_others.
- 'average': default. TODO. Currently returns a list of direction \
vectors
- 'all': Return a list of all direction vectors
"""
if len(list_of_others) == 0 :
# shouldn't this be array([0,0,0])??? No items, null vector
#return np.array([0,0,0])
return None # handle the error somewhere else
pos = front.xyz
vecs = []
smallest_vec = np.array([100000000,100000000,100000000])
for loc in list_of_others :
        vec = loc - pos
vecs.append(vec)
# THIS SHOULD BE MEMORIZED / TABULATED ++++++++++++++++++++ <-----
if np.sqrt(np.sum((loc-pos)**2)) < np.sqrt(np.sum((smallest_vec)**2)) :
smallest_vec = vec
if what == "nearest" :
return smallest_vec
else :
return vecs | f5757837e0eb71f2c03fda7c1d5b438d5036e8ac | 3,653,922 |
def live_ferc_db(request):
"""Use the live FERC DB or make a temporary one."""
return request.config.getoption("--live_ferc_db") | f0540c8e3383572c5f686ea89011d9e1ab0bf208 | 3,653,923 |
from typing import Optional
async def get_eth_hash(timestamp: int) -> Optional[str]:
"""Fetches next Ethereum blockhash after timestamp from API."""
try:
this_block = w3.eth.get_block("latest")
except Exception as e:
logger.error(f"Unable to retrieve latest block: {e}")
return None
    if this_block["timestamp"] < timestamp:
        logger.error(
            f"Timestamp {timestamp} is newer than current "
            f"block timestamp {this_block['timestamp']}"
        )
        return None
block_num = block_num_from_timestamp(timestamp)
if block_num is None:
logger.warning("Unable to retrieve block number from Etherscan API")
return None
try:
block = w3.eth.get_block(block_num)
except Exception as e:
logger.error(f"Unable to retrieve block {block_num}: {e}")
return None
return str(block["hash"].hex()) | f7f8cd70857d8bb84261685385f59e7cfd048f4c | 3,653,924 |
import sys
def resize_opencv(method, *args, **kwargs):
"""Direct arguments to one of the resize functions.
Parameters
----------
method
One among 'crop', 'cover', 'contain', 'width', 'height' or 'thumbnail'
image
Numpy array
size
Size object with desired size
"""
method = f"resize_{method}_opencv"
valid_methods = [
x for x in globals().keys() if x.endswith("opencv") and x != "resize_opencv"
]
LOG.info("Resizing with %s()", method)
try:
return getattr(sys.modules[__name__], method)(*args, **kwargs)
except AttributeError:
LOG.critical(
f"Invalid method '{method}'; should be one of {', '.join(valid_methods)}"
) | fdbf2e166dd348101c3e39d6edb417112c389aba | 3,653,925 |
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
def extract_url_dataset(dataset,msg_flag=False):
"""
Given a dataset identifier this function extracts the URL for the page where the actual raw data resides.
"""
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
dataset_dict={}
baseurl='https://archive.ics.uci.edu/ml/datasets/'
url = baseurl+dataset
try:
uh= urllib.request.urlopen(url, context=ctx)
html =uh.read().decode()
soup=BeautifulSoup(html,'html5lib')
if soup.text.find("does not appear to exist")!=-1:
if msg_flag:
print(f"{dataset} not found")
return None
else:
for link in soup.find_all('a'):
if link.attrs['href'].find('machine-learning-databases')!=-1:
a=link.attrs['href']
a=a[2:]
dataurl="https://archive.ics.uci.edu/ml/"+str(a)
#print(dataurl)
return str(dataurl)
#dataurls.append(dataurl)
# After finishing the for-loop with a-tags, the first dataurl is added to the dictionary
#dataset_dict['dataurl']=dataurls[0]
except:
#print("Could not retrieve")
return None | 06ec2dd6bea4c264fe9590663a28c7c92eed6a49 | 3,653,926 |
def test_encrypt_and_decrypt_one(benchmark: BenchmarkFixture) -> None:
"""Benchmark encryption and decryption run together."""
primitives.encrypt = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_encrypt
primitives.decrypt = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_decrypt
def encrypt_and_decrypt() -> bytes:
token = version2.encrypt(MESSAGE, KEY, FOOTER)
return version2.decrypt(token, KEY, FOOTER)
plain_text = benchmark(encrypt_and_decrypt)
assert plain_text == MESSAGE | fdd15ca362b983e5f7e28632434c2cbe1ab983ac | 3,653,927 |
def MPI_ITOps(mintime = 5, maxtime = 20, cap = 60):
"""
Returns a costOfLaborValue object suitable to attach to a sim or other event
Time is in hours
"""
timeDist = LogNormalValue(maxtime, mintime, cap)
costDist = LogNormalValue(235, 115, 340)
team = costOfLaborValue("IT I&O Team", timeDist, costDist)
return team | 829c702d31a585fc18f81eea01c87f32c2458ea6 | 3,653,928 |
import json
def load_private_wallet(path):
"""
Load a json file with the given path as a private wallet.
"""
    with open(path) as f:
        d = json.load(f)
    blob = bytes.fromhex(d["key"])
    return BLSPrivateHDKey.from_bytes(blob)
import json
def format_parameters(parameters: str) -> str:
"""
    Receives a key:value string and returns a dictionary string ({"key":"value"}). In the process strips trailing and
leading spaces.
:param parameters: The key-value-list
:return:
"""
if not parameters:
return '{}'
pairs = []
for item in parameters.split(','):
try:
key, value = item.split(':')
except ValueError:
raise ValueError(f"Got unexpected parameters {item}.")
pairs.append((key.strip(), value.strip()))
return json.dumps(dict(pairs)) | 95f115b9000d495db776798700cfdf35209cfbd4 | 3,653,930 |
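For example:

```python
print(format_parameters("limit: 10, sort :desc"))  # {"limit": "10", "sort": "desc"}
print(format_parameters(""))                       # {}
```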
def downvote_question(current_user, question_id):
"""Endpoint to downvote a question"""
error = ""
status = 200
response = {}
question = db.get_single_question(question_id)
if not question:
error = "That question does not exist!"
status = 404
elif db.downvote_question(current_user[0], question_id) is False:
error = "You have already downvoted!"
status = 400
else:
db.downvote_question(current_user[0], question_id)
votes = db.get_votes(question_id)
que_details = db.get_question_details(question_id)
data = {
"meetup": que_details[0],
"title": que_details[1].strip(),
"body": que_details[2].strip(),
"votes": votes[0]
}
status = 200
if error:
response.update({"status": status, "error": error})
return jsonify(response), status
response.update({"status": status, "data": data})
return jsonify(response), status | a7bba2a9608d25b3404f22ca2f283486f205f0ad | 3,653,931 |
def get_new_generation(generation: GEN, patterns: PATTERNS) -> GEN:
"""Mutate current generation and get the next one."""
new_generation: GEN = dict()
plant_ids = generation.keys()
min_plant_id = min(plant_ids)
max_plant_id = max(plant_ids)
for i in range(min_plant_id - 2, max_plant_id + 2):
pattern = get_pattern(generation, i)
if patterns.get(pattern, Pot.EMPTY) is Pot.PLANT:
new_generation[i] = Pot.PLANT
return new_generation | a0908c9c7570814ca86d3b447425e7b75cdbfde2 | 3,653,932 |
def ell2tm(latitude, longitude, longitude_CM, ellipsoid = 'GRS80'):
"""
Convert ellipsoidal coordinates to 3 degree Transversal Mercator
projection coordinates
Input:
latitude: latitude of a point in degrees
longitude: longitude of a point in degrees
longitude_CM: central meridian in degrees
ellipsoid: name of ellipsoid in string format
Output:
Easting, Northing [unit:meters]
"""
Phi = _np.deg2rad(latitude) # degree to radian
Lambda = _np.deg2rad(longitude) # degree to radian
Lambda_CM = _np.deg2rad(longitude_CM) # degree to radian
dlambda = Lambda - Lambda_CM
# -----------------------------------------------------------------------------
# Define Ellipsoid
ell = _ellipsoid(ellipsoid)
# -----------------------------------------------------------------------------
# Some parameters
N = ell.a/_np.sqrt(1-ell.e1**2*_np.sin(Phi)**2)
t = _np.tan(Phi)
n = ell.e2 * _np.cos(Phi)
# -----------------------------------------------------------------------------
# Easting Computation
easting = N*(dlambda*_np.cos(Phi)+((dlambda**3*_np.cos(Phi)**3)/6)*(1-t**2+n**2) +
((dlambda**5*_np.cos(Phi)**5)/120)*(5-18*t**2+t**4+14*n**2-58*t**2*n**2+13*n**4+4*n**6-64*n**4*t**2-24*n**6*t**2) +
((dlambda**7*_np.cos(Phi)**7)/5040)*(61-479*t**2+179*t**4-t**6))
easting += 500000 # false easting
# -----------------------------------------------------------------------------
    # Meridian Arc Computation
A0 = 1 - ell.e1**2/4 - (3/64)*ell.e1**4 - (5/256)*ell.e1**6 - (175/16384)*ell.e1**8
A2 = (3/8) * (ell.e1**2 + ell.e1**4/4 + (15/128)*ell.e1**6 - (455/4096)*ell.e1**8)
A4 = (15/256) * (ell.e1**4 + (3/4)*ell.e1**6 - (77/128)*ell.e1**8)
A6 = (35/3072) * (ell.e1**6 - (41/32)*ell.e1**8)
A8 = (-315/131072) * ell.e1**8
S_phi = ell.a * ( A0 * Phi - A2*_np.sin(2*Phi) + A4*_np.sin(4*Phi) - A6*_np.sin(6*Phi) + A8*_np.sin(8*Phi))
# -----------------------------------------------------------------------------
# Northing Computation
northing = S_phi + N * ( (dlambda**2/2) * _np.sin(Phi) * _np.cos(Phi) + (dlambda**4/24) * _np.sin(Phi) * _np.cos(Phi)**3 * (5 - t**2 + 9*n**2 + 4*n**4) +
(dlambda**6/720) * _np.sin(Phi) * _np.cos(Phi)**5 * (61 - 58*t**2 + t**4 + 270*n**2 - 330*t**2*n**2 + 445*n**4 + 324*n**6 - 680*n**4*t**2 + 88*n**8 -
600*n**6*t**2 - 192*n**8*t**2) + (dlambda**8/40320) * _np.sin(Phi) * _np.cos(Phi)**7 * (1385 - 311*t**2 + 543*t**4 - t**6))
return easting, northing | b6e1361df8b51e188bbc7a49557dbe8f14905df3 | 3,653,933 |
def Format_Phone(Phone):
    """Function to format a phone number into (999) 999-9999"""
Phone = str(Phone)
return f"({Phone[0:3]}) {Phone[3:6]}-{Phone[6:10]}" | 8e46c35bca9d302d86909457c84785ad5d366c15 | 3,653,934 |
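For example:

```python
print(Format_Phone(5551234567))    # (555) 123-4567
print(Format_Phone("5551234567"))  # (555) 123-4567
```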
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
if problem.isGoalState(startState):
return []
# Each element in the fringe stores the state and the cost to reach it.
fringe = util.PriorityQueue()
fringe.push(startState, 0 + heuristic(startState, problem))
# Each pair in itemsInFringe stores a state and the list of actions
# required to reach it. States are added in itemsInFringe when they are
# added to the fringe. The states are removed from itemsInFringe when
# they get removed from the fringe.
itemsInFringe = {startState: []}
    visitedStates = set()
while not fringe.isEmpty():
currState = fringe.pop()
actionsToCurrState = itemsInFringe[currState]
del itemsInFringe[currState]
costOfActionsToCurrState = problem.getCostOfActions(actionsToCurrState)
if problem.isGoalState(currState):
return actionsToCurrState
visitedStates.add(currState)
for successor, action, stepCost in problem.getSuccessors(currState):
heuristicCostToSuccessor = heuristic(successor, problem)
newCostToSuccessor = costOfActionsToCurrState + stepCost + \
heuristicCostToSuccessor
newActionsToSuccessor = actionsToCurrState + [action]
if successor not in visitedStates:
fringe.update(successor, newCostToSuccessor)
if successor in itemsInFringe and \
problem.getCostOfActions(itemsInFringe[successor]) + \
heuristicCostToSuccessor <= newCostToSuccessor:
# If successor is already in itemsInFringe, only update the
# cost if the current cost is greater than the new cost.
continue
itemsInFringe[successor] = newActionsToSuccessor
# Goal not found, so no action.
return [] | 429c45bff701bbd2bb515be6d8a0f538183941d3 | 3,653,935 |
def _stack_add_equal_dataset_attributes(merged_dataset, datasets, a=None):
"""Helper function for vstack and hstack to find dataset
    attributes common to a set of datasets, and add them to the output.
    Note: by default this function does nothing, because testing for equality
    may be messy for certain types; to override, a value should be assigned
    to the `a` argument.
Parameters
----------
merged_dataset: Dataset
the output dataset to which attributes are added
datasets: tuple of Dataset
Sequence of datasets to be stacked. Only attributes present
in all datasets and with identical values are put in
merged_dataset
a: {'unique','drop_nonunique','uniques','all'} or True or False or None (default: None).
Indicates which dataset attributes from datasets are stored
in merged_dataset. If an int k, then the dataset attributes from
datasets[k] are taken. If 'unique' then it is assumed that any
attribute common to more than one dataset in datasets is unique;
if not an exception is raised. If 'drop_nonunique' then as 'unique',
except that exceptions are not raised. If 'uniques' then, for each
attribute, any unique value across the datasets is stored in a tuple
in merged_datasets. If 'all' then each attribute present in any
dataset across datasets is stored as a tuple in merged_datasets;
missing values are replaced by None. If None (the default) then no
attributes are stored in merged_dataset. True is equivalent to
'drop_nonunique'. False is equivalent to None.
"""
if a is None or a is False:
# do nothing
return
elif a is True:
a = 'drop_nonunique'
if not datasets:
# empty - so nothing to do
return
if type(a) is int:
base_dataset = datasets[a]
for key in base_dataset.a.keys():
merged_dataset.a[key] = base_dataset.a[key].value
return
allowed_values = ['unique', 'uniques', 'drop_nonunique', 'all']
if not a in allowed_values:
raise ValueError("a should be an int or one of "
"%r" % allowed_values)
# consider all keys that are present in at least one dataset
all_keys = set.union(*[set(dataset.a.keys()) for dataset in datasets])
def _contains(xs, y, comparator=all_equal):
for x in xs:
if comparator(x, y):
return True
return False
for key in all_keys:
add_key = True
values = []
for i, dataset in enumerate(datasets):
if not key in dataset.a:
if a == 'all':
values.append(None)
continue
value = dataset.a[key].value
if a in ('drop_nonunique', 'unique'):
if not values:
values.append(value)
elif not _contains(values, value):
if a == 'unique':
raise DatasetError("Not unique dataset attribute value "
" for %s: %s and %s"
% (key, values[0], value))
else:
add_key = False
break
elif a == 'uniques':
if not _contains(values, value):
values.append(value)
elif a == 'all':
values.append(value)
else:
raise ValueError("this should not happen: %s" % a)
if add_key:
if a in ('drop_nonunique', 'unique'):
merged_dataset.a[key] = values[0]
else:
merged_dataset.a[key] = tuple(values) | acfeb1e7ca315aa7109731427ce6f058b2fceb6d | 3,653,936 |
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
# The following implementation convert the string to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in 'SU':
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == 'S' else 4
dt_int = "{0}{1}u{2}".format(dt.itemsize // bpc, dt.byteorder, bpc)
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j:j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array | f59d6d127d4d3f0725df5eee2e4586ccbea9288b | 3,653,937 |
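An illustrative in-place call on a small unicode string array; the trailing spaces are overwritten with nulls, so the elements read back as stripped strings:

```python
import numpy as np

names = np.array(["alpha  ", "bb ", "c"], dtype="U7")
_rstrip_inplace(names)
print(names.tolist())  # ['alpha', 'bb', 'c']
```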
def compute_fixpoint_0(graph, max_value):
"""
Computes the fixpoint obtained by the symbolic version of the backward algorithm for safety games.
Starts from the antichain of the safe set and works backwards using controllable predecessors.
The maximum value for the counters is a parameter to facilitate the incremental algorithm.
:param graph:
:type graph:
:param max_value:
:type max_value:
:return:
:rtype:
"""
    # whether we want to print the sets during computation
toPrint = False
# get the values to create the create the antichain of maximal elements of the safe set
nbr_functions, nbr_counters_per_function = compute_counters_sizes_0(graph)
start_antichain = Antichain(comparator_generalized_0, intersector_generalized_0)
# create the antichain of maximal elements of the safe set
# every counter in every tuple has the maximal value
for node in graph.get_nodes():
temp = [node]
for func in range(0, nbr_functions):
temp.append(nbr_counters_per_function[func] * [max_value])
start_antichain.insert(temp)
if (toPrint):
print("Start antichain : " + str(start_antichain) + "\n")
antichain1 = start_antichain
cpre1 = Cpre(start_antichain, 1, graph, nbr_functions, max_value)
if (toPrint):
print("CPre_1 of start antichain: " + str(cpre1) + "\n")
cpre0 = Cpre(start_antichain, 0, graph, nbr_functions, max_value)
if (toPrint):
print("CPre_0 of start antichain: " + str(cpre0) + "\n")
# we know the elements of cpre0 and cpre1 to be incomparable. Union of the two antichains can be done through
# simple extend
cpre0.incomparable_elements.extend(cpre1.incomparable_elements)
if (toPrint):
print("Union of CPre_0 and CPre_1 " + str(cpre0) + "\n")
antichain2 = antichain1.intersection(cpre0)
if (toPrint):
print("Inter of start and previous union " + str(antichain2) + "\n")
nb_iter = 0
# while we have not obtained the fixpoint
while not antichain1.compare(antichain2):
nb_iter += 1
antichain1 = antichain2
cpre1 = Cpre(antichain1, 1, graph, nbr_functions, max_value)
if (toPrint):
print("ITER " + str(nb_iter) + " CPre 1 of prev " + str(cpre1) + "\n")
cpre0 = Cpre(antichain1, 0, graph, nbr_functions, max_value)
if (toPrint):
print("ITER " + str(nb_iter) + " CPre 0 of prev " + str(cpre0) + "\n")
temp = cpre0.union(cpre1)
if (toPrint):
print("ITER " + str(nb_iter) + " Union of Pre 0 and Pre 1 " + str(temp) + "\n")
antichain2 = antichain1.intersection(temp)
if (toPrint):
print("ITER " + str(nb_iter) + " final set " + str(antichain2) + "\n")
return antichain1 | e2ee9cb00ce6f88e03a080d85a635c208cdd5a35 | 3,653,938 |
def extract_unii_other_code(tree):
"""Extract the codes for other ingredients"""
unii_other_xpath = \
'//generalizedMaterialKind/code[@codeSystem="%s"]/@code' % UNII_OTHER_OID
return tree.getroot().xpath(unii_other_xpath) | 0576dc7537a9212990a72125b8fd406c457efb76 | 3,653,939 |
import functools
def initfunc(f):
"""
Decorator for initialization functions that should
be run exactly once.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
if wrapper.initialized:
return
wrapper.initialized = True
return f(*args, **kwargs)
wrapper.initialized = False
return wrapper | 337ca902fc1fbe138ad5dd4c203a3cac77e89f57 | 3,653,940 |
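For example, the decorated body runs only on the first call:

```python
@initfunc
def setup():
    print("initializing")

setup()  # prints "initializing"
setup()  # no output; already marked as initialized
```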
def edit_frame(frame):
"""edit frame to analyzable frame
rgb 2 gray
thresh frame color
bitwise color
Args
frame (ndarray): original frame from movie
Returns
work_frame (ndarray): edited frame
"""
work_frame = frame
work_frame = cv2.cvtColor(work_frame, cv2.COLOR_RGB2GRAY)
work_frame = cv2.threshold(work_frame, FRAME_THRESH, 255, cv2.THRESH_BINARY)[1]
work_frame = cv2.bitwise_not(work_frame)
return work_frame | b82588fa81093c05e4a683b76aa367ba2be4b2e2 | 3,653,941 |
def manchester(bin_string):
"""
Applies the Manchester technique to a string of bits.
:param bin_string:
:type bin_string: str
:return:
:rtype: str
"""
signal_manager = Signal()
for bin_digit in bin_string:
if bin_digit == '0': # Generate +-
if signal_manager.signal == '+': # It's positive
signal_manager.keep() # +
signal_manager.flip() # -
else: # It's negative
signal_manager.flip() # +
signal_manager.flip() # -
else: # Generate -+
if signal_manager.signal == '+': # It's positive
signal_manager.flip() # -
signal_manager.flip() # +
else: # It's negative
signal_manager.keep() # -
signal_manager.flip() # +
return str(signal_manager) | e5f58e929db74f0eeb2a003c25c5097f45c74989 | 3,653,942 |
from typing import List
def load_admin_cells(identifier: str) -> List[MultiPolygon]:
"""Loads the administrative region cells
Data is loaded from :py:const:`ADMIN_GEOJSON_TEMPLATE` ``% identifier``.
This is a wrapper function for :py:func:`load_polygons_from_json`.
Returns:
A list of the administrative region cells.
"""
return load_polygons_from_json(ADMIN_GEOJSON_TEMPLATE % identifier) | dc2083ca7392da5b2d6509b6dd8f108bd8218726 | 3,653,943 |
import inspect
def register_writer(format, cls=None):
"""Return a decorator for a writer function.
A decorator factory for writer functions.
A writer function should have at least the following signature:
``<class_name_or_generator>_to_<format_name>(obj, fh)``. `fh` is **always**
an open filehandle. This decorator provides the ability to use filepaths in
the same argument position as `fh`. They will automatically be opened and
closed.
**The writer must not close the filehandle**, cleanup will be
handled external to the reader and is not its concern.
Any additional `**kwargs` will be passed to the writer and may be used if
necessary.
The writer must not return a value. Instead it should only mutate the `fh`
in a way consistent with it's purpose.
If the writer accepts a generator, it should exhaust the generator to
ensure that the potentially open filehandle backing said generator is
closed.
.. note:: Failure to adhere to the above interface specified for a writer
will result in unintended side-effects.
Parameters
----------
format : str
A format name which a decorated writer will be bound to.
cls : type, optional
The class which a decorated writer will be bound to. If `cls` is None
the writer will be bound as expecting a generator.
Default is None.
Returns
-------
function
A decorator to be used on a writer. The decorator will raise a
``skbio.io.DuplicateRegistrationError`` if there already exists a
*writer* bound to the same permutation of `fmt` and `cls`.
See Also
--------
skbio.io.write
skbio.io.get_writer
"""
def decorator(writer):
format_class = _formats.setdefault(format, {}).setdefault(cls, {})
if 'writer' in format_class:
raise DuplicateRegistrationError('writer', format, cls)
file_args = []
writer_spec = inspect.getargspec(writer)
if writer_spec.defaults is not None:
# Concept from http://stackoverflow.com/a/12627202/579416
for key, default in zip(
writer_spec.args[-len(writer_spec.defaults):],
writer_spec.defaults):
if default is FileSentinel:
file_args.append(key)
# We wrap the writer so that basic file handling can be managed
# externally from the business logic.
def wrapped_writer(obj, fp, mode='w', **kwargs):
file_keys = []
files = [fp]
for file_arg in file_args:
if file_arg in kwargs:
if kwargs[file_arg] is not None:
file_keys.append(file_arg)
files.append(kwargs[file_arg])
else:
kwargs[file_arg] = None
with open_files(files, mode) as fhs:
for key, fh in zip(file_keys, fhs[1:]):
kwargs[key] = fh
writer(obj, fhs[0], **kwargs)
wrapped_writer.__doc__ = writer.__doc__
wrapped_writer.__name__ = writer.__name__
format_class['writer'] = wrapped_writer
return wrapped_writer
return decorator | b8312d987cecfa106c73afb4eca5299637d260f6 | 3,653,944 |
import os
def get_downloadpath(user_id):
"""
find the download path
"""
path = settings.DOCUMENT_PATH + str(user_id) + '/'
if not os.path.isdir(path):
os.mkdir(path)
return path | da3508639cd8740410e5b21df8944a64ade7e50c | 3,653,945 |
import torch
def accuracy(output, target, topk=1,axis=1,ignore_index=-100, exclude_mask=False):
    """Computes the precision@k for the specified value of k, e.g.
    prec1 = accuracy(output, target, topk=1)
    """
input_tensor=output.copy().detach()
target_tensor=target.copy().detach()
num_classes = int_shape(output)[axis]
if len(input_tensor)==0:
return to_tensor(0.0)
is_logsoftmax = None
from_logits = None
output_exp = exp(input_tensor)
if (ndim(input_tensor) >= 1 and 'float' in str(input_tensor.dtype) and input_tensor.min() >= 0 and input_tensor.max() <= 1):
is_logsoftmax = False
from_logits = True
input_tensor = clip(input_tensor, min=1e-8, max=1 - 1e-8)
elif (ndim(output_exp) >= 1 and 'float' in str(output_exp.dtype) and output_exp.min() >= 0 and output_exp.max() <= 1):
is_logsoftmax = True
from_logits = True
input_tensor = clip(output_exp, min=1e-8, max=1 - 1e-8)
else:
is_logsoftmax = False
from_logits = False
if input_tensor.dtype!=torch.int64 and topk==1:
if len(input_tensor.size())==1: #binary
input_tensor=input_tensor.gt(0.5).float()
else:
input_tensor=argmax(input_tensor,axis).squeeze()
if target_tensor.dtype!=torch.int64:
target_tensor=argmax(target_tensor,axis).squeeze()
if input_tensor.shape!=target_tensor.shape and topk==1:
raise ValueError('input shape {0} is not competable with target shape {1}'.format(input_tensor.shape,target_tensor.shape))
input_mask=ones_like(input_tensor)
if isinstance(ignore_index, int) and 0 <= ignore_index < num_classes:
input_mask[input_tensor==ignore_index] = 0
elif isinstance(ignore_index, (list, tuple)):
for idx in ignore_index:
if isinstance(idx, int) and 0 <= idx < int_shape(output)[axis]:
input_mask[input_tensor == idx] = 0
batch_size = target_tensor.size(0)
if topk==1:
return (input_tensor.eq(target_tensor).float()*input_mask).sum()/clip((input_mask).float().sum(),min=1)
else:
_, pred = input_tensor.topk(topk)
pred = pred.t()
correct = pred.eq(target_tensor.reshape((1, -1)).expand_as(pred))
correct_k = reduce_sum(correct[:topk].reshape(-1).float(),axis=0,keepdims=True)
return correct_k.mul_(1 / batch_size) | a35d4bff308862c4c8a83948c619e66299d7887f | 3,653,946 |
def jitter_over_thresh(x: xr.DataArray, thresh: str, upper_bnd: str) -> xr.DataArray:
"""Replace values greater than threshold by a uniform random noise.
Do not confuse with R's jitter, which adds uniform noise instead of replacing values.
Parameters
----------
x : xr.DataArray
Values.
thresh : str
Threshold over which to add uniform random noise to values, a quantity with units.
upper_bnd : str
Maximum possible value for the random noise, a quantity with units.
Returns
-------
xr.DataArray
Notes
-----
If thresh is low, this will change the mean value of x.
"""
return jitter(x, lower=None, upper=thresh, minimum=None, maximum=upper_bnd) | 1a508c30aa68c3b8808f3fe0b254ad98621cd245 | 3,653,947 |
from .rename_axis import rename_axis_with_level
def index_set_names(index, names, level=None, inplace=False):
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label
Name(s) to set.
level : int, label or list of int or label, optional
If the index is a MultiIndex, level(s) to set (None for all
levels). Otherwise level must be None.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index
The same type as the caller or None if inplace is True.
See Also
--------
Index.rename : Able to set new names without level.
Examples
--------
>>> import mars.dataframe as md
>>> idx = md.Index([1, 2, 3, 4])
>>> idx.execute()
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names('quarter').execute()
Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = md.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]])
>>> idx.execute()
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx.set_names(['kind', 'year'], inplace=True)
>>> idx.execute()
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['kind', 'year'])
>>> idx.set_names('species', level=0).execute()
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
"""
op = DataFrameRename(index_mapper=names, level=level,
output_types=get_output_types(index))
ret = op(index)
if inplace:
df_or_series = getattr(index, '_get_df_or_series', lambda: None)()
if df_or_series is not None:
rename_axis_with_level(df_or_series, names, axis=index._axis,
level=level, inplace=True)
index.data = df_or_series.axes[index._axis].data
else:
index.data = ret.data
else:
return ret | 4ad24ea4c1fd42b1259d43e273c44c4295a9e329 | 3,653,948 |
def get_critic(obs_dim: int) -> tf.keras.Model:
"""Get a critic that returns the expect value for the current state"""
observation = tf.keras.Input(shape=(obs_dim,), name='observation')
x = layers.Dense(64, activation='tanh')(observation)
x = layers.Dense(64, activation='tanh')(x)
value = layers.Dense(1, name='value')(x)
critic = tf.keras.Model(observation, value)
# critic.summary()
return critic | 69082d6260666c733e32093fbc7180726f77acc6 | 3,653,949 |
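A small usage sketch, assuming `layers` in the snippet above refers to `tf.keras.layers` (an import implied but not shown in the snippet):

```python
import tensorflow as tf
from tensorflow.keras import layers  # assumed import for get_critic

critic = get_critic(obs_dim=4)
values = critic(tf.zeros((2, 4)))  # shape (2, 1): one value estimate per observation
```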
def register_and_login_test_user(c):
"""
Helper function that makes an HTTP request to register a test user
Parameters
----------
c : object
Test client object
Returns
-------
str
Access JWT in order to use in subsequent tests
"""
c.post(
"/api/auth/register",
json={
"username": "test",
"password": "secret",
"first_name": "tim",
"last_name": "apple",
"email": "[email protected]",
"birthday": "1990-01-01",
},
)
setup_resp = c.post(
"/api/auth/login", json={"username": "test", "password": "secret"}
)
setup_resp_json = setup_resp.get_json()
setup_access_token = setup_resp_json["access_token"]
return setup_access_token | b76f7f6afa9af453246ae304b1b0504bd68b8919 | 3,653,950 |
def get_ssh_challenge_token(account, appid, ip=None, vo='def'):
"""
Get a challenge token for subsequent SSH public key authentication.
The challenge token lifetime is 5 seconds.
:param account: Account identifier as a string.
:param appid: The application identifier as a string.
:param ip: IP address of the client as a string.
:param vo: The VO to act on.
:returns: A dict with token and expires_at entries.
"""
kwargs = {'account': account}
if not permission.has_permission(issuer=account, vo=vo, action='get_ssh_challenge_token', kwargs=kwargs):
raise exception.AccessDenied('User can not get challenge token for account %s' % account)
account = InternalAccount(account, vo=vo)
return authentication.get_ssh_challenge_token(account, appid, ip) | 12da26b4a20e648ca9fc6d325647f42288324b83 | 3,653,951 |
def rootpath_capacity_exceeded(rootpath,newSize):
"""
Return True if rootpath is already allocated to the extent
it cannot accomadate newSize, otherwise return False
"""
vols_in_rootpath = Volume.objects.filter(root_path=rootpath)
rootpathallocsum = 0
if vols_in_rootpath.count() > 0:
rootpathallocsum = vols_in_rootpath.aggregate(
alSize=db.models.Sum('size_GB'))['alSize']
if rootpathallocsum + newSize > rootpath.capacity_GB:
return True
return False | 3b8d90f3693ce12de93c967d20c6a7b2ccb7ec38 | 3,653,952 |
import requests
import json
def user_token(user: str) -> str:
"""
Authorize this request with the GitHub app set by the 'app_id' and
'private_key' environment variables.
1. Get the installation ID for the user that has installed the app
2. Request a new token for that user
3. Return it so it can be used in future API requests
"""
# Hardcode the installation to PyTorch so we can always get a valid ID key
id = installation_id("pytorch")
url = f"https://api.github.com/app/installations/{id}/access_tokens"
r_bytes = requests.post(url, headers=app_headers())
r = json.loads(r_bytes.content.decode())
token = str(r["token"])
return token | c02fae92505a922f58f231682a009d24ed6432bc | 3,653,953 |
def file_exists(path):
"""
Return True if the file from the path exists.
:param path: A string containing the path to a file.
:return: a boolean - True if the file exists, otherwise False
"""
return isfile(path) | 79610224c3e83f6ba4fdeb98b1faaf932c249ff2 | 3,653,954 |
from typing import Callable
from typing import Dict
def _static_to_href(pathto: Callable, favicon: Dict[str, str]) -> Dict[str, str]:
"""If a ``static-file`` is provided, returns a modified version of the icon
attributes replacing ``static-file`` with the correct ``href``.
If both ``static-file`` and ``href`` are provided, ``href`` will be ignored.
"""
if FILE_FIELD in favicon:
attrs = favicon.copy()
attrs["href"] = pathto(
f"{OUTPUT_STATIC_DIR}/{attrs.pop(FILE_FIELD)}", resource=True
)
return attrs
return favicon | f54e5ced825dc44bfa14a09622c7fa9b179660c5 | 3,653,955 |
import torch
def concatenate(tensor1, tensor2, axis=0):
"""
Basically a wrapper for torch.dat, with the exception
that the array itself is returned if its None or evaluates to False.
:param tensor1: input array or None
:type tensor1: mixed
:param tensor2: input array
:type tensor2: numpy.ndarray
:param axis: axis to concatenate
:type axis: int
:return: concatenated array
:rtype: numpy.ndarray
"""
assert isinstance(tensor2, torch.Tensor) or isinstance(tensor2, torch.autograd.Variable)
if tensor1 is not None:
assert isinstance(tensor1, torch.Tensor) or isinstance(tensor1, torch.autograd.Variable)
return torch.cat((tensor1, tensor2), axis=axis)
else:
return tensor2 | 24791a201f1ddb64cd2d3f683ecc38471d21697b | 3,653,956 |
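# A minimal usage sketch for concatenate() above (illustrative, not part of the
# original snippet); `axis=` is forwarded to torch.cat, which recent PyTorch
# versions accept as an alias for `dim`.
a = torch.zeros(2, 3)
b = torch.ones(2, 3)
print(concatenate(None, b).shape)       # torch.Size([2, 3]) -- b is returned unchanged
print(concatenate(a, b, axis=0).shape)  # torch.Size([4, 3]) -- stacked along dim 0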
def setKey(key, keytype):
""" if keytype is valid, save a copy of key accordingly
and check if the key is valid """
global _key, _keytype, FREE_API_KEY, PREMIUM_API_KEY
keytype = keytype.lower()
if keytype in ("f", "fr", "free"):
keytype = "free"
FREE_API_KEY = key
elif keytype.startswith("prem") or keytype in ("nonfree", "non-free"):
keytype = "premium"
PREMIUM_API_KEY = key
else:
print "invalid keytype", keytype
return
oldkey = _key
oldkeytype = _keytype
_key = key
_keytype = keytype
w = LocalWeather("london")
    # w.data != False rather than w.data to suppress Python 2.7 FutureWarning:
    # "The behavior of this method will change in future versions...."
if w is not None and hasattr(w, 'data') and w.data is not False:
return True
else:
print "The key is not valid."
_key = oldkey
_keytype = oldkeytype
return False | 8baab2972ea5c9fbe33845aaed7c1ab4cc631a2e | 3,653,957 |
import functools
import operator
def sum_(obj):
"""Sum the values in the given iterable.
Different from the built-in summation function, the summation is based on
the first item in the iterable. Or a SymPy integer zero is created
when the iterator is empty.
"""
i = iter(obj)
try:
init = next(i)
except StopIteration:
return Integer(0)
else:
return functools.reduce(operator.add, i, init) | 70727443ba5a62e5bd91e3c0a60130f6cc0b65e5 | 3,653,958 |
import logging
import botocore.client
from boto3.session import Session
logger = logging.getLogger(__name__)
def setup_s3_client(job_data):
"""Creates an S3 client
Uses the credentials passed in the event by CodePipeline. These
credentials can be used to access the artifact bucket.
:param job_data: The job data structure
:return: An S3 client with the appropriate credentials
"""
try:
key_id = job_data['artifactCredentials']['accessKeyId']
key_secret = job_data['artifactCredentials']['secretAccessKey']
session_token = job_data['artifactCredentials']['sessionToken']
session = Session(aws_access_key_id=key_id,
aws_secret_access_key=key_secret,
aws_session_token=session_token)
except Exception as e:
logger.warn('No credentials in artifact - using default role access: {}'.format(e))
session = Session()
return session.client('s3', config=botocore.client.Config(signature_version='s3v4')) | bb51e03de125eeb6ff5e1e6d16b50ba07fdc7c56 | 3,653,959 |
import os
import shutil
import stat
def collect_operations(opts):
"""
Produce a list of operations to take.
Each element in the operations list is in the format:
(function, (arguments,), 'logging message')
"""
operations = []
#######################
# Destination directory
if os.path.exists(opts.dest_dir):
if not opts.force:
raise Exception(
'ERROR: The destination directory exists: "%s"\n'
'Use -f or --force option to overwrite the directory.'
% opts.dest_dir
)
else:
operations.append(
(shutil.rmtree, (opts.dest_dir,),
'Forcing deletion of existing destination directory "%s"'
% opts.dest_dir)
)
operations.append(
(os.makedirs, (opts.dest_dir,),
'Creating destination directory "%s"' % opts.dest_dir)
)
##########################
# Input joshua.config file
config_text = opts.config.read()
if opts.copy_config_options:
config_text = filter_through_copy_config_script(
config_text,
opts.copy_config_options
)
config_lines = config_text.split('\n')
###############
# Files to copy
# Parse the joshua.config and collect copy operations
result_config_lines = []
grammar_configs_count = 0
for i, line in enumerate(config_lines):
line_num = i + 1
if line_specifies_grammar(line):
try:
line, operation = process_line_containing_grammar(
line, opts.orig_dir, opts.dest_dir,
opts.grammar_paths, grammar_configs_count, opts.symlink, opts.absolute
)
except PathException as e:
# TODO: make this more appropriate for when the source
# path was overridden by a command-line option
message = (
# Prepend the line number to the error message
'ERROR: Configuration file "{0}" line {1}: {2}'
.format(opts.config.name, line_num, str(e))
)
e.message = message
raise e
operations.append(operation)
grammar_configs_count += 1
elif line_specifies_path(line):
try:
line, operation = process_line_containing_path(
line, opts.orig_dir, opts.dest_dir, opts.symlink, opts.absolute
)
except PathException as e:
# Prepend the line number to the error message
message = (
'ERROR: Configuration file "{0}" line {1}: {2}'
.format(opts.config.name, line_num, str(e))
)
e.message = message
raise e
operations.append(operation)
result_config_lines.append(line)
###########################
# Output joshua.config file
# Create the Joshua configuration file for the package
path = os.path.join(opts.dest_dir, OUTPUT_CONFIG_FILE_NAME)
text = '\n'.join(result_config_lines) + '\n'
operations.append(
(write_string_to_file, (path, text),
'Writing the updated joshua.config to %s' % path
)
)
#######################
# Bundle runner scripts
# Write the scripts that run Joshua using the configuration and
# resource in the bundle, and make their mode world-readable, and
# world-executable.
for file_name, file_text in [[BUNDLE_RUNNER_FILE_NAME, BUNDLE_RUNNER_TEXT],
[SERVER_RUNNER_FILE_NAME, SERVER_RUNNER_TEXT],
]:
path = os.path.join(opts.dest_dir, file_name)
operations.append(
(write_string_to_file, (path, file_text),
'Writing the bundle runner file "%s"' % path)
)
mode = (stat.S_IREAD | stat.S_IRGRP | stat.S_IROTH |
stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)
operations.append(
(os.chmod, (path, mode),
'Making the bundle runner file executable')
)
#######################
# Write the README file
path = os.path.join(opts.dest_dir, 'README')
operations.append(
(write_string_to_file, (path, README_TEMPLATE),
'Writing the README to "%s"' % path
)
)
return operations | 85f904d25a1a72fc754e09dbcbcbbed9cee98256 | 3,653,960 |
import numpy as np
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):
"""
FROM KERAS
Pads each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
# Arguments
sequences: list of lists where each element is a sequence
maxlen: int, maximum length
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
value: float, value to pad the sequences to the desired value.
# Returns
x: numpy array with dimensions (number_of_sequences, maxlen)
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x | 5d4603c5a71f898dc4f501d2424707ea10adbd0e | 3,653,961 |
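# Illustrative usage of pad_sequences() above (not part of the original snippet):
# the default behaviour pre-pads with zeros to the longest sequence, and maxlen
# truncates from the front when truncating='pre'.
seqs = [[1, 2, 3], [4, 5], [6]]
print(pad_sequences(seqs))
# [[1 2 3]
#  [0 4 5]
#  [0 0 6]]
print(pad_sequences(seqs, maxlen=2))
# [[2 3]
#  [4 5]
#  [0 6]]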
def basic_pyxll_function_22(x, y, z):
"""if z return x, else return y"""
if z:
# we're returning an integer, but the signature
# says we're returning a float.
# PyXLL will convert the integer to a float for us.
return x
return y | 851b5eef683b0456a0f5bce7f3850698693b067e | 3,653,962 |
def job_hotelling(prob_label, tr, te, r, ni, n):
"""Hotelling T-squared test"""
with util.ContextTimer() as t:
htest = tst.HotellingT2Test(alpha=alpha)
test_result = htest.perform_test(te)
return {
'test_method': htest,
'test_result': test_result,
'time_secs': t.secs} | 137a3b4426a01675fe6995408ad71eec34126341 | 3,653,963 |
import numpy as np
def return_circle_aperature(field, mask_r):
    """Filter the circle aperture of a light field.
    Parameters
    ----------
    field : Field
        Input square field.
    mask_r : float, from 0 to 1
        Radius of a circle mask.
    Returns
    ----------
    X : array-like
        Filtered meshgrid X.
    Y : array-like
        Filtered meshgrid Y.
    norm_radius : array-like
        Normalized radius of each grid point.
    """
length = field.shape[0]
norm_length = np.linspace(-1, 1, length)
X, Y = np.meshgrid(norm_length, norm_length)
norm_radius = np.sqrt(X**2 + Y**2)
X[norm_radius > mask_r] = np.nan
Y[norm_radius > mask_r] = np.nan
return X, Y, norm_radius | 49ae52bfa639cf8a7b8592caa1dbf8dbdef115f8 | 3,653,964 |
import json
def user_get():
"""
Get information from the database about an user, given his id. If there are
field names received in the body, only those will be queried. If no field is
provided, every field will be selected. The body should be a JSON object
following the schema:
{
"user_id": id,
"fields": ["field1", ...]
}
Returns:
Response: - 200 in case of success and the user info in the body.
- 400 if the body does not have all the necessary information
or the field names are wrong.
- 404 if the user is not found.
"""
body_schema = {
"type": "object",
"properties": {
"user_id": {"type": "number"},
"fields": {
"type": "array",
"minItems": 1,
"items": {
"type": "string",
}
}
},
"required": ["user_id"]
}
payload = request.get_json(silent=True)
is_valid = validate_json(payload, body_schema)
if not is_valid:
return Response(status=400)
user_id = payload["user_id"]
if "fields" in payload:
fields = payload["fields"]
query = sql.SQL("SELECT {} FROM users WHERE user_id={};").format(
sql.SQL(", ").join(map(sql.Identifier, fields)),
sql.Literal(user_id)
)
else:
query = sql.SQL("SELECT * FROM users WHERE user_id={};").format(
sql.Literal(payload["user_id"])
)
cursor = CONN.cursor(cursor_factory=RealDictCursor)
try:
cursor.execute(query)
results = cursor.fetchall()
except psycopg2.errors.UndefinedColumn:
CONN.rollback()
return Response(status=400)
finally:
cursor.close()
CONN.commit()
if len(results) == 0:
return Response(status=404)
return Response(
status=200,
response=json.dumps(results),
mimetype="application/json"
) | 02b1a1bc1bedc2098bd907098a40fad41c2391d7 | 3,653,965 |
def get_data_file_args(args, language):
"""
    For an interface, return the language-specific set of data file arguments
Args:
args (dict): Dictionary of data file arguments for an interface
language (str): Language of the testbench
Returns:
dict: Language-specific data file arguments
"""
if language in args:
return args[language]
return args["generic"] | 11e30b92316bad9a46b87bd9188f97d5e8860377 | 3,653,966 |
def branch_exists(branch):
"""Return True if the branch exists."""
try:
run_git("rev-parse --verify {}".format(branch), quiet=True)
return True
except ProcessExecutionError:
return False | 8f08eeb78b322220def2f883e8172ac94df97063 | 3,653,967 |
import numpy
def spectrum_like_noise(signal: numpy.ndarray,
*,
sampling_rate=40000,
keep_signal_amp_envelope=False,
low_pass_cutoff=50, # Hz
low_pass_order=6,
seed: int = 42,
window_length_sec: float = 20 / 1000, # 20 ms
p_overlap: float = .5,
long_term_avg: bool = True
) -> numpy.ndarray:
"""Create a noise with same spectrum as the input signal.
randomises phase
Parameters
----------
signal : array_like
Input signal.
sampling_rate : int
Sampling frequency of the input signal. (Default value = 40000)
keep_signal_amp_envelope : bool
Apply the envelope of the original signal to the noise. (Default
value = False)
low_pass_cutoff : float
low_pass_order : int
seed : int
long_term_avg : bool
window_length_sec: int
p_overlap: float
Returns
-------
ndarray
Noise signal.
"""
assert window_length_sec > 0
assert 0 <= p_overlap <= 1
signal = zero_pad_to_power_2(signal) # Ensure welch works with any window size
signal_length = signal.shape[-1]
window_sum_squares = signal_length # scaling factor defined as sum of squared samples of window function
sc = 2 / (sampling_rate * window_sum_squares) # Scaling coefficient 2 takes into account removal of energy at negative frequencies (we drop this side of PSD)
if not long_term_avg:
n_fft = next_pow_2(signal_length)
spec = numpy.abs(fft.rfft(signal, n_fft))
psd = (spec ** 2) * sc
else:
n_per_seg = next_pow_2(int(sampling_rate * window_length_sec)) # next_pow_2 per seg == n_fft
n_overlap = int(n_per_seg * p_overlap)
f, psd = welch(signal,
sampling_rate,
nperseg=n_per_seg,
noverlap=n_overlap,
scaling='density',
return_onesided=True,
detrend=False,
# window='boxcar',
window='hanning',
)
n_fft = n_per_seg
psd /= (signal_length / n_per_seg ) # normalise?
spec = numpy.sqrt((psd / sc))
noise = []
runs = signal_length // n_fft
for i in range(runs + 1):
numpy_seed(seed + i)
noise.extend(numpy.real(
fft.irfft(
spec *
numpy.exp(2 * numpy.pi * 1j * numpy.random.random(spec.shape[-1])), # Randomise phase. 0->360, 2 pi rads
n_fft))) # Give each spectral component a random phase, PHI(f(k)) = random number,
# uniformly distributed between 0 and 360 degrees (or equivalently, between 0 and 2Pi radians);
noise = numpy.array(noise)[:signal_length]
if keep_signal_amp_envelope:
[bb, aa] = butter(low_pass_order, low_pass_cutoff / (sampling_rate / 2)) # Cutoff Hz, LP filter
noise *= filtfilt(bb, # numerator
aa, # denominator
hilbert_envelope(signal) # envelope of speech signal in time domain
)
return numpy.expand_dims(noise, 0) | 6e6ced3a5220a9a1d66c83a33a9232d265d18a1a | 3,653,968 |
import re
def check_string_capitalised(string):
""" Check to see if a string is in all CAPITAL letters. Boolean. """
return bool(re.match('^[A-Z_]+$', string)) | f496d79fafae4c89c3686856b42113c4818f7ed8 | 3,653,969 |
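# Quick illustrative check of check_string_capitalised() above:
print(check_string_capitalised("MAX_RETRIES"))  # True
print(check_string_capitalised("MaxRetries"))   # False
print(check_string_capitalised("MAX RETRIES"))  # False -- spaces are not allowed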
import numpy as np
import torch
def sample_zero_entries(edge_index, seed, num_nodes, sample_mult=1.0):
"""Obtain zero entries from a sparse matrix.
Args:
edge_index (tensor): (2, N), N is the number of edges.
seed (int): to control randomness
num_nodes (int): number of nodes in the graph
sample_mult (float): the number of edges sampled is
N * sample_mult.
Returns:
torch.tensor, (2, N) containing zero entries
"""
n_edges = edge_index.shape[1]
np.random.seed(seed)
# Number of edges in both directions must be even
n_samples = int(np.ceil(sample_mult * n_edges / 2) * 2)
adjacency = adj_from_edge_index(edge_index, num_nodes)
zero_entries = np.zeros([2, n_samples], dtype=np.int32)
nonzero_or_sampled = set(zip(*adjacency.nonzero()))
i = 0
while True:
t = tuple(np.random.randint(0, adjacency.shape[0], 2))
# Don't sample diagonal of the adjacency matrix
if t[0] == t[1]:
continue
if t not in nonzero_or_sampled:
# Add edge in both directions
t_rev = (t[1], t[0])
zero_entries[:, i] = t
zero_entries[:, i+1] = t_rev
i += 2
if i == n_samples:
break
nonzero_or_sampled.add(t)
nonzero_or_sampled.add(t_rev)
return torch.tensor(zero_entries, dtype=torch.long) | 211e97fa0a2622d49c50673c0b6255954383f3a0 | 3,653,970 |
import textwrap
def ped_file_parent_missing(fake_fs):
"""Return fake file system with PED file"""
content = textwrap.dedent(
"""
# comment
FAM II-1\tI-1\t0\t1\t2
FAM I-1 0\t0\t1\t1
"""
).strip()
fake_fs.fs.create_file("/test.ped", create_missing_dirs=True, contents=content)
return fake_fs | 9df19ab925984236aa581c9b8843591f05d3b7b4 | 3,653,971 |
import random
import requests
def GettingAyah():
"""The code used to get an Ayah from the Quran every fixed time"""
while True:
ayah = random.randint(1, 6237)
url = f'http://api.alquran.cloud/v1/ayah/{ayah}'
res = requests.get(url)
if len(res.json()['data']['text']) <= 280:
return res.json()['data']['text'] | 5739cbd3554b97f01eefef7f59a4087e5497e3e7 | 3,653,972 |
def iterdecode(value):
"""
    Decode an enumerable from its string representation into a tuple
"""
if not value:
return tuple()
result = []
accumulator = u''
escaped = False
for c in value:
if not escaped:
if c == CHAR_ESCAPE:
escaped = True
continue
elif c == CHAR_SEPARATOR:
result.append(accumulator)
accumulator = u''
continue
else:
escaped = False
accumulator += c
result.append(accumulator)
return tuple(result) | d8b03338a4578ee7b37a4f6d31d23463fc0a9b84 | 3,653,973 |
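# Illustrative usage of iterdecode() above; CHAR_SEPARATOR and CHAR_ESCAPE are
# module-level constants not shown in the snippet, assumed here to be ',' and '\\'.
CHAR_SEPARATOR = ','
CHAR_ESCAPE = '\\'
print(iterdecode('a,b\\,c'))  # ('a', 'b,c') -- the escaped separator stays inside the item
print(iterdecode(''))         # ()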
import cv2
def run_resolution_filter(image=None, image_path=None, height=600, width=1000):
    """
    Take a correctly rotated YOLO-output image (initially for driving licenses
    only) and check whether it meets the desired height and width; return 1 if
    it does, otherwise return the error code 10002.
    :return:
    """
result = False
if image is not None:
result = test_image_height_and_width(image, desired_width=width,
desired_height=height)
if image_path is not None and image is None:
img = cv2.imread(image_path)
result = test_image_height_and_width(img, desired_width=width,
desired_height=height)
if result:
return 1
else:
return 10002 | fde3040dbb29d6c5f7d79237df51f425d2d043b4 | 3,653,974 |
# google-cloud-language < 2.0 style imports
from google.cloud import language
from google.cloud.language import enums, types
def text_to_emotion(text):
    """
    Infer the sentiment of a text and return it.
    Parameters
    ----------
    text : string
        The text to analyse.
    Returns
    -------
    {'magnitude','score'}
    """
client = language.LanguageServiceClient()
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT,
language="ja"
)
sentiment = client.analyze_sentiment(document=document).document_sentiment
return {'magnitude':sentiment.magnitude,'score':sentiment.score} | b3b784fa777146f7f1a361784f848b28651676a4 | 3,653,975 |
def process_actions(list_response_json, headers, url, force_reset):
"""
    If a policy does not exist on a given cluster, find the right values
    defined in qos_dict and apply them.
"""
qos_dict = {}
# This dictionary sets the tiers and min/max/burst settings
qos_dict['tiers'] = {"bronze": [500, 5000, 10000],
"silver": [2000, 20000, 50000],
"gold": [5000, 100000, 150000]}
# Check to see if there are no policies set
force_reset_dict = {}
if len(list_response_json['result']['qosPolicies']) == 0:
print(f"No existing QoS Policies found, implementing full install")
for qos_key, qos_val in qos_dict['tiers'].items():
pol_name = qos_key
min_iops = qos_val[0]
max_iops = qos_val[1]
burst_iops = qos_val[2]
payload = build_payload(pol_name, min_iops, max_iops, burst_iops)
connect_cluster(headers, url, payload)
# If there are policies ignore them if they match names, remove that
# name from the dict and move on
else:
for policy in list_response_json['result']['qosPolicies']:
pol_name = policy['name']
if pol_name in qos_dict['tiers'].keys():
pol_id = policy['qosPolicyID']
min_iops = qos_dict['tiers'][pol_name][0]
max_iops = qos_dict['tiers'][pol_name][1]
burst_iops = qos_dict['tiers'][pol_name][2]
pol_min = policy['qos']['minIOPS']
pol_max = policy['qos']['maxIOPS']
pol_burst = policy['qos']['burstIOPS']
if ((min_iops != pol_min or max_iops != pol_max or
burst_iops != pol_burst) and force_reset is True):
print(f"Policy mismatch detected on {pol_name}... resetting "
f"as reset flag is set to True")
print(qos_dict['tiers'][pol_name])
modify_qos_policy(headers, url, pol_id, min_iops,
max_iops, burst_iops)
elif ((min_iops != pol_min or max_iops != pol_max or
burst_iops != pol_burst) and force_reset is False):
print(f"Policy mismatch detected on {pol_name}... Leaving "
f"as reset flag is set to false")
else:
print(f"QoS Policy {pol_name} found, policy is not in "
f"configuration dictionary. Ignoring")
pass
if policy['name'] in qos_dict['tiers'].keys():
qos_dict['tiers'].pop(pol_name)
return qos_dict | 1c3651518c8c0f0f174876afdd0961099b3af342 | 3,653,976 |
def validate_ac_power(observation, values):
"""
Run a number of validation checks on a daily timeseries of AC power.
Parameters
----------
observation : solarforecastarbiter.datamodel.Observation
Observation object that the data is associated with
values : pandas.Series
Series of observation values
Returns
-------
timestamp_flag : pandas.Series
Bitmask from :py:func:`.validator.check_timestamp_spacing`
night_flag : pandas.Series
Bitmask from :py:func:`.validator.check_day_night` or
:py:func:`.validator.check_day_night_interval`
limit_flag : pandas.Series
Bitmask from :py:func:`.validator.check_ac_power_limits`
"""
solar_position, dni_extra, timestamp_flag, night_flag = _solpos_dni_extra(
observation, values)
day_night = \
~quality_mapping.convert_mask_into_dataframe(night_flag)['NIGHTTIME']
limit_flag = validator.check_ac_power_limits(
values, day_night,
observation.site.modeling_parameters.ac_capacity, _return_mask=True)
return timestamp_flag, night_flag, limit_flag | fcc487f61276e319316df3f99559ef935a7f0e7b | 3,653,977 |
import itertools
import six
def partition(predicate, iterable):
"""Use `predicate` to partition entries into falsy and truthy ones.
Recipe taken from the official documentation.
https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
t1, t2 = itertools.tee(iterable)
return (
six.moves.filterfalse(predicate, t1),
six.moves.filter(predicate, t2),
) | 5777203d9d34a9ffddc565129d8dda3ec91efc8e | 3,653,978 |
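# Illustrative usage of partition() above: the first iterator yields the falsy
# entries, the second the truthy ones.
odds, evens = partition(lambda n: n % 2 == 0, range(10))
print(list(odds))   # [1, 3, 5, 7, 9]
print(list(evens))  # [0, 2, 4, 6, 8]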
def _download(url: str) -> bytes:
"""Download something from osu!web at `url`, returning the file contents."""
with _login() as sess:
resp = sess.get(f"{url}/download", headers={"Referer": url})
if not resp.ok:
raise ReplyWith("Sorry, a download failed.")
return resp.content | 1f7fc60299f066d12438042f43da6b74d1ac0348 | 3,653,979 |
def get_node(obj, path):
"""Retrieve a deep object based on a path. Return either a Wrapped instance if the deep object is not a node, or another type of object."""
subobj = obj
indices = []
for item in path:
try:
subobj = subobj[item]
except Exception as e:
indices.append(item)
subobj, indices = _select(subobj, indices)
if isinstance(subobj, dict) or (isinstance(subobj, list) and
subobj and
isinstance(subobj[0], dict)):
return Wrapped(obj, path)
else:
assert not indices, "This path does not exist."
return subobj | f48cb0dc5ae149d0758348725ebc521e7838230f | 3,653,980 |
def without_bond_orders(gra):
""" resonance graph with maximum spin (i.e. no pi bonds)
"""
bnd_keys = list(bond_keys(gra))
# don't set dummy bonds to one!
bnd_ord_dct = bond_orders(gra)
bnd_vals = [1 if v != 0 else 0
for v in map(bnd_ord_dct.__getitem__, bnd_keys)]
bnd_ord_dct = dict(zip(bnd_keys, bnd_vals))
return set_bond_orders(gra, bnd_ord_dct) | 88b785c802a1d74a12f64a1eab6403429fa00cad | 3,653,981 |
def check_struc(d1, d2,
                errors=None, level='wf'):
    """Recursively check the structure of dictionary 2 against that of dict 1
    Arguments
    ---------
    d1 : dict
        Dictionary with desired structure
    d2 : dict
        Dictionary with structure to check
    errors : list of str, optional
        Missing values in d2. Defaults to a new empty list.
    level : str, optional
        Level of search. Initial value is 'wf' (wind farm) for top-level
        dictionary.
    Returns
    -------
    errors : list of str
        Missing values in d2.
    """
    if errors is None:  # avoid the shared mutable-default pitfall
        errors = []
for k1, v1 in d1.items(): # loop through keys and values in first dict
if k1 not in d2.keys(): # if key doesn't exist in d2
errors.append('{} not in dictionary'.format('.'.join([level,k1])))
elif isinstance(v1, dict): # otherwise, if item is a dict, recurse
errors = check_struc(v1, d2[k1],
                                 errors=errors,  # pass in accumulated errors
level='.'.join([level, k1])) # change level
return errors | aa835e7bbd6274e73d0b3d45d1ec4d617af0a167 | 3,653,982 |
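# Illustrative usage of check_struc() above, with hypothetical wind-farm dictionaries:
desired = {"turbine": {"hub_height": 90.0, "rotor_diameter": 120.0}, "name": "demo"}
actual = {"turbine": {"hub_height": 90.0}}
print(check_struc(desired, actual))
# ['wf.turbine.rotor_diameter not in dictionary', 'wf.name not in dictionary']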
def indexoflines(LFtop):
""" Determining selected line index of Gromacs compatible topology files """
file1 = open(LFtop, "r")
readline = file1.readlines()
lineindex = ["x", "x", "x"]
n = 0
for line in readline:
linelist = line.split()
if "atomtypes" in linelist:
lineindex[0] = n
n += 1
elif "moleculetype" in linelist:
lineindex[1] = n
n += 1
elif "system" in linelist:
lineindex[2] = n
n += 1
else:
n += 1
file1.close()
Idx = 0
while Idx < len(lineindex):
if not str(lineindex[Idx]).isnumeric() == True:
lineindex[Idx] = n + 1
Idx += 1
else:
Idx += 1
return {'atomtypes': lineindex[0], 'moleculetype': lineindex[1], 'system': lineindex[2]} | dd2653c6245d9f7a0fa8647dcc841c51e01f9b2d | 3,653,983 |
def create_mock_github(user='octo-cat', private=False):
"""Factory for mock GitHub objects.
Example: ::
>>> github = create_mock_github(user='octocat')
>>> github.branches(user='octocat', repo='hello-world')
>>> [{u'commit': {u'sha': u'e22d92d5d90bb8f9695e9a5e2e2311a5c1997230',
... u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/e22d92d5d90bb8f9695e9a5e2e2311a5c1997230'},
... u'name': u'dev'},
... {u'commit': {u'sha': u'444a74d0d90a4aea744dacb31a14f87b5c30759c',
... u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/444a74d0d90a4aea744dacb31a14f87b5c30759c'},
... u'name': u'master'},
... {u'commit': {u'sha': u'c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6',
... u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6'},
... u'name': u'no-bundle'}]
:param str user: Github username.
:param bool private: Whether repo is private.
:return: An autospecced GitHub Mock object
"""
github_mock = mock.create_autospec(GitHub)
github_mock.repo.return_value = github3.repos.Repository.from_json({
u'archive_url': u'https://api.github.com/repos/{user}/mock-repo/{{archive_format}}{{/ref}}'.format(user=user),
u'assignees_url': u'https://api.github.com/repos/{user}/mock-repo/assignees{{/user}}'.format(user=user),
u'blobs_url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs{{/sha}}'.format(user=user),
u'branches_url': u'https://api.github.com/repos/{user}/mock-repo/branches{{/bra.format(user=user)nch}}'.format(user=user),
u'clone_url': u'https://github.com/{user}/mock-repo.git'.format(user=user),
u'collaborators_url': u'https://api.github.com/repos/{user}/mock-repo/collaborators{{/collaborator}}'.format(user=user),
u'comments_url': u'https://api.github.com/repos/{user}/mock-repo/comments{{/number}}'.format(user=user),
u'commits_url': u'https://api.github.com/repos/{user}/mock-repo/commits{{/sha}}'.format(user=user),
u'compare_url': u'https://api.github.com/repos/{user}/mock-repo/compare/{{base}}...{{head}}',
u'contents_url': u'https://api.github.com/repos/{user}/mock-repo/contents/{{+path}}'.format(user=user),
u'contributors_url': u'https://api.github.com/repos/{user}/mock-repo/contributors'.format(user=user),
u'created_at': u'2013-06-30T18:29:18Z',
u'default_branch': u'dev',
u'description': u'Simple, Pythonic, text processing--Sentiment analysis, part-of-speech tagging, noun phrase extraction, translation, and more.',
u'downloads_url': u'https://api.github.com/repos/{user}/mock-repo/downloads'.format(user=user),
u'events_url': u'https://api.github.com/repos/{user}/mock-repo/events'.format(user=user),
u'fork': False,
u'forks': 89,
u'forks_count': 89,
u'forks_url': u'https://api.github.com/repos/{user}/mock-repo/forks',
u'full_name': u'{user}/mock-repo',
u'git_commits_url': u'https://api.github.com/repos/{user}/mock-repo/git/commits{{/sha}}'.format(user=user),
u'git_refs_url': u'https://api.github.com/repos/{user}/mock-repo/git/refs{{/sha}}'.format(user=user),
u'git_tags_url': u'https://api.github.com/repos/{user}/mock-repo/git/tags{{/sha}}'.format(user=user),
u'git_url': u'git://github.com/{user}/mock-repo.git'.format(user=user),
u'has_downloads': True,
u'has_issues': True,
u'has_wiki': True,
u'homepage': u'https://mock-repo.readthedocs.org/',
u'hooks_url': u'https://api.github.com/repos/{user}/mock-repo/hooks'.format(user=user),
u'html_url': u'https://github.com/{user}/mock-repo'.format(user=user),
u'id': 11075275,
u'issue_comment_url': u'https://api.github.com/repos/{user}/mock-repo/issues/comments/{{number}}'.format(user=user),
u'issue_events_url': u'https://api.github.com/repos/{user}/mock-repo/issues/events{{/number}}'.format(user=user),
u'issues_url': u'https://api.github.com/repos/{user}/mock-repo/issues{{/number}}'.format(user=user),
u'keys_url': u'https://api.github.com/repos/{user}/mock-repo/keys{{/key_id}}'.format(user=user),
u'labels_url': u'https://api.github.com/repos/{user}/mock-repo/labels{{/name}}'.format(user=user),
u'language': u'Python',
u'languages_url': u'https://api.github.com/repos/{user}/mock-repo/languages'.format(user=user),
u'master_branch': u'dev',
u'merges_url': u'https://api.github.com/repos/{user}/mock-repo/merges'.format(user=user),
u'milestones_url': u'https://api.github.com/repos/{user}/mock-repo/milestones{{/number}}'.format(user=user),
u'mirror_url': None,
u'name': u'mock-repo',
u'network_count': 89,
u'notifications_url': u'https://api.github.com/repos/{user}/mock-repo/notifications{{?since,all,participating}}'.format(user=user),
u'open_issues': 2,
u'open_issues_count': 2,
u'owner': {u'avatar_url': u'https://gravatar.com/avatar/c74f9cfd7776305a82ede0b765d65402?d=https%3A%2F%2Fidenticons.github.com%2F3959fe3bcd263a12c28ae86a66ec75ef.png&r=x',
u'events_url': u'https://api.github.com/users/{user}/events{{/privacy}}'.format(user=user),
u'followers_url': u'https://api.github.com/users/{user}/followers'.format(user=user),
u'following_url': u'https://api.github.com/users/{user}/following{{/other_user}}'.format(user=user),
u'gists_url': u'https://api.github.com/users/{user}/gists{{/gist_id}}'.format(user=user),
u'gravatar_id': u'c74f9cfd7776305a82ede0b765d65402',
u'html_url': u'https://github.com/{user}'.format(user=user),
u'id': 2379650,
u'login': user,
u'organizations_url': u'https://api.github.com/users/{user}/orgs'.format(user=user),
u'received_events_url': u'https://api.github.com/users/{user}/received_events',
u'repos_url': u'https://api.github.com/users/{user}/repos'.format(user=user),
u'site_admin': False,
u'starred_url': u'https://api.github.com/users/{user}/starred{{/owner}}{{/repo}}',
u'subscriptions_url': u'https://api.github.com/users/{user}/subscriptions'.format(user=user),
u'type': u'User',
u'url': u'https://api.github.com/users/{user}'.format(user=user)},
u'private': private,
u'pulls_url': u'https://api.github.com/repos/{user}/mock-repo/pulls{{/number}}'.format(user=user),
u'pushed_at': u'2013-12-30T16:05:54Z',
u'releases_url': u'https://api.github.com/repos/{user}/mock-repo/releases{{/id}}'.format(user=user),
u'size': 8717,
u'ssh_url': u'[email protected]:{user}/mock-repo.git'.format(user=user),
u'stargazers_count': 1469,
u'stargazers_url': u'https://api.github.com/repos/{user}/mock-repo/stargazers'.format(user=user),
u'statuses_url': u'https://api.github.com/repos/{user}/mock-repo/statuses/{{sha}}'.format(user=user),
u'subscribers_count': 86,
u'subscribers_url': u'https://api.github.com/repos/{user}/mock-repo/subscribers'.format(user=user),
u'subscription_url': u'https://api.github.com/repos/{user}/mock-repo/subscription'.format(user=user),
u'svn_url': u'https://github.com/{user}/mock-repo'.format(user=user),
u'tags_url': u'https://api.github.com/repos/{user}/mock-repo/tags'.format(user=user),
u'teams_url': u'https://api.github.com/repos/{user}/mock-repo/teams'.format(user=user),
u'trees_url': u'https://api.github.com/repos/{user}/mock-repo/git/trees{{/sha}}'.format(user=user),
u'updated_at': u'2014-01-12T21:23:50Z',
u'url': u'https://api.github.com/repos/{user}/mock-repo'.format(user=user),
u'watchers': 1469,
u'watchers_count': 1469,
# NOTE: permissions are only available if authorized on the repo
'permissions': {
'push': True
}
})
github_mock.branches.return_value = [
Branch.from_json({u'commit': {u'sha': u'e22d92d5d90bb8f9695e9a5e2e2311a5c1997230',
u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/e22d92d5d90bb8f9695e9a5e2e2311a5c1997230'.format(user=user)},
u'name': u'dev'}),
Branch.from_json({u'commit': {u'sha': u'444a74d0d90a4aea744dacb31a14f87b5c30759c',
u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/444a74d0d90a4aea744dacb31a14f87b5c30759c'.format(user=user)},
u'name': u'master'}),
Branch.from_json({u'commit': {u'sha': u'c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6',
u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6'.format(user=user)},
u'name': u'no-bundle'})
]
# http://developer.github.com/v3/repos/contents/
github_mock.contents.return_value = {
'octokit.rb': github3.repos.contents.Contents.from_json({
"type": "file",
"size": 625,
"name": u"\xf0octokit.rb",
"path": u"\xf0octokit.rb",
"sha": "fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b",
"url": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit.rb".format(user=user),
"git_url": "https://api.github.com/repos/{user}/octokit/git/blobs/fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b".format(user=user),
"html_url": "https://github.com/{user}/octokit/blob/master/lib/octokit.rb",
"_links": {
"self": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit.rb".format(user=user),
"git": "https://api.github.com/repos/{user}/octokit/git/blobs/fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b".format(user=user),
"html": "https://github.com/{user}/octokit/blob/master/lib/octokit.rb"
}
}),
'octokit': github3.repos.contents.Contents.from_json({
"type": "dir",
"size": 0,
"name": u"\xf0octokit",
"path": u"\xf0octokit",
"sha": "a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d",
"url": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit".format(user=user),
"git_url": "https://api.github.com/repos/{user}/octokit/git/trees/a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d",
"html_url": "https://github.com/{user}/octokit/tree/master/lib/octokit".format(user=user),
"_links": {
"self": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit".format(user=user),
"git": "https://api.github.com/repos/{user}/octokit/git/trees/a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d".format(user=user),
"html": "https://github.com/{user}/octokit/tree/master/lib/octokit".format(user=user)
}
})
}
github_mock.tree.return_value = github3.git.Tree.from_json({
'url': u'https://api.github.com/repos/{user}/mock-repo/git/trees/dev'.format(user=user),
'sha': 'dev',
'tree': [
{u'mode': u'100644',
u'path': u'coveragerc',
u'sha': u'92029ff5ce192425d346b598d7e7dd25f5f05185',
u'size': 245,
u'type': u'blob',
u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/92029ff5ce192425d346b598d7e7dd25f5f05185'.format(user=user)},
{u'mode': u'100644',
u'path': u'.gitignore',
u'sha': u'972ac8aeb0e652642b042064c835f27419e197b4',
u'size': 520,
u'type': u'blob',
u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/972ac8aeb0e652642b042064c835f27419e197b4'.format(user=user)},
{u'mode': u'100644',
u'path': u'.travis.yml',
u'sha': u'86e1fef2834cc2682e753f3ed26ab3c2e100478c',
u'size': 501,
u'type': u'blob',
u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/86e1fef2834cc2682e753f3ed26ab3c2e100478c'.format(user=user)}
]
})
github_mock.commits.return_value = [
{
'sha': '12345',
'name': 'authname',
'email': 'authmail',
'date': 'yesterday',
}
]
return github_mock | 7eaffcc7bc22657eaf3c3e7d41d9492300128a73 | 3,653,984 |
import argparse
from datetime import datetime
from typing import OrderedDict
def get_args_string(args: argparse.Namespace) -> str:
"""
Creates a string summarising the argparse arguments.
:param args: parser.parse_args()
:return: String of the arguments of the argparse namespace.
"""
string = ''
if hasattr(args, 'experiment_name'):
string += f'{args.experiment_name} ({datetime.now()})\n'
max_length = max([len(k) for k, _ in vars(args).items()])
new_dict = OrderedDict((k, v) for k, v in sorted(
vars(args).items(), key=lambda x: x[0]
))
for key, value in new_dict.items():
string += ' ' * (max_length - len(key)) + key + ': ' + str(value) + '\n'
return string | f1f4de0821d04a21df046bc0dc526b2f9f1135f6 | 3,653,985 |
def top_9(limit=21):
"""Vrni dano število knjig (privzeto 9). Rezultat je seznam, katerega
elementi so oblike [knjiga_id, avtor,naslov,slika] """
cur.execute(
"""SELECT book_id, authors, title, original_publication_year, average_rating,image_url
FROM books
ORDER BY average_rating DESC
LIMIT %s
""", [limit])
najboljsi = cur.fetchall()
    # Return the result, as described in the function docstring:
return(najboljsi) | ec87714ea5925c5d4115b0ee091597f7ffb1c323 | 3,653,986 |
import numpy as np
import torch
def spm_dot_torch(X, x, dims_to_omit=None):
""" Dot product of a multidimensional array with `x` -- Pytorch version, using Tensor instances
@TODO: Instead of a separate function, this should be integrated with spm_dot so that it can either take torch.Tensors or nd.arrays
The dimensions in `dims_to_omit` will not be summed across during the dot product
Parameters
----------
'X' [torch.Tensor]
`x` [1D torch.Tensor or numpy object array containing 1D torch.Tensors]
The array(s) to dot X with
`dims_to_omit` [list :: int] (optional)
Which dimensions to omit from summing across
"""
if x.dtype == object:
dims = (np.arange(0, len(x)) + X.ndim - len(x)).astype(int)
else:
if x.shape[0] != X.shape[1]:
"""
Case when the first dimension of `x` is likely the same as the first dimension of `A`
e.g. inverting the generative model using observations.
Equivalent to something like self.values[np.where(x),:]
when `x` is a discrete 'one-hot' observation vector
"""
dims = np.array([0], dtype=int)
else:
"""
Case when `x` leading dimension matches the lagging dimension of `values`
E.g. a more 'classical' dot product of a likelihood with hidden states
"""
dims = np.array([1], dtype=int)
x_new = np.empty(1, dtype=object)
x_new[0] = x.squeeze()
x = x_new
if dims_to_omit is not None:
if not isinstance(dims_to_omit, list):
raise ValueError("dims_to_omit must be a `list`")
dims = np.delete(dims, dims_to_omit)
if len(x) == 1:
x = np.empty([0], dtype=object)
else:
x = np.delete(x, dims_to_omit)
Y = X
for d in range(len(x)):
s = np.ones(Y.ndim, dtype=int)
s[dims[d]] = max(x[d].shape)
Y = Y * x[d].view(tuple(s))
Y = Y.sum(dim=int(dims[d]), keepdim=True)
Y = Y.squeeze()
# perform check to see if `y` is a number
if Y.numel() <= 1:
Y = np.asscalar(Y)
Y = torch.Tensor([Y])
return Y | 2dad76db822c24dc740e17b034d74644d0c91e19 | 3,653,987 |
def inverseTranslateTaps(lowerTaps, pos):
"""Method to translate tap integer in range
[-lower_taps, raise_taps] to range [0, lowerTaps + raiseTaps]
"""
# Hmmm... is it this simle?
posOut = pos + lowerTaps
return posOut | 827bdfc51b3581b7b893ff8ff02dd5846ff6cd0f | 3,653,988 |
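# Quick illustrative check of inverseTranslateTaps() above:
print(inverseTranslateTaps(lowerTaps=16, pos=-16))  # 0
print(inverseTranslateTaps(lowerTaps=16, pos=0))    # 16
print(inverseTranslateTaps(lowerTaps=16, pos=16))   # 32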
def GMLstring2points(pointstring):
"""Convert list of points in string to a list of points. Works for 3D points."""
listPoints = []
#-- List of coordinates
coords = pointstring.split()
#-- Store the coordinate tuple
assert(len(coords) % 3 == 0)
for i in range(0, len(coords), 3):
listPoints.append([float(coords[i]), float(coords[i+1]), float(coords[i+2])])
return listPoints | e755d344d163bdcdb114d0c9d614a1bbd40be29f | 3,653,989 |
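# Illustrative usage of GMLstring2points() above with a GML posList-style string:
print(GMLstring2points("0.0 0.0 1.0 10.0 0.0 1.5"))
# [[0.0, 0.0, 1.0], [10.0, 0.0, 1.5]]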
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
"""
Set a key/value pair in the etcd service
"""
client = _get_conn(profile)
client.set(key, value)
return get(key, service, profile) | c9689c53dc837caf182ac0b5d0e8552888ec70e9 | 3,653,990 |
import numpy as np
def compute_cw_score_normalized(p, q, edgedict, ndict, params = None):
    """
    Computes the common weighted normalized score between p and q
    @param p -> A node of the graph
    @param q -> Another node in the graph
    @param edgedict -> A dictionary with key `(p, q)` and value `w`.
    @param ndict -> A dictionary with key `p` and the value a set `{p1, p2, ...}`
    @param params -> A dictionary that must provide node degrees under the key "deg"
    @return -> A real value representing the score
    """
if (len(ndict[p]) > len(ndict[q])):
temp = p
p = q
q = temp
score = 0
for elem in ndict[p]:
if elem in ndict[q]:
p_elem = edgedict[(p, elem)] if (p, elem) in edgedict else edgedict[(elem, p)]
q_elem = edgedict[(q, elem)] if (q, elem) in edgedict else edgedict[(elem, q)]
score += p_elem + q_elem
degrees = params["deg"]
return score / np.sqrt(degrees[p] * degrees[q]) | 7769bc21d6a6bf176002ea6f4020cbe78f971b84 | 3,653,991 |
def prompt_user_friendly_choice_list(msg, a_list, default=1, help_string=None):
"""Prompt user to select from a list of possible choices.
:param msg:A message displayed to the user before the choice list
:type msg: str
:param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc')
"type a_list: list
:param default:The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen.
"""
verify_is_a_tty()
options = '\n'.join([' [{}] {}{}'
.format(i + 1,
x['name'] if isinstance(x, dict) and 'name' in x else x,
' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '')
for i, x in enumerate(a_list)])
allowed_vals = list(range(1, len(a_list) + 1))
linesToDelete = len(a_list) + 1
while True:
val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default))
if val == '?' and help_string is not None:
for x in range(0, linesToDelete):
delete_last_line()
print('Please enter a choice [Default choice({})]: {}'.format(default, '?'))
print(help_string)
continue
if not val:
val = '{}'.format(default)
try:
ans = int(val)
if ans in allowed_vals:
for x in range(0, linesToDelete):
delete_last_line()
print('Please enter a choice [Default choice({})]: {}'.format(default, a_list[ans - 1]))
# array index is 0-based, user input is 1-based
return ans - 1
raise ValueError
except ValueError:
for x in range(0, linesToDelete):
delete_last_line()
print('Please enter a choice [Default choice({})]: {}'.format(default, val))
logger.warning('Valid values are %s', allowed_vals) | d2c81b8af3f2de3203dd8cfd11372909e2e9cbe3 | 3,653,992 |
import typing
import json
def fuse(search: typing.Dict, filepath: str):
"""Build a JSON doc of your pages"""
with open(filepath, "w") as jsonfile:
return json.dump(
[x for x in _build_index(search, id_field="id")],
fp=jsonfile,
) | 542aff31a2861bc8de8a25025582305db0ce2af1 | 3,653,993 |
import tensorflow as tf
def aggregate_gradients_using_copy_with_device_selection(
tower_grads, avail_devices, use_mean=True, check_inf_nan=False):
"""Aggregate gradients, controlling device for the aggregation.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
    avail_devices: List of device names; the aggregation of gradient i is placed
      on avail_devices[i % len(avail_devices)].
    use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
return agg_grads | 24d12a4e3ee63b96dd453bc901a9180ed956003b | 3,653,994 |
def ToOrdinal(value):
"""
Convert a numerical value into an ordinal number.
@param value: the number to be converted
"""
if value % 100//10 != 1:
if value % 10 == 1:
ordval = '{}st'.format(value)
elif value % 10 == 2:
ordval = '{}nd'.format(value)
elif value % 10 == 3:
ordval = '{}rd'.format(value)
else:
ordval = '{}th'.format(value)
else:
ordval = '{}th'.format(value)
return ordval | 774bac5fd22714ba3eb4c9dd2b16f4236e2f5e8c | 3,653,995 |
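# Quick illustrative check of ToOrdinal() above, including the 11th/12th/13th special cases:
for n in (1, 2, 3, 4, 11, 12, 13, 21, 112):
    print(ToOrdinal(n))  # 1st, 2nd, 3rd, 4th, 11th, 12th, 13th, 21st, 112th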
from typing import List
def compute_partition(num_list: List[int]):
"""Compute paritions that add up."""
solutions = []
for bits in helper.bitprod(len(num_list)):
iset = []
oset = []
for idx, val in enumerate(bits):
(iset.append(num_list[idx]) if val == 0 else
oset.append(num_list[idx]))
if sum(iset) == sum(oset):
solutions.append(bits)
return solutions | 408f5f85b1648facdcebfd47f96f53221b54888e | 3,653,996 |
def renumber(conllusent):
"""Fix non-contiguous IDs because of multiword tokens or removed tokens"""
mapping = {line[ID]: n for n, line in enumerate(conllusent, 1)}
mapping[0] = 0
for line in conllusent:
line[ID] = mapping[line[ID]]
line[HEAD] = mapping[line[HEAD]]
return conllusent | 30f336cd63e7aff9652e6e3d1a35a21dc3379c99 | 3,653,997 |
def recall_at(target, scores, k):
"""Calculation for recall at k."""
if target in scores[:k]:
return 1.0
else:
return 0.0 | 0c3f70be3fb4cfde16d5e39b256e565f180d1655 | 3,653,998 |
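# Illustrative usage of recall_at() above with hypothetical document ids:
ranked = ['doc_7', 'doc_3', 'doc_1']
print(recall_at('doc_3', ranked, k=2))  # 1.0 -- target is within the top-2
print(recall_at('doc_1', ranked, k=2))  # 0.0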
def supports_dynamic_state() -> bool:
"""Checks if the state can be displayed with widgets.
:return: True if widgets available. False otherwise.
"""
return widgets is not None | bee2f32bb315f086b6bd8b75535eb8fdde36a188 | 3,653,999 |