content stringlengths 35–762k | sha1 stringlengths 40–40 | id int64 0–3.66M |
---|---|---|
from uk_covid19 import Cov19API
def covid_API(cases_and_deaths: dict) -> dict:
"""
Imports Covid Data
    :param cases_and_deaths: Structure dictionary obtained from the config file
:return: A dictionary of covid information
"""
api = Cov19API(filters=england_only, structure=cases_and_deaths)
data = api.get_json()
return data | 8429c35770d25d595a6f51a2fe80d2eac585c785 | 3,650,927 |
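# Hedged usage sketch for covid_API. The filter and structure below follow the
# uk_covid19 SDK conventions but are illustrative assumptions, not from the source.
england_only = [
    'areaType=nation',
    'areaName=England'
]
cases_and_deaths = {
    "date": "date",
    "newCasesByPublishDate": "newCasesByPublishDate",
    "newDeaths28DaysByPublishDate": "newDeaths28DaysByPublishDate",
}
# data = covid_API(cases_and_deaths)  # would query the live API and return its JSON payload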
import bpy
def gather_gltf2(export_settings):
"""
Gather glTF properties from the current state of blender.
:return: list of scene graphs to be added to the glTF export
"""
scenes = []
animations = [] # unfortunately animations in gltf2 are just as 'root' as scenes.
active_scene = None
for blender_scene in bpy.data.scenes:
scenes.append(__gather_scene(blender_scene, export_settings))
if export_settings[gltf2_blender_export_keys.ANIMATIONS]:
animations += __gather_animations(blender_scene, export_settings)
if bpy.context.scene.name == blender_scene.name:
            active_scene = len(scenes) - 1
return active_scene, scenes, animations | 6a382349a1a2aef3d5d830265b0f7430440ac6ef | 3,650,929 |
import time
import gym
import numpy as np
def getOneRunMountainCarFitness_modifiedReward(tup):
"""Get one fitness from the MountainCar or MountainCarContinuous
environment while modifying its reward function.
The MountainCar environments reward only success, not progress towards
success. This means that individuals that are trying to drive up the
hill, but not succeeding will get the exact same fitness as individuals
that do nothing at all. This function provides some reward to the
individual based on the maximum distance it made it up the hill.
Parameters: A tuple expected to contain the following:
0: individual - The model,
1: continuous - True if using MountainCarContinuous, false to use
MountainCar.
2: renderSpeed - None to not render, otherwise the number of seconds to
sleep between each frame; this can be a floating point
value."""
individual, continuous, renderSpeed = tup[0], tup[1], tup[2]
env = None
if continuous:
env = gym.make('MountainCarContinuous-v0')
else:
env = gym.make('MountainCar-v0')
maxFrames = 2000
runReward = 0
    maxPosition = -1.2  # -1.2 is the minimum position for this environment.
observation = env.reset()
individual.resetForNewTimeSeries()
for j in range(maxFrames):
        # The continuous version doesn't require argmax, but it does need
# a conversion from a single value to the list that the environment
# expects:
if continuous:
action = [individual.calculateOutputs(observation)]
else:
action = np.argmax(individual.calculateOutputs(observation))
if renderSpeed is not None:
env.render()
if renderSpeed != 0:
time.sleep(renderSpeed)
observation, reward, done, info = env.step(action)
runReward += reward
# Record the furthest we made it up the hill:
maxPosition = max(observation[0], maxPosition)
if done:
break
env.close()
# Return the fitness, modified by the maxPosition attained. The position
# weighs heavier with the continuous version:
if continuous:
return runReward + (1000.0 * maxPosition)
else:
return runReward + (10.0 * maxPosition) | f17e768755d0b4862ee70a0fe7d317a8074d7852 | 3,650,930 |
def ArtToModel(art, options):
"""Convert an Art object into a Model object.
Args:
art: geom.Art - the Art object to convert.
options: ImportOptions - specifies some choices about import
Returns:
(geom.Model, string): if there was a major problem, Model may be None.
The string will be errors and warnings.
"""
pareas = art2polyarea.ArtToPolyAreas(art, options.convert_options)
if not pareas:
return (None, "No visible faces found")
if options.scaled_side_target > 0:
pareas.scale_and_center(options.scaled_side_target)
m = model.PolyAreasToModel(pareas, options.bevel_amount,
options.bevel_pitch, options.quadrangulate)
if options.extrude_depth > 0:
model.ExtrudePolyAreasInModel(m, pareas, options.extrude_depth,
options.cap_back)
return (m, "") | 3130471f7aa6b0b8fd097c97ca4916a51648112e | 3,650,931 |
import numpy as np
import pandas as pd
def simulate_data(N, intercept, slope, nu, sigma2=1, seed=None):
"""Simulate noisy linear model with t-distributed residuals.
Generates `N` samples from a one-dimensional linear regression with
residuals drawn from a t-distribution with `nu` degrees of freedom, and
scaling-parameter `sigma2`. The true parameters of the linear model are
specified by the `intercept` and `slope` parameters.
Args:
N, int: Number of samples.
intercept, float: The intercept of the linear model.
slope, float: The slope of the linear model.
nu, float (>0): The degrees of freedom of the t-distribution.
sigma2, float (>0): The scale-parameter of the t-distribution.
seed, int: Set random seed for repeatability.
Return:
DataFrame containing N samples from noisy linear model.
"""
np.random.seed(seed)
# x ~ Uniform(0,1)
    interval = np.linspace(0, 1, num=2*N)
sample = np.random.choice(interval, size=N, replace=False)
df = pd.DataFrame({"x": sample})
# generate y values using linear model
linear_map = lambda x: intercept + slope*x
df['y'] = linear_map(df['x']) + sigma2*np.random.standard_t(nu, N)
return df | a88e7f1958876c3dd47101da7f2f1789e02e4d18 | 3,650,932 |
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
"""
52ms 93.76%
13.1MB 83.1%
:param self:
:param head:
:param n:
:return:
"""
if not head:
return head
dummy = ListNode(0)
dummy.next = head
fast = dummy
while n:
fast = fast.next
n -= 1
slow = dummy
while fast and fast.next:
fast = fast.next
slow = slow.next
slow.next = slow.next.next
return dummy.next | 5b9fa939aec64425e7ca9932fe0cc5814fd0f608 | 3,650,934 |
import logging
def get_masters(domain):
""" """
content = request.get_json()
conf = {
'check_masters' : request.headers.get('check_masters'),
'remote_api' : request.headers.get('remote_api'),
'remote_api_key' : request.headers.get('remote_api_key')
}
masters = pdns_get_masters(
remote_api=conf['remote_api'],
remote_api_key=conf['remote_api_key'],
domain=domain
)
logging.info("masters: {}".format(masters))
return jsonify(masters) | dd006d889ee9f11a8f522a111ce7a4db4f5ba039 | 3,650,938 |
def SplitGeneratedFileName(fname):
"""Reverse of GetGeneratedFileName()
"""
return tuple(fname.split('x',4)) | 0210361d437b134c3c24a224ab93d2ffdcfc32ec | 3,650,939 |
def chooseBestFeatureToSplit(dataSet):
"""
选择最优划分特征
输入: 数据集
输出: 最优特征
"""
numFeatures = len(dataSet[0])-1
baseEntropy = calcShannonEnt(dataSet) #原始数据的熵
bestInfoGain = 0
bestFfeature = -1
for i in range(numFeatures): #循环所有特征
featList = [example[i] for example in dataSet]
uniqueVals = set(featList) #某个特征的取值,如[long,short]
newEntropy = 0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet,i,value) #按某一特征的取值分类,如Long
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob*calcShannonEnt(subDataSet) #计算按该特征分类的熵,如DATASET(LONG)和DATASET(Short)的熵
infoGain = baseEntropy - newEntropy #计算增益,原始熵-Dataset(long)的熵-Dataset(short)的熵
if (infoGain>bestInfoGain):
bestInfoGain = infoGain
bestFfeature = i #选出最优分类特征
return bestFfeature | 1e9935cf280b5bf1a32f34187038301109df7d19 | 3,650,940 |
import torch
from tqdm import tqdm
def evaluate_model(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, device: torch.device):
"""Function for evaluation of a model `model` on the data in `dataloader` on device `device`"""
# Define a loss (mse loss)
mse = torch.nn.MSELoss()
# We will accumulate the mean loss in variable `loss`
loss = torch.tensor(0., device=device)
with torch.no_grad(): # We do not need gradients for evaluation
# Loop over all samples in `dataloader`
for data in tqdm(dataloader, desc="scoring", position=0):
# Get a sample and move inputs and targets to device
inputs, targets, mask = data
inputs = inputs.to(device)
targets = targets.to(device)
mask = mask.to(device)
# mask = mask.to(dtype=torch.bool)
# Get outputs for network
outputs = model(inputs) * mask
# predictions = [outputs[i, mask[i]] for i in range(len(outputs))]
# Here we could clamp the outputs to the minimum and maximum values of inputs for better performance
# Calculate mean mse loss over all samples in dataloader (accumulate mean losses in `loss`)
# losses = torch.stack([mse(prediction, target.reshape((-1,))) for prediction, target in zip(predictions, targets)])
# loss = losses.mean()
            loss = loss + mse(outputs, targets)
    # Average the accumulated batch losses over the number of batches
    loss = loss / len(dataloader)
return loss | e550c469d0b66cc0a0ef32d2907521c77ed760fa | 3,650,941 |
def get_DOE_quantity_byfac(DOE_xls, fac_xls, facilities='selected'):
"""
Returns total gallons of combined imports and exports
by vessel type and oil classification to/from WA marine terminals
used in our study.
    DOE_xls [Path obj. or string]: Path to the Dept. of Ecology transfer dataset
    fac_xls [Path obj. or string]: Path to the facilities spreadsheet
    facilities [string]: 'all' or 'selected'
"""
# convert inputs to lower-case
#transfer_type = transfer_type.lower()
facilities = facilities.lower()
# Import Department of Ecology data:
print('get_DOE_quantity_byfac: not yet tested with fac_xls as input')
df = get_DOE_df(DOE_xls, fac_xls)
# get list of oils grouped by our monte_carlo oil types
oil_types = [
'akns', 'bunker', 'dilbit',
'jet', 'diesel', 'gas', 'other'
]
# names of oil groupings that we want for our output/graphics
oil_types_graphics = [
'ANS', 'Bunker-C', 'Dilbit',
'Jet Fuel', 'Diesel', 'Gasoline',
'Other'
]
oil_classification = get_DOE_oilclassification(DOE_xls)
# SELECTED FACILITIES
exports={}
imports={}
combined={}
if facilities == 'selected':
# The following list includes facilities used in Casey's origin/destination
# analysis with names matching the Dept. of Ecology (DOE) database.
# For example, the shapefile "Maxum Petroleum - Harbor Island Terminal" is
# labeled as 'Maxum (Rainer Petroleum)' in the DOE database. I use the
# Ecology language here and will need to translate to Shapefile speak
# If facilities are used in output to compare with monte-carlo transfers
# then some terminals will need to be grouped, as they are in the monte carlo.
# Terminal groupings in the voyage joins are: (1)
# 'Maxum (Rainer Petroleum)' and 'Shell Oil LP Seattle Distribution Terminal'
# are represented in
# ==>'Kinder Morgan Liquids Terminal - Harbor Island', and
# (2) 'Nustar Energy Tacoma' => 'Phillips 66 Tacoma Terminal'
facility_names = [
'Alon Asphalt Company (Paramount Petroleum)',
'Andeavor Anacortes Refinery (formerly Tesoro)',
'BP Cherry Point Refinery',
'Kinder Morgan Liquids Terminal - Harbor Island' ,
'Maxum (Rainer Petroleum)',
'Naval Air Station Whidbey Island (NASWI)',
'NAVSUP Manchester',
'Nustar Energy Tacoma',
'Phillips 66 Ferndale Refinery',
'Phillips 66 Tacoma Terminal',
'SeaPort Sound Terminal',
'Shell Oil LP Seattle Distribution Terminal',
'Shell Puget Sound Refinery',
'Tesoro Port Angeles Terminal','U.S. Oil & Refining',
'Tesoro Pasco Terminal', 'REG Grays Harbor, LLC',
'Tesoro Vancouver Terminal',
'Tidewater Snake River Terminal',
'Tidewater Vancouver Terminal',
'TLP Management Services LLC (TMS)'
]
for vessel_type in ['atb','barge','tanker']:
exports[vessel_type]={}
imports[vessel_type]={}
combined[vessel_type]={}
if vessel_type == 'barge':
print('Tallying barge quantities')
# get transfer quantities by oil type
type_description = ['TANK BARGE','TUGBOAT']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(~df.Receiver.str.contains('ITB')) &
(~df.Receiver.str.contains('ATB')) &
(df.Deliverer.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(~df.Deliverer.str.contains('ITB')) &
(~df.Deliverer.str.contains('ATB')) &
(df.Receiver.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
elif vessel_type == 'tanker':
print('Tallying tanker quantities')
# get transfer quantities by oil type
type_description = ['TANK SHIP']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(df.Deliverer.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(df.Receiver.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
elif vessel_type == 'atb':
print('Tallying atb quantities')
# get transfer quantities by oil type
type_description = ['TANK BARGE','TUGBOAT']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(df.Receiver.str.contains('ITB') |
df.Receiver.str.contains('ATB')) &
(df.Deliverer.isin(facility_names))&
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(df.Deliverer.str.contains('ITB') |
df.Deliverer.str.contains('ATB')) &
(df.Receiver.isin(facility_names))&
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# combine imports and exports and convert oil type names to
# those we wish to use for graphics/presentations
# The name change mostly matters for AKNS -> ANS.
for idx,oil in enumerate(oil_types):
# convert names
exports[vessel_type][oil_types_graphics[idx]] = (
exports[vessel_type][oil]
)
imports[vessel_type][oil_types_graphics[idx]] = (
imports[vessel_type][oil]
)
# remove monte-carlo names
exports[vessel_type].pop(oil)
imports[vessel_type].pop(oil)
# combine imports and exports
combined[vessel_type][oil_types_graphics[idx]] = (
imports[vessel_type][oil_types_graphics[idx]] + \
exports[vessel_type][oil_types_graphics[idx]]
)
return exports, imports, combined | 371fd9b2bc0f9e45964af5295de1edad903729c9 | 3,650,942 |
import re
def number_finder(page, horse):
"""Extract horse number with regex."""
if 'WinPlaceShow' in page:
return re.search('(?<=WinPlaceShow\\n).[^{}]*'.format(horse), page).group(0)
elif 'WinPlace' in page:
return re.search('(?<=WinPlace\\n).[^{}]*'.format(horse), page).group(0) | 483067fcfa319a7dfe31fdf451db82550fd35d03 | 3,650,943 |
from ...data import COCODetection
def ssd_300_mobilenet0_25_coco(pretrained=False, pretrained_base=True, **kwargs):
"""SSD architecture with mobilenet0.25 base networks for COCO.
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized.
norm_layer : object
Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
norm_kwargs : dict
Additional `norm_layer` arguments, for example `num_devices=4`
for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
Returns
-------
HybridBlock
A SSD detection network.
"""
classes = COCODetection.CLASSES
return get_ssd('mobilenet0.25', 300,
features=['relu22_fwd', 'relu26_fwd'],
filters=[256, 256, 128, 128],
sizes=[21, 45, 99, 153, 207, 261, 315],
ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
steps=[8, 16, 32, 64, 100, 300],
classes=classes, dataset='coco', pretrained=pretrained,
pretrained_base=pretrained_base, **kwargs) | 5c234e824d60a116b7640eff4c50adba98792927 | 3,650,944 |
def get_project_by_id(client: SymphonyClient, id: str) -> Project:
"""Get project by ID
:param id: Project ID
:type id: str
:raises:
* FailedOperationException: Internal symphony error
* :class:`~psym.exceptions.EntityNotFoundError`: Project does not exist
:return: Project
:rtype: :class:`~psym.common.data_class.Project`
**Example**
.. code-block:: python
project = client.get_project_by_id(
id="12345678",
)
"""
result = ProjectDetailsQuery.execute(client, id=id)
if result is None:
raise EntityNotFoundError(entity=Entity.Project, entity_id=id)
return format_to_project(project_fragment=result) | 72904b1f72eb2ce3e031df78d8f00cef8d5b5791 | 3,650,945 |
import numpy as np
def write_trans_output(k, output_fname, output_steps_fname, x, u, time, nvar):
"""
Output transient step and spectral step in a CSV file"""
# Transient
if nvar > 1:
uvars = np.split(u, nvar)
results_u = [np.linalg.norm(uvar, np.inf) for uvar in uvars]
results = [
time,
]
results[1:1] = results_u
else:
results = [time, np.linalg.norm(u, np.inf)]
fmt = ["%1.4e"]
fmt_var = ["%1.4e"] * nvar
fmt[1:1] = fmt_var
with open(output_fname, "a+", newline="") as write_obj:
np.savetxt(
write_obj,
[results],
fmt=fmt,
comments="",
delimiter=",",
)
# Spectral
if bool(output_steps_fname): # string not empty
filename = output_steps_fname + str(k) + ".csv"
if nvar > 1:
uvars = np.split(u, nvar)
uvars = [np.concatenate([[0.0], uvar, [0.0]]) for uvar in uvars]
uvars = np.array(uvars)
header = ["x"]
header_var = ["u" + str(int(k)) for k in range(nvar)]
header[1:1] = header_var
header = ",".join(header)
data = np.column_stack((np.flip(x), uvars.transpose()))
else:
u = np.concatenate([[0.0], u, [0.0]])
header = "x,u"
data = np.column_stack((np.flip(x), u))
np.savetxt(
filename, data, delimiter=",", fmt="%1.4e", header=header, comments=""
)
return None | 5681902519af79777f8fb5aa2a36f8445ee4cf32 | 3,650,946 |
def browser(browserWsgiAppS):
"""Fixture for testing with zope.testbrowser."""
assert icemac.addressbook.testing.CURRENT_CONNECTION is not None, \
"The `browser` fixture needs a database fixture like `address_book`."
return icemac.ab.calendar.testing.Browser(wsgi_app=browserWsgiAppS) | 47c9a0d4919be55d15a485632bca826183ba92b2 | 3,650,947 |
import numpy as np
from sklearn.mixture import BayesianGaussianMixture as BGM
from sklearn.neighbors import KernelDensity as KD
def mixture_fit(samples,
model_components,
model_covariance,
tolerance,
em_iterations,
parameter_init,
model_verbosity,
model_selection,
kde_bandwidth):
"""Fit a variational Bayesian non-parametric Gaussian mixture model to samples.
This function takes the parameters described below to initialize and then fit a
model to a provided set of data points. It returns a Scikit-learn estimator object
that can then be used to generate samples from the distribution approximated by the
model and score the log-probabilities of data points based on the returned model.
Parameters:
-----------
samples : array-like
The set of provided data points that the function's model should be fitted to.
model_components : int, defaults to rounding up (2 / 3) * the number of dimensions
The maximum number of Gaussians to be fitted to data points in each iteration.
model_covariance : {'full', 'tied', 'diag', 'spherical'}
The type of covariance parameters the model should use for the fitting process.
tolerance : float
The model's convergence threshold at which the model's fit is deemed finalized.
em_iterations : int
The maximum number of expectation maximization iterations the model should run.
parameter_init : {'kmeans', 'random'}
The method used to initialize the model's weights, the means and the covariances.
model_verbosity : {0, 1, 2}
The amount of information that the model fitting should provide during runtime.
model_selection : {'gmm', 'kde'}
The selection of the type of model that should be used for the fitting process,
i.e. either a variational Bayesian non-parametric GMM or kernel density estimation.
kde_bandwidth : float
The kernel bandwidth that should be used in the case of kernel density estimation.
Returns:
--------
model : sklearn estimator
A variational Bayesian non-parametric Gaussian mixture model fitted to samples.
Attributes:
-----------
fit(X) : Estimate a model's parameters with the expectation maximization algorithm.
sample(n_samples=1) : Generate a new set of random data points from fitted Gaussians.
score_samples(X) : Calculate the weighted log-probabilities for each data point.
"""
# Check which type of model should be used for the iterative fitting process
if model_selection == 'gmm':
# Initialize a variational Bayesian non-parametric GMM for fitting
model = BGM(n_components = model_components,
covariance_type = model_covariance,
tol = tolerance,
max_iter = em_iterations,
init_params = parameter_init,
verbose = model_verbosity,
verbose_interval = 10,
warm_start = False,
random_state = 42,
weight_concentration_prior_type = 'dirichlet_process')
if model_selection == 'kde':
model = KD(bandwidth = kde_bandwidth,
kernel = 'gaussian',
metric = 'euclidean',
algorithm = 'auto',
breadth_first = True,
atol = 0.0,
rtol = tolerance)
# Fit the previously initialized model to the provided data points
model.fit(np.asarray(samples))
return model | 807f0ef2028a5dcb99052e6b86558f8b325405db | 3,650,948 |
def find_best_lexer(text, min_confidence=0.85):
"""
Like the built in pygments guess_lexer, except has a minimum confidence
level. If that is not met, it falls back to plain text to avoid bad
highlighting.
:returns: Lexer instance
"""
current_best_confidence = 0.0
current_best_lexer = None
for lexer in _iter_lexerclasses():
confidence = lexer.analyse_text(text)
if confidence == 1.0:
return lexer()
elif confidence > current_best_confidence:
current_best_confidence = confidence
current_best_lexer = lexer
if current_best_confidence >= min_confidence:
return current_best_lexer()
else:
return TextLexer() | 57cffae3385886cc7841086697ce30ff10bb3bd8 | 3,650,951 |
def volta(contador, quantidade):
"""
    Moves back a given number of characters
    :param contador: integer used to determine a position in the string
    :param quantidade: integer used to determine the new position in the string
    :type contador: int
    :type quantidade: int
    :return: returns the new counter
    :rtype: int
"""
return contador - quantidade | 4183afebdfc5273c05563e4675ad5909124a683a | 3,650,952 |
from operator import and_
def keep_room(session, worker_id, room_id):
"""Try to keep a room"""
# Update room current timestamp
query = update(
Room
).values({
Room.updated: func.now(),
}).where(
and_(Room.worker == worker_id,
Room.id == room_id)
)
proxy = session.execute(query)
session.commit()
return proxy.rowcount == 1 | b4dbbc972d7fd297bf55b205e92d2126a5a68e6e | 3,650,953 |
from typing import List
def get_rounds(number: int) -> List[int]:
"""
:param number: int - current round number.
:return: list - current round and the two that follow.
"""
return list(range(number, number + 3)) | 9bf55545404acd21985c1765906fc439f5f4aed6 | 3,650,954 |
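# Quick usage check for get_rounds: the current round plus the two that follow.
assert get_rounds(27) == [27, 28, 29]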
from bs4 import BeautifulSoup
import datetime
def parse_pasinobet(url):
"""
    Returns the odds available on pasinobet
"""
selenium_init.DRIVER["pasinobet"].get("about:blank")
selenium_init.DRIVER["pasinobet"].get(url)
match_odds_hash = {}
match = None
date_time = None
WebDriverWait(selenium_init.DRIVER["pasinobet"], 15).until(
EC.invisibility_of_element_located(
(By.CLASS_NAME, "skeleton-line")) or sportsbetting.ABORT
)
if sportsbetting.ABORT:
raise sportsbetting.AbortException
inner_html = selenium_init.DRIVER["pasinobet"].execute_script(
"return document.body.innerHTML")
soup = BeautifulSoup(inner_html, features="lxml")
date = ""
for line in soup.findAll():
if sportsbetting.ABORT:
raise sportsbetting.AbortException
if "class" in line.attrs and "category-date" in line["class"]:
date = line.text.lower()
date = date.replace("nov", "novembre")
date = date.replace("déc", "décembre")
if "class" in line.attrs and "event-title" in line["class"]:
match = " - ".join(map(lambda x: list(x.stripped_strings)[0],
line.findChildren("div", {"class": "teams-container"})))
if "class" in line.attrs and "time" in line["class"]:
try:
date_time = datetime.datetime.strptime(
date+line.text.strip(), "%A, %d %B %Y%H:%M")
except ValueError:
date_time = "undefined"
if "class" in line.attrs and "event-list" in line["class"]:
if "---" not in list(line.stripped_strings):
odds = list(map(float, line.stripped_strings))
match_odds_hash[match] = {}
match_odds_hash[match]["date"] = date_time
match_odds_hash[match]["odds"] = {"pasinobet": odds}
return match_odds_hash | 5cda34741f4e6cc26e2ecccec877c9af2426084a | 3,650,955 |
def create_toolbutton(parent, icon=None, tip=None, triggered=None):
"""Create a QToolButton."""
button = QToolButton(parent)
if icon is not None:
button.setIcon(icon)
if tip is not None:
button.setToolTip(tip)
if triggered is not None:
button.clicked.connect(triggered)
return button | dfff516f498f924ca5d5d6b15d94907ed2e06029 | 3,650,956 |
import pandas as pd
from sqlalchemy import select
def __basic_query(model, verbose: bool = False) -> pd.DataFrame:
"""Execute and return basic query."""
stmt = select(model)
if verbose:
print(stmt)
return pd.read_sql(stmt, con=CONN, index_col="id") | eb9c44eb64144b1e98e310e2dd026e5b1e912619 | 3,650,957 |
def format_data_preprocessed(data, dtype = np.float):
"""
The input data preprocessing
data the input data frame
preprocessing whether to use features preprocessing (Default: False)
dtype the data type for ndarray (Default: np.float)
"""
train_flag = np.array(data['train_flag'])
    print('Formatting input data, size: %d' % (len(train_flag)))
# outputs, nans excluded
y = data.loc[ :,'y1':'y3']
# replace nans with 0
y.fillna(0, inplace=True)
# collect only train data
ytr = np.array(y)[train_flag]
# collect only validation data
yvl = np.array(y)[~train_flag]
print 'Train data outputs collected, size: %d' % (len(ytr))
    print('\n\nData before encoding\n\n%s' % data.describe())
# dropping target and synthetic columns
data.drop(['y1','y2','y3','train_flag', 'COVAR_y1_MISSING', 'COVAR_y2_MISSING', 'COVAR_y3_MISSING'], axis=1, inplace=True)
    print('\n\nData after encoding\n\n%s' % data.describe())
# split into training and test
X = np.array(data).astype(dtype)
Xtr = X[train_flag]
Xvl = X[~train_flag]
#print 'Train data first: %s' % (Xtr[0])
#print 'Evaluate data first: %s' % (Xvl[0])
return Xtr, ytr, Xvl, yvl | a5785ef81a0f5d35f8fb73f72fbe55084bc5e2b0 | 3,650,958 |
def get_word_idxs_1d(context, token_seq, char_start_idx, char_end_idx):
"""
0 based
:param context:
:param token_seq:
:param char_start_idx:
:param char_end_idx:
:return: 0-based token index sequence in the tokenized context.
"""
spans = get_1d_spans(context,token_seq)
idxs = []
for wordIdx, span in enumerate(spans):
if not (char_end_idx <= span[0] or char_start_idx >= span[1]):
idxs.append(wordIdx)
assert len(idxs) > 0, "{} {} {} {}".format(context, token_seq, char_start_idx, char_end_idx)
return idxs | b279a3baea0e9646b55e598fd6ae16df70de5100 | 3,650,960 |
import binascii
def create_b64_from_private_key(private_key: X25519PrivateKey) -> bytes:
"""Create b64 ascii string from private key object"""
private_bytes = private_key_to_bytes(private_key)
b64_bytes = binascii.b2a_base64(private_bytes, newline=False)
return b64_bytes | 3abd69bcd3fc254c94da9fac446c6ffbc462f58d | 3,650,961 |
def create_fake_record(filename):
"""Create records for demo purposes."""
data_to_use = _load_json(filename)
data_acces = {
"access_right": fake_access_right(),
"embargo_date": fake_feature_date(),
}
service = Marc21RecordService()
draft = service.create(
data=data_to_use, identity=system_identity(), access=data_acces
)
record = service.publish(id_=draft.id, identity=system_identity())
return record | 744ed3a3b13bc27d576a31d565d846850e6640a3 | 3,650,962 |
import json
def load_configuration():
"""
This function loads the configuration from the
config.json file and then returns it.
Returns: The configuration
"""
with open('CONFIG.json', 'r') as f:
return json.load(f) | 91eae50d84ec9e4654ed9b8bcfa35215c8b6a7c2 | 3,650,963 |
import bs4 as bs
from urllib.request import Request, urlopen
def scrape(webpage, linkNumber, extention):
    """
    scrapes the main page of a news website using request and beautiful soup and
    returns a summary of the top article as a string
Args:
webpage: a string containing the URL of the main website
linkNumber: an integer pointing to the URL of the top article from the list
of all the URL's that have been scrapped
extention: a string containing the suffix of the URL to be sent to the
function sub_soup()
returns:
        headline: a string containing the 500 word summary of the scraped article
"""
# returns the link to the top headline link
req = Request(webpage, headers={'User-Agent':'Mozilla/5.0'})
webpage = urlopen(req).read()
soup = bs.BeautifulSoup(webpage,'lxml')
link = soup.find_all('a')
if linkNumber > 0:
story = (link[linkNumber])
sub_soup = str(extention + '{}'.format(story['href']))
elif linkNumber == -1:
sub_soup = articles[0][5]
elif linkNumber == -2:
link = soup.find('a',{'class':'gs-c-promo-heading'})
sub_soup = 'https://www.bbc.co.uk{}'.format(link['href'])
headline = sub_scrape(sub_soup)
return headline | f04cb8c8583f7f242ce70ec4da3e8f2556af7edb | 3,650,965 |
def Scheduler(type):
"""Instantiate the appropriate scheduler class for given type.
Args:
type (str): Identifier for batch scheduler type.
Returns:
Instance of a _BatchScheduler for given type.
"""
for cls in _BatchScheduler.__subclasses__():
if cls.is_scheduler_for(type):
return cls(type)
raise ValueError | 21074ecf33383b9f769e8dd63786194b4678246b | 3,650,966 |
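# Hedged sketch of the registry pattern Scheduler() relies on. The real
# _BatchScheduler lives elsewhere; the minimal base class and the names
# SlurmScheduler / "slurm" below are assumptions for illustration only.
class _BatchScheduler:
    def __init__(self, type):
        self.type = type

    @classmethod
    def is_scheduler_for(cls, type):
        raise NotImplementedError


class SlurmScheduler(_BatchScheduler):
    @classmethod
    def is_scheduler_for(cls, type):
        # Claim the 'slurm' identifier so Scheduler('slurm') returns this class.
        return type == "slurm"

# Scheduler("slurm") would then instantiate SlurmScheduler; an unknown
# identifier falls through and raises ValueError.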
def event_detail(request, event_id):
"""
A View to return an individual selected
event details page.
"""
event = get_object_or_404(Event, pk=event_id)
context = {
'event': event
}
return render(request, 'events/event_detail.html', context) | 6fda0906e70d88839fbcd26aa6724b5f2c433c07 | 3,650,967 |
from typing import Optional
from typing import Iterable
from typing import Tuple
import numpy
def variables(
metadata: meta.Dataset,
selected_variables: Optional[Iterable[str]] = None
) -> Tuple[dataset.Variable]:
"""Return the variables defined in the dataset.
Args:
selected_variables: The variables to return. If None, all the
variables are returned.
Returns:
The variables defined in the dataset.
"""
selected_variables = selected_variables or metadata.variables.keys()
return tuple(
dataset.VariableArray(
v.name,
numpy.ndarray((0, ) * len(v.dimensions), v.dtype),
v.dimensions,
attrs=v.attrs,
compressor=v.compressor,
fill_value=v.fill_value,
filters=v.filters,
) for k, v in metadata.variables.items() if k in selected_variables) | 6175ad712996a30673eb2f5ff8b64c76d2f4a66b | 3,650,968 |
def builder(tiledata, start_tile_id, version, clear_old_tiles=True):
"""
Deserialize a list of serialized tiles, then re-link all the tiles to
re-create the map described by the tile links
:param list tiledata: list of serialized tiles
:param start_tile_id: tile ID of tile that should be used as the start tile
:param str version: object model version of the tile data to be deserialized
:return: starting tile of built map
:rtype: text_game_maker.tile.tile.Tile
"""
tiles = {}
visited = []
if clear_old_tiles:
_tiles.clear()
for d in tiledata:
tile = deserialize(d, version)
tiles[tile.tile_id] = tile
if start_tile_id not in tiles:
raise RuntimeError("No tile found with ID '%s'" % start_tile_id)
tilestack = [tiles[start_tile_id]]
while tilestack:
t = tilestack.pop(0)
if t.tile_id in visited:
continue
visited.append(t.tile_id)
if isinstance(t, LockedDoor) and t.replacement_tile:
if t.replacement_tile:
t.replacement_tile = tiles[t.replacement_tile]
tilestack.append(t.replacement_tile)
if t.source_tile:
t.source_tile = tiles[t.source_tile]
tilestack.append(t.source_tile)
else:
for direction in ['north', 'south', 'east', 'west']:
tile_id = getattr(t, direction)
if not tile_id:
continue
setattr(t, direction, tiles[tile_id])
tilestack.append(tiles[tile_id])
return tiles[start_tile_id] | 235df5c953705fbbbd69d8f1c7ed1ad282b469ba | 3,650,969 |
import base64
def data_uri(content_type, data):
"""Return data as a data: URI scheme"""
return "data:%s;base64,%s" % (content_type, base64.urlsafe_b64encode(data)) | f890dc1310e708747c74337f5cfa2d6a31a23fc0 | 3,650,970 |
def next_line(ionex_file):
"""
next_line
Function returns the next line in the file
that is not a blank line, unless the line is
'', which is a typical EOF marker.
"""
done = False
while not done:
line = ionex_file.readline()
if line == '':
return line
elif line.strip():
return line | 053e5582e5146ef096d743973ea7069f19ae6d4d | 3,650,971 |
def last(value):
"""
returns the last value in a list (None if empty list) or the original if value not a list
:Example:
---------
>>> assert last(5) == 5
>>> assert last([5,5]) == 5
>>> assert last([]) is None
>>> assert last([1,2]) == 2
"""
values = as_list(value)
return values[-1] if len(values) else None | f3a04f0e2544879639b53012bbd9068ae205be18 | 3,650,972 |
import numpy
def levup(acur, knxt, ecur=None):
"""
LEVUP One step forward Levinson recursion
Args:
acur (array) :
knxt (array) :
Returns:
anxt (array) : the P+1'th order prediction polynomial based on the P'th
order prediction polynomial, acur, and the P+1'th order
reflection coefficient, Knxt.
enxt (array) : the P+1'th order prediction prediction error, based on the
P'th order prediction error, ecur.
References:
P. Stoica R. Moses, Introduction to Spectral Analysis Prentice Hall, N.J., 1997, Chapter 3.
"""
if acur[0] != 1:
        raise ValueError(
            'The leading coefficient of the prediction polynomial must be 1.')
acur = acur[1:] # Drop the leading 1, it is not needed
# Matrix formulation from Stoica is used to avoid looping
anxt = numpy.concatenate((acur, [0])) + knxt * numpy.concatenate(
(numpy.conj(acur[-1::-1]), [1]))
enxt = None
if ecur is not None:
# matlab version enxt = (1-knxt'.*knxt)*ecur
enxt = (1. - numpy.dot(numpy.conj(knxt), knxt)) * ecur
anxt = numpy.insert(anxt, 0, 1)
return anxt, enxt | 182102d03369d23d53d21bae7209cf49d2caecb4 | 3,650,973 |
def gradient_output_wrt_input(model, img, normalization_trick=False):
"""
Get gradient of softmax with respect to the input.
Must check if correct.
Do not use
# Arguments
model:
img:
# Returns
gradient:
"""
grads = K.gradients(model.output, model.input)[0]
if normalization_trick:
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
iterate = K.function([model.input], [grads])
grad_vals = iterate([img])[0]
gradient = grad_vals[0]
return gradient | ed45fccb0f412f8f8874cd8cd7f62ff2101a3a40 | 3,650,974 |
def response_GET(client, url):
"""Fixture that return the result of a GET request."""
return client.get(url) | b4762c9f652e714cc5c3694b75f935077039cb02 | 3,650,975 |
from tqdm import tqdm
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
def twitter_preprocess():
"""
ekphrasis-social tokenizer sentence preprocessor.
Substitutes a series of terms by special coins when called
over an iterable (dataset)
"""
norm = ['url', 'email', 'percent', 'money', 'phone', 'user',
'time', 'date', 'number']
ann = {"hashtag", "elongated", "allcaps", "repeated",
"emphasis", "censored"}
preprocessor = TextPreProcessor(
normalize=norm,
annotate=ann,
all_caps_tag="wrap",
fix_text=True,
segmenter="twitter_2018",
corrector="twitter_2018",
unpack_hashtags=True,
unpack_contractions=True,
spell_correct_elong=False,
tokenizer=SocialTokenizer(lowercase=True).tokenize,
dicts=[emoticons]).pre_process_doc
def preprocess(name, dataset):
description = " Ekphrasis-based preprocessing dataset "
description += "{}...".format(name)
data = [preprocessor(x) for x in tqdm(dataset, desc=description)]
return data
return preprocess | 18bcd48cff7c77480cd76165fef02d0e39ae19cc | 3,650,977 |
import math
import numpy as np
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac), 0],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab), 0],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc, 0],
[0,0,0,1]]) | cd940b60096fa0c92b8cd04d36a0d62d7cd46455 | 3,650,978 |
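# Sanity-check sketch for rotation_matrix: a 90-degree counterclockwise rotation
# about the z axis maps the x axis onto the y axis (homogeneous 4x4 form).
import numpy as np
R = rotation_matrix([0, 0, 1], np.pi / 2)
p = R @ np.array([1, 0, 0, 1])
assert np.allclose(p, [0, 1, 0, 1])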
from typing import Type
from typing import List
def get_routes(interface: Type[Interface]) -> List[ParametrizedRoute]:
"""
Retrieves the routes from an interface.
"""
if not issubclass(interface, Interface):
raise TypeError('expected Interface subclass, got {}'
.format(interface.__name__))
routes = []
for member in interface.members():
if isinstance(member, _InterfaceMethod):
route_data = getattr(member.original, '__route__', None)
if route_data is not None:
assert isinstance(route_data, RouteData)
routes.append(ParametrizedRoute.from_function(
route_data, interface, member.original))
return routes | 9d3baf951312d3027e2329fa635b2425dda579e5 | 3,650,979 |
def _get_realm(response):
"""Return authentication realm requested by server for 'Basic' type or None
:param response: requests.response
:type response: requests.Response
:returns: realm
:rtype: str | None
"""
if 'www-authenticate' in response.headers:
auths = response.headers['www-authenticate'].split(',')
basic_realm = next((auth_type for auth_type in auths
if auth_type.rstrip().lower().startswith("basic")),
None)
if basic_realm:
realm = basic_realm.split('=')[-1].strip(' \'\"').lower()
return realm
else:
return None
else:
return None | 346b3278eb52b565f747c952493c15820eece729 | 3,650,981 |
import math
def exp_mantissa(num, base=10):
"""Returns e, m such that x = mb^e"""
if num == 0:
return 1, 0
# avoid floating point error eg log(1e3, 10) = 2.99...
exp = math.log(abs(num), base)
exp = round(exp, FLOATING_POINT_ERROR_ON_LOG_TENXPONENTS)
exp = math.floor(exp) # 1 <= mantissa < 10
mantissa = num / (base**exp)
return exp, mantissa | b0fd7a961fbd0f796fc00a5ce4005c7aa9f92950 | 3,650,982 |
from typing import Callable
def decide_if_taxed(n_taxed: set[str]) -> Callable[[str], bool]:
"""To create an decider function for omitting taxation.
Args:
n_taxed: The set containing all items, which should not be taxed.
If empty, a default set will be chosen.
Returns:
Decider function for omitting taxation.
"""
local_set = _D_TAX_E
if n_taxed:
local_set = n_taxed
def _decide_if_taxed(in_str: str, /) -> bool:
"""To check whether an item is taxed or not.
A very simple function, which look up the item in a
given set. This set contains all item names, which should omitted
from taxation.
Args:
in_str: The name of the purchased item, which should be checked for taxation.
Returns:
Whether the item is taxed or not.
"""
for item_sub_name in in_str.split(" "):
if item_sub_name in local_set:
return False
return True
return _decide_if_taxed | c13c7e832b86bd85e2cade03cbc84a43893dfe17 | 3,650,983 |
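# Usage sketch for decide_if_taxed: items whose name contains a word from the
# exempt set are reported as not taxed. The exempt set below is illustrative.
is_taxed = decide_if_taxed({"book", "chocolate", "pill"})
assert is_taxed("music cd") is True                      # no exempt word -> taxed
assert is_taxed("imported box of chocolate") is False    # 'chocolate' is exempt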
def generate_two_cat_relation_heat_map():
"""
A correlation matrix for categories
"""
data = Heatmap(
z=df_categories.corr(),
y=df_categories.columns,
x=df_categories.columns)
title = 'Correlation Distribution of Categories'
y_title = 'Category'
x_title = 'Category'
return generate_graph_with_template(data, title, y_title, x_title) | 90efbffd54c723eef9297ba0abba71d55a500cd0 | 3,650,984 |
def build_phase2(VS, FS, NS, VT, VTN, marker, wc):
"""
Build pahase 2 sparse matrix M_P2 closest valid point term with of source vertices (nS)
triangles(mS) target vertices (nT)
:param VS: deformed source mesh from previous step nS x 3
:param FS: triangle index of source mesh mS * 3
:param NS: triangle normals of source mesh mS * 3
:param VT: target mesh nT * 3
:param VTN: Vertex normals of source mesh nT * 3
:param marker: marker constraint
:param wc: weight value
:return: M_P2: (3 * nS) x (3 * (nS + mS)) big sparse matrix
C_P2: (3 * nS) matrix
"""
VSN = calc_vertex_norm(FS, NS)
S_size = VS.shape[0]
valid_pt = np.zeros((S_size, 2))
C_P2 = np.zeros((3*S_size, 1))
for j in range(0, S_size):
if len(np.where(marker[:, 0]-1 == j)[0]) != 0:
valid_pt[j, :] = np.array([j, marker[marker[:, 0]-1 == j, 1] - 1], dtype=np.int32)
else:
valid_pt[j, :] = np.array([j, find_closest_validpt(VS[j, :], VSN[j, :], VT, VTN)], dtype=np.int32)
C_P2[np.linspace(0, 2, 3, dtype=np.int32) + j*3, 0] = wc * VT[int(valid_pt[j, 1]), :].T
M_P2 = sparse.coo_matrix((np.tile(wc, [3*S_size, 1])[:, 0], (np.arange(0, 3*S_size), np.arange(0, 3*S_size))), shape=(3*S_size, 3*(VS.shape[0]+FS.shape[0])))
return M_P2, C_P2 | ab3622f5b4377b1a60d34345d5396f66d5e3c641 | 3,650,985 |
import numpy as np
def voronoi_to_dist(voronoi):
""" voronoi is encoded """
def decoded_nonstacked(p):
return np.right_shift(p, 20) & 1023, np.right_shift(p, 10) & 1023, p & 1023
x_i, y_i, z_i = np.indices(voronoi.shape)
x_v, y_v, z_v = decoded_nonstacked(voronoi)
return np.sqrt((x_v - x_i) ** 2 + (y_v - y_i) ** 2 + (z_v - z_i) ** 2) | 38c2630d45b281477531fcc845d34ea7b2980dab | 3,650,986 |
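# Hedged sketch of the encoding voronoi_to_dist assumes: each voxel stores the
# coordinates of its nearest site packed as (x << 20) | (y << 10) | z.
import numpy as np
x_i, y_i, z_i = np.indices((4, 4, 4))
voronoi = (x_i << 20) | (y_i << 10) | z_i   # every voxel is its own nearest site,
dist = voronoi_to_dist(voronoi)             # so all distances come out as zero
assert np.allclose(dist, 0.0)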
def post_update_view(request):
"""View To Update A Post For Logged In Users"""
if request.method == 'POST':
token_type, token = request.META.get('HTTP_AUTHORIZATION').split()
if(token_type != 'JWT'):
return Response({'detail': 'No JWT Authentication Token Found'}, status=status.HTTP_400_BAD_REQUEST)
token_data = {'token': token}
try:
valid_data = VerifyJSONWebTokenSerializer().validate(token_data)
logged_in_user = valid_data.get('user')
except:
return Response({'detail': 'Invalid Token'}, status.HTTP_400_BAD_REQUEST)
updated_data = request.data
instance = Post.objects.get(slug=updated_data.get('slug'))
admin_user = User.objects.get(pk=1) # PK Of Admin User Is 1
if(instance.author == logged_in_user or logged_in_user == admin_user):
updated_data.pop('slug')
serializer = PostUpdateSerializer(instance, data=updated_data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
else:
return Response({'detail': 'Something Went Wrong.'}, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'detail': 'You Are Not Authorised To Edit This Post'}, status.HTTP_403_FORBIDDEN)
else:
return Response({'detail': 'You Are Not Authorised To Edit This Post'}, status.HTTP_403_FORBIDDEN) | 8044e12328c5bb63c48f673971ae1ed8727b02b7 | 3,650,987 |
from typing import List
def _is_binary_classification(class_list: List[str]) -> bool:
"""Returns true for binary classification problems."""
if not class_list:
return False
return len(class_list) == 1 | 82ada7dd8df93d58fad489b19b9bf4a93ee819c3 | 3,650,988 |
def create_post_like(author, post):
"""
Create a new post like given an author and post
"""
return models.Like.objects.create(author=author, post=post) | f8e07c10076015e005cd62bb3b39a5656ebc45a3 | 3,650,989 |
def translate_entries(yamldoc, base_url):
"""
Reads the field `entries` from the YAML document, processes each entry that is read using the
given base_url, and appends them all to a list of processed entries that is then returned.
"""
if 'entries' in yamldoc and type(yamldoc['entries']) is list:
entries = []
for i, entry in enumerate(yamldoc['entries']):
entries.append(process_entry(base_url, i, entry))
return entries | 0c949939020b3bb1017fca5543be8dcc77d03bbf | 3,650,990 |
def get_in(obj, lookup, default=None):
""" Walk obj via __getitem__ for each lookup,
returning the final value of the lookup or default.
"""
tmp = obj
for l in lookup:
try: # pragma: no cover
tmp = tmp[l]
except (KeyError, IndexError, TypeError): # pragma: no cover
return default
return tmp | 73dfcaadb6936304baa3471f1d1e980f815a7057 | 3,650,991 |
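# Usage sketch for get_in: walk nested containers, falling back to a default.
cfg = {"db": {"hosts": ["primary", "replica"]}}
assert get_in(cfg, ["db", "hosts", 1]) == "replica"
assert get_in(cfg, ["db", "port"], default=5432) == 5432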
import six
def GetSpec(resource_type, message_classes, api_version):
"""Returns a Spec for the given resource type."""
spec = _GetSpecsForVersion(api_version)
if resource_type not in spec:
raise KeyError('"%s" not found in Specs for version "%s"' %
(resource_type, api_version))
spec = spec[resource_type]
table_cols = []
for name, action in spec.table_cols:
if isinstance(action, six.string_types):
table_cols.append((name, property_selector.PropertyGetter(action)))
elif callable(action):
table_cols.append((name, action))
else:
raise ValueError('expected function or property in table_cols list: {0}'
.format(spec))
message_class = getattr(message_classes, spec.message_class_name)
fields = list(_ProtobufDefinitionToFields(message_class))
return Spec(message_class=message_class,
fields=fields,
table_cols=table_cols,
transformations=spec.transformations,
editables=spec.editables) | ece9dd996c52f01bb985af9529b33bb7b12fbfdc | 3,650,992 |
def ips_between(start: str, end: str) -> int:
"""
A function that receives two IPv4 addresses,
and returns the number of addresses between
them (including the first one, excluding the
last one).
All inputs will be valid IPv4 addresses in
the form of strings. The last address will
always be greater than the first one.
:param start:
:param end:
:return:
"""
ip_start = [int(a) for a in start.split('.')]
ip_end = [int(b) for b in end.split('.')]
ips = zip(ip_start, ip_end)
ips_range = [0, 0, 0, 0]
for ip_id, ip in enumerate(ips):
calc_ip_range(ip, ip_id, ips_range)
return calc_result(ips_range) | aa523ec8a127e2224b7c9fc7a67d720ac4d100ed | 3,650,993 |
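# Expected behaviour of ips_between per its docstring (first address included,
# last excluded), assuming the helpers calc_ip_range and calc_result defined
# elsewhere implement that counting:
# ips_between("10.0.0.0", "10.0.0.50")  -> 50
# ips_between("20.0.0.10", "20.0.1.0")  -> 246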
def tmNstate(trTrg):
"""Given (newq, new_tape_sym, dir),
return newq.
"""
return trTrg[0] | 17db0bc5cae4467e7a66d506e1f32d48c949e5eb | 3,650,994 |
import numpy as np
import pandas as pd
def _preprocess_continuous_variable(df: pd.DataFrame, var_col: str, bins: int,
min_val: float = None,
max_val: float = None) -> pd.DataFrame:
"""
Pre-processing the histogram for continuous variables by splitting the variable in buckets.
:param df: (pd.DataFrame) Data frame containing at least the continuous variable
:param var_col: (str) Name of the continuous variable
:param bins: (int) Preferred number of bins in histogram
:param min_val: (float, optional) Minimal value to be taken by the variable (if other than the minimum observed in
the data.
:param max_val: (float, optional) Maximal value to be taken by the variable (if other than the maximum observed in
the data.
:return: pd.DataFrame with *var_col* transformed to range
"""
# set *min_val* and *max_val* to minimal and maximal values observed in data
if min_val is None:
min_val = df[var_col].min()
if max_val is None:
max_val = df[var_col].max()
# compute the most appropriate step size for the histogram
step_size, decimals = _compute_step_size(min_val, max_val, bins)
min_val = min_val - (min_val % step_size)
# cut values into buckets
df[var_col] = pd.cut(df[var_col],
list(np.arange(min_val, max_val, step_size)) + [max_val],
include_lowest=True)
# convert buckets into strings
if decimals == 0:
df[var_col] = df[var_col].map(lambda x: f"{int(np.round(x.left))} - {int(np.round(x.right))}")
else:
df[var_col] = df[var_col].map(lambda x: f"{np.round(x.left, decimals)} - {np.round(x.right, decimals)}")
return df | 9c2844497dbe55727f6b2aea17cf7a23e60a3002 | 3,650,996 |
import itertools
import numpy as np
def get_pairs(labels):
"""
For the labels of a given word, creates all possible pairs
of labels that match sense
"""
result = []
unique = np.unique(labels)
for label in unique:
ulabels = np.where(labels==label)[0]
# handles when a word sense has only one occurrence
if len(ulabels) == 1:
# returns the instance paired with itself, so it can be counted
result.append((ulabels[0], ulabels[0]))
else:
for p in itertools.combinations(ulabels, 2):
result.append(p)
return result | 454de57eedf6f272fef2c15b40f84de57ed3fa64 | 3,650,997 |
def iredv(tvp,tton):
""" makes sop tvp irredundant relative to onset truth table"""
res = []
red = list(tvp)
for j in range(len(tvp)):
tvj=tvp[j]&tton #care part of cube j
if (tvj&~or_redx(red,j)) == m.const(0): # reduce jth cube to 0
red[j]=m.const(0)
else: #keep cube j
res = res + [tvp[j]]
return res | 5fdb9ed97216b668110908419b364107ed3b7c37 | 3,650,998 |
def ridder_fchp(st, target=0.02, tol=0.001, maxiter=30, maxfc=0.5, config=None):
"""Search for highpass corner using Ridder's method.
Search such that the criterion that the ratio between the maximum of a third order
polynomial fit to the displacement time series and the maximum of the displacement
timeseries is a target % within a tolerance.
This algorithm searches between a low initial corner frequency a maximum fc.
Method developed originally by Scott Brandenberg
Args:
st (StationStream):
Stream of data.
target (float):
target percentage for ratio between max polynomial value and max
displacement.
tol (float):
            tolerance for matching the ratio target
maxiter (float):
maximum number of allowed iterations in Ridder's method
maxfc (float):
Maximum allowable value of the highpass corner freq.
int_method (string):
method used to perform integration between acceleration, velocity, and
            displacement. Options are "frequency_domain", "time_domain_zero_init" or
"time_domain_zero_mean"
config (dict):
Configuration dictionary (or None). See get_config().
Returns:
StationStream.
"""
if not st.passed:
return st
if config is None:
config = get_config()
processing_steps = config["processing"]
ps_names = [list(ps.keys())[0] for ps in processing_steps]
ind = int(np.where(np.array(ps_names) == "highpass_filter")[0][0])
hp_args = processing_steps[ind]["highpass_filter"]
frequency_domain = hp_args["frequency_domain"]
if frequency_domain is True:
filter_code = 1
elif frequency_domain is False:
filter_code = 0
for tr in st:
initial_corners = tr.getParameter("corner_frequencies")
initial_f_hp = initial_corners["highpass"]
new_f_hp = get_fchp(
dt=tr.stats.delta,
acc=tr.data,
target=target,
tol=tol,
poly_order=FORDER,
maxiter=maxiter,
fchp_max=maxfc,
filter_type=filter_code,
)
        # Method did not converge if new_f_hp reaches maxfc
        if (maxfc - new_f_hp) < 1e-9:
tr.fail("auto_fchp did not find an acceptable f_hp.")
continue
if new_f_hp > initial_f_hp:
tr.setParameter(
"corner_frequencies",
{
"type": "snr_polyfit",
"highpass": new_f_hp,
"lowpass": initial_corners["lowpass"],
},
)
return st | ee3198c443885fa9524d12c30aa277d8cd843d27 | 3,650,999 |
from itertools import combinations
def get_impropers(bonds):
"""
Iterate over bonds to get impropers.
Choose all three bonds that have one atom in common.
For each set of bonds you have 3 impropers where one of the noncommon atoms is out of plane.
Parameters
----------
bonds : list
List of atom ids that make up bonds.
Returns
-------
list
List of atom id quadruplets that make up a improper.
"""
impropers, checked = [], []
for bond in bonds:
for atom in bond:
if atom not in checked:
bonded_list = []
for bond2 in bonds:
if atom in bond2:
bonded_list.append(bond2[1 - bond2.index(atom)])
if len(bonded_list) >= 3:
for triplet in combinations(bonded_list, 3):
for out_of_plane in triplet:
imp = tuple([out_of_plane, atom] + sorted([i for i in triplet if i != out_of_plane]))
impropers.append(imp)
checked.append(atom)
return sorted(impropers) | c5c2fe4684269407cd4387d86840bd982f1d3fa5 | 3,651,001 |
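# Worked example for get_impropers: an atom bonded to three neighbours yields one
# triplet and therefore three impropers, one per out-of-plane atom.
bonds = [(0, 1), (0, 2), (0, 3)]
assert get_impropers(bonds) == [(1, 0, 2, 3), (2, 0, 1, 3), (3, 0, 1, 2)]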
def get_ret_tev_return(*args):
"""get_ret_tev_return(int n) -> ea_t"""
return _idaapi.get_ret_tev_return(*args) | 94d476d12313b7df4da32cb45cfe644a0078debb | 3,651,002 |
def make_figure_6(prefix=None, rng=None, colors=None):
"""
Figures 6, Comparison of Performance
Ported from MATLAB Code
Nicholas O'Donoughue
24 March 2021
:param prefix: output directory to place generated figure
:param rng: random number generator
:param colors: colormap for plotting
:return: figure handle
"""
# Vary Time-Bandwidth Product
tbwp_vec_db = np.arange(start=10., stop=31., step=10., dtype=int)
tbwp_vec_lin = np.expand_dims(db_to_lin(tbwp_vec_db), axis=0).astype(int)
input_snr_vec_db = np.arange(start=-20, stop=10.1, step=0.1)
input_snr_vec_lin = np.expand_dims(db_to_lin(input_snr_vec_db), axis=1)
output_snr_vec_lin = tbwp_vec_lin*input_snr_vec_lin**2/(1+2*input_snr_vec_lin)
# output_snr_vec_db = lin_to_db(output_snr_vec_lin)
# Energy Detector Performance
prob_fa = 1e-6
threshold_ed = stats.chi2.ppf(q=1-prob_fa, df=2*tbwp_vec_lin)
prob_det_ed = stats.ncx2.sf(x=threshold_ed, df=2*tbwp_vec_lin, nc=2*tbwp_vec_lin*input_snr_vec_lin)
# Cross-Correlator Performance
threshold_xc = stats.chi2.ppf(q=1-prob_fa, df=2)
prob_det_xc = stats.ncx2.sf(x=threshold_xc/(1+2*input_snr_vec_lin), df=2, nc=2*output_snr_vec_lin)
# Monte Carlo Trials
input_snr_vec_coarse_db = input_snr_vec_db[::10]
input_snr_vec_coarse_lin = db_to_lin(input_snr_vec_coarse_db)
num_monte_carlo = int(1e4)
num_tbwp = int(tbwp_vec_lin.size)
num_snr = int(input_snr_vec_coarse_lin.size)
# Generate noise vectors
noise_pwr = 1 # Unit Variance
prob_det_ed_mc = np.zeros(shape=(num_snr, num_tbwp))
prob_det_xc_mc = np.zeros(shape=(num_snr, num_tbwp))
for idx_tbwp, tbwp in enumerate(np.ravel(tbwp_vec_lin)):
# Generate the noise vectors
noise1 = np.sqrt(noise_pwr/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo))
+ 1j*rng.standard_normal(size=(tbwp, num_monte_carlo)))
noise2 = np.sqrt(noise_pwr/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo))
+ 1j*rng.standard_normal(size=(tbwp, num_monte_carlo)))
# Generate a signal vector
signal = np.sqrt(1/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo))
+ 1j*rng.standard_normal(size=(tbwp, num_monte_carlo)))
phase_difference = np.exp(1j*rng.uniform(low=0, high=2*np.pi, size=(1, num_monte_carlo)))
for idx_snr, snr in enumerate(input_snr_vec_coarse_lin):
# Scale the signal power to match SNR
this_signal = signal * np.sqrt(snr)
y1 = this_signal+noise1
y2 = this_signal*phase_difference+noise2
det_result_ed = detector.squareLaw.det_test(z=y1, noise_var=noise_pwr/2, prob_fa=prob_fa)
prob_det_ed_mc[idx_snr, idx_tbwp] = np.sum(det_result_ed, axis=None)/num_monte_carlo
det_result_xc = detector.xcorr.det_test(y1=y1, y2=y2, noise_var=noise_pwr, num_samples=tbwp,
prob_fa=prob_fa)
prob_det_xc_mc[idx_snr, idx_tbwp] = np.sum(det_result_xc, axis=None)/num_monte_carlo
fig6 = plt.figure()
for idx, tbwp in enumerate(tbwp_vec_lin[0, :]):
if idx == 0:
ed_label = 'ED'
xc_label = 'XC'
ed_mc_label = 'ED (Monte Carlo)'
xc_mc_label = 'XC (Monte Carlo)'
else:
ed_label = None
xc_label = None
ed_mc_label = None
xc_mc_label = None
plt.plot(input_snr_vec_db, prob_det_ed[:, idx], color=colors(idx), linestyle='-', label=ed_label)
plt.plot(input_snr_vec_db, prob_det_xc[:, idx], color=colors(idx), linestyle='--', label=xc_label)
plt.scatter(input_snr_vec_coarse_db, prob_det_ed_mc[:, idx], color=colors(idx), marker='^', label=ed_mc_label)
plt.scatter(input_snr_vec_coarse_db, prob_det_xc_mc[:, idx], color=colors(idx), marker='x', label=xc_mc_label)
plt.legend(loc='lower right')
# Create ellipses
ax = plt.gca()
ell = Ellipse(xy=(2, .4), width=5, height=.05)
ell.set_fill(False)
ell.set_edgecolor(colors(0))
ax.add_artist(ell)
plt.annotate(s='TB=10', xy=(-.5, .4), xytext=(-16, .3), arrowprops=dict(arrowstyle='-', color=colors(0)))
ell = Ellipse(xy=(-3.5, .5), width=3, height=.05)
ell.set_fill(False)
ell.set_edgecolor(colors(1))
ax.add_artist(ell)
plt.annotate(s='TB=100', xy=(-5, .5), xytext=(-16, .5), arrowprops=dict(arrowstyle='-', color=colors(1)))
ell = Ellipse(xy=(-8.5, .6), width=3, height=.05)
ell.set_fill(False)
ell.set_edgecolor(colors(2))
ax.add_artist(ell)
plt.annotate(s='TB=1,000', xy=(-10, .6), xytext=(-16, .7), arrowprops=dict(arrowstyle='-', color=colors(2)))
# Save figure
if prefix is not None:
plt.savefig(prefix + 'fig6.svg')
plt.savefig(prefix + 'fig6.png')
return fig6 | 761d6ddd541dfbe42e5b57cd680306c71ae978d9 | 3,651,003 |
def slim_form(domain_pk=None, form=None):
"""
What is going on? We want only one domain showing up in the
choices. We are replacing the query set with just one object. Ther
are two querysets. I'm not really sure what the first one does, but
I know the second one (the widget) removes the choices. The third
line removes the default u'--------' choice from the drop down.
"""
return form | 7b58674e307fbbd31f0546b70309c0c723d1021c | 3,651,004 |
def input(*args):
"""
Create a new input
:param args: args the define a TensorType, can be either a TensorType or a shape and a DType
:return: the input expression
"""
tensor_type = _tensor_type_polymorhpic(*args)
return InputTensor(tensor_type, ExpressionDAG.num_inputs) | 47ab3a08f412b7dc9c679ae72bb44c76123a9057 | 3,651,005 |
def commong_substring(input_list):
"""Finds the common substring in a list of strings"""
def longest_substring_finder(string1, string2):
"""Finds the common substring between two strings"""
answer = ""
len1, len2 = len(string1), len(string2)
for i in range(len1):
match = ""
for j in range(len2):
if i + j < len1 and string1[i + j] == string2[j]:
match += string2[j]
else:
if len(match) > len(answer):
answer = match
match = ""
return answer
if len(input_list) == 2:
return longest_substring_finder(*input_list)
if len(input_list) > 2:
item0 = input_list[0]
for i in range(len(input_list) - 1):
item1 = input_list[i + 1]
item0 = commong_substring([item0, item1])
return commong_substring([item0, item1])
if len(input_list) == 1:
return input_list[0] | 9e5e0878072a5416326ac1ed0d929adcb8511b37 | 3,651,006 |
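# Usage sketch for commong_substring: the longest substring shared by all inputs.
assert commong_substring(["flower", "flow"]) == "flow"
assert commong_substring(["flower", "flow", "flight"]) == "fl"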
def is_valid_url(url):
"""Checks if a URL is in proper format.
Args:
url (str): The URL that should be checked.
Returns:
bool: Result of the validity check in boolean form.
"""
valid = validators.url(url)
if valid:
return True
else:
return False | b55fd89267884dfc2507966825272a02e18d34f5 | 3,651,007 |
import re
import requests
def codepoint_to_url(codepoint, style):
"""
Given an emoji's codepoint (e.g. 'U+FE0E') and a non-apple emoji style,
returns a url to to the png image of the emoji in that style.
Only works for style = 'twemoji', 'noto', and 'blobmoji'.
"""
base = codepoint.replace('U+', '').lower()
if style == 'twemoji':
# See discussion in commit 8115b76 for more information about
# why the base needs to be patched like this.
patched = re.sub(r'0*([1-9a-f][0-9a-f]*)', lambda m: m.group(1),
base.replace(' ', '-').replace('fe0f-20e3', '20e3').replace('1f441-fe0f-200d-1f5e8-fe0f', '1f441-200d-1f5e8'))
response = requests.get('https://github.com/twitter/twemoji/raw/gh-pages/v/latest')
version = response.text if response.ok else None
if version:
return 'https://github.com/twitter/twemoji/raw/gh-pages/v/%s/72x72/%s.png' \
% (version, patched)
else:
return 'https://github.com/twitter/twemoji/raw/master/assets/72x72/%s.png' \
% patched
elif style == 'noto':
return 'https://github.com/googlefonts/noto-emoji/raw/master/png/128/emoji_u%s.png' \
% base.replace(' ', '_')
elif style == 'blobmoji':
return 'https://github.com/C1710/blobmoji/raw/master/png/128/emoji_u%s.png' \
% base.replace(' ', '_') | a5b47f5409d465132e3fb7141d81dbd617981ca8 | 3,651,008 |
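A usage sketch for the 'noto' branch, which is pure string formatting and needs no network access; the expected URL follows directly from the format string above.

url = codepoint_to_url('U+1F600', 'noto')
print(url)  # https://github.com/googlefonts/noto-emoji/raw/master/png/128/emoji_u1f600.png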
def getRNCS(ChargeSA):
"""The calculation of relative negative charge surface area
-->RNCS
"""
charge=[]
for i in ChargeSA:
charge.append(float(i[1]))
temp=[]
for i in ChargeSA:
temp.append(i[2])
try:
RNCG = min(charge)/sum([i for i in charge if i < 0.0])
return temp[charge.index(min(charge))]/RNCG
    except (ValueError, ZeroDivisionError):  # no charges, or no negative charges to sum over
return 0.0 | f03011de85e1bcac01b2aba4afde61a3dd9f7866 | 3,651,009 |
def handle_auth_manager_auth_exception(error):
"""Return a custom message and 403 status code"""
response_header = {'X-REQUEST-ID': util.create_request_id()}
return {'message': error.message}, 403, response_header | 4b5212f4471a21cd54d012728705e83de5c7a86f | 3,651,010 |
def get_default_converter():
"""Intended only for advanced uses"""
return _TYPECATS_DEFAULT_CONVERTER | f88cdb13d53a228ff1d77a9065c1dabd0f83ed1d | 3,651,011 |
import json
def login(request):
"""
:param: request
:return: JSON data
"""
response = {}
if request.method == 'GET':
username = request.GET.get('username')
password = request.GET.get('password')
try:
usr = models.User.objects.filter(username=username, password=password)
if usr:
response['status'] = 'success'
response['error_msg'] = ''
response['data'] = json.loads(serializers.serialize('json', usr))
else:
response['status'] = 'failure'
                response['error_msg'] = 'Incorrect username or password, please try again'
response['data'] = None
except Exception as e:
response['status'] = 'error'
response['error_msg'] = str(e)
response['data'] = None
return JsonResponse(response) | 2d9b6791a2160ec63929d5a37e6d8336cca7709a | 3,651,012 |
def average_win_rate(strategy, baseline=always_roll(4)):
"""Return the average win rate of STRATEGY against BASELINE. Averages the
    win rate when starting the game as player 0 and as player 1.
"""
win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)
win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)
return (win_rate_as_player_0 + win_rate_as_player_1) / 2 | 2e6b78127543456b7e931c837cf1a9468c013c33 | 3,651,013 |
def decode(chrom):
"""
Returns the communities of a locus-based adjacency codification
    in a vector of ints where each position is a node id and the value
    at that position is the id of the community it belongs to. Two positions
    holding the same number means those two nodes belong to the same community.
"""
try:
size = len(chrom)
last_c = 0
communities = [float("inf")] * size
pending = set(range(size))
while len(pending) != 0:
index = int(pending.pop())
neighbour = int(chrom[index])
if neighbour != -1:
communities[index] = min(last_c, communities[index], communities[neighbour])
while neighbour in pending:
pending.remove(neighbour)
communities[neighbour] = min(last_c, communities[neighbour])
neighbour = int(chrom[neighbour])
last_c += 1
return communities
except Exception as e:
raise e | 998a58e0d4efad2c079a9d023530aca37d0e226e | 3,651,014 |
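A usage sketch: in the locus-based encoding below, nodes 0-1 reference each other and nodes 2-3 reference each other, so they decode into two communities. The exact label values depend on set iteration order; only the grouping is meaningful.

chrom = [1, 0, 3, 2]   # node i stores the id of one neighbour in its community
print(decode(chrom))   # e.g. [0, 0, 1, 1] -- nodes 0-1 together, nodes 2-3 together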
import math
def bin_search(query, data):
""" Query is a coordinate interval. Approximate binary search for the query in sorted data,
which is a list of coordinates. Finishes when the closest overlapping value of query and
data is found and returns the index in data. """
i = int(math.floor(len(data)/2)) # binary search prep
lower, upper = 0, len(data)
if not upper:
return -1
tried = set()
rightfound = '' # null value in place of 0, which is a valid value for rightfound
while not (data[i][0] <= query[0] and data[i][1] >= query[0]): # query left coordinate not found in data yet
if data[i][0] <= query[1] and data[i][1] >= query[1]: # query right found, will keep looking for left
rightfound = i
if data[i][1] < query[0]: # i is too low of an index
lower = i
i = int(math.floor((lower + upper)/2.))
else: # i is too high of an index
upper = i
i = int(math.floor((lower + upper)/2.))
if i in tried or i == upper:
if data[i][0] >= query[0] and data[i][1] <= query[1]: # data interval sandwiched inside query
break
elif i + 1 < len(data) and data[i+1][0] > query[0] and data[i+1][1] < query[1]: # data can be incremented
i = i + 1
else:
i = rightfound if rightfound != '' else -1
break
tried.add(i)
return i | bb93034bc5c7e432c3fc55d4485949688e62b84a | 3,651,015 |
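A usage sketch with a sorted list of intervals; the query (12, 15) overlaps the interval at index 1, and an empty data list returns -1.

data = [(1, 5), (10, 20), (30, 40)]   # sorted coordinate intervals
print(bin_search((12, 15), data))     # 1
print(bin_search((12, 15), []))       # -1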
def get_rating(business_id):
""" GET Business rating"""
rating = list(
db.ratings.aggregate(
[{"$group": {"_id": "$business", "pop": {"$avg": "$rating"}}}]
)
)
    if not rating:  # the aggregation result is a list; empty means nothing was found
return (
jsonify(
{
"success": False,
"message": "Rating for business {} not found.".format(business_id),
}
),
404,
)
print(rating)
return jsonify({"success": True, "rating": clean_dict_helper(rating)}) | 3a1cbf3e815c879b4ddaa5185477f141b261a859 | 3,651,016 |
import numpy as np
def fwhm(x,y):
    """Calculate the FWHM for a set of x and y values.
    The FWHM is returned in the same units as those of x."""
maxVal = np.max(y)
maxVal50 = 0.5*maxVal
#this is to detect if there are multiple values
biggerCondition = [a > maxVal50 for a in y]
changePoints = []
xPoints = []
for k in range(len(biggerCondition)-1):
if biggerCondition[k+1] != biggerCondition[k]:
changePoints.append(k)
assert len(changePoints) == 2, "More than two crossings of the threshold found."
for k in changePoints:
# do a polyfit
# with the points before and after the point where the change occurs.
# note that here we are fitting the x values as a function of the y values.
# then we can use the polynom to compute the value of x at the threshold, i.e. at maxVal50.
yPolyFit = x[k-1:k+2]
xPolyFit = y[k-1:k+2]
z = np.polyfit(xPolyFit,yPolyFit,2)
p = np.poly1d(z)
xThis = p(maxVal50)
xPoints.append(xThis)
if len(xPoints) == 2:
linewidth = xPoints[1] - xPoints[0]
else:
linewidth = None
print(sorted(xPoints))
return linewidth | 2dc18d15d2940520acde39c5914413d89e9fbc71 | 3,651,017 |
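A usage sketch on a sampled unit-width Gaussian; the returned width should be close to the analytic FWHM 2*sqrt(2*ln 2) ≈ 2.355.

x = np.linspace(-5, 5, 1001)
y = np.exp(-x ** 2 / 2)    # Gaussian with sigma = 1
print(fwhm(x, y))          # approximately 2.355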
from glob import glob
from Bio import SeqIO
def parse_names(input_folder):
"""
:param input_folder:
:return:
"""
name_set = set()
if args.suffix:
files = sorted(glob(f'{input_folder}/*{args.suffix}'))
else:
files = sorted(glob(f'{input_folder}/*'))
for file in files:
with open(file) as f:
for record in SeqIO.parse(f, args.in_format):
fname = record.description
name = fname.split('_')[0]
name_set.add(name)
return files, sorted(list(name_set)) | 10b72d9822d6c8057f9bc45936c8d1bfb1a029b6 | 3,651,018 |
from typing import Iterable
from typing import Tuple
from typing import Mapping
from typing import Union
import numpy as np
def build_charencoder(corpus: Iterable[str], wordlen: int=None) \
-> Tuple[int, Mapping[str, int], TextEncoder]:
"""
Create a char-level encoder: a Callable, mapping strings into integer arrays.
Encoders dispatch on input type: if you pass a single string, you will get
a 1D array, if you pass an Iterable of strings, you will get a 2D array
where row i encodes the i-th string in the Iterable.
:param corpus: an Iterable of strings to extract characters from. The
encoder will map any non-ASCII character into the OOV code.
:param wordlen: when `wordlen` is None and an encoder receives an Iterable of
strings, the second dimension in the output array will be as long as the
longest string, otherwise it will be `wordlen` long. In the latter case
words exceeding `wordlen` will be trimmed. In both cases empty-spaces are
filled with zeros.
:return: the OOV code, a character mapping representing non-OOV character
encodings, an encoder
"""
if wordlen and wordlen < 1:
raise ValueError('`wordlen` must be positive')
try:
charmap = {char: i + 1 for i, char in enumerate(asciicharset(corpus))}
except TypeError:
raise ValueError('`corpus` can be either a string or an Iterable of '
'strings')
if not charmap:
raise ValueError('the `corpus` is empty')
oov = len(charmap) + 1
def encode_string(string: str) -> np.ndarray:
if not string:
raise ValueError("can't encode empty strings")
return np.fromiter((charmap.get(char, oov) for char in string), np.int32,
len(string))
def charencoder(target: Union[str, Iterable[str]]):
if isinstance(target, str):
return encode_string(target)
encoded_strings = list(map(encode_string, target))
if not encoded_strings:
            raise ValueError('`target` is empty')
return preprocessing.stack(
encoded_strings, [wordlen or -1], np.int32, 0, True)[0]
return oov, charmap, charencoder | 207a5f499930f2c408ac88199ac45c60b3ed9d97 | 3,651,019 |
import struct
def Decodingfunc(Codebyte):
"""This is the version 'A' of decoding function,
that decodes data coded by 'A' coding function"""
Decodedint=struct.unpack('b',Codebyte)[0]
N=0 #number of repetitions
L=0 # length of single/multiple sequence
if Decodedint >= 0: #single
N = 1
L = Decodedint+1
else: #multiple
L = -Decodedint//16+1
N = -Decodedint-(L-1)*16+1
#print("N =",N," L =",L)
return (N,L) | 450a3e6057106e9567952b33271935392702aea9 | 3,651,020 |
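A usage sketch decoding two packed control bytes; the expected tuples follow directly from the two branches above.

print(Decodingfunc(struct.pack('b', 5)))    # (1, 6)  -- single sequence of length 6
print(Decodingfunc(struct.pack('b', -35)))  # (4, 3)  -- 4 repetitions of a length-3 sequence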
def _metric_notification_text(metric: MetricNotificationData) -> str:
"""Return the notification text for the metric."""
new_value = "?" if metric.new_metric_value is None else metric.new_metric_value
old_value = "?" if metric.old_metric_value is None else metric.old_metric_value
unit = metric.metric_unit if metric.metric_unit.startswith("%") else f" {metric.metric_unit}"
old_value_text = " (unchanged)" if new_value == old_value else f", was {old_value}{unit}"
return (
f" * *{metric.metric_name}* status is {metric.new_metric_status}, was {metric.old_metric_status}. "
f"Value is {new_value}{unit}{old_value_text}.\n"
) | 855ec000b3e37d9f54e4a12d7df4f973b15b706f | 3,651,021 |
from typing import Optional
from typing import Union
from typing import List
from typing import Dict
def train_dist(
domain: Text,
config: Text,
training_files: Optional[Union[Text, List[Text]]],
output: Text = rasa.shared.constants.DEFAULT_MODELS_PATH,
dry_run: bool = False,
force_training: bool = False,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
core_additional_arguments: Optional[Dict] = None,
nlu_additional_arguments: Optional[Dict] = None,
model_to_finetune: Optional[Text] = None,
finetuning_epoch_fraction: float = 1.0,
) -> TrainingResult:
"""Trains a Rasa model (Core and NLU).
Args:
domain: Path to the domain file.
config: Path to the config file.
training_files: List of paths to training data files.
output: Output directory for the trained model.
dry_run: If `True` then no training will be done, and the information about
whether the training needs to be done will be printed.
force_training: If `True` retrain model even if data has not changed.
fixed_model_name: Name of model to be stored.
persist_nlu_training_data: `True` if the NLU training data should be persisted
with the model.
core_additional_arguments: Additional training parameters for core training.
nlu_additional_arguments: Additional training parameters forwarded to training
method of each NLU component.
model_to_finetune: Optional path to a model which should be finetuned or
a directory in case the latest trained model should be used.
        finetuning_epoch_fraction: The fraction of the currently specified training epochs
in the model configuration which should be used for finetuning.
Returns:
An instance of `TrainingResult`.
"""
file_importer = TrainingDataImporter.load_from_config(
config, domain, training_files
)
stories = file_importer.get_stories()
nlu_data = file_importer.get_nlu_data()
training_type = TrainingType.BOTH
if nlu_data.has_e2e_examples():
rasa.shared.utils.common.mark_as_experimental_feature("end-to-end training")
training_type = TrainingType.END_TO_END
if stories.is_empty() and nlu_data.contains_no_pure_nlu_data():
rasa.shared.utils.cli.print_error(
"No training data given. Please provide stories and NLU data in "
"order to train a Rasa model using the '--data' argument."
)
return TrainingResult(code=1)
domain = file_importer.get_domain()
if domain.is_empty():
rasa.shared.utils.cli.print_warning(
"Core training was skipped because no valid domain file was found. "
"Only an NLU-model was created. Please specify a valid domain using "
"the '--domain' argument or check if the provided domain file exists."
)
training_type = TrainingType.NLU
elif stories.is_empty():
rasa.shared.utils.cli.print_warning(
"No stories present. Just a Rasa NLU model will be trained."
)
training_type = TrainingType.NLU
# We will train nlu if there are any nlu example, including from e2e stories.
elif nlu_data.contains_no_pure_nlu_data() and not nlu_data.has_e2e_examples():
rasa.shared.utils.cli.print_warning(
"No NLU data present. Just a Rasa Core model will be trained."
)
training_type = TrainingType.CORE
with telemetry.track_model_training(
file_importer, model_type="rasa",
):
return _train_graph_dist(
file_importer,
training_type=training_type,
output_path=output,
fixed_model_name=fixed_model_name,
model_to_finetune=model_to_finetune,
force_full_training=force_training,
persist_nlu_training_data=persist_nlu_training_data,
finetuning_epoch_fraction=finetuning_epoch_fraction,
dry_run=dry_run,
**(core_additional_arguments or {}),
**(nlu_additional_arguments or {}),
) | 1d1f55dca4a6274713cdd17a7ff5efcc90b46d14 | 3,651,022 |
def wav2vec2_base() -> Wav2Vec2Model:
"""Build wav2vec2 model with "base" configuration
    This is one of the model architectures used in *wav2vec 2.0*
    [:footcite:`baevski2020wav2vec`] for pretraining.
    Returns:
        Wav2Vec2Model: The resulting model.
"""
return _get_model(
extractor_mode="group_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=768,
encoder_projection_dropout=0.1,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=12,
encoder_num_heads=12,
encoder_attention_dropout=0.1,
encoder_ff_interm_features=3072,
encoder_ff_interm_dropout=0.1,
encoder_dropout=0.1,
encoder_layer_norm_first=False,
encoder_layer_drop=0.1,
aux_num_out=None,
) | fb288116f5ef57b314ecfde4a85b1a9bb5d437ce | 3,651,024 |
from unittest.mock import patch
def dont_handle_lock_expired_mock(app):
"""Takes in a raiden app and returns a mock context where lock_expired is not processed
"""
def do_nothing(raiden, message): # pylint: disable=unused-argument
return []
return patch.object(
app.raiden.message_handler, "handle_message_lockexpired", side_effect=do_nothing
) | 2a893e7e755010104071b2b1a93b60a0417e5457 | 3,651,025 |
def system(_printer, ast):
"""Prints the instance system initialization."""
process_names_str = ' < '.join(map(lambda proc_block: ', '.join(proc_block), ast["processNames"]))
return f'system {process_names_str};' | f16c6d5ebe1a029c07efd1f34d3079dd02eb4ac0 | 3,651,027 |
from random import random
def genmove(proc, colour, pluck_random=True):
""" Send either a `genmove` command to the client, or generate a random
move until it is accepted by the client """
if pluck_random and random() < 0.05:
for _count in range(100):
proc.stdin.write('1000 play %s %s\n' % (colour, random_vertex(),))
proc.stdin.flush()
for line in proc.stdout:
line = (str(line) or '').strip()
print(line)
if line.startswith('=1000'):
vertex = line.split(' ', maxsplit=2)[-1].strip()
return vertex
elif line.startswith('?1000'):
break
return 'pass'
else:
proc.stdin.write('2000 genmove %s\n' % (colour,))
proc.stdin.flush()
for line in proc.stdout:
line = (str(line) or '').strip()
print(line)
if line.startswith('=2000'):
vertex = line.split(' ', maxsplit=2)[-1].strip()
return vertex
return None | 589a054be52c40507d8aba5f10a3d67489ec301b | 3,651,028 |
from qgis.core import QgsVectorLayer, QgsWkbTypes
def geojson_to_meta_str(txt):
""" txt is assumed to be small
"""
vlayer = QgsVectorLayer(txt, "tmp", "ogr")
crs_str = vlayer.sourceCrs().toWkt()
wkb_type = vlayer.wkbType()
geom_str = QgsWkbTypes.displayString(wkb_type)
feat_cnt = vlayer.featureCount()
return geom_str, crs_str, feat_cnt | 33b0a2055ec70c2142977469384a20b99d26cee8 | 3,651,029 |
def tdf_UppestID(*args):
"""
* Returns ID 'ffffffff-ffff-ffff-ffff-ffffffffffff'.
:rtype: Standard_GUID
"""
return _TDF.tdf_UppestID(*args) | 1d9d5c528a2f202d49c104b7a56dd7a75b9bc795 | 3,651,030 |
def blend_multiply(cb: float, cs: float) -> float:
"""Blend mode 'multiply'."""
return cb * cs | d53c3a49585cf0c12bf05c233fc6a9dd30ad25b9 | 3,651,031 |
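A one-line usage sketch, assuming channel values normalised to [0, 1].

print(blend_multiply(0.5, 0.4))  # 0.2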
import numpy as np
def print_data_distribution(y_classes, class_names):
"""
:param y_classes: class of each instance, for example, if there are 3 classes, and y[i] is [1,0,0], then instance[i] belongs to class[0]
:param class_names: name of each class
:return: None
"""
count = np.zeros(len(class_names))
pro = []
num = []
for y in y_classes:
class_index = np.argmax(y)
count[class_index] = count[class_index] + 1
for i, class_name in enumerate(class_names):
print(class_name, count[i])
pro.append(class_name)
num.append(count[i])
return pro, num | 289ada7cab00153f894e81dd32980b8d224d637c | 3,651,032 |
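A usage sketch with one-hot labels for a two-class problem.

y_classes = np.array([[1, 0], [0, 1], [1, 0]])   # one-hot labels
names, counts = print_data_distribution(y_classes, ['cat', 'dog'])
print(names, counts)   # ['cat', 'dog'] [2.0, 1.0]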
import collections.abc
import numpy as np
def reorder_conj_pols(pols):
"""
Reorders a list of pols, swapping pols that are conjugates of one another.
For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
This is useful for the _key2inds function in the case where an antenna
pair is specified but the conjugate pair exists in the data. The conjugated
data should be returned in the order of the polarization axis, so after conjugating
the data, the pols need to be reordered.
For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but
the user requests antpair (1, 0), they should get:
[(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]
Args:
pols: Polarization array (strings or ints)
Returns:
conj_order: Indices to reorder polarization axis
"""
    if not isinstance(pols, collections.abc.Iterable):
raise ValueError('reorder_conj_pols must be given an array of polarizations.')
cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where
conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols]
if -1 in conj_order:
raise ValueError('Not all conjugate pols exist in the polarization array provided.')
return conj_order | 98730f8434eff02c9a63506e01fbcd478e23e76e | 3,651,033 |
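A usage sketch; `conj_pol` is an external helper, so a minimal stand-in that swaps the two feed letters of a string polarization is assumed here.

def conj_pol(p):
    return p[::-1]   # assumed stand-in: 'xy' -> 'yx', 'xx' -> 'xx'

print(reorder_conj_pols(['xx', 'xy', 'yx', 'yy']))  # [0, 2, 1, 3]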
def get_machine_from_uuid(uuid):
"""Helper function that returns a Machine instance of this uuid."""
machine = Machine()
machine.get_from_uuid(uuid)
return machine | 6f78afd9547af5c83abf49a1ac56209ee0e6b506 | 3,651,034 |
def convert_numbers(text):
"""Convert numbers to number words"""
tokens = []
for token in text.split(" "):
try:
word = w2n.num_to_word(token)
tokens.append(word)
        except Exception:
tokens.append(token)
return " ".join(tokens) | 8d6eb622076a0404824db2dbeaaba704f3bf6e79 | 3,651,035 |
def init_emulator(rom: bytes):
""" For use in interactive mode """
emulator = NitroEmulator()
emulator.load_nds_rom(rom, True)
return emulator | 9ecaa2a876b8e5bd93deece3ccc62b41ef9c6f3f | 3,651,036 |
from typing import Dict
from typing import Union
import torch
import torch.nn as nn
def sub_module_name_of_named_params(named_params: kParamDictType, module_name_sub_dict: Dict[str, str]) \
-> Union[Dict[str, nn.Parameter], Dict[str, torch.Tensor]]:
"""Sub named_parameters key's module name part with module_name_sub_dict.
Args:
named_params: Key-value pair of param name and param value.
module_name_sub_dict: Module names' sub dict.
Returns:
named parameters whose module name part of it's param name is subbed by module_name_sub_dict.
"""
sub_named_params = dict()
for module_param_name, value in named_params.items():
param_name, module_name = map(lambda inverse_name: inverse_name[::-1],
module_param_name[::-1].split('.', maxsplit=1))
if module_name not in module_name_sub_dict:
sub_named_params[module_param_name] = value
else:
sub_named_params[module_name_sub_dict[module_name] + '.' + param_name] = value
return sub_named_params | 8bbcdb865f2b0c452c773bc18767128561e806c7 | 3,651,037 |
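A usage sketch renaming the module prefix of a small parameter dict; the names are made up for illustration.

params = {'encoder.fc.weight': torch.zeros(2, 2), 'encoder.fc.bias': torch.zeros(2)}
renamed = sub_module_name_of_named_params(params, {'encoder.fc': 'head.fc'})
print(sorted(renamed))   # ['head.fc.bias', 'head.fc.weight']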
def my_func_1(x, y):
"""
    Returns x raised to the power y.
    Named parameters:
    x -- the base number
    y -- the exponent
(number, number) -> number
>>> my_func_1(2, 2)
4
"""
return x ** y | 9572566f1660a087056118bf974bf1913348dfa4 | 3,651,039 |
import webtest
def indexer_testapp(es_app):
""" Indexer testapp, meant for manually triggering indexing runs by posting to /index.
Always uses the ES app (obviously, but not so obvious previously) """
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'INDEXER',
}
return webtest.TestApp(es_app, environ) | 59343963307c39e43034664febb0ebf00f6ab1bd | 3,651,040 |
import tensorflow as tf
import tensorflow_probability as tfp
def BNN_like(NN,cls=tfp.layers.DenseReparameterization,copy_weight=False,**kwargs):
"""
Create Bayesian Neural Network like input Neural Network shape
Parameters
----------
NN : tf.keras.Model
Neural Network for imitating shape
cls : tfp.layers
Bayes layers class
copy_weight : bool, optional
Copy weight from NN when `True`. The default is `False`
Returns
-------
model : tf.keras.Model
Bayes Neural Network
"""
inputs = tf.keras.Input(shape=(tf.shape(NN.layers[0].kernel)[0],))
x = inputs
for i, L in enumerate(NN.layers):
layer_kwargs = { **kwargs }
if copy_weight:
layer_kwargs["kernel_prior_fn": multivariate_normal_fn(L.kernel)]
layer_kwargs["bias_prior_fn": multivariate_normal_fn(L.bias)]
x = cls(L.units,activation=L.activation,**layer_kwargs)(x)
return tf.keras.Model(inputs=inputs,outputs=x) | 9039f70701fd832843fd160cd71d5d46f7b17b56 | 3,651,041 |
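A hedged usage sketch with a tiny Keras MLP; with the default `copy_weight=False` the external `multivariate_normal_fn` helper is not needed.

nn_model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1),
])
bnn = BNN_like(nn_model)   # same layer widths, but with DenseReparameterization layers
bnn.summary()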
def matrix_mult(a, b):
"""
Function that multiplies two matrices a and b
Parameters
----------
a,b : matrices
Returns
-------
new_array : matrix
The matrix product of the inputs
"""
new_array = []
for i in range(len(a)):
        new_array.append([0 for _ in range(len(b[0]))])
for j in range(len(b[0])):
for k in range(len(a[0])):
new_array[i][j] += a[i][k] * b[k][j]
return new_array | 5e0f27f29b6977ea38987fa243f08bb1748d4567 | 3,651,042 |
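A usage sketch multiplying two 2x2 matrices.

a = [[1, 2], [3, 4]]
b = [[5, 6], [7, 8]]
print(matrix_mult(a, b))   # [[19, 22], [43, 50]]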