content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def createMonatomicGas(elm, pascal):
"""createMonatomicGas(elm, pascal)
Create a gas of single atoms of the specified element at the specified pressure in Pascal and 300 K"""
return epq.Gas((elm,), (1,), pascal, 300.0, elm.toString() + " gas at %f Pa" % pascal) | 4552f551c27e0f10dea72c96bc32b9927649f749 | 4,768 |
import numpy as np
import torch
def boxes_to_central_line_torch(boxes):
"""See boxes_to_central_line
Args:
boxes (tensor[..., 7]): (x, y, z, l, w, h, theta) of each box
Returns:
boxes_lp (tensor[..., 3]): (a, b, c) line parameters of each box
"""
# in case length is shorter than width
bmask = boxes[..., 3] < boxes[..., 4]
theta = -boxes[..., 6] # not sure why minus is needed
theta[bmask] -= 0.5 * np.pi
a = torch.tan(theta)
b = -torch.ones_like(a)
c = -a * boxes[..., 0] - b * boxes[..., 1]
boxes_lp = torch.stack((a, b, c), dim=-1)
boxes_lp /= torch.linalg.norm(boxes_lp, dim=-1, keepdim=True)
return boxes_lp | e96667177cee058fe5f5cd1e8446df97d976474e | 4,769 |
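A minimal usage sketch for `boxes_to_central_line_torch` above, with hypothetical box values: an axis-aligned box centred at (2, 3) with zero yaw should map to the horizontal line y = 3, returned as unit-normalised (a, b, c) coefficients of a*x + b*y + c = 0.
import torch

box = torch.tensor([[2.0, 3.0, 0.0, 4.0, 2.0, 1.0, 0.0]])  # (x, y, z, l, w, h, theta)
print(boxes_to_central_line_torch(box))
# tensor([[ 0.0000, -0.3162,  0.9487]])  ->  -y + 3 = 0 after rescaling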
def load_as_spark(url: str) -> "PySparkDataFrame": # noqa: F821
"""
    Load the shared table using the given url as a Spark DataFrame. `PySpark` must be installed, and
the application must be a PySpark application with the Apache Spark Connector for Delta Sharing
installed.
:param url: a url under the format "<profile>#<share>.<schema>.<table>"
:return: A Spark DataFrame representing the shared table.
"""
    try:
        from pyspark.sql import SparkSession
    except ImportError:
        raise ImportError("Unable to import pyspark. `load_as_spark` requires PySpark.")
spark = SparkSession.getActiveSession()
assert spark is not None, (
"No active SparkSession was found. "
"`load_as_spark` requires running in a PySpark application."
)
return spark.read.format("deltaSharing").load(url) | d427f71530b982703853146cbaa1ce3585b8f195 | 4,770 |
import numpy as np
def calClassSpecificProbPanel(param, expVars, altAvMat, altChosen, obsAv):
"""
Function that calculates the class specific probabilities for each decision-maker in the
dataset
Parameters
----------
param : 1D numpy array of size nExpVars.
Contains parameter values.
expVars : 2D numpy array of size (nExpVars x (nRows)).
Contains explanatory variables.
altAvMat : sparse matrix of size (nRows x nObs).
The (i, j)th element equals 1 if the alternative corresponding to the ith
column in expVars is available to the decision-maker corresponding to the
jth observation, and 0 otherwise.
altChosen : sparse matrix of size (nRows x nObs).
The (i, j)th element equals 1 if the alternative corresponding to the ith
column in expVars was chosen by the decision-maker corresponding to the
jth observation, and 0 otherwise.
obsAv : sparse matrix of size (nObs x nInds).
The (i, j)th element equals 1 if the ith observation in the dataset corresponds
to the jth decision-maker, and 0 otherwise.
Returns
-------
np.exp(lPInd) : 2D numpy array of size 1 x nInds.
Identifies the class specific probabilities for each individual in the
dataset.
"""
v = np.dot(param[None, :], expVars) # v is 1 x nRows
ev = np.exp(v) # ev is 1 x nRows
ev[np.isinf(ev)] = 1e+20 # As precaution when exp(v) is too large for machine
ev[ev < 1e-200] = 1e-200 # As precaution when exp(v) is too close to zero
nev = ev * altAvMat # nev is 1 x nObs
nnev = altAvMat * np.transpose(nev) # nnev is nRows x 1
p = np.divide(ev, np.transpose(nnev)) # p is 1 x nRows
p[np.isinf(p)] = 1e-200 # When none of the alternatives are available
pObs = p * altChosen # pObs is 1 x nObs
lPObs = np.log(pObs) # lPObs is 1 x nObs
lPInd = lPObs * obsAv # lPInd is 1 x nInds
return np.exp(lPInd) # prob is 1 x nInds | ccb867b44db9f0d7f9b35c92ef66a96097b4b881 | 4,771 |
def build_expression_tree(tokens):
"""Returns an ExpressionTree based upon by a tokenized expression."""
s = [] # we use Python list as stack
for t in tokens:
if t in '+-x*/': # t is an operator symbol
s.append(t) # push the operator symbol on the stack
elif t not in '()': # consider t to be a literal
s.append(ExpressionTree(t)) # push trivial tree storing value
elif t == ')' : # compose a new tree from three constituent parts
right = s.pop() # right subtree as per LIFO
op = s.pop() # operator symbol
left = s.pop() # left subtree
s.append(ExpressionTree(op, left, right)) # reconstruct tree and push it back on the stack
# ignore the parenthesis
return s.pop() # the last reconstructed tree | b54ce3c3d784ff80f380774135c7353d6ebd1078 | 4,772 |
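A small usage sketch for `build_expression_tree`, with a minimal stand-in for the `ExpressionTree` class it expects (the real class comes from the surrounding codebase); single-character tokens are enough to exercise the stack logic.
class ExpressionTree:
    """Minimal placeholder: stores a token with optional left/right subtrees."""
    def __init__(self, token, left=None, right=None):
        self.token, self.left, self.right = token, left, right

    def __str__(self):
        if self.left is None and self.right is None:
            return str(self.token)
        return f'({self.left}{self.token}{self.right})'

tree = build_expression_tree('((2+3)x4)')
print(tree)  # ((2+3)x4)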
import json
def unpack_blockchain(s: str) -> block.Blockchain:
"""Unapck blockchain from JSON string with b64 for bytes."""
blocks = json.loads(s)
return [_unpack_block(block) for block in blocks] | ed43ea73df866489e814fd1bdff357c158aade91 | 4,773 |
import re
def parse(options,full_path):
"""
Parse the data according to several regexes
"""
global p_entering_vip_block, p_exiting_vip_block, p_vip_next, p_vip_number, p_vip_set
in_vip_block = False
vip_list = []
vip_elem = {}
order_keys = []
if (options.input_file != None):
with open(options.input_file, mode=fd_read_options) as fd_input:
for line in fd_input:
line = line.strip()
# We match a vip block
if p_entering_vip_block.search(line):
in_vip_block = True
# We are in a vip block
if in_vip_block:
if p_vip_number.search(line):
vip_number = p_vip_number.search(line).group('vip_number')
vip_number = re.sub('["]', '', vip_number)
vip_elem['id'] = vip_number
if not('id' in order_keys):
order_keys.append('id')
# We match a setting
if p_vip_set.search(line):
vip_key = p_vip_set.search(line).group('vip_key')
if not(vip_key in order_keys):
order_keys.append(vip_key)
vip_value = p_vip_set.search(line).group('vip_value').strip()
vip_value = re.sub('["]', '', vip_value)
vip_elem[vip_key] = vip_value
# We are done with the current vip id
if p_vip_next.search(line):
vip_list.append(vip_elem)
vip_elem = {}
# We are exiting the vip block
if p_exiting_vip_block.search(line):
in_vip_block = False
return (vip_list, order_keys)
else:
# for files in os.listdir(os.path.abspath(options.input_folder)):
with open(full_path, mode=fd_read_options) as fd_input:
for line in fd_input:
line = line.strip()
# We match a vip block
if p_entering_vip_block.search(line):
in_vip_block = True
# We are in a vip block
if in_vip_block:
if p_vip_number.search(line):
vip_number = p_vip_number.search(line).group('vip_number')
vip_number = re.sub('["]', '', vip_number)
vip_elem['id'] = vip_number
if not('id' in order_keys):
order_keys.append('id')
# We match a setting
if p_vip_set.search(line):
vip_key = p_vip_set.search(line).group('vip_key')
if not(vip_key in order_keys):
order_keys.append(vip_key)
vip_value = p_vip_set.search(line).group('vip_value').strip()
vip_value = re.sub('["]', '', vip_value)
vip_elem[vip_key] = vip_value
# We are done with the current vip id
if p_vip_next.search(line):
vip_list.append(vip_elem)
vip_elem = {}
# We are exiting the vip block
if p_exiting_vip_block.search(line):
in_vip_block = False
return (vip_list, order_keys) | 08177b0ab18c77154053249c2308c4705d1dbb65 | 4,774 |
def update_wishlist_games(cur, table, wishlist_args, update_delay):
"""A function to update wishlist games.
:param cur: database cursor object
:type cur: Cursor
:param table: name of table to work on
:type table: str
:param wishlist_args: list of wishlist games to add to database
:type wishlist_args: list
:param update_delay: the amount of time that must pass before updating
:type update_delay: timedelta
"""
# Figure out which games need updating
outdated_games = DB_Calls.wishlist_needs_updating(cur, table, update_delay)
# Fetch deals for new and existing wishlist games
if(wishlist_args or outdated_games):
if(table == DB_Tables.PC_WISHLIST.value):
_table = DB_Tables.PC_WISHLIST.value
games_to_update, new_games = (
PC.get_wishlist_deals(cur, outdated_games+wishlist_args))
elif(table == DB_Tables.PS_WISHLIST.value):
_table = DB_Tables.PS_WISHLIST.value
games_to_update, new_games = (
PS.get_wishlist_deals(cur, outdated_games+wishlist_args))
if(new_games):
DB_Calls.add_games(cur, _table, new_games, games_to_update)
return True
return False | fcd80f19065112893af84d0a9862888a13bde372 | 4,775 |
# `M` here is the project's signal-model module (providing BitsSignal, ListSignal,
# and BundleSignal), not the `re.M` flag.
def WrapSignal(signal):
"""Wrap a model signal with a corresponding frontend wrapper."""
if type(signal) is M.BitsSignal:
return BitsFrontend(signal)
elif type(signal) is M.ListSignal:
return ListFrontend(signal)
elif type(signal) is M.BundleSignal:
return BundleFrontend(signal)
else:
assert False, f'Cannot wrap signal of type {type(signal)}' | 374c47d5053853bc2b23d56d40a2752521a1351f | 4,776 |
from typing import Any
import jax.numpy as jnp
import numpy as np
def is_array_like(element: Any) -> bool:
"""Returns `True` if `element` is a JAX array, a NumPy array, or a Python
`float`/`complex`/`bool`/`int`.
"""
return isinstance(
element, (jnp.ndarray, np.ndarray, float, complex, bool, int)
) or hasattr(element, "__jax_array__") | acb681e329883742009e3e2543158cd602839ae8 | 4,777 |
def parse(javascript_code):
"""Returns syntax tree of javascript_code.
Syntax tree has the same structure as syntax tree produced by esprima.js
    Same as PyJsParser().parse. For your convenience :) """
p = PyJsParser()
return p.parse(javascript_code) | 295a6d5683b975a9229e27d06cc1369e6a6f0a95 | 4,778 |
import tweepy
def twitterAuth():
""" Authenticate user using Twitter API generated credentials """
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
return tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True) | c0522247e22b2a029c7f954960b1f9f91e71e3cb | 4,779 |
def GetInstalledPackageUseFlags(pkg_str, board=None):
"""Gets the list of USE flags for installed packages matching |pkg_str|.
Args:
pkg_str: The package name with optional category, version, and slot.
board: The board to inspect.
Returns:
A dictionary with the key being a package CP and the value being the list
of USE flags for that package.
"""
cmd = ['qlist']
if board:
cmd = ['qlist-%s' % board]
cmd += ['-CqU', pkg_str]
result = cros_build_lib.RunCommand(
cmd, enter_chroot=True, capture_output=True, error_code_ok=True)
use_flags = {}
if result.returncode == 0:
for line in result.output.splitlines():
tokens = line.split()
use_flags[tokens[0]] = tokens[1:]
return use_flags | 0b203ebe078d56053c4e2c3b23db91492399de55 | 4,780 |
def make_cursor():
"""
Creates a cursor for iterating through results
GetParams:
account: an account
user: a user
handle: a shark client handle
Returns:
        a json object containing the cursor handle
"""
data, statusCode = cursor()
return jsonify(data), statusCode | 225cf3bdcb001f90041cb94dc5fd89c935daaf24 | 4,781 |
from typing import Any
import datetime as dt
import agate
def run_result_factory(data: list[tuple[Any, Any]]):
"""
We need to handle dt.datetime and agate.table.Table.
The rest of the types should already be JSON-serializable.
"""
d = {}
for key, val in data:
if isinstance(val, dt.datetime):
val = val.isoformat()
elif isinstance(val, agate.table.Table):
# agate Tables have a few print methods but they offer plain
# text representations of the table which are not very JSON
# friendly. There is a to_json method, but I don't think
# sending the whole table in an XCOM is a good idea either.
val = {
k: v.__class__.__name__
for k, v in zip(val._column_names, val._column_types)
}
d[key] = val
return d | 25462e0eaf87d4fcdd1f48161dfa5be4643485f4 | 4,782 |
import numpy as np
def compute_steepness(zeroth_moment, peak_wavenumber):
"""Compute characteristic steepness from given peak wave number."""
return np.sqrt(2 * zeroth_moment) * peak_wavenumber | e1cb0beb19ff73e7d2b6a6879d4a388d04644953 | 4,783 |
def secondary_side_radius(mass_ratio, surface_potential):
"""
Side radius of secondary component
:param mass_ratio: float;
:param surface_potential: float;
:return: float; side radius
"""
return calculate_side_radius(1.0, mass_ratio, 1.0, surface_potential, 'secondary') | 3353d5b9cb76f9127ed1066a20a3328fea9b8a46 | 4,784 |
def pts_from_rect_inside(r):
""" returns start_pt, end_pt where end_pt is _inside_ the rectangle """
return (r[0], r[1]), ((r[0] + r[2] - 1), (r[1] + r[3] - 1)) | 51f5ea39763e9f16a2bb3a56eebef4dfe06c5746 | 4,785 |
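A quick sanity check for `pts_from_rect_inside`, assuming rectangles are given as (x, y, width, height):
start, end = pts_from_rect_inside((10, 20, 5, 3))
print(start, end)  # (10, 20) (14, 22) -- the end point is the last pixel inside the rect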
import numpy as np
def minimum_distance(object_1, object_2):
""" Takes two lists as input
A list of numpy arrays of coordinates that make up object 1 and object 2
Measures the distances between each of the coordinates
Returns the minimum distance between the two objects, as calculated using a vector norm
Stops the calculation and returns 0 if two coordinates overlap
"""
# package import
# main algorithm
minimum_distance = 100000
for coord_1 in object_1:
for coord_2 in object_2:
distance_btwn_coords = np.linalg.norm(coord_1 - coord_2)
if distance_btwn_coords == 0:
minimum_distance = distance_btwn_coords
return float(minimum_distance)
elif distance_btwn_coords < minimum_distance:
minimum_distance = distance_btwn_coords
return float(minimum_distance) | e61fbb1ab83c5147f69351022f59ebab3295cb5a | 4,786 |
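A small usage sketch for `minimum_distance` with two hypothetical point sets; the closest pair is (1, 0) and (3, 0), so the result is 2.0.
import numpy as np

object_1 = [np.array([0.0, 0.0]), np.array([1.0, 0.0])]
object_2 = [np.array([3.0, 0.0]), np.array([4.0, 4.0])]
print(minimum_distance(object_1, object_2))  # 2.0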
def retrieve_pkl_file(filename, verbose = False):
"""
Retrieve and return contents of pkl file
"""
if verbose == True:
start_time = timelib.time()
print("\n * Retrieving %s file ..."%filename)
data = pd.read_pickle(filename)
if verbose == True:
print("\n %s retrieved in %.1f seconds."%(filename, timelib.time() - start_time))
    return data | aa7c108d32ea387c2677c0fccf285437d149ec01 | 4,787 |
import fnmatch
import os
import shutil
import zipfile
def extractIpsFile(containerFile,newSimName):
"""
Given a container file, get the ips file in it and write it to current
directory so that it can be used
"""
oldIpsFile=os.path.splitext(containerFile)[0]+os.extsep+"ips"
zf=zipfile.ZipFile(containerFile,"r")
foundFile=""
# Assume that container file contains 1 ips file.
oldIpsFile=fnmatch.filter(zf.namelist(),"*.ips")[0]
ifile=zf.read(oldIpsFile)
ipsFile=newSimName+".ips"
if os.path.exists(ipsFile):
print "Moving "+ipsFile+" to "+"Save"+ipsFile
shutil.copy(ipsFile, "Save"+ipsFile)
ff=open(ipsFile,"w")
ff.write(ifile)
ff.close()
return ipsFile | a8135c7d3a10825e539819dfdb62d5f677680e44 | 4,788 |
import torch
def nplr(measure, N, rank=1, dtype=torch.float):
""" Return w, p, q, V, B such that
(w - p q^*, B) is unitarily equivalent to the original HiPPO A, B by the matrix V
i.e. A = V[w - p q^*]V^*, B = V B
"""
    assert dtype in (torch.float, torch.cfloat)
if measure == 'random':
dtype = torch.cfloat if dtype == torch.float else torch.cdouble
# w = torch.randn(N//2, dtype=dtype)
w = -torch.exp(torch.randn(N//2)) + 1j*torch.randn(N//2)
P = torch.randn(rank, N//2, dtype=dtype)
B = torch.randn(N//2, dtype=dtype)
V = torch.eye(N, dtype=dtype)[..., :N//2] # Only used in testing
return w, P, B, V
A, B = transition(measure, N)
A = torch.as_tensor(A, dtype=dtype) # (N, N)
B = torch.as_tensor(B, dtype=dtype)[:, 0] # (N,)
P = rank_correction(measure, N, rank=rank, dtype=dtype)
AP = A + torch.sum(P.unsqueeze(-2)*P.unsqueeze(-1), dim=-3)
w, V = torch.linalg.eig(AP) # (..., N) (..., N, N)
# V w V^{-1} = A
# Only keep one of the conjugate pairs
w = w[..., 0::2].contiguous()
V = V[..., 0::2].contiguous()
V_inv = V.conj().transpose(-1, -2)
B = contract('ij, j -> i', V_inv, B.to(V)) # V^* B
P = contract('ij, ...j -> ...i', V_inv, P.to(V)) # V^* P
return w, P, B, V | 0451fa5ed1eeb60bef386991b2d953c190282e0e | 4,789 |
def read_data(oldest_year: int = 2020, newest_year: int = 2022):
"""Read in csv files of yearly covid data from the nytimes and concatenate into a single pandas DataFrame.
Args:
oldest_year: first year of data to use
newest_year: most recent year of data to use
"""
df_dicts = {} # dictionary to hold the data for each year before concatenation
for year in range(oldest_year, newest_year + 1):
df_dicts[f"df_{year}"] = pd.read_csv(
f"https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-counties-{year}.csv",
index_col="date",
)
logger.info("data read in successfully")
return pd.concat(df_dicts.values()) | 7b8e55ae41890eef3e4f0ac5a9502b8b19f1ad20 | 4,790 |
from ipaddress import ip_address, IPv4Address
def ip_is_v4(ip: str) -> bool:
"""
Determines whether an IP address is IPv4 or not
:param str ip: An IP address as a string, e.g. 192.168.1.1
:raises ValueError: When the given IP address ``ip`` is invalid
    :return bool: True if IPv4, False if not (e.g. an IPv6 address)
"""
return type(ip_address(ip)) == IPv4Address | d0fa8351921e34ee44c1b6c9fecf14c0efe83397 | 4,791 |
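Usage sketch for `ip_is_v4`: IPv4 strings return True, IPv6 strings return False, and an invalid address raises ValueError from `ip_address`.
print(ip_is_v4("192.168.1.1"))  # True
print(ip_is_v4("::1"))          # False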
def kdump(self_update=False, snapshot=None):
"""Regenerate kdump initrd
A new initrd for kdump is created in a snapshot.
self_update
Check for newer transactional-update versions.
snapshot
Use the given snapshot or, if no number is given, the current
default snapshot as a base for the next snapshot. Use
"continue" to indicate the last snapshot done.
CLI Example:
.. code-block:: bash
salt microos transactional_update kdump snapshot="continue"
"""
cmd = ["transactional-update"]
cmd.extend(_global_params(self_update=self_update, snapshot=snapshot))
cmd.append("kdump")
return _cmd(cmd) | fd49bf6bfb4af52625b4e479eca60594edb59d9e | 4,792 |
import logging
from datetime import datetime
def register_keywords_user(email, keywords, price):
"""Register users then keywords and creates/updates doc
Keyword arguments:
email - email for user
keywords - string of keywords
price -- (optional) max price can be set to None
"""
logging.info('[INFO] Registering user email \'{}\' '.format(email))
# create user doc if doesn't exist
db = utils.get_db_handle('users')
doc = db.find_one({ 'email': email })
# metadata
keywords_id = keywords.replace(" ", "_")
    date = str(datetime.now()).split('.')[0]
num_keywords = 0
list_keywords = []
if doc == None:
doc = db.insert_one({
'email': email,
'dateCreated': date,
'numKeywords': num_keywords,
'keywords': []
})
logging.info('[INFO] Creating new user doc {} with _id: {}'.format(email, doc.inserted_id))
else:
num_keywords = doc['numKeywords']
list_keywords = doc['keywords']
logging.info('[INFO] Found user doc \'{}\' with {} keywords'.format(email, num_keywords))
# insert keywords info along in user doc
max_keywords = 5
if not utils.check_key_exists(list_keywords, keywords_id):
if num_keywords < max_keywords:
update = utils.update_users_doc(db, email, keywords_id, price, date)
if update:
logging.info('[INFO] Successfully created or updated doc for \'{}\''.format(email))
else:
logging.info('[INFO] Error creating or updating doc for \'{}\''.format(email))
return False, 'ERROR_CREATE_DOC'
else:
logging.info('[INFO] Unable to create doc for \'{}\''.format(email))
logging.info('[INFO] Number of keywords exceed maximum of {}'.format(max_keywords))
return False, 'MAX_KEYWORDS_LIMIT'
else:
logging.info('[INFO] Unable to create doc for \'{}\''.format(email))
        logging.info('[INFO] Duplicate key {} for user {}'.format(keywords_id, email))
return False, 'ERROR_DUPE_KEY'
logging.info('[INFO] Registering keywords \'{}\' for email \'{}\' with price \'{}\''.format(keywords, email, price))
# create keywords doc if doesn't exist
db = utils.get_db_handle('keywords')
doc = db.find_one({ 'keyword': keywords_id })
# keywords metadata
    date = str(datetime.now()).split('.')[0]
if doc == None:
doc = db.insert_one({
'keyword': keywords_id,
'subreddit': 'frugalmalefashion',
'dateCreated': date,
'users': []
})
logging.info('[INFO] Creating new keywords doc {} with _id: {}'.format(keywords_id, doc.inserted_id))
else:
logging.info('[INFO] Found keywords doc \'{}\''.format(keywords_id))
# insert user info along in keyword doc
update = utils.update_keywords_doc(db, keywords_id, email, price, date)
if update:
logging.info('[INFO] Successfully created or updated doc for \'{}\''.format(keywords_id))
else:
logging.error('[ERROR] Error creating or updating doc for \'{}\''.format(keywords_id))
return False, 'ERROR_CREATE_DOC'
return True, None | 09c0d3ff12fbd99d6e6a6c23906a74b525f91649 | 4,793 |
import numpy as np
import matplotlib.pyplot as plt
def plot_distribution(df, inv, ax=None, distribution=None, tau_plot=None, plot_bounds=True, plot_ci=True,
label='', ci_label='', unit_scale='auto', freq_axis=True, area=None, normalize=False,
predict_kw={}, **kw):
"""
Plot the specified distribution as a function of tau.
Parameters
----------
df : pandas DataFrame
DataFrame containing experimental EIS data. Used only for scaling and frequency bounds
If None is passed, scaling will not be performed and frequency bounds will not be drawn
inv : Inverter instance
Fitted Inverter instance
ax : matplotlib axis
Axis on which to plot
distribution : str, optional (default: None)
Name of distribution to plot. If None, first distribution in inv.distributions will be used
    tau_plot : array, optional (default: None)
Time constant grid over which to evaluate the distribution.
If None, a grid extending one decade beyond the basis time constants in each direction will be used.
plot_bounds : bool, optional (default: True)
If True, indicate frequency bounds of experimental data with vertical lines.
Requires that DataFrame of experimental data be passed for df argument
plot_ci : bool, optional (default: True)
If True, plot the 95% credible interval of the distribution (if available).
label : str, optional (default: '')
Label for matplotlib
unit_scale : str, optional (default: 'auto')
Scaling unit prefix. If 'auto', determine from data.
Options are 'mu', 'm', '', 'k', 'M', 'G'
freq_axis : bool, optional (default: True)
If True, add a secondary x-axis to display frequency
area : float, optional (default: None)
Active area. If provided, plot the area-normalized distribution
normalize : bool, optional (default: False)
If True, normalize the distribution such that the polarization resistance is 1
predict_kw : dict, optional (default: {})
Keyword args to pass to Inverter predict_distribution() method
kw : keyword args, optional
Keyword args to pass to maplotlib.pyplot.plot
Returns
-------
ax : matplotlib axis
Axis on which distribution is plotted
"""
if ax is None:
fig, ax = plt.subplots(figsize=(3.5, 2.75))
# If no distribution specified, use first distribution
if distribution is None:
distribution = list(inv.distributions.keys())[0]
# If tau_plot not given, go one decade beyond basis tau in each direction
if tau_plot is None:
basis_tau = inv.distributions[distribution]['tau']
tmin = np.log10(np.min(basis_tau)) - 1
tmax = np.log10(np.max(basis_tau)) + 1
num_decades = tmax - tmin
tau_plot = np.logspace(tmin, tmax, int(20 * num_decades + 1))
F_pred = inv.predict_distribution(distribution, tau_plot, **predict_kw)
if normalize and area is not None:
raise ValueError('If normalize=True, area cannot be specified.')
if area is not None:
if df is not None:
for col in ['Zmod', 'Zreal', 'Zimag']:
df[col] *= area
F_pred *= area
if normalize:
Rp_kw = predict_kw.copy()
# if time given, calculate Rp at given time
if 'time' in predict_kw.keys():
Rp_kw['times'] = [predict_kw['time'], predict_kw['time']]
del Rp_kw['time']
Rp = inv.predict_Rp(**Rp_kw)
F_pred /= Rp
if unit_scale == 'auto':
if normalize:
unit_scale = ''
elif df is not None:
unit_scale = get_unit_scale(df, area)
else:
unit_map = {-2: '$\mu$', -1: 'm', 0: '', 1: 'k', 2: 'M', 3: 'G'}
F_max = np.max(F_pred)
F_ord = np.floor(np.log10(F_max) / 3)
unit_scale = unit_map.get(F_ord, '')
scale_factor = get_factor_from_unit(unit_scale)
ax.plot(tau_plot, F_pred / scale_factor, label=label, **kw)
if plot_ci:
if inv.fit_type.find('bayes') >= 0:
F_lo = inv.predict_distribution(distribution, tau_plot, percentile=2.5, **predict_kw)
F_hi = inv.predict_distribution(distribution, tau_plot, percentile=97.5, **predict_kw)
if area is not None:
F_lo *= area
F_hi *= area
if normalize:
F_lo /= Rp
F_hi /= Rp
ax.fill_between(tau_plot, F_lo / scale_factor, F_hi / scale_factor, color='k', alpha=0.2, label=ci_label)
ax.set_xscale('log')
ax.set_xlabel(r'$\tau$ / s')
if plot_bounds:
if df is not None:
ax.axvline(1 / (2 * np.pi * df['Freq'].max()), c='k', ls=':', alpha=0.6, zorder=-10)
ax.axvline(1 / (2 * np.pi * df['Freq'].min()), c='k', ls=':', alpha=0.6, zorder=-10)
if area is not None:
ax.set_ylabel(fr'$\gamma \, (\ln{{\tau}})$ / {unit_scale}$\Omega\cdot\mathrm{{cm}}^2$')
elif normalize:
ax.set_ylabel(fr'$\gamma \, (\ln{{\tau}}) / R_p$')
else:
ax.set_ylabel(fr'$\gamma \, (\ln{{\tau}})$ / {unit_scale}$\Omega$')
# add freq axis to DRT plot
if freq_axis:
# check for existing twin axis
all_axes = ax.figure.axes
ax2 = None
for other_ax in all_axes:
if other_ax.bbox.bounds == ax.bbox.bounds and other_ax is not ax:
ax2 = other_ax
break
else:
continue
if ax2 is None:
ax2 = ax.twiny()
ax2.set_xscale('log')
ax2.set_xlim(ax.get_xlim())
f_powers = np.arange(7, -4.1, -2)
f_ticks = 10 ** f_powers
ax2.set_xticks(1 / (2 * np.pi * f_ticks))
ax2.set_xticklabels(['$10^{{{}}}$'.format(int(p)) for p in f_powers])
ax2.set_xlabel('$f$ / Hz')
# Indicate zero if necessary
if np.min(F_pred) >= 0:
ax.set_ylim(0, ax.get_ylim()[1])
else:
ax.axhline(0, c='k', lw=0.5)
return ax | f5f6eb29597abb34b4e0c634112370824cedf907 | 4,794 |
def profitsharing_order(self, transaction_id, out_order_no, receivers, unfreeze_unsplit,
appid=None, sub_appid=None, sub_mchid=None):
"""请求分账
:param transaction_id: 微信支付订单号,示例值:'4208450740201411110007820472'
:param out_order_no: 商户分账单号,只能是数字、大小写字母_-|*@,示例值:'P20150806125346'
:param receivers: 分账接收方列表,最多可有50个分账接收方,示例值:[{'type':'MERCHANT_ID', 'account':'86693852', 'amount':888, 'description':'分给商户A'}]
:param unfreeze_unsplit: 是否解冻剩余未分资金,示例值:True, False
:param appid: 应用ID,可不填,默认传入初始化时的appid,示例值:'wx1234567890abcdef'
:param sub_appid: (服务商模式)子商户应用ID,示例值:'wxd678efh567hg6999'
:param sub_mchid: (服务商模式)子商户的商户号,由微信支付生成并下发。示例值:'1900000109'
"""
params = {}
if transaction_id:
params.update({'transaction_id': transaction_id})
else:
raise Exception('transaction_id is not assigned')
if out_order_no:
params.update({'out_order_no': out_order_no})
else:
raise Exception('out_order_no is not assigned')
if isinstance(unfreeze_unsplit, bool):
params.update({'unfreeze_unsplit': unfreeze_unsplit})
else:
raise Exception('unfreeze_unsplit is not assigned')
if isinstance(receivers, list):
params.update({'receivers': receivers})
else:
raise Exception('receivers is not assigned')
for receiver in params.get('receivers'):
if receiver.get('name'):
receiver['name'] = self._core.encrypt(receiver.get('name'))
params.update({'appid': appid or self._appid})
if self._partner_mode:
if sub_appid:
params.update({'sub_appid': sub_appid})
if sub_mchid:
params.update({'sub_mchid': sub_mchid})
else:
raise Exception('sub_mchid is not assigned.')
path = '/v3/profitsharing/orders'
return self._core.request(path, method=RequestType.POST, data=params) | 8885a953de7e74a562fc57ac242fafbf79ada7a8 | 4,795 |
def merge_time_batch_dims(x: Tensor) -> Tensor:
"""
Pack the time dimension into the batch dimension.
Args:
x: input tensor
Returns:
output tensor
"""
if xnmt.backend_dynet:
((hidden_dim, seq_len), batch_size_) = x.dim()
return dy.reshape(x, (hidden_dim,), batch_size=batch_size_ * seq_len)
else:
batch_size_, seq_len, hidden_dim = x.size()
return x.view((batch_size_ * seq_len, hidden_dim)) | 73b09ca714870f18523c07b82e544b208fcde680 | 4,796 |
def get_log_likelihood(P, v, subs_counts):
"""
The stationary distribution of P is empirically derived.
It is proportional to the codon counts by construction.
@param P: a transition matrix using codon counts and free parameters
@param v: stationary distribution proportional to observed codon counts
@param subs_counts: observed substitution counts
"""
A = subs_counts
B = algopy.log(P.T * v)
log_likelihoods = slow_part(A, B)
return algopy.sum(log_likelihoods) | b7ed78e1e111a74f08b36f5ac41618318539d1c7 | 4,797 |
def union(l1, l2):
""" return the union of two lists """
return list(set(l1) | set(l2)) | 573e3b0e475b7b33209c4a477ce9cab53ec849d4 | 4,798 |
def actual_kwargs():
"""
Decorator that provides the wrapped function with an attribute 'actual_kwargs' containing just those keyword
arguments actually passed in to the function.
Based on code from http://stackoverflow.com/a/1409284/127480
"""
def decorator(function):
def inner(*args, **kwargs):
inner.actual_kwargs = kwargs
inner.actual_kwargs_except = \
                lambda keys: {key: value for key, value in kwargs.items() if key not in keys}
return function(*args, **kwargs)
return inner
return decorator | 37477edecb9442f759f4a234ea9037f7568f9770 | 4,799 |
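A usage sketch for the `actual_kwargs` decorator with a hypothetical function: the wrapped function can inspect which keyword arguments the caller actually supplied, as opposed to defaults filled in by Python.
@actual_kwargs()
def connect(host, port=5432, timeout=None):
    return connect.actual_kwargs

print(connect("db.local", timeout=10))  # {'timeout': 10}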
def k1(f, t, y, paso):
"""
    f : function to integrate. Returns an np.ndarray
    t : time at which to evaluate the function f
    y : value at which to evaluate the function f
    paso : step size to use.
"""
output = paso * f(t, y)
return output | 55a358a5d099111bd399bf1a0e0211d6616ab3d0 | 4,800 |
import json
def script_from_json_string(json_string, base_dir=None):
"""Returns a Script instance parsed from the given string containing JSON.
"""
raw_json = json.loads(json_string)
if not raw_json:
raw_json = []
return script_from_data(raw_json, base_dir) | 87845df4f365f05753f48e3988bb6f57e9e327ef | 4,801 |
def check_api_key(key: str, hashed: str) -> bool:
"""
Check a API key string against a hashed one from the user database.
:param key: the API key to check
:type key: str
:param hashed: the hashed key to check against
:type hashed: str
"""
return hash_api_key(key) == hashed | 86784ac5b6e79e009423e32a68fbac814e18fd40 | 4,803 |
def travel_chart(user_list, guild):
"""
Builds the chart to display travel data for Animal Crossing
:param user_list:
:param guild:
:return:
"""
out_table = []
fruit_lookup = {'apple': '🍎', 'pear': '🍐', 'cherry': '🍒', 'peach': '🍑', 'orange': '🍊'}
for user in user_list:
discord_user = guild.get_member(user.discord_id)
if discord_user:
discord_name = clean_string(discord_user.display_name, max_length=DISPLAY_CHAR_LIMIT)
else:
discord_name = user.discord_id
island_open = '✈️' if user.island_open else '⛔'
fruit = fruit_lookup[user.fruit] if user.fruit != '' else ''
dodo_code = clean_string(user.dodo_code, max_length=8)
out_table.append([discord_name, dodo_code, island_open + fruit])
return tabulate(out_table, headers=['User', 'Dodo', '🏝️ '], disable_numparse=True) | f866cb792b4382f66f34357e5b39254c4f2f1113 | 4,804 |
def evaluate_hyperparameters(parameterization):
""" Train and evaluate the network to find the best parameters
Args:
parameterization: The hyperparameters that should be evaluated
Returns:
float: classification accuracy """
net = Net()
net, _, _ = train_bayesian_optimization(net=net, input_picture=DATA['x_train'],\
label_picture=DATA['y_train'], parameters=parameterization,)
return eval_bayesian_optimization(net=net, input_picture=DATA['x_valid'],\
label_picture=DATA['y_valid'],) | 28811908e8015cbc95c35368bafd47428b5c31b3 | 4,805 |
from bs4 import BeautifulSoup
def get_post_type(h_entry, custom_properties=[]):
"""
Return the type of a h-entry per the Post Type Discovery algorithm.
:param h_entry: The h-entry whose type to retrieve.
:type h_entry: dict
:param custom_properties: The optional custom properties to use for the Post Type Discovery algorithm.
:type custom_properties: list[tuple[str, str]]
:return: The type of the h-entry.
:rtype: str
"""
post = h_entry.get("properties")
if post is None:
return "unknown"
values_to_check = [
("rsvp", "rsvp"),
("in-reply-to", "reply"),
("repost-of", "repost"),
("like-of", "like"),
("video", "video"),
("photo", "photo"),
("summary", "summary"),
]
for prop in custom_properties:
if len(prop) == 2 and type(prop) == tuple:
values_to_check.append(prop)
else:
raise Exception("custom_properties must be a list of tuples")
for item in values_to_check:
if post.get(item[0]):
return item[1]
post_type = "note"
if post.get("name") is None or post.get("name")[0] == "":
return post_type
title = post.get("name")[0].strip().replace("\n", " ").replace("\r", " ")
content = post.get("content")
if content and content[0].get("text") and content[0].get("text")[0] != "":
content = BeautifulSoup(content[0].get("text"), "lxml").get_text()
if content and content[0].get("html") and content[0].get("html")[0] != "":
content = BeautifulSoup(content[0].get("html"), "lxml").get_text()
if not content.startswith(title):
return "article"
return "note" | 7d6d8e7bb011a78764985d834d259cb794d00cb9 | 4,806 |
def get_start_end(sequence, skiplist=['-','?']):
"""Return position of first and last character which is not in skiplist.
    Skiplist defaults to ['-','?']."""
length=len(sequence)
if length==0:
return None,None
end=length-1
while end>=0 and (sequence[end] in skiplist):
end-=1
start=0
while start<length and (sequence[start] in skiplist):
start+=1
if start==length and end==-1: # empty sequence
return -1,-1
else:
return start,end | b67e0355516f5aa5d7f7fad380d262cf0509bcdb | 4,807 |
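Usage sketch for `get_start_end`: leading and trailing gap ('-') or missing ('?') characters are skipped, and an all-gap sequence returns (-1, -1).
print(get_start_end("--AC?GT--"))  # (2, 6)
print(get_start_end("----"))       # (-1, -1)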
def view_event(request, eventid):
"""
View an Event.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param eventid: The ObjectId of the event to get details for.
:type eventid: str
:returns: :class:`django.http.HttpResponse`
"""
analyst = request.user.username
template = 'event_detail.html'
(new_template, args) = get_event_details(eventid, analyst)
if new_template:
template = new_template
return render_to_response(template,
args,
RequestContext(request)) | 3b0abfaf2579ef660935d99638d0f44655f5676e | 4,808 |
def archiveOpen(self, file, path):
"""
This gets added to the File model to open a path within an archive file.
:param file: the file document.
:param path: the path within the archive file.
:returns: a file-like object that can be used as a context or handle.
"""
return ArchiveFileHandle(self, file, path) | e320299a96785d97d67fd91124fa58862c238213 | 4,810 |
def get_masked_bin(args, key: int) -> str:
"""Given an input, output, and mask type: read the bytes, identify the factory, mask the bytes, write them to disk."""
if args.bin == None or args.mask == None:
logger.bad("Please specify -b AND -m (bin file and mask)")
return None
# get the bytes of the input bin
blob: bytes = helpers.get_bytes_from_file(args.bin)
# if that isnt possible, return.
if blob == None:
return None
logger.info(f"Loaded {args.bin} ({len(blob)} bytes)")
# get the correct factory
factory = get_mask_factory(args.mask)
# if that fails, return.
if factory == None:
return None
# if the factory is obtained, grab the class for the mask
mask = factory.get_mask_type()
logger.info(f"Masking shellcode with: {factory.name}")
# XOR
if (key != 0):
# python 3 should ~~~ theoretically ~~~ handle a list of integers by auto converting to bytes blob
blob: bytes = bytes([x ^ key for x in blob])
# give the blob to the class and perform whatever transformations... This should then return a multiline string containing the transformed data
return mask.mask(blob, args.payload_preview) | d97806f984a6cad9b42d92bfcf050c1e032c5537 | 4,811 |
def count_entries(df, col_name = 'lang'):
"""Return a dictionary with counts of
occurrences as value for each key."""
# Initialize an empty dictionary: cols_count
cols_count = {}
# Extract column from DataFrame: col
col = df[col_name]
# Iterate over the column in DataFrame
for entry in col:
# If entry is in cols_count, add 1
if entry in cols_count.keys():
cols_count[entry] += 1
# Else add the entry to cols_count, set the value to 1
else:
cols_count[entry] = 1
# Return the cols_count dictionary
return cols_count | f933b77c8ff1ae123c887813ca559b410a104290 | 4,812 |
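Usage sketch for `count_entries` with a tiny hypothetical DataFrame of tweet languages:
import pandas as pd

df = pd.DataFrame({"lang": ["en", "et", "en", "en"]})
print(count_entries(df))  # {'en': 3, 'et': 1}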
def workerfunc(prob, *args, **kwargs):
""" Helper function for wrapping class methods to allow for use
of the multiprocessing package """
return prob.run_simulation(*args, **kwargs) | 620799615b60784e754385fac31e5a7f1db37ed3 | 4,813 |
from unittest.mock import patch
async def client(hass, hass_ws_client):
"""Fixture that can interact with the config manager API."""
with patch.object(config, "SECTIONS", ["core"]):
assert await async_setup_component(hass, "config", {})
return await hass_ws_client(hass) | c42af4334912c05d2ea3413cb2af2f24f9f1cecf | 4,814 |
def test_curve_plot(curve):
"""
Tests mpl image of curve.
"""
fig = curve.plot().get_figure()
return fig | 033ee4ce5f5fa14c60914c34d54af4c39f6f84b3 | 4,815 |
import time
import pickle
import asyncio
async def _app_parser_stats():
"""Retrieve / update cached parser stat information.
Fields:
id: identifier of parser
size_doc: approximate size (bytes) per document or null
"""
parser_cfg = faw_analysis_set_util.lookup_all_parsers(
app_mongodb_conn.delegate, app_config)
parsers = []
promises = []
for k, v in parser_cfg.items():
if v.get('disabled'):
continue
parser = {
'id': k,
'size_doc': None,
'pipeline': True if v.get('pipeline') else False,
}
parsers.append(parser)
r = _app_parser_sizetable.get(k)
if r is not None and r[1] > time.monotonic():
parser['size_doc'] = r[0]
else:
async def stat_pop(k, parser):
ndocs = 5
# Only include successful runs
docs = await app_mongodb_conn['invocationsparsed'].find({
'parser': k,
'exitcode': 0}).limit(ndocs).to_list(None)
size = None
if len(docs) > 1:
# Size is actually a differential. Indicates information
# added per additional document due to unique features.
size = 0
fts_size = set([dr['k'] for dr in docs[0]['result']])
for d in docs[1:]:
fts_new = set([dr['k'] for dr in d['result']])
fts_new.difference_update(fts_size)
size += len(pickle.dumps(fts_new))
fts_size.update(fts_new)
size /= len(docs) - 1
if len(docs) == ndocs:
# Keep result for a long time
r = [size, time.monotonic() + 600]
else:
# Let it load, don't hammer the DB
r = [size, time.monotonic() + 30]
_app_parser_sizetable[k] = r
parser['size_doc'] = r[0]
promises.append(asyncio.create_task(stat_pop(k, parser)))
if promises:
await asyncio.wait(promises)
return parsers | 06c363eee075a045e5ea16947253d4fc11e0cd6d | 4,816 |
def update_wishlists(wishlist_id):
"""
Update a Wishlist
    This endpoint will update a Wishlist based on the body that is posted
"""
app.logger.info('Request to Update a wishlist with id [%s]', wishlist_id)
check_content_type('application/json')
wishlist = Wishlist.find(wishlist_id)
if not wishlist:
raise NotFound("Wishlist with id '{}' was not found.".format(wishlist_id))
data = request.get_json()
app.logger.info(data)
wishlist.deserialize(data)
wishlist.id = wishlist_id
wishlist.save()
return make_response(jsonify(wishlist.serialize()), status.HTTP_200_OK) | a7f19fa93f733c8419f3caeee2f0c7471282b05b | 4,817 |
def simplifiedview(av_data: dict, filehash: str) -> str:
"""Builds and returns a simplified string containing basic information about the analysis"""
neg_detections = 0
pos_detections = 0
error_detections = 0
for engine in av_data:
if av_data[engine]['category'] == 'malicious' or av_data[engine]['category'] == 'suspicious':
neg_detections += 1
elif av_data[engine]['category'] == 'undetected':
pos_detections += 1
elif av_data[engine]['category'] == 'timeout' or av_data[engine]['category'] == 'type-unsupported' \
or av_data[engine]['category'] == 'failure':
error_detections += 1
vt_url = f'https://www.virustotal.com/gui/file/{filehash}'
response = f"__VirusTotal Analysis Summary__:\n\nHash: `{filehash}`\n\nLink: [Click Here]({vt_url})\n\n❌" \
f" **Negative: {neg_detections}**\n\n✅ Positive: {pos_detections}\n\n⚠ " \
f"Error/Unsupported File: {error_detections}"
return response | c6aecf6c12794453dd8809d53f20f6152ac6d5a3 | 4,818 |
def GetManualInsn(ea):
"""
Get manual representation of instruction
@param ea: linear address
@note: This function returns value set by SetManualInsn earlier.
"""
return idaapi.get_manual_insn(ea) | d3a292d626ced87d4c3f08171d485aada87cad1d | 4,820 |
from datetime import datetime
import numpy as np
import pandas as pd
def feature_time(data: pd.DataFrame) -> pd.DataFrame:
"""
Time Feature Engineering.
"""
# print(data)
# print(data.info())
day = 24*60*60
year = (365.2425)*day
time_stp = data['time'].apply(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:00") if isinstance(x, str) else x
).map(datetime.timestamp)
data['day_sin'] = np.sin(time_stp * (2*np.pi / day))
data['day_cos'] = np.cos(time_stp * (2*np.pi / day))
data['year_sin'] = np.sin(time_stp * (2*np.pi / year))
data['year_cos'] = np.cos(time_stp * (2*np.pi / year))
return data | fd9322837032204e920a438c7a38ebdd2060b060 | 4,821 |
from typing import Tuple
from typing import List
def _transform(mock_file) -> Tuple[List[Page], SaneJson]:
""" Prepare the data as sections before calling report """
transformer = Transform(get_mock(mock_file, ret_dict=False))
sane_json = transformer.get_sane_json()
pages = transformer.get_pages()
return pages, sane_json | 01c090f3af95024752b4adb919659ff7c5bc0d0a | 4,822 |
from typing import List
from typing import Any
from typing import Optional
def make_table(rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False) -> str:
"""
:param rows: 2D array containing object that can be converted to string using `str(obj)`.
:param labels: Array containing the column labels, the length must equal that of rows.
:param centered: If the items should be aligned to the center, else they are left aligned.
:return: A table representing the rows passed in.
"""
# Transpose into columns
columns = list(transpose(labels, *rows) if labels else transpose(*rows))
# Padding
for column in columns:
# Find the required column width
column_width = max(map(len, map(str, column)))
# Add and record padding
for i, item in enumerate(column):
column[i] = f' {str(item):^{column_width}} ' if centered else f' {str(item):<{column_width}} '
# Border Widths
horizontal_lines = tuple('─' * len(column[0]) for column in columns)
# Create a list of rows with the row separators
rows = [row_with_separators(('│', '│', '│'), row) for row in transpose(*columns)]
# Create a separator between the labels and the values if needed
if labels:
label_border_bottom = row_with_separators(('├', '┼', '┤'), horizontal_lines)
rows.insert(1, label_border_bottom)
# Create the top and bottom border of the table
top_border = row_with_separators(('┌', '┬', '┐'), horizontal_lines)
rows.insert(0, top_border)
bottom_border = row_with_separators(('└', '┴', '┘'), horizontal_lines)
rows.append(bottom_border)
# Join all the components
return '\n'.join(rows) | cf175dbf9dd40c7e56b0a449b6bcc4f797f36b20 | 4,823 |
import numpy as np
def coef_determ(y_sim, y_obs):
"""
calculate the coefficient of determination
:param y_sim: series of simulated values
:param y_obs: series of observed values
:return:
"""
assert y_sim.ndim == 1 and y_obs.ndim == 1 and len(y_sim) == len(y_obs)
r = np.corrcoef(y_sim, y_obs)
r2 = r[0, 1] ** 2
return r2 | ce06c6fffa79d165cf59e98f634725856e44938e | 4,825 |
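Usage sketch for `coef_determ`: a perfect linear relationship between simulated and observed values gives an r^2 of 1.0.
import numpy as np

y_obs = np.array([1.0, 2.0, 3.0, 4.0])
y_sim = 2.0 * y_obs + 1.0
print(coef_determ(y_sim, y_obs))  # 1.0 (up to floating-point rounding)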
async def generate_latest_metrics(client):
"""Generate the latest metrics and transform the body."""
resp = await client.get(prometheus.API_ENDPOINT)
assert resp.status == HTTPStatus.OK
assert resp.headers["content-type"] == CONTENT_TYPE_TEXT_PLAIN
body = await resp.text()
body = body.split("\n")
assert len(body) > 3
return body | d86736d8395158f66dc7592eae1d67d3bf06db50 | 4,826 |
def simulate(population: int, n: int, timer: int) -> int:
"""
Recursively simulate population growth of the fish.
Args:
population (int): Starting population
n (int): Number of days to simulate.
timer (int): The reset timer of the fish
initialised at 6 or 8 depending on whether
it's newborn, and decremented on each round.
Returns:
int: The population of fish after `n` days
"""
if n == 0:
# It's the start
return population
if timer == 0:
# A fish's timer has reached 0
# create required new fish
newborns = simulate(population, n - 1, NEW_FISH_TIMER)
current = simulate(population, n - 1, TIMER_START)
return current + newborns
return simulate(population, n - 1, timer - 1) | e69ce89a586b72cdbdcbc197c234c058d6d959b6 | 4,827 |
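A usage sketch for `simulate`, with assumed values for the module constants it references (8 days for a newborn fish, 6 after a reset, as in the Advent of Code lanternfish puzzle this resembles):
NEW_FISH_TIMER = 8  # assumed constant
TIMER_START = 6     # assumed constant

# One fish with timer 3, simulated for 5 days, spawns once -> population 2.
print(simulate(1, 5, 3))  # 2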
def normalize_valign(valign, err):
"""
    Split valign into (valign_type, valign_amount). Raise exception err
    if valign doesn't match a valid alignment.
"""
if valign in (TOP, MIDDLE, BOTTOM):
return (valign, None)
elif (isinstance(valign, tuple) and len(valign) == 2 and
valign[0] == RELATIVE):
return valign
raise err("valign value %r is not one of 'top', 'middle', "
"'bottom', ('relative', percentage 0=left 100=right)"
% (valign,)) | e16e3c5cfb0425e3b04e64a6df01dd35407e2fbe | 4,828 |
def svn_auth_open(*args):
"""
svn_auth_open(svn_auth_baton_t auth_baton, apr_array_header_t providers,
apr_pool_t pool)
"""
return apply(_core.svn_auth_open, args) | 1083639e25b612ad47df86b39daedc8ae3dc74e2 | 4,829 |
def quote_key(key):
"""特殊字符'/'转义处理
"""
return key.replace('/', '%2F') | ce1978ca23ed3c00489c134a35ae8d04370b49dd | 4,830 |
def middle(word):
"""Returns all but the first and last characters of a string."""
return word[1:-1] | 257a159c46633d3c3987437cb3395ea2be7fad70 | 4,831 |
def surprise_communities(g_original, initial_membership=None, weights=None, node_sizes=None):
"""
Surprise_communities is a model where the quality function to optimize is:
.. math:: Q = m D(q \\parallel \\langle q \\rangle)
where :math:`m` is the number of edges, :math:`q = \\frac{\\sum_c m_c}{m}`, is the fraction of internal edges, :math:`\\langle q \\rangle = \\frac{\\sum_c \\binom{n_c}{2}}{\\binom{n}{2}}` is the expected fraction of internal edges, and finally
:math:`D(x \\parallel y) = x \\ln \\frac{x}{y} + (1 - x) \\ln \\frac{1 - x}{1 - y}` is the binary Kullback-Leibler divergence.
    For directed graphs we can multiply the binomials by 2, and this leaves :math:`\\langle q \\rangle` unchanged, so that we can simply use the same
    formulation. For weighted graphs we can simply count the total internal weight instead of the total number of edges for :math:`q`, while :math:`\\langle q \\rangle` remains unchanged.
    :param g_original: a networkx/igraph object
    :param initial_membership: list of int Initial membership for the partition. If :obj:`None` then defaults to a singleton partition. Default None
    :param weights: list of double, or edge attribute Weights of edges. Can be either an iterable or an edge attribute. Default None
    :param node_sizes: list of int, or vertex attribute Sizes of nodes are necessary to know the size of communities in aggregate graphs. Usually this is set to 1 for all nodes, but in specific cases this could be changed. Default None
:return: NodeClustering object
:Example:
>>> from cdlib import algorithms
>>> import networkx as nx
>>> G = nx.karate_club_graph()
>>> coms = algorithms.surprise_communities(G)
:References:
Traag, V. A., Aldecoa, R., & Delvenne, J.-C. (2015). `Detecting communities using asymptotical surprise. <https://journals.aps.org/pre/abstract/10.1103/PhysRevE.92.022816/>`_ Physical Review E, 92(2), 022816. 10.1103/PhysRevE.92.022816
.. note:: Reference implementation: https://github.com/vtraag/leidenalg
"""
if ig is None:
raise ModuleNotFoundError("Optional dependency not satisfied: install igraph to use the selected feature.")
g = convert_graph_formats(g_original, ig.Graph)
part = leidenalg.find_partition(g, leidenalg.SurpriseVertexPartition, initial_membership=initial_membership,
weights=weights, node_sizes=node_sizes)
coms = [g.vs[x]['name'] for x in part]
return NodeClustering(coms, g_original, "Surprise", method_parameters={"initial_membership": initial_membership,
"weights": weights, "node_sizes": node_sizes}) | 7efba73c4948f4f6735f815e32b8700a08fc2d1e | 4,832 |
def get_azpl(cdec, cinc, gdec, ginc):
"""
gets azimuth and pl from specimen dec inc (cdec,cinc) and gdec,ginc (geographic) coordinates
"""
TOL = 1e-4
Xp = dir2cart([gdec, ginc, 1.])
X = dir2cart([cdec, cinc, 1.])
# find plunge first
az, pl, zdif, ang = 0., -90., 1., 360.
while zdif > TOL and pl < 180.:
znew = X[0] * np.sin(np.radians(pl)) + X[2] * np.cos(np.radians(pl))
zdif = abs(Xp[2] - znew)
pl += .01
while ang > 0.1 and az < 360.:
d, i = dogeo(cdec, cinc, az, pl)
ang = angle([gdec, ginc], [d, i])
az += .01
return az - .01, pl - .01 | 19b6ec0179223bc453893ffd05fd555f4e6aea76 | 4,835 |
def read_embroidery(reader, f, settings=None, pattern=None):
"""Reads fileobject or filename with reader."""
if reader == None:
return None
if pattern == None:
pattern = EmbPattern()
if is_str(f):
text_mode = False
try:
text_mode = reader.READ_FILE_IN_TEXT_MODE
except AttributeError:
pass
if text_mode:
try:
with open(f, "r") as stream:
reader.read(stream, pattern, settings)
stream.close()
except IOError:
pass
else:
try:
with open(f, "rb") as stream:
reader.read(stream, pattern, settings)
stream.close()
except IOError:
pass
else:
reader.read(f, pattern, settings)
return pattern | c84407f3f1969f61558dadafef2defda17a0ac0c | 4,836 |
import re
from pathlib import Path
import json
def load_stdlib_public_names(version: str) -> dict[str, frozenset[str]]:
"""Load stdlib public names data from JSON file"""
if not re.fullmatch(r"\d+\.\d+", version):
raise ValueError(f"{version} is not a valid version")
try:
json_file = Path(__file__).with_name("stdlib_public_names") / (
version + ".json"
)
json_text = json_file.read_text(encoding="utf-8")
json_obj = json.loads(json_text)
return {module: frozenset(names) for module, names in json_obj.items()}
except FileNotFoundError:
raise ValueError(
f"there is no data of stdlib public names for Python version {version}"
) from None | 02775d96c8a923fc0380fe6976872a7ed2cf953a | 4,837 |
def mask_inside_range(cube, minimum, maximum):
"""
Mask inside a specific threshold range.
Takes a MINIMUM and a MAXIMUM value for the range, and masks off anything
that's between the two in the cube data.
"""
cube.data = np.ma.masked_inside(cube.data, minimum, maximum)
return cube | b7a1ea1415d6f8e0f6b31372dce88355915bd2e6 | 4,839 |
def s3_client() -> client:
"""
    Returns a boto3 s3 client - configured to point at a specific endpoint url if provided
"""
if AWS_RESOURCES_ENDPOINT:
return client("s3", endpoint_url=AWS_RESOURCES_ENDPOINT)
return client("s3") | 256c2c52bc65f6899b1c800c2b53b2415ebc0aef | 4,840 |
def tokenize_with_new_mask(orig_text, max_length, tokenizer, orig_labels, orig_re_labels, label_map, re_label_map):
"""
tokenize a array of raw text and generate corresponding
attention labels array and attention masks array
"""
pad_token_label_id = -100
simple_tokenize_results = [list(tt) for tt in zip(
*[simple_tokenize(orig_text[i], tokenizer, orig_labels[i], orig_re_labels[i], label_map, re_label_map,
max_length) for i in
range(len(orig_text))])]
bert_tokens, label_ids, re_label_ids = simple_tokenize_results[0], simple_tokenize_results[1], \
simple_tokenize_results[2]
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in bert_tokens]
input_ids = pad_sequences(input_ids, maxlen=max_length, dtype="long", truncating="post", padding="post")
label_ids = pad_sequences(label_ids, maxlen=max_length, dtype="long", truncating="post", padding="post",
value=pad_token_label_id)
re_label_ids = pad_sequences(re_label_ids, maxlen=max_length, dtype="long", truncating="post", padding="post",
value=pad_token_label_id)
attention_masks = []
for seq in input_ids:
seq_mask = [float(i > 0) for i in seq]
attention_masks.append(seq_mask)
attention_masks = np.array(attention_masks)
return input_ids, attention_masks, label_ids, re_label_ids | 56be66cf1679db07a2f98a4fa576df6118294fa3 | 4,841 |
import numpy as np
def RMSE(stf_mat, stf_mat_max):
"""error defined as RMSE"""
size = stf_mat.shape
err = np.power(np.sum(np.power(stf_mat - stf_mat_max, 2.0))/(size[0]*size[1]), 0.5)
return err | b797e07f24f44b1cd3534de24d304d7de818eca8 | 4,843 |
def get_read_only_permission_codename(model: str) -> str:
"""
Create read only permission code name.
:param model: model name
:type model: str
:return: read only permission code name
:rtype: str
"""
return f"{settings.READ_ONLY_ADMIN_PERMISSION_PREFIX}_{model}" | d95e49067df9977aedc7b6420eada77b7206049d | 4,844 |
def hours_to_minutes( hours: str ) -> int:
"""Converts hours to minutes"""
return int(hours)*60 | 861e8724a2fa752c907e7ead245f0cb370e3fe28 | 4,845 |
def sir_model():
"""
this returns a density dependent population process of an SIR model
"""
ddpp = rmf.DDPP()
ddpp.add_transition([-1, 1, 0], lambda x: x[0]+2*x[0]*x[1])
ddpp.add_transition([0, -1, +1], lambda x: x[1])
ddpp.add_transition([1, 0, -1], lambda x: 3*x[2]**3)
return ddpp | b28e92a9cc142573465925e0c1be1bb58f5ad077 | 4,847 |
import re
def read_cmupd(strip_stress=False, apostrophe="'"):
"""Read the CMU-Pronunciation Dictionary
Parameters
----------
strip_stress : bool
Remove stress from pronunciations (default ``False``).
apostrophe : str | bool
Character to replace apostrophe with in keys (e.g., "COULDN'T"; default
is to keep apostrophe; set to ``False`` to split entries with
apostrophes into pre- and post-apostrophy components).
Returns
-------
cmu : dict {str: list of str}
Dictionary mapping words (all caps) to lists of pronunciations.
"""
path = download('http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b', 'cmudict-0.7b.txt')
out = defaultdict(set)
for line in path.open('rb'):
m = re.match(rb"^([\w']+)(?:\(\d\))? ([\w ]+)$", line)
if m:
k, v = m.groups()
out[k.decode()].add(v.decode())
# remove apostrophes from keys
if apostrophe != "'":
keys = [key for key in out if "'" in key]
if apostrophe is False:
for key in keys:
values = out.pop(key)
# hard-coded exceptions
if key in IGNORE:
continue
elif key.count("'") > 1:
continue
elif key in PUNC_WORD_SUB:
out[PUNC_WORD_SUB[key]].update(values)
continue
a_index = key.index("'")
# word-initial or -final apostrophy
if a_index == 0 or a_index == len(key) - 1:
if a_index == 0:
key_a = key[1:]
else:
key_a = key[:-1]
out[key_a].update(values)
continue
# word-medial apostrophy
key_a, key_b = key.split("'")
for value in values:
if key_b in POST_FIXES:
if key.endswith("N'T") and value.endswith("N"):
value_a = value
value_b = None
else:
value_a, value_b = value.rsplit(' ', 1)
assert value_b in POST_FIXES[key_b]
elif key_a in PRE_FIXES:
value_a, value_b = value.split(' ', 1)
assert value_a in PRE_FIXES[key_a]
else:
raise RuntimeError(" %r," % key)
for k, v in ((key_a, value_a), (key_b, value_b)):
if v is not None:
out[k].add(v)
elif isinstance(apostrophe, str):
for key in keys:
out[key.replace("'", apostrophe)].update(out.pop(key))
else:
raise TypeError(f"apostrophe={apostrophe!r}")
# remove stress from pronunciations
if strip_stress:
out = {word: {' '.join(STRIP_STRESS_MAP[p] for p in pronunciation.split())
for pronunciation in pronunciations}
for word, pronunciations in out.items()}
return out | 0cc9ba95eeccf1e49f01a7e77082fd7a6674cd34 | 4,848 |
import numpy
def my_eval(inputstring, seq, xvalues=None, yvalues=None):
"""
Evaluate a string as an expression to make a data set.
This routine attempts to evaluate a string as an expression.
It uses the python "eval" function. To guard against bad inputs,
only numpy, math and builtin functions can be used in the
transformation.
Parameters
----------
inputstring a string that defines the new data set
seq : a numpy vector of floating point or integer values,
nominally a sequence of values when the data creation
option is used, which could be another numpy array in
the transformation case
xvalues : optionally, the x data values in a set, a numpy
floating point vector
yvalues : optionally, the y data values in a set, a numpy
floating point vector
Returns
-------
values : a new numpy vector of floating point values calculated
from the input numpy arrays and the string defining the
function; or None if there is an issue
Note: the three numpy arrays "seq", "xvalues", and "yvalues" need
to be one dimensional and of the same lengths
The python "eval" command is used here. To avoid issues with this
being used to run arbitrary commands, only the __builtin__, math,
and numpy packages are available to the eval command upon execution.
The assumption is that math and numpy have been imported in the main
code (and that numpy is not abbreviated as "np" at import).
"""
sh1 = seq.shape
try:
sh2 = xvalues.shape
except AttributeError:
sh2 = seq.shape
try:
sh3 = yvalues.shape
except AttributeError:
sh3 = seq.shape
if (sh1 != sh2) or (sh2 != sh3) or (len(sh1) > 1):
return None
# check the input string for command elements that could cause issues
if ('import' in inputstring) or ('os.' in inputstring) or \
('eval' in inputstring) or ('exec' in inputstring) or \
('shutil' in inputstring):
return None
str1 = inputstring.replace('np.', 'numpy.')
try:
# get the global environment, extract the three items allowed here
global1 = globals()
global2 = {}
global2['__builtins__'] = global1['__builtins__']
global2['math'] = global1['math']
global2['numpy'] = global1['numpy']
# define local variables, s, x, and y; only these will be
# available in eval if they are actually defined in the call....
local1 = {}
s = numpy.copy(seq)
local1['seq'] = s
if xvalues is not None:
x = numpy.copy(xvalues)
local1['x'] = x
if yvalues is not None:
y = numpy.copy(yvalues)
local1['y'] = y
values = eval(str1, global2, local1)
return values
except Exception:
return None | 95993e5608e2cd5c8ee0bdedc9fce5f7e6310fc8 | 4,850 |
def readlines(filepath):
"""
read lines from a textfile
:param filepath:
:return: list[line]
"""
with open(filepath, 'rt') as f:
lines = f.readlines()
lines = map(str.strip, lines)
lines = [l for l in lines if l]
return lines | 1aa16c944947be026223b5976000ac38556983c3 | 4,851 |
def n_tuple(n):
"""Factory for n-tuples."""
def custom_tuple(data):
if len(data) != n:
raise TypeError(
f'{n}-tuple requires exactly {n} items '
f'({len(data)} received).'
)
return tuple(data)
return custom_tuple | 0c5d8f0f277e07f73c4909895c8215427fb5e705 | 4,855 |
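Usage sketch for `n_tuple`: build a 3-tuple factory and use it to validate input length.
point3 = n_tuple(3)
print(point3([1, 2, 3]))  # (1, 2, 3)
# point3([1, 2]) raises TypeError: 3-tuple requires exactly 3 items (2 received).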
import numpy as np
from numpy.linalg import norm  # Euclidean norm used in the "pca" branch
def novel_normalization(data, base):
"""Initial data preparation of CLASSIX."""
if base == "norm-mean":
# self._mu, self._std = data.mean(axis=0), data.std()
_mu = data.mean(axis=0)
ndata = data - _mu
_scl = ndata.std()
ndata = ndata / _scl
elif base == "pca":
_mu = data.mean(axis=0)
ndata = data - _mu # mean center
rds = norm(ndata, axis=1) # distance of each data point from 0
_scl = np.median(rds) # 50% of data points are within that radius
ndata = ndata / _scl # now 50% of data are in unit ball
elif base == "norm-orthant":
# self._mu, self._std = data.min(axis=0), data.std()
_mu = data.min(axis=0)
ndata = data - _mu
_scl = ndata.std()
ndata = ndata / _scl
else:
# self._mu, self._std = data.mean(axis=0), data.std(axis=0) # z-score
_mu, _scl = 0, 1 # no normalization
ndata = (data - _mu) / _scl
return ndata, (_mu, _scl) | 2ab0644687ab3b2cc0daa00f72dcad2bce3c6f73 | 4,856 |
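# Example call for novel_normalization above (illustrative, added here); the
# "pca" base centres the data and scales it so that the median row norm is 1.
import numpy as np
from numpy.linalg import norm

rng = np.random.default_rng(0)
data = rng.normal(size=(100, 2))
ndata, (mu, scl) = novel_normalization(data, base="pca")
print(mu.shape, scl)                    # per-feature mean and the median radius
print(np.median(norm(ndata, axis=1)))   # ~1.0: half of the points lie in the unit ball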
def calc_dof(model):
"""
Calculate degrees of freedom.
Parameters
----------
model : Model
Model.
Returns
-------
int
DoF.
"""
p = len(model.vars['observed'])
return p * (p + 1) // 2 - len(model.param_vals) | ccff8f5a7624b75141400747ec7444ec55eb492d | 4,857 |
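# Worked instance of the degrees-of-freedom formula above: with p = 5 observed
# variables there are p * (p + 1) / 2 = 15 distinct sample moments, so a model
# with 11 free parameters has 15 - 11 = 4 degrees of freedom.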
import xlrd
def parse_event_export_xls(
file: StrOrBytesPath, parsing_elements: list[str] = _ALL_PARSING_ELEMENTS
) -> ParsedEventResultXlsFile:
"""Parse a Hytek MeetManager .hy3 file.
Args:
file (StrOrBytesPath): A path to the file to parse.
parsing_elements (Sequence[str]): Elements to extract from the file.
Valid elements: 'name', 'age', 'team', 'seed time',
'prelim time', 'finals time'
Returns:
        ParsedEventResultXlsFile: The parsed file.
"""
book = xlrd.open_workbook(file)
sheet = book.sheet_by_index(0)
# Get event name
event_name = str(sheet.cell_value(1, 0))
    # Extract the header row
    # This should be the one with "Name" as its first element
    header_row = None
    header_row_index = None
    for rx in range(sheet.nrows):
row = sheet.row(rx)
if str(row[0].value).lower() == "name":
header_row = [str(e.value).lower() for e in row]
header_row_index = rx
break
# Make sure we have a header row
if header_row is None:
raise ExportXlsParseError("Could not find header row.")
first_row_index = get_first_row_index(sheet, header_row_index)
# Only parse times in the header row
if "seed time" in parsing_elements and "seed time" not in header_row:
parsing_elements.pop(parsing_elements.index("seed time"))
if "prelim time" in parsing_elements and "prelim time" not in header_row:
parsing_elements.pop(parsing_elements.index("prelim time"))
if "finals time" in parsing_elements and "finals time" not in header_row:
parsing_elements.pop(parsing_elements.index("finals time"))
# Determine offsets to extract from
offsets = get_offsets_from_header(
sheet, header_row, first_row_index, parsing_elements
)
# Start parsing rows
results = []
rx = first_row_index
while rx < sheet.nrows and sheet.cell_value(rx, 0).strip() != "":
row = sheet.row(rx)
place = safe_cast(int, row[0].value, -1)
if place == -1 and row[0].value != "---":
rx += 1
continue
name = extract_plain_value("name", row, offsets)
age = extract_plain_value("age", row, offsets, cast_to=int)
team = extract_plain_value("team", row, offsets)
seed_time, seed_time_extra, seed_time_qualifications = extract_time_value(
"seed time", row, offsets
)
prelim_time, prelim_time_extra, prelim_time_qualifications = extract_time_value(
"prelim time", row, offsets
)
finals_time, finals_time_extra, finals_time_qualifications = extract_time_value(
"finals time", row, offsets
)
results.append(
EventResultEntry(
place=place,
swimmer_name=name,
swimmer_age=age,
swimmer_team=team,
seed_time=seed_time,
seed_time_extra=seed_time_extra,
seed_time_qualifications=seed_time_qualifications,
prelim_time=prelim_time,
prelim_time_extra=prelim_time_extra,
prelim_time_qualifications=prelim_time_qualifications,
finals_time=finals_time,
finals_time_extra=finals_time_extra,
finals_time_qualifications=finals_time_qualifications,
)
)
rx += 1
return ParsedEventResultXlsFile(
event_name=event_name, parsing_elements=tuple(parsing_elements), results=results
) | 7116ffa78d9a4747934fb826cce39035fcf24aa1 | 4,858 |
def create_form(request, *args, **kwargs):
"""
Create a :py:class:`deform.Form` instance for this request.
This request method creates a :py:class:`deform.Form` object which (by
default) will use the renderer configured in the :py:mod:`h.form` module.
"""
env = request.registry[ENVIRONMENT_KEY]
renderer = Jinja2Renderer(env, {
'feature': request.feature,
})
kwargs.setdefault('renderer', renderer)
return deform.Form(*args, **kwargs) | 152c82abe40995f214c6be88d1070abffba1df79 | 4,859 |
import numpy as np
# ri2numpy (an rpy2 conversion helper) is assumed to be imported elsewhere
def from_dtw2dict(alignment):
"""Auxiliar function which transform useful information of the dtw function
applied in R using rpy2 to python formats.
"""
dtw_keys = list(alignment.names)
bool_traceback = 'index1' in dtw_keys and 'index2' in dtw_keys
bool_traceback = bool_traceback and 'stepsTaken' in dtw_keys
## Creating a dict to save all the information in python format
dtw_dict = {}
# Transformation into a dict
dtw_dict['stepPattern'] = ri2numpy(alignment.rx('stepPattern'))
dtw_dict['N'] = alignment.rx('N')[0]
dtw_dict['M'] = alignment.rx('M')[0]
dtw_dict['call'] = alignment.rx('call')
dtw_dict['openEnd'] = alignment.rx('openEnd')[0]
dtw_dict['openBegin'] = alignment.rx('openBegin')[0]
dtw_dict['windowFunction'] = alignment.rx('windowFunction')
dtw_dict['jmin'] = alignment.rx('jmin')[0]
dtw_dict['distance'] = alignment.rx('distance')[0]
dtw_dict['normalizedDistance'] = alignment.rx('normalizedDistance')[0]
if bool_traceback:
aux = np.array(ri2numpy(alignment.rx('index1')).astype(int))
dtw_dict['index1'] = aux
aux = np.array(ri2numpy(alignment.rx('index2')).astype(int))
dtw_dict['index2'] = aux
dtw_dict['stepsTaken'] = ri2numpy(alignment.rx('stepsTaken'))
elif 'localCostMatrix' in dtw_keys:
aux = np.array(ri2numpy(alignment.rx('localCostMatrix')))
dtw_dict['localCostMatrix'] = aux
elif 'reference' in dtw_keys and 'query' in dtw_keys:
dtw_dict['reference'] = alignment.rx('reference')
dtw_dict['query'] = alignment.rx('query')
return dtw_dict | ef2c35ea32084c70f67c6bab462d662fe03c6b89 | 4,861 |
def fix_bad_symbols(text):
"""
    Repair characters garbled by mis-decoded UTF-8 (mojibake)
"""
text = text.replace("è", "è")
text = text.replace("ä", "ä")
text = text.replace("Ã", "Ä")
text = text.replace("Ã", "Ä")
text = text.replace("ö", "ö")
text = text.replace("é", "é")
text = text.replace("Ã¥", "å")
text = text.replace("Ã
", "Å")
text = text.strip()
return text | e128435a9a9d2eb432e68bf9cff9794f9dcd64ba | 4,862 |
def _level2partition(A, j):
"""Return views into A used by the unblocked algorithms"""
# diagonal element d is A[j,j]
# we access [j, j:j+1] to get a view instead of a copy.
rr = A[j, :j] # row
dd = A[j, j:j+1] # scalar on diagonal / \
B = A[j+1:, :j] # Block in corner | r d |
cc = A[j+1:, j] # column \ B c /
return rr, dd, B, cc | 16ba7715cc28c69ad35cdf3ce6b542c14d5aa195 | 4,863 |
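# Small demonstration of the views returned by _level2partition above
# (illustrative only): for a 4x4 array and j = 2 the pieces line up with the
# diagram in the comments.
import numpy as np

A = np.arange(16.0).reshape(4, 4)
rr, dd, B, cc = _level2partition(A, 2)
print(rr)   # row left of the diagonal:      [8. 9.]
print(dd)   # diagonal element (as a view):  [10.]
print(B)    # block in the corner:           [[12. 13.]]
print(cc)   # column below the diagonal:     [14.]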
from typing import Optional
def _null_or_int(val: Optional[str]) -> Optional[int]:
"""Nullify unknown elements and convert ints"""
if not isinstance(val, str) or is_unknown(val):
return None
return int(val) | 6bd8d9ed350109444988077f4024b084a2189f91 | 4,864 |
def stackset_exists(stackset_name, cf_client):
"""Check if a stack exists or not
Args:
stackset_name: The stackset name to check
cf_client: Boto3 CloudFormation client
Returns:
True or False depending on whether the stack exists
Raises:
        Any exceptions raised by .describe_stack_set(), other than the
        stackset not existing.
"""
try:
logger.info(f"Checking if StackSet {stackset_name} exits.")
cf_client.describe_stack_set(StackSetName=stackset_name, CallAs=call_as)
return True
except Exception as e:
if f"{stackset_name} not found" in str(e) or f"{stackset_name} does not exist" in str(e):
logger.info(f"StackSet {stackset_name} does not exist.")
return False
else:
raise e | 78f6e383a6d4b06f164936edcc3f101e523aee34 | 4,865 |
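# Hedged usage sketch for stackset_exists above: the snippet relies on the
# module-level names "logger" and "call_as", so plausible stand-ins are
# supplied here. The StackSet name is made up, and running this for real
# requires AWS credentials and a default region.
import logging
import boto3

logger = logging.getLogger(__name__)
call_as = "SELF"   # or "DELEGATED_ADMIN" when acting as a delegated administrator

cf_client = boto3.client("cloudformation")
if not stackset_exists("example-baseline-stackset", cf_client):
    print("StackSet does not exist yet; create it before adding instances.")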
import numpy as np
def convert_l_hertz_to_bins(L_p_Hz, Fs=22050, N=1024, H=512):
"""Convert filter length parameter from Hertz to frequency bins
Notebook: C8/C8S1_HPS.ipynb
Args:
L_p_Hz (float): Filter length (in Hertz)
Fs (scalar): Sample rate (Default value = 22050)
N (int): Window size (Default value = 1024)
H (int): Hop size (Default value = 512)
Returns:
L_p (int): Filter length (in frequency bins)
"""
L_p = int(np.ceil(L_p_Hz * N / Fs))
return L_p | b7f7d047565dc08021ccbecbd05912ad11e8910b | 4,866 |
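# Example for convert_l_hertz_to_bins above (added here): a 500 Hz filter
# length at the defaults maps to ceil(500 * 1024 / 22050) = 24 bins.
print(convert_l_hertz_to_bins(500.0))                      # 24
print(convert_l_hertz_to_bins(500.0, Fs=44100, N=2048))    # 24 (ratio unchanged)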
import itertools as it
from macrostate import Macrostate
def macrostate_to_dnf(macrostate, simplify = True):
""" Returns a macrostate in disjunctive normal form (i.e. an OR of ANDs).
Note that this may lead to exponential explosion in the number of terms.
However it is necessary when creating Multistrand Macrostates, which can
only be represented in this way. Also, we don't try to simplify much
so the expressions may be inefficient/redundant. Adding simplifications of the
logical expression using (e.g.) De Morgan's laws is a future optimization. """
if macrostate.type != Macrostate.types['conjunction'] and macrostate.type != Macrostate.types['disjunction']:
dnf_macrostates = [Macrostate(type='conjunction', macrostates=[macrostate])]
elif macrostate.type == Macrostate.types['conjunction']:
clauses = [macrostate_to_dnf(m, simplify=False) for m in macrostate.macrostates]
dnf_macrostates = clauses[0].macrostates
for clause in clauses[1:]:
# multiply two dnf clauses
dnf_macrostates = [Macrostate(type='conjunction', macrostates=m1.macrostates+m2.macrostates) for m1,m2 in it.product(dnf_macrostates, clause.macrostates)]
elif macrostate.type == Macrostate.types['disjunction']:
clauses = [macrostate_to_dnf(m, simplify=False) for m in macrostate.macrostates]
dnf_macrostates = []
for clause in clauses:
# add two dnf clauses
dnf_macrostates += clause.macrostates
# The most basic simplification. We just subsitute AND/OR expressions with only one operand
# with just that operand.
if simplify:
for i,m in enumerate(dnf_macrostates):
if len(m.macrostates) == 1: dnf_macrostates[i]=m.macrostates[0]
if simplify and len(dnf_macrostates)==1:
dnf = dnf_macrostates[0]
else:
dnf = Macrostate(type='disjunction', macrostates=dnf_macrostates)
return dnf | b3fa9666f0f79df21744ec08d0ef9a969210f7ae | 4,867 |
def construct_features(all_data):
# type: (pd.DataFrame) -> pd.DataFrame
"""
Create the features for the model
:param all_data: combined processed df
:return: df with features
"""
feature_constructor = FeatureConstructor(all_data)
return feature_constructor.construct_all_features() | 30bf001abdef6e7cdda927d340e640acc902906a | 4,868 |
from typing import Optional, Text
import logging
def restore_ckpt_from_path(ckpt_path: Text, state: Optional[TrainState] = None):
"""Load a checkpoint from a path."""
if not gfile.exists(ckpt_path):
raise ValueError('Could not find checkpoint: {}'.format(ckpt_path))
logging.info('Restoring checkpoint from %s', ckpt_path)
with gfile.GFile(ckpt_path, 'rb') as fp:
if state is None:
# Returns a dict in MsgPack format. This is useful when the loaded
# checkpoint needs to be sliced and diced to extract only relevant
# parameters.
# E.g. The optimizer state may be ignored when loading from a pretrained
# model.
return serialization.msgpack_restore(fp.read())
else:
return serialization.from_bytes(state, fp.read()) | 297beb0a45c33522c172e59c0a2767b7f2e75ad2 | 4,869 |
import logging
def _GetChannelData():
"""Look up the channel data from omahaproxy.appspot.com.
Returns:
A string representing the CSV data describing the Chrome channels. None is
returned if reading from the omahaproxy URL fails.
"""
for unused_i in range(_LOOKUP_RETRIES):
try:
channel_csv = urllib2.urlopen(_OMAHAPROXY_URL)
return channel_csv.read()
except (urllib2.URLError, urllib2.HTTPError):
logging.exception('Exception on reading from the omahaproxy URL.')
return None | 6337dc236b310117c8e4f0ec7365c9d37a85a868 | 4,870 |
def look(direction=Dir.HERE):
"""
Looks in a given direction and returns the object found there.
"""
if direction in Dir:
# Issue the command and let the Obj enumeration find out which object is
# in the reply
        # Don't use formatted strings in order to stay compatible with Python 3.4
reply = _issue_request("?_look_{0}".format(direction.value))
return Obj.from_str(reply)
else:
raise ValueError("look(...) erlaubt nur eine der Dir-Konstanten.") | bddae1d8da57cfb4016b96ae4fee72d37da97395 | 4,871 |
def merge(a, b, path=None):
"""Deprecated.
merges b into a
Moved to siem.utils.merge_dicts.
"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
elif str(a[key]) in str(b[key]):
                # overwrite with the str value: data that was JSON, converted to str
a[key] = b[key]
else:
# conflict and override original value with new one
a[key] = b[key]
else:
a[key] = b[key]
return a | 26b9dc9fc8451dc48b86b3e6fcf5f7870ac0fe7e | 4,874 |
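# Behaviour sketch for merge above (example added here, not in the original):
# nested dicts are merged recursively and conflicting leaves take the value
# from the second argument.
a = {"event": {"id": 1, "tags": ["x"]}, "source": "fw"}
b = {"event": {"id": 1, "severity": "high"}, "source": "firewall"}
print(merge(a, b))
# {'event': {'id': 1, 'tags': ['x'], 'severity': 'high'}, 'source': 'firewall'}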
import json
import requests
def post_attachment(fbid, media_url, file_type,
is_reusable=False, messaging_type="RESPONSE", tag=None):
""" Sends a media attachment to the specified user
:param str fbid: User id to send the audio.
:param str media_url: Url of a hosted media.
:param str file_type: 'image'/'audio'/'video'/'file'.
:param bool is_reusable: Defines the attachment to be resusable, \
the response will have an attachment_id that can be used to \
re-send the attachment without need to upload it again. (You can \
use the post_reusable_attachment method to upload using the id).
:param str messaging_type: Identifies the message type from: RESPONSE,\
UPDATE AND MESSAGE_TAG (Default: RESPONSE, if MESSAGE_TAG, tag param \
is required)
:param str tag: Tag classifying the message, must be one of the \
following `tags <https://developers.facebook.com/docs/messenger-\
platform/send-messages/message-tags#supported_tags>`_
:return: `Response object <http://docs.python-requests.org/en/\
master/api/#requests.Response>`_
:facebook docs: `/contenttypes <https://developers.facebook.\
com/docs/messenger-platform/send-api-reference/contenttypes>`_
"""
url = MESSAGES_URL.format(access_token=PAGE_ACCESS_TOKEN)
payload = dict()
payload['recipient'] = {'id': fbid}
payload['messaging_type'] = messaging_type
if bool(tag) or messaging_type == "MESSAGE_TAG":
payload['tag'] = tag
attachment_payload = dict()
attachment_payload['url'] = media_url
if is_reusable:
attachment_payload['is_reusable'] = is_reusable
attachment = {"type": file_type, "payload": attachment_payload}
payload['message'] = {"attachment": attachment}
data = json.dumps(payload)
status = requests.post(url, headers=HEADER, data=data)
return status | fce03f1962038502bef623e227b7a643c2992c44 | 4,875 |
def create_or_update_dns_record(stack, record_name, record_type, record_value, hosted_zone_name, condition_field=""):
"""Create or Update Route53 Record Resource."""
return stack.stack.add_resource(RecordSetType(
'{0}'.format(record_name.replace('.', '').replace('*', 'wildcard')),
Condition=condition_field,
HostedZoneName='{0}.'.format(hosted_zone_name),
Type=record_type,
TTL="60",
Name='{0}.'.format(record_name),
ResourceRecords=record_value
)) | ba0d30dddde17967480a047fdc47242c1deaf4e6 | 4,877 |
import numpy as np
def med_filt(x, k=201):
"""Apply a length-k median filter to a 1D array x.
Boundaries are extended by repeating endpoints.
"""
if x.ndim > 1:
x = np.squeeze(x)
med = np.median(x)
assert k % 2 == 1, "Median filter length must be odd."
assert x.ndim == 1, "Input must be one-dimensional."
k2 = (k - 1) // 2
y = np.zeros((len(x), k), dtype=x.dtype)
y[:, k2] = x
for i in range(k2):
j = k2 - i
y[j:, i] = x[:-j]
y[:j, i] = x[0]
y[:-j, -(i + 1)] = x[j:]
y[-j:, -(i + 1)] = med
return np.median(y, axis=1) | ea9abfd6fd4243b1d959f7b499cdceccd851e53f | 4,878 |
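# Spike-removal example for med_filt above (illustrative, added here): a short
# window is enough to suppress an isolated outlier in a smooth signal.
import numpy as np

t = np.linspace(0, 2 * np.pi, 500)
signal = np.sin(t)
noisy = signal.copy()
noisy[100] = 10.0                               # inject a single spike
cleaned = med_filt(noisy, k=5)
print(abs(cleaned[100] - signal[100]) < 0.05)   # True: the spike is gone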
def test_plot_grid(od_cup_anno_bboxes, od_cup_path):
""" Test that `plot_grid` works. """
# test callable args
def callable_args():
return od_cup_anno_bboxes, od_cup_path
plot_grid(display_bboxes, callable_args, rows=1)
# test iterable args
od_cup_paths = [od_cup_path, od_cup_path, od_cup_path]
od_cup_annos = [od_cup_anno_bboxes, od_cup_anno_bboxes, od_cup_anno_bboxes]
def iterator_args():
for path, bboxes in zip(od_cup_paths, od_cup_annos):
yield bboxes, path
plot_grid(display_bboxes, iterator_args(), rows=1) | f41b9c54edd120af456195c417c23dbabbf5427b | 4,879 |
import copy
def sync_or_create_user(openid_user):
"""
    Checks the user returned by the authentication service
Requires a user-dict with at least: sub, email, updated_at
"""
def _validate_user(openid_user):
error = False
msg = ''
if not openid_user.get('sub'):
error = True
msg += ' sub'
if not openid_user.get('email'):
error = True
msg += ' email'
if not openid_user.get('updated_at'):
error = True
msg += ' updated_at'
if error:
return {'error': True, 'msg': 'Missing claims:' + msg}
else:
return {'msg': 'valid openid_user'}
def _insert_user(openid_user):
user = copy.deepcopy(openid_user)
user['max_units'] = 10
# user['active_units'] = []
user['roles'] = ['user']
user['user_id'] = openid_user.get('sub')
# Generate additional, normalized key for db on insert or replace
if openid_user.get('username'):
federated_name = openid_user.get('username')
elif openid_user.get('nickname'):
federated_name = openid_user.get('nickname')
elif openid_user.get('name'):
federated_name = openid_user.get('name')
else:
federated_name = openid_user.get('email').split('@')[0]
user['federated_name'] = federated_name
if _put_item('users', user):
# Tells client, that user is first-time user
# '_action'-key does not persist
user['_action'] = 'inserted'
return user
else:
return {'error': True, 'msg': 'Unable to create user'}
def _sync_user(openid_user, db_user):
# NOTE: First update openid_user with existing local values, as they
# will be overwritten on the put_item-request!
user = copy.deepcopy(openid_user)
user['federated_name'] = db_user.get('federated_name')
user['max_units'] = db_user.get('max_units', 10)
# user['active_units'] = db_user.get('active_units', [])
user['roles'] = db_user.get('roles', ['user'])
user['user_id'] = db_user.get('user_id')
if _put_item('users', user, action='update'):
user['_action'] = 'updated'
return user
else:
return {'error': True, 'msg': 'Unable to sync user'}
valid_input = _validate_user(openid_user)
if valid_input.get('error'):
return valid_input
db_user = get_user(openid_user.get('sub'))
# If no existing user
if db_user.get('error'):
if db_user.get('msg') == 'Item does not exist':
return _insert_user(openid_user)
else:
return db_user
elif db_user.get('updated_at') != openid_user.get('updated_at'):
return _sync_user(openid_user, db_user)
else:
db_user['_action'] = 'checked'
return db_user | b8fb942900c9fd8c3720f473fb0b88285f91f3aa | 4,880 |
def related_tags(parser, token):
"""
Retrieves a list of instances of a given model which are tagged with
a given ``Tag`` and stores them in a context variable.
Usage::
{% related_tags [objects] as [varname] %}
The model is specified in ``[appname].[modelname]`` format.
The tag must be an instance of a ``Tag``, not the name of a tag.
    Example::
        {% related_tags entry_list as related_tag_list %}
"""
bits = token.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError(_('%s tag requires exactly 3 arguments') % bits[0])
if bits[2] != 'as':
raise TemplateSyntaxError(_("second argument to %s tag must be 'as'") % bits[0])
#pdb.set_trace()
return RelatedTagsNode(bits[1], bits[3]) | 001b63f40c9f63e814398a3ab0eeb358f694dd97 | 4,882 |
# T (the web2py/S3 translator) and the other controller globals (db, session,
# response, redirect, URL, ...) are injected by the framework at runtime.
def assess():
""" RESTful CRUD controller """
# Load Models
assess_tables()
impact_tables()
tablename = "%s_%s" % (module, resourcename)
table = db[tablename]
# Pre-processor
def prep(r):
if session.s3.mobile and r.method == "create" and r.interactive:
# redirect to mobile-specific form:
redirect(URL(f="assess_short_mobile"))
return True
response.s3.prep = prep
#table.incident_id.comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Incident"),
# T("Optional link to an Incident which this Assessment was triggered by.")))
tabs = [
(T("Edit Details"), None),
(T("Baselines"), "baseline"),
(T("Impacts"), "impact"),
(T("Summary"), "summary"),
#(T("Requested"), "ritem"),
]
rheader = lambda r: assess_rheader(r, tabs)
return s3_rest_controller(rheader=rheader) | 7baf776ed295f6ad35272680c140c4283af7e90f | 4,883 |
import numpy as np
def local_purity(H, y, nn=None, num_samples=10):
"""
:param H: embedding to evaluate
:param y: ground-truth classes
:param nn: number of neighbours to consider, if nn=None evaluate for nn=[1...size of max cluster]
:param num_samples: number of samples in the range (1, size of max cluster)
"""
if nn is None:
max_size_cluster = np.unique(y, return_counts=True)[1].max()
return np.fromiter((__local_purity(H, y, nn)
for nn in np.linspace(0, max_size_cluster, num_samples).astype(np.int32)), np.float32)
else:
return __local_purity(H, y, nn) | afbe924bb8516ba6f9172534f57df58689768547 | 4,884 |
import re
def flatten_sxpr(sxpr: str, threshold: int = -1) -> str:
"""
Returns S-expression ``sxpr`` as a one-liner without unnecessary
whitespace.
The ``threshold`` value is a maximum number of
characters allowed in the flattened expression. If this number
    is exceeded, the unflattened S-expression is returned. A
    negative number means that the S-expression will always be
    flattened. Zero or (any positive integer <= 3) essentially means
that the expression will not be flattened. Example::
>>> flatten_sxpr('(a\\n (b\\n c\\n )\\n)\\n')
'(a (b c))'
    :param sxpr: an S-expression in string form
    :param threshold: maximum allowed string-length of the flattened
        S-expression. A value < 0 means that it may be arbitrarily long.
:return: Either flattened S-expression or, if the threshold has been
overstepped, the original S-expression without leading or
trailing whitespace.
"""
assert RX_IS_SXPR.match(sxpr)
if threshold == 0:
return sxpr
flat = re.sub(r'\s(?=\))', '', re.sub(r'(?<!")\s+', ' ', sxpr).replace('\n', '')).strip()
if len(flat) > threshold > 0:
return sxpr.strip()
return flat | 9109894ca1eeb2055ca48bc8634e6382f9e5557f | 4,885 |
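# flatten_sxpr above references the module-level constant RX_IS_SXPR, which is
# not shown in this snippet; the definition below is a plausible stand-in (an
# assumption, not the original) that simply checks the string starts with "(".
import re

RX_IS_SXPR = re.compile(r'\s*\(')
print(flatten_sxpr('(a\n  (b\n    c\n  )\n)\n'))   # (a (b c))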