content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def add_counter_text(img, box_shape, people_in):
"""
Add person counter text on the image
Args:
img (np.array): Image
box_shape (tuple): (width, height) of the counter box
people_in (int): Number representing the amount of
people inside the space
Returns:
(np.array): Updated image
"""
box_width, box_height = box_shape
img_pil = Image.fromarray(img)
draw = ImageDraw.Draw(img_pil)
# set in/capacity numbers
text_in = "{}".format(people_in)
text_cap = "{}".format(CAPACITY)
# import constants for re-use
TEXT_COUNTER_UP = TEXT_CONF["TEXT_COUNTER_UP"]
TEXT_COUNTER_DOWN = TEXT_CONF["TEXT_COUNTER_DOWN"]
# get shapes for parts of text
w_up, h_up = draw.textsize(TEXT_COUNTER_UP, stroke_width=1, font=FONT_SMALL)
w_down, h_down = draw.textsize(TEXT_COUNTER_DOWN, stroke_width=1, font=FONT_SMALL)
w_in, h_in = draw.textsize(text_in, stroke_width=1, font=FONT_SMALL)
w_cap, h_cap = draw.textsize(text_cap, stroke_width=1, font=FONT_SMALL)
w_slash, h_slash = draw.textsize(" / ", stroke_width=1, font=FONT_SMALL)
# calculate coordinates for each part of the text
textX_up = int((box_width - w_up) / 2)
textY_up = int(0.05 * box_height)
textX_down = int((box_width - w_down) / 2)
textY_down = int(0.1 * box_height + h_up)
textX_in = int((box_width - w_slash) / 2 - w_in)
textY_stat = int(0.2 * box_height + h_down + h_up)
textX_slash = int((box_width - w_slash) / 2)
textX_cap = int((box_width + w_slash) / 2)
# add text on image
draw.text(
(textX_up, textY_up),
TEXT_COUNTER_UP,
font=FONT_SMALL,
fill=WHITE,
stroke_width=1,
)
draw.text(
(textX_down, textY_down),
TEXT_COUNTER_DOWN,
font=FONT_SMALL,
fill=WHITE,
stroke_width=1,
)
draw.text(
(textX_in, textY_stat),
text_in,
font=FONT_SMALL,
fill=(0, 255, 0),
stroke_width=1,
)
draw.text(
(textX_slash, textY_stat), " / ", font=FONT_SMALL, fill=WHITE, stroke_width=1
)
draw.text(
(textX_cap, textY_stat), text_cap, font=FONT_SMALL, fill=WHITE, stroke_width=1
)
img = np.array(img_pil, dtype="uint8")
return img | ca182338a7dc11596b8375d788036d5de50381e2 | 3,837 |
def create_override(override):
"""Takes override arguments as dictionary and applies them to copy of current context"""
override_context = bpy.context.copy()
for key, value in override.items():
override_context[key] = value
return override_context | 25ecb761d8e9225081752fef10d2a6a885ba14d2 | 3,838 |
import json
def load_or_make_json(file, *, default=None):
"""Loads a JSON file, or makes it if it does not exist."""
if default is None:
default = {}
return __load_or_make(file, default, json.load, json.dump) | 3045cf141d26313fe8ffe60d6e74ff7af18ddce2 | 3,840 |
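# The snippet above delegates to a private `__load_or_make` helper that is not included here.
# A minimal sketch of what such a helper could look like (an assumption, not the original code):
def __load_or_make(file, default, load, dump):
    """Load `file` with `load`; if it is missing, write `default` with `dump` and return it."""
    try:
        with open(file, "r", encoding="utf-8") as f:
            return load(f)
    except FileNotFoundError:
        with open(file, "w", encoding="utf-8") as f:
            dump(default, f)
        return default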
import warnings
def plot_predictions(image, df, color=None, thickness=1):
"""Plot a set of boxes on an image
By default this function does not show, but only plots an axis
Label column must be numeric!
Image must be BGR color order!
Args:
image: a numpy array in *BGR* color order! Channel order must be channels last [h, w, 3]
df: a pandas dataframe with xmin, xmax, ymin, ymax and label column
color: color of the bounding box as a tuple of BGR color, e.g. orange annotations is (0, 165, 255)
thickness: thickness of the rectangle border line in px
Returns:
image: a numpy array with drawn annotations
"""
if image.shape[0] == 3:
raise ValueError("Input images must be channels last format [h, w, 3] not channels first [3, h, w], use np.rollaxis(image, 0, 3) to invert")
if image.dtype == "float32":
image = image.astype("uint8")
image = image.copy()
if not color:
if not ptypes.is_numeric_dtype(df.label):
warnings.warn("No color was provided and the label column is not numeric. Using a single default color.")
color=(0,165,255)
for index, row in df.iterrows():
if not color:
color = label_to_color(row["label"])
cv2.rectangle(image, (int(row["xmin"]), int(row["ymin"])), (int(row["xmax"]), int(row["ymax"])), color=color, thickness=thickness, lineType=cv2.LINE_AA)
return image | c666b1a92eefbc04abc7da1c3a4bc6cccde93769 | 3,841 |
from sympy import solveset, diff, S, EmptySet
from sympy.calculus.util import continuous_domain
def stationary_points(f, symbol, domain=S.Reals):
"""
Returns the stationary points of a function (where derivative of the
function is 0) in the given domain.
Parameters
==========
f : Expr
The concerned function.
symbol : Symbol
The variable for which the stationary points are to be determined.
domain : Interval
The domain over which the stationary points have to be checked.
If unspecified, S.Reals will be the default domain.
Examples
========
>>> from sympy import Symbol, S, sin, log, pi, pprint, stationary_points
>>> from sympy.sets import Interval
>>> x = Symbol('x')
>>> stationary_points(1/x, x, S.Reals)
EmptySet()
>>> pprint(stationary_points(sin(x), x), use_unicode=False)
pi 3*pi
{2*n*pi + -- | n in Integers} U {2*n*pi + ---- | n in Integers}
2 2
>>> stationary_points(sin(x),x, Interval(0, 4*pi))
{pi/2, 3*pi/2, 5*pi/2, 7*pi/2}
"""
if isinstance(domain, EmptySet):
return S.EmptySet
domain = continuous_domain(f, symbol, domain)
set = solveset(diff(f, symbol), symbol, domain)
return set | 21011d7925c136de43f962a56edd5ffcc09c144f | 3,842 |
def _create_table(data_list, headers):
""" Create a table for given data list and headers.
Args:
data_list(list): list of dicts, which keys have to cover headers
headers(list): list of headers for the table
Returns:
new_table(tabulate): created table, ready to print
"""
list_table = list()
for row in data_list:
row_data = list()
for header in headers:
if header.lower() in row:
row_data.append(row[header.lower()])
else:
row_data.append(None)
list_table.append(row_data)
new_table = tabulate(list_table, headers=headers)
return new_table | d072857776c16128808b7e2b4b64075cc4894199 | 3,843 |
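# Usage sketch for `_create_table` above (assumes `from tabulate import tabulate` is in scope,
# as the function requires); headers absent from a row render as empty cells because the helper
# appends None for them:
example_rows = [{"name": "disk1", "size": "10G"}, {"name": "disk2"}]
print(_create_table(example_rows, headers=["Name", "Size"]))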
def _validate_attribute_id(this_attributes, this_id, xml_ids, enforce_consistency, name):
""" Validate attribute id.
"""
# the given id is None and we don't have setup attributes
# -> increase current max id for the attribute by 1
if this_id is None and this_attributes is None:
this_id = max(xml_ids) + 1
# the given id is None and we do have setup attributes
# set id to the id present in the setup
elif this_id is None and this_attributes is not None:
this_id = this_attributes[name]
# the given id is not None and we do have setup attributes
# -> check that the ids match (unless we are in over-write mode)
elif this_id is not None and this_attributes is not None:
if (this_id != this_attributes[name]) and enforce_consistency:
raise ValueError("Expect id %i for attribute %s, got %i" % (this_attributes[name],
name,
this_id))
return this_id | e85201c85b790576f7c63f57fcf282a985c22347 | 3,844 |
def Arrows2D(startPoints, endPoints=None,
shaftLength=0.8,
shaftWidth=0.09,
headLength=None,
headWidth=0.2,
fill=True,
c=None,
cmap=None,
alpha=1):
"""
Build 2D arrows between two lists of points `startPoints` and `endPoints`.
`startPoints` can be also passed in the form ``[[point1, point2], ...]``.
Color can be specified as a colormap which maps the size of the arrows.
:param float shaftLength: fractional shaft length
:param float shaftWidth: fractional shaft width
:param float headLength: fractional head length
:param float headWidth: fractional head width
:param bool fill: if False only generate the outline
:param c: color
:param float alpha: set transparency
:Example:
.. code-block:: python
from vedo import Grid, Arrows2D
g1 = Grid(sx=1, sy=1)
g2 = Grid(sx=1.2, sy=1.2).rotateZ(4)
arrs2d = Arrows2D(g1, g2, c='jet')
arrs2d.show(axes=1, bg='white')
|quiver|
"""
if isinstance(startPoints, Points): startPoints = startPoints.points()
if isinstance(endPoints, Points): endPoints = endPoints.points()
startPoints = np.array(startPoints)
if endPoints is None:
strt = startPoints[:,0]
endPoints = startPoints[:,1]
startPoints = strt
else:
endPoints = np.array(endPoints)
if headLength is None:
headLength = 1 - shaftLength
arr = Arrow2D((0,0,0), (1,0,0),
shaftLength, shaftWidth,
headLength, headWidth, fill)
orients = endPoints - startPoints
if orients.shape[1] == 2: # make it 3d
orients = np.c_[np.array(orients), np.zeros(len(orients))]
pts = Points(startPoints)
arrg = Glyph(pts,
arr.polydata(False),
orientationArray=orients,
scaleByVectorSize=True,
c=c, alpha=alpha).flat().lighting('off')
if c is not None:
arrg.color(c)
arrg.name = "Arrows2D"
return arrg | d2276def355c56c6fe494c29bab04cd6f1e28221 | 3,845 |
def filter_characters(results: list) -> str:
"""Filters unwanted and duplicate characters.
Args:
results: List of top 1 results from inference.
Returns:
Final output string to present to user.
"""
text = ""
for i in range(len(results)):
if results[i] == "$":
continue
elif i + 1 < len(results) and results[i] == results[i + 1]:
continue
else:
text += results[i]
return text | 6b2ca1446450751258e37b70f2c9cbe5110a4ddd | 3,846 |
def seq_alignment_files(file1, file2, outputfile=""):
"""This command takes 2 fasta files as input, each file contains a single sequence. It reads the 2 sequences from
files and get all their alignments along with the score. The -o is an optional parameter if we need the output to
be written on a file instead of the screen. """
try:
seq1 = SeqIO.read(file1, 'fasta')
seq2 = SeqIO.read(file2, 'fasta')
except OSError as Error:
print(Error)
return 'Please Enter a valid File name'
alignments = pairwise2.align.globalxx(seq1, seq2) # global alignment
if outputfile == '':
for alignment in alignments:
print(alignment)
print(format_alignment(*alignment))
else:
output_alignment(alignments, outputfile)
print('Alignment Done to File ', outputfile) | b225d97e29040040755cc3f2260b60f90c390bce | 3,847 |
def main(Block: type[_Block], n: int, difficulty: int) -> list[tuple[float, int]]:
"""Test can hash a block"""
times_and_tries = []
for i in range(n):
block = Block(rand_block_hash(), [t], difficulty=difficulty)
# print(f"starting {i}... ", end="", flush=True)
with time_it() as timer:
block.hash()
# print(f"took {timer.interval:.3g} seconds and {block.nonce+1} tries")
times_and_tries.append((timer.interval, block.nonce + 1))
return times_and_tries | 27c729604b3f3441e1ceb5f6d6d28f47d64fdb13 | 3,848 |
from typing import Union
from typing import SupportsFloat
def is_castable_to_float(value: Union[SupportsFloat, str, bytes, bytearray]) -> bool:
"""
Checks whether the object can be converted to float.
Arguments  : value : the value to check
Returns    : True|False
Exceptions : none
>>> is_castable_to_float(1)
True
>>> is_castable_to_float('1')
True
>>> is_castable_to_float('1.0')
True
>>> is_castable_to_float('1,0')
False
>>> is_castable_to_float('True')
False
>>> is_castable_to_float(True)
True
>>> is_castable_to_float('')
False
>>> is_castable_to_float(None) # noqa
False
"""
try:
float(value)
return True
except (ValueError, TypeError):
return False | e3882c0e64da79dc9a0b74b4c2414c7bf29dd6c9 | 3,849 |
from operator import itemgetter
def list_unique(hasDupes):
"""Return the sorted unique values from a list"""
# order preserving
d = dict((x, i) for i, x in enumerate(hasDupes))
return [k for k, _ in sorted(d.items(), key=itemgetter(1))] | 0ba0fcb216400806aca4a11d5397531dc19482f6 | 3,850 |
def filter_by_networks(object_list, networks):
"""Returns a copy of object_list with all objects that are not in the
network removed.
Parameters
----------
object_list: list
List of datamodel objects.
networks: string or list
Network or list of networks to check for.
Returns
-------
filtered
List of filtered datamodel objects.
"""
filtered = [obj for obj in object_list if check_network(networks, obj)]
return filtered | 9ffb2cedd1508e5924f3a2894a2f842bc5673440 | 3,851 |
def score_per_year_by_country(country):
"""Returns the Global Terrorism Index (GTI) per year of the given country."""
cur = get_db().execute('''SELECT iyear, (
1*COUNT(*)
+ 3*SUM(nkill)
+ 0.5*SUM(nwound)
+ 2*SUM(case propextent when 1.0 then 1 else 0 end)
+ 2*SUM(case propextent when 2.0 then 1 else 0 end)
+ 2*SUM(case propextent when 3.0 then 1 else 0 end)
+ 2*SUM(case propextent when 4.0 then 1 else 0 end)) FROM Attacks WHERE iso_code="{}" GROUP BY iyear''' .format(country))
score = cur.fetchall()
cur.close()
return jsonify(score) | ac8992a0bd2227b7b9f5622b9395e4c7933af35a | 3,853 |
def build(options, is_training):
"""Builds a model based on the options.
Args:
options: A model_pb2.Model instance.
Returns:
A model instance.
Raises:
ValueError: If the model proto is invalid or cannot find a registered entry.
"""
if not isinstance(options, model_pb2.Model):
raise ValueError('The options has to be an instance of model_pb2.Model.')
for extension, model_proto in options.ListFields():
if extension in MODELS:
return MODELS[extension](model_proto, is_training)
raise ValueError('Invalid model config!') | 99fc2f283075091254743a9d70ecab3d7a65066d | 3,854 |
def string_to_rdkit(frmt: str, string: str, **kwargs) -> RDKitMol:
"""
Convert string representation of molecule to RDKitMol.
Args:
frmt: Format of string.
string: String representation of molecule.
**kwargs: Other keyword arguments for conversion function.
Returns:
RDKitMol corresponding to string representation.
"""
try:
converter = RDKIT_STRING_TO_MOL_CONVERTERS[frmt.lower()]
except KeyError:
raise ValueError(f'{frmt} is not a recognized RDKit format')
else:
remove_hs = kwargs.pop('removeHs', False) # Don't remove hydrogens by default
rdkit_mol = converter(string, removeHs=remove_hs, **kwargs)
return RDKitMol(rdkit_mol) | 34803a46d5228644bb3db614aca5580bcb286655 | 3,855 |
from datetime import datetime
def clean_datetime_remove_ms(atime):
"""
Zero out the milliseconds (sub-second part) of a datetime object.
:param atime:
:return:
"""
return datetime(atime.year, atime.month, atime.day, atime.hour, atime.minute, atime.second) | 94a47ad8802b3eb4d58d332d71bb3d3e0c67d947 | 3,856 |
def perDay(modified):
"""Auxiliary in provenance filtering: chunk the trails into daily bits."""
chunks = {}
for m in modified:
chunks.setdefault(dt.date(m[1]), []).append(m)
return [chunks[date] for date in sorted(chunks)] | ce9fe31c39c9c6c5e0753aa2dc6dc5113fb199e4 | 3,857 |
def login():
"""The screen to log the user into the system."""
# call create_all to create database tables if this is the first run
db.create_all()
# If there are no users, create a default admin and non-admin
if len(User.query.all()) == 0:
create_default_users()
# Redirect the user if already logged in
if current_user.is_authenticated:
# Send admins and non-admins to different pages
if current_user.admin:
return redirect(url_for('admin.admin_home'))
else:
return redirect(url_for('export.export_home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash("Invalid username or password")
return redirect(url_for('login.login'))
login_user(user)
current_app.logger.info(f"Logged in {user}")
# If the user was redirected here, send the user back to the original page
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
# If no next page given, default to these pages
if user.admin:
next_page = url_for('admin.admin_home')
else:
next_page = url_for('export.export_home')
return redirect(next_page)
nav_bar_title = "Login"
return render_template('login/login.html', title='Sign in', form=form, nav_bar_title=nav_bar_title) | 0912dca53b40677da9a9443c4500badf05fff8a8 | 3,859 |
def freight_sep_2014():
"""Find the number of freight of the month"""
for i in fetch_data_2014():
if i[1] == "Freight" and i[4] == "September":
num_0 = i[6]
return int(num_0) | b7f770362f7a85ffc92591a48660d01d7f784dc1 | 3,860 |
def piotroski_f(df_cy,df_py,df_py2):
"""function to calculate f score of each stock and output information as dataframe"""
f_score = {}
tickers = df_cy.columns
for ticker in tickers:
ROA_FS = int(df_cy.loc["NetIncome",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2) > 0)
CFO_FS = int(df_cy.loc["CashFlowOps",ticker] > 0)
ROA_D_FS = int(df_cy.loc["NetIncome",ticker]/(df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2 > df_py.loc["NetIncome",ticker]/(df_py.loc["TotAssets",ticker]+df_py2.loc["TotAssets",ticker])/2)
CFO_ROA_FS = int(df_cy.loc["CashFlowOps",ticker]/df_cy.loc["TotAssets",ticker] > df_cy.loc["NetIncome",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2))
LTD_FS = int((df_cy.loc["LTDebt",ticker] + df_cy.loc["OtherLTDebt",ticker])<(df_py.loc["LTDebt",ticker] + df_py.loc["OtherLTDebt",ticker]))
CR_FS = int((df_cy.loc["CurrAssets",ticker]/df_cy.loc["CurrLiab",ticker])>(df_py.loc["CurrAssets",ticker]/df_py.loc["CurrLiab",ticker]))
DILUTION_FS = int(df_cy.loc["CommStock",ticker] <= df_py.loc["CommStock",ticker])
GM_FS = int((df_cy.loc["GrossProfit",ticker]/df_cy.loc["TotRevenue",ticker])>(df_py.loc["GrossProfit",ticker]/df_py.loc["TotRevenue",ticker]))
ATO_FS = int(df_cy.loc["TotRevenue",ticker]/((df_cy.loc["TotAssets",ticker]+df_py.loc["TotAssets",ticker])/2)>df_py.loc["TotRevenue",ticker]/((df_py.loc["TotAssets",ticker]+df_py2.loc["TotAssets",ticker])/2))
f_score[ticker] = [ROA_FS,CFO_FS,ROA_D_FS,CFO_ROA_FS,LTD_FS,CR_FS,DILUTION_FS,GM_FS,ATO_FS]
f_score_df = pd.DataFrame(f_score,index=["PosROA","PosCFO","ROAChange","Accruals","Leverage","Liquidity","Dilution","GM","ATO"])
return f_score_df | 119a3dd426fbe5e8b5106cbebebf4b000799a839 | 3,862 |
from typing import Dict
def evaluate_circuit(
instances: Dict[str, SType],
connections: Dict[str, str],
ports: Dict[str, str],
) -> SDict:
"""evaluate a circuit for the given sdicts."""
# it's actually easier working w reverse:
reversed_ports = {v: k for k, v in ports.items()}
block_diag = {}
for name, S in instances.items():
block_diag.update(
{(f"{name},{p1}", f"{name},{p2}"): v for (p1, p2), v in sdict(S).items()}
)
sorted_connections = sorted(connections.items(), key=_connections_sort_key)
all_connected_instances = {k: {k} for k in instances}
for k, l in sorted_connections:
name1, _ = k.split(",")
name2, _ = l.split(",")
connected_instances = (
all_connected_instances[name1] | all_connected_instances[name2]
)
for name in connected_instances:
all_connected_instances[name] = connected_instances
current_ports = tuple(
p
for instance in connected_instances
for p in set([p for p, _ in block_diag] + [p for _, p in block_diag])
if p.startswith(f"{instance},")
)
block_diag.update(_interconnect_ports(block_diag, current_ports, k, l))
for i, j in list(block_diag.keys()):
is_connected = i == k or i == l or j == k or j == l
is_in_output_ports = i in reversed_ports and j in reversed_ports
if is_connected and not is_in_output_ports:
del block_diag[i, j] # we're no longer interested in these port combinations
circuit_sdict: SDict = {
(reversed_ports[i], reversed_ports[j]): v
for (i, j), v in block_diag.items()
if i in reversed_ports and j in reversed_ports
}
return circuit_sdict | 7dd6d019845dbf7f69c6324143d88d4d48af9dea | 3,863 |
def canonical_smiles_from_smiles(smiles, sanitize = True):
"""
Apply canonicalisation with rdkit
Parameters
------------
smiles : str
sanitize : bool
Whether to apply rdkit sanitisation, default yes.
Returns
---------
canonical_smiles : str
Returns None if canonicalisation fails
"""
try:
mol = Chem.MolFromSmiles(smiles, sanitize = sanitize)
mol.UpdatePropertyCache()
#mol = Chem.AddHs(mol)
Chem.GetSSSR(mol)
return Chem.MolToSmiles(mol,canonical=True, allHsExplicit=True, kekuleSmiles = False, allBondsExplicit = True, isomericSmiles = True)
except Exception:
return None | 0c4dc4583d9a12439b915412cab8458e380a4e6c | 3,864 |
def get_ref(struct, ref, leaf=False):
"""
Figure out if a reference (e.g., "#/foo/bar") exists within a
given structure and return it.
"""
if not isinstance(struct, dict):
return None
parts = ref_parts(ref)
result = {}
result_current = result
struct_current = struct
for part in parts:
if part not in struct_current:
return None
result_current[part] = {}
result_current = result_current[part]
struct_current = struct_current[part]
if leaf:
return struct_current
result_current.update(struct_current)
return result | 61ebb2561c2c79c58c297c91ac266e9e786a5b7f | 3,865 |
def edit_maker_app(
operator,
app_maker_code,
app_name="",
app_url="",
developer="",
app_tag="",
introduction="",
add_user="",
company_code="",
):
"""
@summary: Edit a maker app
@param operator: English ID of the operator
@param app_maker_code: maker app code
@param app_name: app name; optional, the name is left unchanged if empty
@param app_url: app URL; optional, the URL is left unchanged if empty
@param developer: list of developer English IDs separated by semicolons ";"; optional,
developers are left unchanged if empty. Pass the complete developer list after the change.
@param app_tag: optional String, light-app category
@param introduction: optional String, light-app description
@param add_user: redundant field, kept for multi-version compatibility
@param company_code: redundant field, kept for multi-version compatibility
@return: {'result': True, 'message': u"APP Maker updated successfully"}
{'result': False, 'message': u"APP Maker update failed"}
"""
data = {
"bk_app_code": settings.APP_CODE,
"bk_app_secret": settings.SECRET_KEY,
"light_app_code": app_maker_code,
"app_name": app_name,
}
if app_url:
data["app_url"] = app_url
if developer:
data["developers"] = developer.split(",")
if app_tag:
data["app_tag"] = app_tag
if introduction:
data["introduction"] = introduction
resp = _request_paasv3_light_app_api(url=LIGHT_APP_API, method="patch", data=data)
return resp | abb2d57235e6c231b96182f989606060f8ebb4ab | 3,867 |
def fifo():
"""
Returns a callable instance of the first-in-first-out (FIFO) prioritization
algorithm that sorts ASDPs by timestamp
Returns
-------
prioritize: callable
a function that takes an ASDP type name and a dict of per-type ASDPDB
metadata, as returned by `asdpdb.load_asdp_metadata_by_type`, and
returns a list of dicts containing ordered ASDPs with metadata (in the
format expected by `asdpdb.save_asdp_ordering`)
"""
def prioritize(asdp_type, metadata):
# Extract metadata entries
ids = metadata['asdp_id']
sue = metadata['sue']
ts = metadata['timestamp']
untransmitted = metadata['downlink_status']
n_untransmitted = np.sum(untransmitted)
if n_untransmitted == 0:
logger.info(f'No untransmitted {asdp_type} products to prioritize')
return []
size_bytes = metadata['asdp_size_bytes']
sue_per_byte = sue / size_bytes
# Fill in bad values with zeros
sue_per_byte[np.isnan(sue_per_byte)] = 0.0
sue_per_byte[np.isinf(sue_per_byte)] = 0.0
order = np.argsort(ts)
for cand_id in order:
if untransmitted[cand_id]:
logger.info(
f'Selected ASDP {ids[cand_id]}, '
f'initial SUE = {sue_per_byte[cand_id]:.2e}'
)
products = [
{
'asdp_id': ids[cand_id],
'initial_sue': sue[cand_id],
'final_sue': sue[cand_id],
'initial_sue_per_byte': sue_per_byte[cand_id],
'final_sue_per_byte': sue_per_byte[cand_id],
'size_bytes': size_bytes[cand_id],
'timestamp': ts[cand_id],
}
for cand_id in order
if untransmitted[cand_id]
]
return products
return prioritize | 8f0d24c43a15467c9e6b9f195d12978664867bd3 | 3,868 |
def super(d, t):
"""Pressure p and internal energy u of supercritical water/steam
as a function of density d and temperature t (deg C)."""
tk = t + tc_k
tau = tstar3 / tk
delta = d / dstar3
taupow = power_array(tau, tc3)
delpow = power_array(delta, dc3)
phidelta = nr3[0] * delpow[-1] + sum([n * i * delpow[i - 1] * taupow[j] for
(i, j, n) in zip(ir3, jr3, nr3)])
phitau = sum([n * delpow[i] * j * taupow[j - 1] for
(i, j, n) in zip(ir3, jr3, nr3)])
rt = rconst * tk
p = d * rt * delta * phidelta
u = rt * tau * phitau
return (p, u) | 937d58264b94b041aafa63b88d5fd4498d4acb8e | 3,869 |
import tty
import logging
import json
def ls(query=None, quiet=False):
"""List and count files matching the query and compute total file size.
Parameters
----------
query : dict, optional
(default: None)
quiet : bool, optional
Whether to suppress console output.
"""
tty.screen.status('Searching ...', mode='static')
if query is None:
query = CONFIG['GENERAL']['QUERY']
file_list = scihub.search(query, verbose=True)
size = 0.0
for f in file_list:
size += f['size']
if not quiet:
msg = 'Found {0:d} files ({1}).'.format(len(file_list),
utils.b2h(size))
logging.info(msg)
tty.screen.result(msg)
for f in file_list:
msg = '{:>8} {}'.format(utils.b2h(f['size']), f['filename'])
# tty.update(f['filename'],msg)
logging.info(f['filename'])
#
# Write file_list to JSON file
# so it can be read later by the get() and store() commands.
#
if 'OUT_FILE' in CONFIG['GENERAL'] and \
CONFIG['GENERAL']['OUT_FILE'] is not None:
with open(CONFIG['GENERAL']['OUT_FILE'], 'w') as f:
json.dump(file_list, f, default=str, indent=2)
return file_list | acbf576170f34cfc09e4a3a8d64c1c313a7d3b51 | 3,870 |
def _create_full_gp_model():
"""
GP Regression
"""
full_gp_model = gpflow.models.GPR(
(Datum.X, Datum.Y),
kernel=gpflow.kernels.SquaredExponential(),
mean_function=gpflow.mean_functions.Constant(),
)
opt = gpflow.optimizers.Scipy()
opt.minimize(
full_gp_model.training_loss,
variables=full_gp_model.trainable_variables,
options=dict(maxiter=300),
)
return full_gp_model | bebe02e89e4ad17c5832cfced8f7cd1dce9a3b11 | 3,871 |
def read_file_header(fd, endian):
"""Read mat 5 file header of the file fd.
Returns a dict with header values.
"""
fields = [
('description', 's', 116),
('subsystem_offset', 's', 8),
('version', 'H', 2),
('endian_test', 's', 2)
]
hdict = {}
for name, fmt, num_bytes in fields:
data = fd.read(num_bytes)
hdict[name] = unpack(endian, fmt, data)
hdict['description'] = hdict['description'].strip()
v_major = hdict['version'] >> 8
v_minor = hdict['version'] & 0xFF
hdict['__version__'] = '%d.%d' % (v_major, v_minor)
return hdict | d994f74a889cedd7e1524102ffd1c62bb3764a0f | 3,872 |
def shape_padleft(t, n_ones=1):
"""Reshape `t` by left-padding the shape with `n_ones` 1s.
See Also
--------
shape_padaxis
shape_padright
Dimshuffle
"""
_t = aet.as_tensor_variable(t)
pattern = ["x"] * n_ones + [i for i in range(_t.type.ndim)]
return _t.dimshuffle(pattern) | 44e68fed0ea7497ba244ad83fbd4ff53cec22f24 | 3,873 |
def zad1(x):
"""
Selects all elements that immediately follow a zero.
If there are none, returns None.
Otherwise returns their maximum.
"""
zeros = (x[:-1] == 0)
if np.sum(zeros):
elements_to_compare = x[1:][zeros]
return np.max(elements_to_compare)
return None | e54f99949432998bf852afb8f7591af0af0b8b59 | 3,874 |
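# Usage sketch for `zad1` (assumes numpy is imported as `np`, as the function itself does):
# among the elements that directly follow a zero, return the maximum.
x_example = np.array([0, 3, 5, 0, 7, 2])
assert zad1(x_example) == 7                 # elements after zeros are 3 and 7
assert zad1(np.array([1, 2, 3])) is None    # no zeros, so nothing follows one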
def skopt_space(hyper_to_opt):
"""Create space of hyperparameters for the gaussian processes optimizer.
This function creates the space of hyperparameter following skopt syntax.
Parameters:
hyper_to_opt (dict): dictionary containing the configuration of the
hyperparameters to optimize. This dictionary must follow the next
syntax:
.. code:: python
hyper_to_opt = {'hyperparam_1': {'type': ...,
'range: ...,
'step': ...},
'hyperparam_2': {'type': ...,
'range: ...,
'step': ...},
...
}
See the official documentation for more details.
Returns:
list: space of hyperparameters following the syntax required by the
gaussian processes optimization algorithm.
Example::
hyper_to_opt = {
'cnn_rnn_dropout':{
'type': 'uniform',
'range': [0,1]},
'optimizer_type':{
'type': 'choice',
'range': ['Adadelta', 'Adam', 'RMSProp', 'SGD']},
'base_learning_rate':{
'type': 'loguniform',
'range': [-5, 0]},
'layer1_filters':{
'type': 'quniform',
'range': [16, 64],
'step': 1}}
Raises:
KeyError: if ``type`` is other than ``uniform``, ``quniform``,
``loguniform`` or ``choice``.
"""
space = []
# loop over the hyperparameters to optimize dictionary and add each
# hyperparameter to the space
for key, items in hyper_to_opt.items():
if items['type'] == 'uniform':
space.append(skopt.space.Real(items['range'][0],
items['range'][1],
name=key))
elif items['type'] == 'quniform':
space.append(skopt.space.Integer(items['range'][0],
items['range'][1],
name=key))
elif items['type'] == 'loguniform':
space.append(skopt.space.Real(items['range'][0],
items['range'][1],
name=key,
prior='log-uniform'))
elif items['type'] == 'choice':
space.append(skopt.space.Categorical(items['range'],
name=key))
else:
raise KeyError('The gaussian processes optimizer supports only \
uniform, quniform, loguniform and choice space types')
return space | bdfbc685b5fd51f8f28cb9b308d3962179d15c7e | 3,875 |
import torch
def process_text_embedding(text_match, text_diff):
"""
Process text embedding based on embedding type during training and evaluation
Args:
text_match (List[str]/Tensor): For matching caption, list of captions for USE embedding and Tensor for glove/fasttext embeddings
text_diff (List[str]/Tensor): For non-matching caption, list of captions for USE embedding and Tensor for glove/fasttext embeddings
Returns:
text_match (Tensor): Processed text-embedding for matching caption
text_diff (Tensor): Processed text-embedding for non-matching caption
"""
if embed_type == 'use':
text_match = torch.tensor(use_embed(text_match).numpy())
text_diff = torch.tensor(use_embed(text_diff).numpy())
text_match = text_match.to(device)
text_diff = text_diff.to(device)
return text_match, text_diff | 6f052cc29186f8bcc1598780bf7f437098774498 | 3,877 |
import requests
import json
import base64
def x5u_vulnerability(jwt=None, url=None, crt=None, pem=None, file=None):
"""
Check jku Vulnerability.
Parameters
----------
jwt: str
your jwt.
url: str
your url.
crt: str
crt path file
pem: str
pem file name
file: str
jwks file name
Returns
-------
str
your new jwt.
"""
if not is_valid_jwt(jwt):
raise InvalidJWT("Invalid JWT format")
if file is None:
file = "jwks_with_x5c.json"
jwt_json = jwt_to_json(jwt)
if "x5u" not in jwt_json[HEADER]:
raise InvalidJWT("Invalid JWT format JKU missing")
if crt is None or pem is None:
crt, pem = create_crt()
with open(crt) as f:
content = f.read()
f.close()
x5u = requests.get(jwt_json[HEADER]["x5u"]).json()
x5u["keys"][0]["x5c"] = (
content.replace("-----END CERTIFICATE-----", "")
.replace("-----BEGIN CERTIFICATE-----", "")
.replace("\n", "")
)
if ".json" not in file:
file += ".json"
if not url.endswith("/"):
url += "/"
jwt_json[HEADER]["x5u"] = f"{url}{file}"
f = open(file, "w")
f.write(json.dumps(x5u))
f.close()
s = encode_jwt(jwt_json)
key = crypto.load_privatekey(crypto.FILETYPE_PEM, open(pem).read())
priv = key.to_cryptography_key()
sign = priv.sign(
bytes(s, encoding="UTF-8"),
algorithm=hashes.SHA256(),
padding=padding.PKCS1v15(),
)
return s + "." + base64.urlsafe_b64encode(sign).decode("UTF-8").rstrip("=") | 0424072951e99d0281a696b94889538c1d17ed81 | 3,878 |
def get_all_interactions(L, index_1=False):
"""
Returns a list of all epistatic interactions for a given sequence length.
This sets of the order used for beta coefficients throughout the code.
If index_1=True, then returns epistatic interactions corresponding to
1-indexing.
"""
if index_1:
pos = range(1, L+1)
else:
pos = range(L)
all_U = list(powerset(pos))
return all_U | f8a151e5d44f2e139820b3d06af3995f60945dd2 | 3,880 |
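# `get_all_interactions` relies on a `powerset` helper that is not part of the snippet.
# A likely sketch is the standard itertools recipe (an assumption about the missing helper):
from itertools import chain, combinations

def powerset(iterable):
    """Return all subsets of `iterable`, from the empty tuple up to the full set."""
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))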
import xml
import math
def convertSVG(streamOrPath, name, defaultFont):
"""
Loads an SVG and converts it to a DeepSea vector image FlatBuffer format.
streamOrPath: the stream or path for the SVG file.
name: the name of the vector image used to decorate material names.
defaultFont: the default font to use.
The binary data is returned.
"""
svg = minidom.parse(streamOrPath)
materials = Materials(name)
commands = []
for rootNode in svg.childNodes:
if rootNode.nodeType != xml.dom.Node.ELEMENT_NODE:
continue
if rootNode.tagName == 'svg':
if rootNode.hasAttribute('viewBox'):
box = rootNode.getAttribute('viewBox').split()
if len(box) != 4:
raise Exception("Invalid view box '" + rootNode.getAttribute('viewbox') + "'")
if sizeFromString(box[0], 0.0) != 0.0 or sizeFromString(box[1], 0.0) != 0.0:
raise Exception("View box must have an origin of (0, 0)")
size = (sizeFromString(box[2], 0.0), sizeFromString(box[3], 0.0))
elif rootNode.hasAttribute('width') and rootNode.hasAttribute('height'):
size = (sizeFromString(rootNode.getAttribute('width'), 0.0),
sizeFromString(rootNode.getAttribute('height'), 0.0))
else:
raise Exception("No size set on SVG.")
diagonalSize = math.sqrt(size[0]*size[0] + size[1]*size[1])/math.sqrt(2)
for node in rootNode.childNodes:
if node.nodeType != xml.dom.Node.ELEMENT_NODE:
continue
if node.tagName == 'defs':
readMaterials(node, materials, size, diagonalSize)
else:
commands.extend(readShapes(node, defaultFont, materials, size, diagonalSize, \
Transform()))
break
builder = flatbuffers.Builder(0)
materials.write(builder)
commandOffsets = []
for command in commands:
commandOffsets.extend(command(builder))
VectorImage.StartCommandsVector(builder, len(commandOffsets))
for offset in reversed(commandOffsets):
builder.PrependUOffsetTRelative(offset)
commandsOffset = builder.EndVector()
VectorImage.Start(builder)
materials.writeToVectorImage(builder)
VectorImage.AddCommands(builder, commandsOffset)
VectorImage.AddSize(builder, CreateVector2f(builder, size[0], size[1]))
builder.Finish(VectorImage.End(builder))
return builder.Output() | f71b22af076a466f951815e73f83ea989f920cdf | 3,881 |
def to_accumulo(df, config: dict, meta: dict, compute=True, scheduler=None):
"""
Parallel write of Dask DataFrame to Accumulo Table
Parameters
----------
df : Dataframe
The dask.Dataframe to write to Accumulo
config : dict
Accumulo configuration to use to connect to accumulo
meta : dict
Data model to apply to dataframe
compute : bool
Should compute be called; immediately call write if True, delayed otherwise
scheduler : str
The scheduler to use, like “threads” or “processes”
Returns
-------
The number of Accumulo rows written if they were computed right away.
If not, the delayed tasks associated with the writing of the table
"""
dfs = df.to_delayed()
values = [delayed(pandas_write_dataframe)(config, d, meta) for d in dfs]
if compute:
return sum(delayed(values).compute(scheduler=scheduler))
else:
return values | 016ee1cc516b8fd6c055902002a196b30ceb0e07 | 3,882 |
def compute_euclidean_distance(x, y):
"""
Computes the euclidean distance between two tensorflow variables
"""
d = tf.reduce_sum(tf.square(x-y),axis=1,keep_dims=True)
return d | 26171d3a0c719d0744ab163b33590f4bb1f92480 | 3,883 |
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
Returns False on a failure. Basic packet structure is below.
Client packet (14 bytes)::
0 1 8 9 13
+-+--------+-----+
|x| cli_id |?????|
+-+--------+-----+
x = packet identifier 0x38
cli_id = 64 bit identifier
? = unknown, probably flags/padding
Server packet (26 bytes)::
0 1 8 9 13 14 21 22 25
+-+--------+-----+--------+----+
|x| srv_id |?????| cli_id |????|
+-+--------+-----+--------+----+
x = packet identifier 0x40
cli_id = 64 bit identifier
? = unknown, probably flags/padding
bit 9 was 1 and the rest were 0 in testing
"""
if session_id is None:
session_id = random.randint(0, 0xffffffffffffffff)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = struct.pack("!BQxxxxxx", 0x38, session_id)
sock.sendto(data, (address, port))
sock.settimeout(timeout)
try:
received = sock.recv(2048)
except socket.timeout:
return False
finally:
sock.close()
fmt = "!BQxxxxxQxxxx"
if len(received) != struct.calcsize(fmt):
print(struct.calcsize(fmt))
return False
(identifier, server_sess, client_sess) = struct.unpack(fmt, received)
if identifier == 0x40 and client_sess == session_id:
return server_sess | dcc4d8cf347486b0f10f1dd51d230bd6fb625551 | 3,884 |
def is_admin():
"""Checks if author is a server administrator, or has the correct permission tags."""
async def predicate(ctx):
return (
# User is a server administrator.
ctx.message.channel.permissions_for(ctx.message.author).administrator
# User is a developer.
or (ctx.author.id == developer_id)
# User has a permission tag.
or (discord.utils.get(ctx.author.roles, name=str(f"fox:{ctx.command.name}")))
)
return commands.check(predicate) | 70a87d8ae4970b05aa39339fec2aa1ade43d238a | 3,885 |
def send_message(chat_id):
"""Send a message to a chat
If a media file is found, send_media is called, else a simple text message
is sent
"""
files = request.files
if files:
res = send_media(chat_id, request)
else:
message = request.form.get("message", default="Empty Message")
res = g.driver.chat_send_message(chat_id, message)
if res:
return jsonify(res)
else:
return False | df77e115497cfc975b9fad6f9a3b43648349133e | 3,886 |
def get_neighbours(sudoku, row, col):
"""Funkcja zwraca 3 listy sasiadow danego pola, czyli np. wiersz tego pola, ale bez samego pola"""
row_neighbours = [sudoku[row][y] for y in range(9) if y != col]
col_neighbours = [sudoku[x][col] for x in range(9) if x != row]
sqr_neighbours = [sudoku[x][y] for x in range(9) if x//3 == row//3 for y in range(9) if y//3 == col//3 if x!=row or y!=col]
return row_neighbours, col_neighbours, sqr_neighbours | b10766fc8925b54d887925e1a684e368c0f3b550 | 3,887 |
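# Usage sketch for `get_neighbours`: for a 9x9 grid (list of lists), each of the three
# neighbour lists of cell (0, 0) has 8 entries, since the cell itself is excluded.
grid = [[(r * 9 + c) % 10 for c in range(9)] for r in range(9)]  # dummy 9x9 grid
row_n, col_n, sqr_n = get_neighbours(grid, 0, 0)
assert len(row_n) == len(col_n) == len(sqr_n) == 8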
import torch
import PIL
def to_ndarray(image):
"""
Convert torch.Tensor or PIL.Image.Image to ndarray.
:param image: (torch.Tensor or PIL.Image.Image) image to convert to ndarray
:rtype (ndarray): image as ndarray
"""
if isinstance(image, torch.Tensor):
return image.numpy()
if isinstance(image, PIL.Image.Image):
return np.array(image)
raise TypeError("to_ndarray: expect torch.Tensor or PIL.Image.Image") | f12444779e2d2eb78e3823821c8c6acec7c601a6 | 3,888 |
def calc_random_piv_error(particle_image_diameter):
"""
Caclulate the random error amplitude which is proportional to the diameter of the displacement correlation peak.
(Westerweel et al., 2009)
"""
c = 0.1
error = c*np.sqrt(2)*particle_image_diameter/np.sqrt(2)
return error | 91b02b658c0c6476739695017925c44c92bf67c8 | 3,890 |
def resolve(name, module=None):
"""Resolve ``name`` to a Python object via imports / attribute lookups.
If ``module`` is None, ``name`` must be "absolute" (no leading dots).
If ``module`` is not None, and ``name`` is "relative" (has leading dots),
the object will be found by navigating relative to ``module``.
Returns the object, if found. If not, propagates the error.
"""
name = name.split('.')
if not name[0]:
if module is None:
raise ValueError("relative name without base module")
module = module.split('.')
name.pop(0)
while not name[0]:
module.pop()
name.pop(0)
name = module + name
used = name.pop(0)
found = __import__(used)
for n in name:
used += '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found | d778ff9e4ea821be6795cc9007552e6c0afeb565 | 3,891 |
def fibonacci(n:int) -> int:
"""Return the `n` th Fibonacci number, for positive `n`."""
if 0 <= n <= 1:
return n
n_minus1, n_minus2 = 1,0
result = None
for f in range(n - 1):
result = n_minus2 + n_minus1
n_minus2 = n_minus1
n_minus1 = result
return result | 4be929f69dc9c35679af580767bfe047fc1963e9 | 3,892 |
import pandas as pd
from sqlalchemy import select, asc
def get_budget(product_name, sdate):
"""
Budget for a product, limited to data available at the database
:param product_name:
:param sdate: starting date
:return: pandas series
"""
db = DB('forecast')
table = db.table('budget')
sql = select([table.c.budget]).where(table.c.product_name ==
product_name).order_by(asc('month'))
ans = db.query(sql).fetchall()
ret = []
for row in ans:
ret.append(float(row[0]))
date_index = pd.date_range(start = sdate, periods = len(ret), freq = 'M')
return pd.Series(data = ret, index = date_index) | a17ae7db2734c2c877a41eb0986016a4f0241f07 | 3,893 |
def _residual_block_basic(filters, kernel_size=3, strides=1, use_bias=False, name='res_basic',
kernel_initializer='he_normal', kernel_regularizer=regulizers.l2(1e-4)):
"""
Return a basic residual layer block.
:param filters: Number of filters.
:param kernel_size: Kernel size.
:param strides: Convolution strides
:param use_bias: Flag to use bias or not in Conv layer.
:param kernel_initializer: Kernel initialisation method name.
:param kernel_regularizer: Kernel regularizer.
:return: Callable layer block
"""
def layer_fn(x):
x_conv1 = _res_conv(
filters=filters, kernel_size=kernel_size, padding='same', strides=strides,
use_relu=True, use_bias=use_bias,
kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer,
name=name + '_cbr_1')(x)
x_residual = _res_conv(
filters=filters, kernel_size=kernel_size, padding='same', strides=1,
use_relu=False, use_bias=use_bias,
kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer,
name=name + '_cbr_2')(x_conv1)
merge = _merge_with_shortcut(kernel_initializer, kernel_regularizer,name=name)(x, x_residual)
merge = Activation('relu')(merge)
return merge
return layer_fn | 87c041f58de71d7bd2d3fcbe97ec35b8fa057468 | 3,894 |
def console_script(tmpdir):
"""Python script to use in tests."""
script = tmpdir.join('script.py')
script.write('#!/usr/bin/env python\nprint("foo")')
return script | be6a38bec8bb4f53de83b3c632ff3d26d88ef1c7 | 3,895 |
def parse_tpl_file(tpl_file):
""" parse a PEST-style template file to get the parameter names
Args:
tpl_file (`str`): path and name of a template file
Returns:
[`str`] : list of parameter names found in `tpl_file`
Example::
par_names = pyemu.pst_utils.parse_tpl_file("my.tpl")
"""
par_names = set()
with open(tpl_file, "r") as f:
try:
header = f.readline().strip().split()
assert header[0].lower() in [
"ptf",
"jtf",
], "template file error: must start with [ptf,jtf], not:" + str(header[0])
assert (
len(header) == 2
), "template file error: header line must have two entries: " + str(header)
marker = header[1]
assert len(marker) == 1, (
"template file error: marker must be a single character, not:"
+ str(marker)
)
for line in f:
par_line = set(line.lower().strip().split(marker)[1::2])
par_names.update(par_line)
# par_names.extend(par_line)
# for p in par_line:
# if p not in par_names:
# par_names.append(p)
except Exception as e:
raise Exception(
"error processing template file " + tpl_file + " :\n" + str(e)
)
# par_names = [pn.strip().lower() for pn in par_names]
# seen = set()
# seen_add = seen.add
# return [x for x in par_names if not (x in seen or seen_add(x))]
return [p.strip() for p in list(par_names)] | 01ed281f4ee9f1c51032d4f3655bd3e17b73bbb2 | 3,896 |
def get_single_image_results(pred_boxes, gt_boxes, iou_thr):
"""Calculates number of true_pos, false_pos, false_neg from single batch of boxes.
Args:
gt_boxes (list of list of floats): list of locations of ground truth
objects as [xmin, ymin, xmax, ymax]
pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)
and 'scores'
iou_thr (float): value of IoU to consider as threshold for a
true prediction.
Returns:
dict: true positives (int), false positives (int), false negatives (int)
"""
all_pred_indices = range(len(pred_boxes))
all_gt_indices = range(len(gt_boxes))
if len(all_pred_indices) == 0:
tp = 0
fp = 0
fn = len(gt_boxes)
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
if len(all_gt_indices) == 0:
tp = 0
fp = len(pred_boxes)
fn = 0
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
gt_idx_thr = []
pred_idx_thr = []
ious = []
for ipb, pred_box in enumerate(pred_boxes):
for igb, gt_box in enumerate(gt_boxes):
iou = calc_iou_individual(pred_box, gt_box)
if iou > iou_thr:
gt_idx_thr.append(igb)
pred_idx_thr.append(ipb)
ious.append(iou)
args_desc = np.argsort(ious)[::-1]
if len(args_desc) == 0:
# No matches
tp = 0
fp = len(pred_boxes)
fn = len(gt_boxes)
else:
gt_match_idx = []
pred_match_idx = []
for idx in args_desc:
gt_idx = gt_idx_thr[idx]
pr_idx = pred_idx_thr[idx]
# If the boxes are unmatched, add them to matches
if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
gt_match_idx.append(gt_idx)
pred_match_idx.append(pr_idx)
tp = len(gt_match_idx)
fp = len(pred_boxes) - len(pred_match_idx)
fn = len(gt_boxes) - len(gt_match_idx)
return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn} | 3f3bc93641e2f7d04a21fed9a8d0c40fcbc9eacc | 3,898 |
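# `get_single_image_results` depends on a `calc_iou_individual(pred_box, gt_box)` helper that
# is not shown. A minimal sketch of a standard IoU for [xmin, ymin, xmax, ymax] boxes
# (an assumption, not necessarily the original implementation):
def calc_iou_individual(pred_box, gt_box):
    """Intersection over union of two boxes given as [xmin, ymin, xmax, ymax]."""
    x1, y1 = max(pred_box[0], gt_box[0]), max(pred_box[1], gt_box[1])
    x2, y2 = min(pred_box[2], gt_box[2]), min(pred_box[3], gt_box[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_pred = (pred_box[2] - pred_box[0]) * (pred_box[3] - pred_box[1])
    area_gt = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
    union = area_pred + area_gt - inter
    return inter / union if union > 0 else 0.0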
def get_list(caller_id):
"""
@cmview_user
@response{list(dict)} PublicIP.dict property for each caller's PublicIP
"""
user = User.get(caller_id)
ips = PublicIP.objects.filter(user=user).all()
return [ip.dict for ip in ips] | 41f7855eb258df444b29dc85860e5e85ae6de441 | 3,899 |
def matrix_zeros(m, n, **options):
""""Get a zeros matrix for a given format."""
format = options.get('format', 'sympy')
dtype = options.get('dtype', 'float64')
spmatrix = options.get('spmatrix', 'csr')
if format == 'sympy':
return zeros(m, n)
elif format == 'numpy':
return _numpy_zeros(m, n, **options)
elif format == 'scipy.sparse':
return _scipy_sparse_zeros(m, n, **options)
raise NotImplementedError('Invalid format: %r' % format) | e4c87a85dd6a37868704205b21732d82a4ffb2df | 3,900 |
def make_password(password, salt=None):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(
UNUSABLE_PASSWORD_SUFFIX_LENGTH)
if not isinstance(password, (bytes, str)):
raise TypeError(
'Password must be a string or bytes, got %s.'
% type(password).__qualname__
)
hasher = PBKDF2PasswordHasher()
salt = salt or hasher.salt()
return hasher.encode(password, salt) | 6c39486c2eb88af278580cdf4b86b7b45489eef0 | 3,901 |
from typing import Union
from pathlib import Path
from typing import Tuple
import torch
from typing import Optional
from typing import Callable
import numpy as np
import torchaudio
import torchaudio.transforms as T
def compute_spectrogram(
audio: Union[Path, Tuple[torch.Tensor, int]],
n_fft: int,
win_length: Optional[int],
hop_length: int,
n_mels: int,
mel: bool,
time_window: Optional[Tuple[int, int]],
**kwargs,
) -> torch.Tensor:
"""
Get the spectrogram of an audio file.
Args:
audio: Path of the audio file or a (waveform, sample_rate) tuple.
n_fft:
win_length:
hop_length:
n_mels:
mel: If true we want melodic spectrograms.
time_window: A tuple of two time values such we get the sliced spectrogram w.r.t. that window.
kwargs:
"""
# See if we have to deal with an audio file or (waveform, sample rate).
if isinstance(audio, Path):
waveform, sample_rate = torchaudio.load(audio, format="ogg")
elif isinstance(audio[0], torch.Tensor) and isinstance(audio[1], int):
waveform = audio[0]
sample_rate = audio[1]
else:
raise Exception(
"Input audio worng, it must be either a path to an audio file or a (waveform, sample rate) tuple."
)
spectrogram: Callable
if not mel:
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
)
else:
# Mel Spectrogram transform.
spectrogram = T.MelSpectrogram(
sample_rate=sample_rate,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
norm="slaney",
onesided=True,
n_mels=n_mels,
mel_scale="htk",
)
if time_window:
# We convert the time window from seconds to frames.
start, end = np.asarray(time_window) * sample_rate
waveform = waveform[:, start:end]
return spectrogram(waveform) | 918fc0c9273b2085ded2ca8d6dd5d4db758538f0 | 3,904 |
def decode_html_dir(new):
""" konvertiert bestimmte Spalte in HTML-Entities """
def decode(key):
return decode_html(unicode(new[key]))
if new.has_key('title') and new['title'].find('&') >= 0:
new['title'] = decode('title')
if new.has_key('sub_title') and new['sub_title'].find('&') >= 0:
new['sub_title'] = decode('sub_title')
if new.has_key('text') and new['text'].find('&') >= 0:
new['text'] = decode('text')
if new.has_key('text_more') and new['text_more'].find('&') >= 0:
new['text_more'] = decode('text_more')
if new.has_key('sections') and new['sections'].find('&') >= 0:
new['sections'] = decode('sections')
if new.has_key('section') and new['section'].find('&') >= 0:
new['section'] = decode('section')
if new.has_key('anti_spam_question'):
new['anti_spam_question'] = decode('anti_spam_question')
return new | 029483974a26befc2df8d92babf53f5a32be31f5 | 3,905 |
def dmsp_enz_deg(
c,
t,
alpha,
vmax,
vmax_32,
kappa_32,
k
):
"""
Function that computes dD32_dt and dD34_dt of DMSP
Parameters
----------
c: float.
Concentration of DMSP in nM.
t: int
Integration time in min.
alpha: float.
Alpha for cleavage by DddP from this study.
vmax: float.
Vmax for cleavage by DddP, calculated from the K M that the enzyme should have to
exhibit the pattern of d34S DMSP vs. time, in nM/min/nM enzyme
Vmax_d: float.
km: float.
K M that the enzyme should have to exhibit the pattern of d34S DMSP vs. time, in nM.
k: float.
Degradation rate of the enzyme, in min^-1.
Returns
-------
The dD32_dt and dD34_dt of DMSP
"""
# Unpack isotopes
enzyme, dmsp_34, dmsp_32 = c
#Calculate vmax_34 assuming that Vmax total = Vmax_32 + Vmax_34
#This assumption would only hold true at saturation
vmax_34 = vmax-vmax_32
#Determination of kappa 32 from kappa 34 and the fractionation factor
kappa_34 = kappa_32 * alpha
# Calculate dD34_dt
dD34_dt = - ((kappa_34 * enzyme * (vmax_34 * enzyme * dmsp_34/((vmax_34 * enzyme)+(kappa_34 * enzyme * dmsp_34)))))
# Calculate dD32_dt
dD32_dt = - ((kappa_32 * enzyme * (vmax_32 * enzyme * dmsp_32/((vmax_32 * enzyme)+(kappa_32 * enzyme * dmsp_32)))))
#Calculate dE_dt
dE_dt = -k*enzyme
return [dE_dt, dD34_dt, dD32_dt] | d5e4b77523ab469b61eec106a28e1e3143644bf7 | 3,907 |
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
"""Plots total amount of stocks with an active position, either short
or long.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.apply(lambda x: np.sum(x != 0), axis='columns')
df_holdings_by_month = df_holdings.resample('1M', how='mean')
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3,
alpha=1.0)
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, net'],
loc=legend_loc)
ax.set_title('Holdings per Day')
ax.set_ylabel('Amount of holdings per day')
ax.set_xlabel('')
return ax | 5e375729aa48d0d3f8aada17268048a68a662421 | 3,908 |
def concatenation_sum(n: int) -> int:
"""
Algo:
1. Find length of num (n), i.e. number of digits 'd'.
2. Determine largest number with 'd - 1' digits => L = 10^(d - 1) - 1
3. Find diff => f = n - L
4. Now, the sum => s1 = f * d, gives us the number of digits in the string formed by all 'd'-digit numbers
less than or equal to 'n'.
5. Now, iteratively calculate and sum ((10^(d-i) - 10^(d-i-1)) * (d-i)) for i ∈ [1, d)
6. This will determine the number of digits in the string formed by all 'd-1', 'd-2', and so on -digits numbers.
:param n: Max number
:return: Number of digits in the string, formed by concatenating all the numbers from 1 to n.
"""
d = len(str(n))
L = 10**(d - 1) - 1
f = n - L
s1 = f * d
s2 = get_numdigs_sum_upto(d - 1)
return s1 + s2 | 644c994ee9b5af280feb233a40df51b519c4b9c6 | 3,910 |
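# `concatenation_sum` calls a `get_numdigs_sum_upto(d)` helper that is not included in the
# snippet. A sketch consistent with step 5 of the algorithm in the docstring (an assumption):
# it totals the digits contributed by all numbers having at most `d` digits.
def get_numdigs_sum_upto(d: int) -> int:
    """Total number of digits in the concatenation of 1..(10**d - 1)."""
    total = 0
    for k in range(1, d + 1):
        count_k_digit = 10 ** k - 10 ** (k - 1)  # how many k-digit numbers exist
        total += count_k_digit * k
    return total

# e.g. concatenation_sum(13) == 17, the length of "12345678910111213".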
def make_join_conditional(key_columns: KeyColumns, left_alias: str, right_alias: str) -> Composed:
"""
Turn a pair of aliases and a list of key columns into a SQL safe string containing
join conditionals ANDed together.
s.id1 is not distinct from d.id1 and s.id2 is not distinct from d.id2
"""
composed_aliases = {"left_alias": Identifier(left_alias), "right_alias": Identifier(right_alias)}
template = "{left_alias}.{column} {equality} {right_alias}.{column}"
composed_conditionals = [
SQL(template).format(
column=Identifier(c.name),
equality=SQL("=" if c.not_nullable else "is not distinct from"),
**composed_aliases,
)
for c in key_columns
]
return SQL(" and ").join(composed_conditionals) | c0b239598f606f35d3af0cbf8c34168137e05b9c | 3,911 |
def home():
""" Home interface """
return '''<!doctype html>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<body style="margin:0;font-family:sans-serif;color:white">
<form method="POST" action="analyse" enctype="multipart/form-data">
<label style="text-align:center;position:fixed;top:0;bottom:0;width:100%;background-position:center;background-size:cover;background-image:url(https://blog.even3.com.br/wp-content/uploads/2019/04/saiba-como-e-por-que-fazer-crachas-para-eventos-1.png)">
<br /><br />
<h1>Cara-crachá</h1>
<h3 id="processing" style="display:none">Processando...</h3>
<input type="file" name="file" onchange="processing.style.display='block';this.form.submit()" style="display:none" />
</label>
</form>
</body>
''' | d8a9c3449ac56b04ee1514729342ce29469c5c2f | 3,912 |
def _enable_mixed_precision_graph_rewrite_base(opt, loss_scale,
use_v1_behavior):
"""Enables mixed precision. See `enable_mixed_precision_graph_rewrite`."""
opt = _wrap_optimizer(opt, loss_scale, use_v1_behavior=use_v1_behavior)
config.set_optimizer_experimental_options({'auto_mixed_precision': True})
return opt | 8601ae6d24575e2bf5a7057bc06992088d473179 | 3,913 |
def selection_criteria_1(users, label_of_interest):
"""
Formula for Retirement/Selection score:
x = sum_{i=1..n}(r_i) - sum_{j=1..m}(r_j).
Where first summation contains reliability scores of users who have labeled it as the same
as the label of interest, second summation contains reliability scores of users who have
labeled it differently
Args:
users (list): List of users where each element is a tuple of the form (uid, ulabel,
f1 score)
label_of_interest (int): Label under consideration (left hand summation of formula)
Returns (int): 1 = select the subject id, 0 = don't select
"""
left_sum, right_sum = 0, 0
threshold = 2.0
for user in users:
uid, ulabel, f1_score = user
if ulabel == label_of_interest:
left_sum += f1_score
else:
right_sum += f1_score
if left_sum - right_sum >= threshold:
return 1
else:
return 0 | 8255fd3645d5b50c43006d2124d06577e3ac8f2d | 3,915 |
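# Usage sketch for `selection_criteria_1`: three users label the subject as 1 with F1 scores
# 0.9, 0.8 and 0.7, one disagrees with 0.4, so the score 0.9 + 0.8 + 0.7 - 0.4 = 2.0 reaches
# the threshold and the subject is selected.
example_users = [(1, 1, 0.9), (2, 1, 0.8), (3, 1, 0.7), (4, 0, 0.4)]
assert selection_criteria_1(example_users, label_of_interest=1) == 1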
import requests
from typing import cast
def get_default_product_not_found(product_category_id: str) -> str:
"""Get default product.
When invalid options are provided, the default product is returned, which happens to be unflavoured whey at 2.2 lbs.
This is PRODUCT_INFORMATION.
"""
response = requests.get(f'https://us.myprotein.com/{product_category_id}.variations')
response.raise_for_status()
dom = bs4.BeautifulSoup(response.text, 'html.parser')
# data-child-id is the attribute that contains the canonical product id
product_id_node = dom.find(attrs={'data-child-id': True})
if not product_id_node:
err_msg = f'Could not get data to resolve options to product id. Url: {response.url}'
raise ValueError(err_msg)
return cast(str, product_id_node['data-child-id']) | 4464a56de2ff514a71d5d06b1684f04a9ed8e564 | 3,916 |
import re
def book_number_from_path(book_path: str) -> float:
"""
Parses the book number from a directory string.
Novellas will have a floating point value like "1.1" which indicates that it was the first novella
to be published between book 1 and book 2.
:param book_path: path of the currently parsed book
:return: book number
"""
num = int(re.findall(r'[0-9]{2}', book_path)[-1])
return num / 10 | 087cb0b8cd0c48c003175a05ed0d7bb14ad99ac3 | 3,917 |
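# Usage sketch for `book_number_from_path` (hypothetical paths): the last two-digit group in
# the path encodes ten times the book number, so "10" maps to book 1.0 and "11" to novella 1.1.
assert book_number_from_path("series/book_10_main_title") == 1.0
assert book_number_from_path("series/book_11_novella") == 1.1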
def intervals_split_merge(list_lab_intervals):
"""
Merge a list of interval-boundary strings.
e.g.
['(2,5]', '(5,7]'] is merged into '(2,7]'
Parameters:
----------
list_lab_intervals: list, list of interval-boundary strings
Returns:
-------
label_merge: the merged interval
"""
list_labels = []
# iterate over each interval and collect its left/right boundary strings
for lab in list_lab_intervals:
for s in lab.split(','):
list_labels.append(s.replace('(', '').replace(')', '').replace(']', ''))
list_lab_vals = [float(lab) for lab in list_labels]
# indices of the max and min boundary values
id_max_val = list_lab_vals.index(max(list_lab_vals))
id_min_val = list_lab_vals.index(min(list_lab_vals))
# strings of the max and min boundary values
lab_max_interval = list_labels[id_max_val]
lab_min_interval = list_labels[id_min_val]
# if the right boundary is +Inf use ')', otherwise ']'
l_label = '('
if lab_max_interval == '+Inf':
r_label = ')'
else:
r_label = ']'
label_merge = l_label + lab_min_interval + ',' + lab_max_interval + r_label
return label_merge | a9e99ec6fc51efb78a4884206a72f7f4ad129dd4 | 3,918 |
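# Usage sketch for `intervals_split_merge`, matching the example in the docstring:
assert intervals_split_merge(['(2,5]', '(5,7]']) == '(2,7]'
# an open right boundary of +Inf keeps the ')' on the merged interval:
assert intervals_split_merge(['(2,5]', '(5,+Inf)']) == '(2,+Inf)'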
def antique(bins, bin_method=BinMethod.category):
"""CARTOColors Antique qualitative scheme"""
return scheme('Antique', bins, bin_method) | 718ca4c2b9efede292bb5e8e1eb5128e6200a454 | 3,919 |
import json
def do_request(batch_no, req):
"""execute one request. tail the logs. wait for completion"""
tmp_src = _s3_split_url(req['input'])
cpy_dst = _s3_split_url(req['output'])
new_req = {
"src_bucket": tmp_src[0],
"src_key": tmp_src[1],
"dst_bucket": cpy_dst[0],
"dst_key": cpy_dst[1],
"digests": req["digests"]
}
delete_mismatch = req.get('delete_mismatch', False)
log.info("REQ%s data-rehash request: %s", batch_no, json.dumps(new_req, sort_keys=True, indent=4, separators=(",", ": ")))
code, response = lambdas.invoke_sync(lambdas.DATA_REHASH, Payload=new_req)
data = response['Payload'].read().decode("ascii")
if code != 0:
raise Exception("data-rehash failed to complete: %s" % (data,))
data_obj = json.loads(data)
if data_obj.get('error', None):
if "mismatch" in data_obj['error']:
session = boto3.session.Session()
s3 = session.client('s3', config=botocore.config.Config(read_timeout=300, retries={'max_attempts': 0}))
log.info("REQ%s deleting mismatchfile: Bucket=%s Key=%s", batch_no, tmp_src[0], tmp_src[1])
try:
s3.delete_object(Bucket=tmp_src[0], Key=tmp_src[1])
except Exception as delete_exc:
log.error("REQ%s delete failed", exc_info=delete_exc)
raise Exception("data-rehash returned an error: %s" % (data_obj,))
return data_obj | 6e4b8591abfe8a1c106a0ede1e6aa3f6712afd4a | 3,920 |
def _robot_barcode(event: Message) -> str:
"""Extracts a robot barcode from an event message.
Args:
event (Message): The event
Returns:
str: robot barcode
"""
return str(
next(
subject["friendly_name"] # type: ignore
for subject in event.message["event"]["subjects"] # type: ignore
if subject["role_type"] == "robot" # type: ignore
)
) | 5ffb6567ebb103fc534390d13876d9c1fa956169 | 3,922 |
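The Message type is project-specific; for illustration only, any stand-in object exposing a .message dict of the same shape works (the values below are hypothetical):
from types import SimpleNamespace
event = SimpleNamespace(message={"event": {"subjects": [
    {"role_type": "sample", "friendly_name": "S1"},
    {"role_type": "robot", "friendly_name": "BKRB0001"},
]}})
print(_robot_barcode(event))  # -> 'BKRB0001'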
from typing import List
from typing import Union
def check_thirteen_fd(fds: List[Union[BI, FakeBI]]) -> str:
"""识别十三段形态
:param fds: list
由远及近的十三段形态
:return: str
"""
v = Signals.Other.value
if len(fds) != 13:
return v
direction = fds[-1].direction
fd1, fd2, fd3, fd4, fd5, fd6, fd7, fd8, fd9, fd10, fd11, fd12, fd13 = fds
max_high = max([x.high for x in fds])
min_low = min([x.low for x in fds])
if direction == Direction.Down:
if min_low == fd13.low and max_high == fd1.high:
            # aAbBc-style bottom divergence: fd2-fd6 form A, fd8-fd12 form B
if min(fd2.high, fd4.high, fd6.high) > max(fd2.low, fd4.low, fd6.low) > fd8.high \
and min(fd8.high, fd10.high, fd12.high) > max(fd8.low, fd10.low, fd12.low) \
and min(fd2.low, fd4.low, fd6.low) > max(fd8.high, fd10.high, fd12.high) \
and fd13.power < fd7.power:
v = Signals.LA0.value
            # ABC-style bottom divergence, A5B3C5
if fd5.low < min(fd1.low, fd3.low) and fd9.high > max(fd11.high, fd13.high) \
and fd8.high > fd6.low and fd1.high - fd5.low > fd9.high - fd13.low:
v = Signals.LA0.value
if fd13.power < max(fd11.power, fd9.power):
v = Signals.LB0.value
            # ABC-style bottom divergence, A3B5C5
if fd3.low < min(fd1.low, fd5.low) and fd9.high > max(fd11.high, fd13.high) \
and min(fd4.high, fd6.high, fd8.high) > max(fd4.low, fd6.low, fd8.low) \
and fd1.high - fd3.low > fd9.high - fd13.low:
v = Signals.LA0.value
if fd13.power < max(fd11.power, fd9.power):
v = Signals.LB0.value
            # ABC-style bottom divergence, A5B5C3
if fd5.low < min(fd1.low, fd3.low) and fd11.high > max(fd9.high, fd13.high) \
and min(fd6.high, fd8.high, fd10.high) > max(fd6.low, fd8.low, fd10.low) \
and fd1.high - fd5.low > fd11.high - fd13.low:
v = Signals.LA0.value
if fd13.power < fd11.power:
v = Signals.LB0.value
elif direction == Direction.Up:
if max_high == fd13.high and min_low == fd1.low:
            # aAbBC-style top divergence: fd2-fd6 form A, fd8-fd12 form B
if fd8.low > min(fd2.high, fd4.high, fd6.high) >= max(fd2.low, fd4.low, fd6.low) \
and min(fd8.high, fd10.high, fd12.high) >= max(fd8.low, fd10.low, fd12.low) \
and max(fd2.high, fd4.high, fd6.high) < min(fd8.low, fd10.low, fd12.low) \
and fd13.power < fd7.power:
v = Signals.SA0.value
            # ABC-style top divergence, A5B3C5
if fd5.high > max(fd3.high, fd1.high) and fd9.low < min(fd11.low, fd13.low) \
and fd8.low < fd6.high and fd5.high - fd1.low > fd13.high - fd9.low:
v = Signals.SA0.value
                # Top divergence inside C, forming a double top divergence
if fd13.power < max(fd11.power, fd9.power):
v = Signals.SB0.value
            # ABC-style top divergence, A3B5C5
if fd3.high > max(fd5.high, fd1.high) and fd9.low < min(fd11.low, fd13.low) \
and min(fd4.high, fd6.high, fd8.high) > max(fd4.low, fd6.low, fd8.low) \
and fd3.high - fd1.low > fd13.high - fd9.low:
v = Signals.SA0.value
                # Top divergence inside C, forming a double top divergence
if fd13.power < max(fd11.power, fd9.power):
v = Signals.SB0.value
            # ABC-style top divergence, A5B5C3
if fd5.high > max(fd3.high, fd1.high) and fd11.low < min(fd9.low, fd13.low) \
and min(fd6.high, fd8.high, fd10.high) > max(fd6.low, fd8.low, fd10.low) \
and fd5.high - fd1.low > fd13.high - fd11.low:
v = Signals.SA0.value
                # Top divergence inside C, forming a double top divergence
if fd13.power < fd11.power:
v = Signals.SB0.value
else:
raise ValueError("direction 的取值错误")
return v | 95c308c2560cc7a337e4a1719836c3df74ab1bbe | 3,924 |
from typing import List
def set_process_tracking(template: str, channels: List[str]) -> str:
"""This function replaces the template placeholder for the process tracking with the correct process tracking.
Args:
template: The template to be modified.
channels: The list of channels to be used.
Returns:
The modified template.
"""
tracking = ""
for channel in channels:
tracking += " ULong64_t {ch}_processed = 0;\n".format(ch=channel)
tracking += " std::mutex {ch}_bar_mutex;\n".format(ch=channel)
tracking += " auto c_{ch} = {ch}_df_final.Count();\n".format(ch=channel)
tracking += " c_{ch}.OnPartialResultSlot(quantile, [&{ch}_bar_mutex, &{ch}_processed, &quantile](unsigned int /*slot*/, ULong64_t /*_c*/) {{".format(
ch=channel
)
tracking += (
"\n std::lock_guard<std::mutex> lg({ch}_bar_mutex);\n".format(
ch=channel
)
)
tracking += " {ch}_processed += quantile;\n".format(ch=channel)
tracking += ' Logger::get("main - {ch} Channel")->info("{{}} Events processed ...", {ch}_processed);\n'.format(
ch=channel
)
tracking += " });\n"
return template.replace("{PROGRESS_CALLBACK}", tracking) | 0cf720bd56a63939541a06e60492472f92c4e589 | 3,925 |
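A minimal usage sketch; the template string and channel names below are hypothetical:
template = "// analysis setup\n{PROGRESS_CALLBACK}// run event loop\n"
print(set_process_tracking(template, ["mt", "et"]))  # prints the generated C++ progress-tracking code for both channels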
def solve(instance: Instance) -> InstanceSolution:
"""Solves the P||Cmax problem by using a genetic algorithm.
:param instance: valid problem instance
:return: generated solution of a given problem instance
"""
generations = 512
population_size = 128
best_specimens_number = 32
generator = solution_generator(instance, population_size, best_specimens_number)
best_solution = GeneticSolution(instance, [0 for _ in range(len(instance.tasks_durations))])
for _, solution in zip(range(generations), generator):
best_solution = min(best_solution, solution, key=lambda x: x.total_time)
return best_solution.to_instance_solution() | f8a82a066de29e0c149c3c5f01821af080619764 | 3,926 |
def payee_transaction():
"""Last transaction for the given payee."""
entry = g.ledger.attributes.payee_transaction(request.args.get("payee"))
return serialise(entry) | 47a21c7921cae4be30b6eefbbde43bfdf5a38013 | 3,927 |
def represent(element: Element) -> str:
"""Represent the regular expression as a string pattern."""
return _Representer().visit(element) | dfd44499aa1f63248c1a6632131974b242fedf95 | 3,928 |
def read_dynamo_table(gc, name, read_throughput=None, splits=None):
"""
Reads a Dynamo table as a Glue DynamicFrame.
:param awsglue.context.GlueContext gc: The GlueContext
:param str name: The name of the Dynamo table
:param str read_throughput: Optional read throughput - supports values from "0.1" to "1.5", inclusive.
:param str splits: Optional number of input splits - defaults to the SparkContext default parallelism.
:rtype: awsglue.dynamicframe.DynamicFrame
"""
connection_options = {
'dynamodb.input.tableName': name,
'dynamodb.splits': str(splits or gc.spark_session.sparkContext.defaultParallelism)
}
if read_throughput:
connection_options['dynamodb.throughput.read.percent'] = str(read_throughput)
return gc.create_dynamic_frame_from_options(connection_type='dynamodb', connection_options=connection_options) | 5f789626cb3fc8004532cc59bdae128b744b111e | 3,929 |
import six
def convert_to_bytes(text):
"""
Converts `text` to bytes (if it's not already).
Used when generating tfrecords. More specifically, in function call `tf.train.BytesList(value=[<bytes1>, <bytes2>, ...])`
"""
if six.PY2:
return convert_to_str(text) # In python2, str is byte
elif six.PY3:
if isinstance(text, bytes):
return text
else:
return convert_to_unicode(text).encode('utf-8')
else:
raise ValueError("Not running on Python2 or Python 3?") | da10be9cb88a80f66becead41400b3a4eb6152a2 | 3,930 |
from typing import OrderedDict
def xreplace_constrained(exprs, make, rule=None, costmodel=lambda e: True, repeat=False):
"""
Unlike ``xreplace``, which replaces all objects specified in a mapper,
this function replaces all objects satisfying two criteria: ::
* The "matching rule" -- a function returning True if a node within ``expr``
satisfies a given property, and as such should be replaced;
* A "cost model" -- a function triggering replacement only if a certain
cost (e.g., operation count) is exceeded. This function is optional.
Note that there is not necessarily a relationship between the set of nodes
for which the matching rule returns True and those nodes passing the cost
model check. It might happen for example that, given the expression ``a + b``,
all of ``a``, ``b``, and ``a + b`` satisfy the matching rule, but only
``a + b`` satisfies the cost model.
:param exprs: The target SymPy expression, or a collection of SymPy expressions.
:param make: Either a mapper M: K -> V, indicating how to replace an expression
in K with a symbol in V, or a function, used to construct new, unique
symbols. Such a function should take as input a parameter, used to
enumerate the new symbols.
:param rule: The matching rule (a lambda function). May be left unspecified if
``make`` is a mapper.
:param costmodel: The cost model (a lambda function, optional).
:param repeat: Repeatedly apply ``xreplace`` until no more replacements are
possible (optional, defaults to False).
"""
found = OrderedDict()
rebuilt = []
# Define /replace()/ based on the user-provided /make/
if isinstance(make, dict):
rule = rule if rule is not None else (lambda i: i in make)
replace = lambda i: make[i]
else:
assert callable(make) and callable(rule)
def replace(expr):
if isinstance(make, dict):
return make[expr]
temporary = found.get(expr)
if temporary:
return temporary
else:
temporary = make(replace.c)
found[expr] = temporary
replace.c += 1
return temporary
replace.c = 0 # Unique identifier for new temporaries
def run(expr):
if expr.is_Atom or expr.is_Indexed:
return expr, rule(expr)
elif expr.is_Pow:
base, flag = run(expr.base)
if flag and costmodel(base):
return expr.func(replace(base), expr.exp, evaluate=False), False
else:
return expr.func(base, expr.exp, evaluate=False), flag
else:
children = [run(a) for a in expr.args]
matching = [a for a, flag in children if flag]
other = [a for a, _ in children if a not in matching]
if matching:
matched = expr.func(*matching, evaluate=False)
if len(matching) == len(children) and rule(expr):
# Go look for longer expressions first
return matched, True
elif rule(matched) and costmodel(matched):
# Replace what I can replace, then give up
rebuilt = expr.func(*(other + [replace(matched)]), evaluate=False)
return rebuilt, False
else:
# Replace flagged children, then give up
replaced = [replace(e) for e in matching if costmodel(e)]
unreplaced = [e for e in matching if not costmodel(e)]
rebuilt = expr.func(*(other + replaced + unreplaced), evaluate=False)
return rebuilt, False
return expr.func(*other, evaluate=False), False
# Process the provided expressions
for expr in as_tuple(exprs):
assert expr.is_Equality
root = expr.rhs
while True:
ret, _ = run(root)
if repeat and ret != root:
root = ret
else:
rebuilt.append(expr.func(expr.lhs, ret))
break
# Post-process the output
found = [Eq(v, k) for k, v in found.items()]
return found + rebuilt, found | f24f0bb1356c5613c012fe405691b1b493ffc6a2 | 3,931 |
import re
def get_comp_rules() -> str:
"""
Download the comp rules from Wizards site and return it
:return: Comp rules text
"""
response = download_from_wizards(COMP_RULES)
# Get the comp rules from the website (as it changes often)
# Also split up the regex find so we only have the URL
comp_rules_url: str = re.findall(r"href=\".*\.txt\"", response)[0][6:-1]
response = download_from_wizards(comp_rules_url).replace("’", "'")
return response | dbb48b391305199182a2bf66bed62dcd91dc0071 | 3,932 |
def delete_vpc(vpc_id):
"""Delete a VPC."""
client = get_client("ec2")
params = {}
params["VpcId"] = vpc_id
return client.delete_vpc(**params) | 5c1a043d837ff1bc0cab41ccdbe784688966a275 | 3,933 |
def test_network_xor(alpha = 0.1, iterations = 1000):
"""Creates and trains a network against the XOR/XNOR data"""
n, W, B = network_random_gaussian([2, 2, 2])
X, Y = xor_data()
return n.iterate_network(X, Y, alpha, iterations) | cb05f01f589d7e224d1a0a87f594a075228741fc | 3,934 |
from pathlib import Path
import shutil
def assemble_book(draft__dir: Path, work_dir: Path, text_dir: Path) -> Path:
"""Merge contents of draft book skeleton with test-specific files for
the book contents.
"""
book_dir = work_dir / "test-book"
# Copy skeleton from draft__dir
shutil.copytree(draft__dir, book_dir)
# Add metadata and text files for test book
if (text_dir / "content.opf").is_file():
shutil.copy(text_dir / "content.opf", book_dir / "src" / "epub")
for file in text_dir.glob("*.xhtml"):
shutil.copy(file, book_dir / "src" / "epub" / "text")
# Rebuild file metadata
must_run(f"se print-manifest-and-spine --in-place {book_dir}")
must_run(f"se print-toc --in-place {book_dir}")
return book_dir | 51ec6ed21760feeff3eeee6ee6fa802383b5afa3 | 3,935 |
def merid_advec_spharm(arr, v, radius):
"""Meridional advection using spherical harmonics."""
_, d_dy = horiz_gradient_spharm(arr, radius)
return v * d_dy | 7973f99b60ad9d94b6858d28d8877f5c814160c2 | 3,936 |
import pandas as pd
def run_win_pct(team_name, df):
"""
Function that calculates a teams winning percentage Year over Year (YoY)
Calculation:
Number of wins by the total number of competitions.
Then multiply by 100 = win percentage.
Number of loses by the total number of competitions.
Then multiply by 100 = loss percentage
this function also takes into account the home and away win/loss
percentages.
:param team_name: Takes in the state of the team_names dropdown
:return:a dataframe That returns percentages for specific teams
"""
df['home_team'] = df['home_team'].str.lower()
df['away_team'] = df['away_team'].str.lower()
team_name = team_name.lower()
df_home = df[df['home_team'] == team_name]
df_away = df[df['away_team'] == team_name]
frames = [df_home,df_away]
df_fill = pd.concat(frames)
df = home_vs_away(df_fill, team_name)
home_matches = df[df['home_team'] == team_name]
away_matches = df[df['away_team'] == team_name]
home_matches = home_matches.drop(columns = ['away_team'])
away_matches = away_matches.drop(columns = ['home_team'])
#wins per season
home_team_win = home_matches.groupby(["home_team","dateYear"])["outcome"].apply(
lambda x: x[x.str.contains("win")].count()).reset_index()
away_team_win = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('win')].count()).reset_index()
home_team_loss = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('lose')].count()).reset_index()
away_team_loss = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('lose')].count()).reset_index()
home_team_tie = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('draw')].count()).reset_index()
away_team_tie = away_matches.groupby(['away_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('draw')].count()).reset_index()
#matches played per season
searchFor = ['win','lose','draw']
matches_home = home_matches.groupby(['home_team','dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('|'.join(searchFor))].count()).reset_index()
matches_away = away_matches.groupby(['away_team', 'dateYear'])['outcome'].apply(
lambda x: x[x.str.contains('|'.join(searchFor))].count()).reset_index()
#goals for and against
match_numbers = matches_home.merge(matches_away, how='left', left_on='dateYear', right_on='dateYear')
loss_merge = home_team_loss.merge(away_team_loss, how='left', left_on='dateYear', right_on='dateYear')
tie_merge = home_team_tie.merge(away_team_tie, how='left', left_on='dateYear', right_on='dateYear')
fin = home_team_win.merge(away_team_win, how = 'left', left_on='dateYear', right_on='dateYear')
fin['Total Wins'] = fin['outcome_x'] + fin['outcome_y']
fin['Total Losses'] = loss_merge['outcome_x'] + loss_merge['outcome_y']
fin['Total Draws'] = tie_merge['outcome_x'] + tie_merge['outcome_y']
fin['Total Matches'] = match_numbers['outcome_x'] + match_numbers['outcome_y']
fin['Win PCT'] = (fin['Total Wins'] / fin['Total Matches'] * 100).round(2)
fin['Loss PCT'] = (fin['Total Losses'] / fin['Total Matches'] * 100).round(2)
fin['Draw PCT'] = (fin['Total Draws'] / fin['Total Matches'] * 100).round(2)
#home match percentage
fin['Home Win PCT'] = (home_team_win['outcome'] / matches_home['outcome'] * 100).round(2)
fin['Away Win PCT'] = (away_team_win['outcome'] / matches_away['outcome'] * 100).round(2)
fin['Home Loss PCT'] = (home_team_loss['outcome'] / matches_home['outcome'] * 100).round(2)
fin['Away Loss PCT'] = (away_team_loss['outcome'] / matches_away['outcome'] * 100).round(2)
return fin | 3fc071cd7e89f68216286b0b6422a95ce8f690f6 | 3,937 |
def get_container_info(pi_status):
"""
Expects a dictionary data structure that include keys and values of the
parameters that describe the containers running in a Raspberry Pi computer.
Returns the input dictionary populated with values measured from the current
status of one or more containers running in the Pi.
"""
pi_status['containers'] = []
if len(client.containers()) == 0:
        print('No container running')
new_container={
'id': 'None',
'cpuUsage': '0.0',
'memUsage': '0.0',
'name': 'None', # the client.container() returns a list of names.
'status': 'None', # as a temporary solution, I take the first name
'image': 'None', # of the list.
'port_host': '0', # the client.container() returns a list of ports
'port_container': '0'} # getting the first, is a tmp solution
pi_status['containers'].append(new_container)
else:
        print('num container %d' % len(client.containers()))
for container in client.containers():
cmd = "docker stats %s --no-stream | grep %s | awk \'{print $2}\' " % (container['Id'], container['Id'])
cpuUsage = system_call(cmd)
cpuUsage_str = cpuUsage.replace("\n", "")
cpuUsage_str = cpuUsage_str.replace("%", "")
cmd = "docker stats %s --no-stream | grep %s | awk \'{print $6}\' " % (container['Id'], container['Id'])
memUsage = system_call(cmd)
memUsage_str = memUsage.replace("\n", "")
memUsage_str = memUsage_str.replace("%", "")
#dict_port_host= container['Ports'][0]
#p_int=dict_port_host['PublicPort']
#port_host_str= str(p_int).replace("\n", "")
new_container={
'id': container['Id'],
'cpuUsage': cpuUsage_str,
'memUsage': memUsage_str,
'name': container['Names'][0], # the client.container() returns a list of names.
'status': container['Status'], # as a temporary solution, I take the first name
'image': container['Image'], # of the list.
'port_host': '80', # the client.container() returns a list of ports
'port_container': '8000'} # getting the first, is a tmp solution
pi_status['containers'].append(new_container)
return (len((pi_status['containers']))) | a488e7afa9c2e003edb3138c1d78e434921dbf3e | 3,938 |
import math
def formatSI(n: float) -> str:
"""Format the integer or float n to 3 significant digits + SI prefix."""
s = ''
if n < 0:
n = -n
s += '-'
if type(n) is int and n < 1000:
        s += str(n) + ' '
elif n < 1e-22:
s = '0.00 '
else:
assert n < 9.99e26
log = int(math.floor(math.log10(n)))
i, j = divmod(log, 3)
for _try in range(2):
templ = '%.{}f'.format(2 - j)
val = templ % (n * 10 ** (-3 * i))
if val != '1000':
break
i += 1
j = 0
s += val + ' '
if i != 0:
s += 'yzafpnum kMGTPEZY'[i + 8]
return s | ddbbb70e66d368253d29c3223eee7a5926518efd | 3,939 |
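A few illustrative calls; the expected outputs in the comments follow from the code above:
print(formatSI(7))         # '7 '
print(formatSI(1234))      # '1.23 k'
print(formatSI(8.5e9))     # '8.50 G'
print(formatSI(1.29e-6))   # '1.29 u'
print(formatSI(-3.2e-5))   # '-32.0 u'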
import numpy as np
import scipy.stats
def pemp(stat, stat0):
""" Computes empirical values identically to bioconductor/qvalue empPvals """
assert len(stat0) > 0
assert len(stat) > 0
stat = np.array(stat)
stat0 = np.array(stat0)
m = len(stat)
m0 = len(stat0)
statc = np.concatenate((stat, stat0))
v = np.array([True] * m + [False] * m0)
perm = np.argsort(-statc, kind="mergesort") # reversed sort, mergesort is stable
v = v[perm]
u = np.where(v)[0]
p = (u - np.arange(m)) / float(m0)
# ranks can be fractional, we round down to the next integer, ranking returns values starting
# with 1, not 0:
ranks = np.floor(scipy.stats.rankdata(-stat)).astype(int) - 1
p = p[ranks]
p[p <= 1.0 / m0] = 1.0 / m0
return p | 7d046666687ede0b671c00d5c691ac520179e11f | 3,940 |
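A hedged usage sketch with synthetic statistics; the distributions and sizes are illustrative only:
rng = np.random.default_rng(0)
observed = rng.normal(1.0, 1.0, size=50)   # observed test statistics
null = rng.normal(0.0, 1.0, size=1000)     # statistics drawn under the null
pvals = pemp(observed, null)
print(pvals.min(), pvals.max())            # empirical p-values, bounded below by 1/len(null)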
def help_message() -> str:
"""
Return help message.
Returns
-------
str
Help message.
"""
msg = f"""neocities-sync
Sync local directories with neocities.org sites.
Usage:
    neocities-sync [options] [--dry-run] [-c CONFIG] [-s SITE1] [-s SITE2] ...
Options:
-C CONFIG_FILE Path to the config file to use.
(defaults to "{config_file_path_unexpanded}".)
-s SITE Which site to sync (as specified in the config file).
The default is to sync all sites in the config file.
--dry-run Do not actually upload anything.
-v Verbose output.
-q Quiet output.
-h, --help Show this help message and exit.
Config file:
The config file is an ini file, located at "{config_file_path_unexpanded}".
Each section of the config file describes a different site (the name of the
section doesn't need to be the same as the site's domain, since the api_key
suffices to identify the site).
The keys of the config file are:
api_key (str) [required]
The api key of the site.
root_dir (path) [required]
The local directory to sync.
sync_disallowed (yes/no) [default: no]
Whether to sync files that are only allowed for paying users.
sync_hidden (yes/no) [default: no]
Whether to sync hidden files.
sync_vcs (yes/no) [default: no]
Whether to sync version control files.
allowed_extensions (list of str) [default: not set]
Which file extensions to sync. If not set, all files are synced.
remove_empty_dirs (yes/no) [default: yes]
Whether to remove empty directories after sync.
Example config:
[site1]
api_key = 6b9b522e7d8d93e88c464aafc421a61b
root_dir = ~/path/to/site1
allowed_extensions = .html .css .js
remove_empty_dirs = no
[site2]
api_key = 78559e6ebc35fe33eec21de05666a243
root_dir = /var/www/path/to/site2
allowed_extensions = .html .css .js .woff2
.neocitiesignore
In any subdirectory of the root directory, a file named ".neocitiesignore"
can be used to specify which files to ignore. The syntax is the same as
the one for ".gitignore".
Credits:
This software was developed by Andre Kugland <[email protected]>."""
return msg | 8c2d0c31513e36c1ef1c9f0b096d264449dafdee | 3,941 |
def fuzzyCompareDouble(p1, p2):
"""
    Compares two doubles (e.g. point coordinates) for approximate equality, using a relative tolerance of 1e-5.
"""
return abs(p1 - p2) * 100000. <= min(abs(p1), abs(p2)) | e2a93a993147e8523da0717d08587250003f9269 | 3,942 |
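Two illustrative comparisons; the 1e-5 relative tolerance follows from the 100000 factor in the code:
print(fuzzyCompareDouble(1.0, 1.0 + 1e-7))  # True, difference within tolerance
print(fuzzyCompareDouble(1.0, 1.001))       # False, difference too large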
def filter_date_df(date_time, df, var="date"):
"""Filtrar dataframe para uma dada lista de datas.
Parameters
----------
date_time: list
list with dates.
df: pandas.Dataframe
var: str
        column to filter on; defaults to "date" but other columns can be used.
Returns
-------
df_filter: pandas.Dataframe
Examples
--------
>>> file1 = './data/WIN$N_1M_2015.08.12_2015.12.30_.csv',
>>> file2 = './data/WIN$N_10M_2013.11.08_2021.01.22_.csv'
>>> dates = filter_overlapping_dates(file1, file2)
>>> df1 = pandas.read_csv(file1)
>>> filter_date_df(dates_overlapping, df1).head()
date hour open high low close real_volume tick_volume
0 2015.08.12 09:00:00 50280 50430 50255 50405 976 217
1 2015.08.12 09:01:00 50405 50440 50335 50400 1589 445
2 2015.08.12 09:02:00 50395 50410 50355 50355 465 102
3 2015.08.12 09:03:00 50350 50360 50320 50325 474 150
4 2015.08.12 09:04:00 50325 50330 50090 50190 2078 747
"""
filters = [True if date in date_time else False for date in df[var]]
df_filter = df[filters]
df_filter = df_filter.drop(columns=["spread"], errors="ignore")
df_filter = df_filter.dropna().drop_duplicates()
df_filter = df_filter.sort_values(by=["date", "hour"])
df_filter = df_filter.reset_index(drop=True)
df_filter = format_hour(df_filter)
return df_filter | 6d3002917ef0786e8b128a2a02df3fabb9997aab | 3,943 |
import urllib
def pproxy_desired_access_log_line(url):
"""Return a desired pproxy log entry given a url."""
qe_url_parts = urllib.parse.urlparse(url)
protocol_port = '443' if qe_url_parts.scheme == 'https' else '80'
return 'http {}:{}'.format(qe_url_parts.hostname, protocol_port) | 4c056b1d2cc11a72cf63400734807b9b074f147c | 3,944 |
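A quick sketch of the log line produced for an HTTPS URL (example.com is a placeholder host):
print(pproxy_desired_access_log_line("https://example.com/health"))  # 'http example.com:443'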
import socket
def unused_port() -> int:
"""Return a port that is unused on the current host."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", 0))
return s.getsockname()[1] | 26d72e1a529edd37b14ac746bcb4082c1d1b9061 | 3,945 |
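Typical usage in a test fixture; note the port could in principle be claimed by another process between this call and a later bind:
port = unused_port()
print(f"start test server on 127.0.0.1:{port}")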
import pandas as pd
def get_axioma_risk_free_rate(conn):
"""
Get the USD risk free rate provided by Axioma and converted it into
a daily risk free rate assuming a 252 trading data calendar.
"""
query = """
select
data_date,
Risk_Free_Rate
from
axioma_currency
where
currencycode = 'USD'
order by
data_date
"""
df = pd.read_sql_query(query, conn.sql.CONN)
df['Risk_Free_Rate'] = df['Risk_Free_Rate'].astype('float32')
df[RFR] = (1 + df['Risk_Free_Rate']) ** (1.0/252.0) - 1
df.drop(columns = ['Risk_Free_Rate'], inplace = True)
return df | 2c6c680ef36c247b67c481ff4dde685afc4bad4d | 3,946 |
import numbers
import time
import warnings
import numpy as np
from traceback import format_exc
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, return_estimator=False,
split_progress=None, candidate_progress=None,
error_score=np.nan):
"""override the sklearn.model_selection._validation._fit_and_score
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return parameters that has been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_failed : bool
The estimator failed to fit.
"""
if estimator.__class__.__name__ != 'KerasGBatchClassifier':
return _sk_fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params,
return_train_score=return_train_score,
return_parameters=return_parameters,
return_n_test_samples=return_n_test_samples,
return_times=return_times,
return_estimator=return_estimator,
split_progress=split_progress,
candidate_progress=candidate_progress,
error_score=error_score)
if not isinstance(error_score, numbers.Number) and error_score != 'raise':
raise ValueError(
"error_score must be the string 'raise' or a numeric value. "
"(Hint: if using 'raise', please make sure that it has been "
"spelled correctly.)"
)
progress_msg = ""
if verbose > 2:
if split_progress is not None:
progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
if candidate_progress and verbose > 9:
progress_msg += (f"; {candidate_progress[0]+1}/"
f"{candidate_progress[1]}")
if verbose > 1:
if parameters is None:
params_msg = ''
else:
sorted_keys = sorted(parameters) # Ensure deterministic o/p
params_msg = (', '.join(f'{k}={parameters[k]}'
for k in sorted_keys))
if verbose > 9:
start_msg = f"[CV{progress_msg}] START {params_msg}"
print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
if parameters is not None:
# clone after setting parameters in case any parameters
# are estimators (like pipeline steps)
# because pipeline doesn't clone steps in fit
cloned_parameters = {}
for k, v in parameters.items():
cloned_parameters[k] = clone(v, safe=False)
estimator = estimator.set_params(**cloned_parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, dict):
test_scores = {name: error_score for name in scorer}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
warnings.warn("Estimator fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%s" %
(error_score, format_exc()),
FitFailedWarning)
result["fit_failed"] = True
else:
result["fit_failed"] = False
fit_time = time.time() - start_time
test_scores = estimator.evaluate(X_test, y_test, scorer,
error_score)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = estimator.evaluate(
X_train, y_train, scorer, error_score
)
if verbose > 1:
total_time = score_time + fit_time
end_msg = f"[CV{progress_msg}] END "
result_msg = params_msg + (";" if params_msg else "")
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
result_msg += f" {scorer_name}: ("
if return_train_score:
scorer_scores = train_scores[scorer_name]
result_msg += f"train={scorer_scores:.3f}, "
result_msg += f"test={test_scores[scorer_name]:.3f})"
else:
result_msg += ", score="
if return_train_score:
result_msg += (f"(train={train_scores:.3f}, "
f"test={test_scores:.3f})")
else:
result_msg += f"{test_scores:.3f}"
result_msg += f" total time={logger.short_format_time(total_time)}"
# Right align the result_msg
end_msg += "." * (80 - len(end_msg) - len(result_msg))
end_msg += result_msg
print(end_msg)
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result | 6330fb95709e74471b72b58297b3ce3c7d483449 | 3,948 |
from typing import Dict
from typing import List
def prettify_eval(set_: str, accuracy: float, correct: int, avg_loss: float, n_instances: int,
stats: Dict[str, List[int]]):
"""Returns string with prettified classification results"""
table = 'problem_type accuracy\n'
for k in sorted(stats.keys()):
accuracy_ = stats[k][0]/stats[k][1]
accuracy_ = accuracy_*100
table += k
table += ' '
table += '{:.2f}%\n'.format(accuracy_)
return '\n' + set_ + ' set average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
avg_loss, correct, n_instances, accuracy) + table + '\n' | 5e5ba8ffa62668e245daa2ada9fc09747b5b6dd2 | 3,949 |
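A small usage sketch with made-up evaluation numbers:
stats = {"addition": [45, 50], "subtraction": [38, 50]}
print(prettify_eval("test", accuracy=83.0, correct=83, avg_loss=0.412,
                    n_instances=100, stats=stats))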
def load_location(doc_name):
"""Load a location from db by name."""
doc_ref = get_db().collection("locations").document(doc_name)
doc = doc_ref.get()
if not doc.exists:
return None
else:
return doc.to_dict() | 900450ec3a1c033a9c11baed611170457660754f | 3,951 |
import numpy as np
import plotly.graph_objs as go
import sklearn as sk
import sklearn.metrics
def plotMultiROC(y_true, # list of true labels
y_scores, # array of scores for each class of shape [n_samples, n_classes]
title = 'Multiclass ROC Plot',
n_points=100, # reinterpolates to have exactly N points
labels = None, # list of labels for each class
threshdot = None,
                 plot=True, # 1/0. If 0, returns plotly json object, but doesn't plot
):
"""
Makes a multiclass ROC plot. Can also be used for binary ROC plot
"""
y_true = np.array(y_true)
y_scores = np.array(y_scores)
if y_scores.ndim == 1: # convert to [n_samples, n_classes] even if 1 class
y_scores = np.atleast_2d(y_scores).T
N, n_classes = y_scores.shape
if n_classes == 1: # needed to avoid inverting when doing binary classification
y_scores *= -1
if threshdot is not None:
threshdot *= -1
# calc ROC curves & AUC
fpr = dict()
tpr = dict()
thresh = dict()
thresh_txt = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], thresh[i] = sk.metrics.roc_curve(y_true == i, y_scores[:, i])
roc_auc[i] = sk.metrics.auc(fpr[i], tpr[i])
if n_points is not None:
x = np.linspace(0, 1, n_points)
indxs = np.searchsorted(tpr[i], x)
tpr[i] = tpr[i][indxs]
fpr[i] = fpr[i][indxs]
thresh[i] = thresh[i][indxs]
thresh_txt[i] = ['T=%.4f' % t for t in thresh[i]]
if labels is None:
labels = ['C%d' % n for n in range(1, n_classes+1)]
labels = [str(x) for x in labels] # convert labels to str
# make traces
traces = []
[traces.append(go.Scatter(y=tpr[i], x=fpr[i], name=labels[i] + '. AUC= %.2f' % (roc_auc[i]), text=thresh_txt[i],
legendgroup=str(i), line={'width': 1}))
for i in range(n_classes)]
traces += [go.Scatter(y=[0, 1], x=[0, 1], name='Random classifier', line={'width': 1, 'dash': 'dot'})]
if threshdot is not None:
for i in range(n_classes):
c_indx = (np.abs(thresh[i]-threshdot)).argmin()
traces += [go.Scatter(x=[fpr[i][c_indx]]*2, y=[tpr[i][c_indx]]*2, mode='markers',
name='Threshold', legendgroup=str(i), showlegend=False)]
# make layout
layout = go.Layout(title=title,
xaxis={'title': 'FPR'},
yaxis={'title': 'TPR'},
legend=dict(x=1),
hovermode='closest',
)
fig = go.Figure(data=traces, layout=layout)
return plotOut(fig, plot) | a8ca19b92f7f3539d8550cf63121a46d36e59cbf | 3,952 |
def fasta_to_dict(fasta_file):
"""Consolidate deflines and sequences from FASTA as dictionary"""
deflines = []
sequences = []
sequence = ""
with open(fasta_file, "r") as file:
for line in file:
if line.startswith(">"):
deflines.append(line.rstrip().lstrip('>'))
if sequence:
sequences.append(sequence)
sequence = ""
else:
sequence += line.rstrip()
sequences.append(sequence)
fasta_dict = {}
for x, defline in enumerate(deflines):
fasta_dict[defline]=sequences[x]
return fasta_dict | e1740ad29672e5239d575df963e21a0bf5caee08 | 3,953 |
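A self-contained usage sketch that writes a tiny FASTA file to a temporary path:
import os, tempfile
with tempfile.NamedTemporaryFile("w", suffix=".fa", delete=False) as fh:
    fh.write(">seq1 sample\nACGT\nACGT\n>seq2\nTTGCA\n")
    path = fh.name
print(fasta_to_dict(path))  # {'seq1 sample': 'ACGTACGT', 'seq2': 'TTGCA'}
os.remove(path)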
def find_roots(graph):
"""
return nodes which you can't traverse down any further
"""
return [n for n in graph.nodes() if len(list(graph.predecessors(n))) == 0] | 7dbf755d2b76f066370d149638433c6693e8e7b9 | 3,954 |
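A minimal usage sketch with a small networkx DiGraph (assumed here, since the function only relies on nodes() and predecessors()):
import networkx as nx
g = nx.DiGraph([("a", "b"), ("a", "c"), ("d", "c")])
print(find_roots(g))  # ['a', 'd'], the nodes with no incoming edges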