content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M) |
---|---|---|
def circle_location_Pass(circle_, image_, margin=0.15):
"""
Function for check if the circle_ is overlapping
with the margin of the image_.
"""
cy, cx, rad, accum = circle_
image_sizeY_, image_sizeX_ = image_.shape[0], image_.shape[1]
margin_min_x = int(image_sizeX_ * margin)
margin_max_x = int(image_sizeX_ * (1 - margin))
margin_min_y = int(image_sizeY_ * margin)
margin_max_y = int(image_sizeY_ * (1 - margin))
margin_min_xh = int(image_sizeX_ * margin/2.)
margin_max_xh = int(image_sizeX_ * (1 - margin/2.))
margin_min_yh = int(image_sizeY_ * margin/2.)
margin_max_yh = int(image_sizeY_ * (1 - margin/2.))
if cy<margin_min_y or cy>margin_max_y:
return False
if cx<margin_min_x or cx>margin_max_x:
return False
if cy-rad<margin_min_yh or cy+rad>margin_max_yh:
return False
if cx-rad<margin_min_xh or cx+rad>margin_max_xh:
return False
return True | 4ad94552bc1bf06282a691edede89a65f8b9c328 | 3,657,000 |
import ftplib
def session_factory(
base_class=ftplib.FTP,
port=21,
use_passive_mode=None,
*,
encrypt_data_channel=True,
debug_level=None,
):
"""
Create and return a session factory according to the keyword
arguments.
    base_class: Base class to use for the session class (e.g.
`ftplib.FTP_TLS` or `M2Crypto.ftpslib.FTP_TLS`, default is
`ftplib.FTP`).
port: Port number (integer) for the command channel (default 21).
If you don't know what "command channel" means, use the default or
use what the provider gave you as "the FTP port".
use_passive_mode: If `True`, explicitly use passive mode. If
`False`, explicitly don't use passive mode. If `None` (default),
let the `base_class` decide whether it wants to use active or
passive mode.
    encrypt_data_channel: If `True` (the default), call the `prot_p`
    method of the base class if it has the method. If `False` or
    `None`, don't call the method.
debug_level: Debug level (integer) to be set on a session
instance. The default is `None`, meaning no debugging output.
This function should work for the base classes `ftplib.FTP`,
`ftplib.FTP_TLS`. Other base classes should work if they use the
same API as `ftplib.FTP`.
Usage example:
my_session_factory = session_factory(
base_class=ftplib.FTP_TLS,
use_passive_mode=True,
encrypt_data_channel=True)
with ftputil.FTPHost(host, user, password,
session_factory=my_session_factory) as host:
...
"""
class Session(base_class):
"""Session factory class created by `session_factory`."""
def __init__(self, host, user, password):
super().__init__()
self.connect(host, port)
if debug_level is not None:
self.set_debuglevel(debug_level)
self.login(user, password)
# `set_pasv` can be called with `True` (causing passive
# mode) or `False` (causing active mode).
if use_passive_mode is not None:
self.set_pasv(use_passive_mode)
if encrypt_data_channel and hasattr(base_class, "prot_p"):
self.prot_p()
return Session | 9fa29732dc14705317e4bbb3752330de5f0282c6 | 3,657,001 |
def calculate_molecular_mass(symbols):
"""
Calculate the mass of a molecule.
Parameters
----------
symbols : list
A list of elements.
Returns
-------
mass : float
The mass of the molecule
"""
    mass = 0
    # atomic_weights is expected to be defined at module level (element symbol -> mass).
    for symbol in symbols:
        mass += atomic_weights[symbol]
return mass | 7ac18cffc02652428b51009d2bf304301def96dd | 3,657,002 |
def _color_str(string, color):
"""Simple color formatter for logging formatter"""
    # To make the text bold, insert "1;" right after the "[" in the escape sequence.
start_seq = '\033[{:d}m'.format(COLOR_DICT[color])
return start_seq + string + '\033[0m' | 715b0b597885f1cffa352cc01bdb743c3ed23dd4 | 3,657,003 |
def parser_tool_main(args):
"""Main function for the **parser** tool.
This method will parse a JSON formatted Facebook conversation,
reports informations and retrieve data from it, depending on the
arguments passed.
Parameters
----------
args : Namespace (dict-like)
Arguments passed by the `ArgumentParser`.
See Also
--------
FBParser: Class used for the **parser** tool.
main : method used for parsing arguments
"""
with args.cookie as f:
user_raw_data = f.read()
print("[+] - Parsing JSON for {} files".format(len(args.infile)))
data_formatted = build_fmt_str_from_enum(args.data)
print("[+] - Parsing JSON to retrieve {}".format(data_formatted))
fb_parser = FBParser(user_raw_data,
infile_json=args.infile, mode=args.mode,
data=args.data, output=args.output,
threads=args.threads)
fb_parser.parse(to_stdout=True, verbose=args.verbose)
print("[+] - JSON parsed succesfully, saving results "
"inside folder '" + str(args.output) + "'")
return 0 | 1e07a60e78b042c6c229410e5d1aaf306e692f61 | 3,657,004 |
from functools import reduce
def merge(from_args):
"""Merge a sequence of operations into a cross-product tree.
from_args: A dictionary mapping a unique string id to a
raco.algebra.Operation instance.
Returns: a single raco.algebra.Operation instance and an opaque
data structure suitable for passing to the rewrite_refs function.
"""
assert len(from_args) > 0
def cross(x, y):
return algebra.CrossProduct(x, y)
from_ops = from_args.values()
op = reduce(cross, from_ops)
return (op, __calculate_offsets(from_args)) | e3690a26fc9e3e604984aab827617ffc535f63d3 | 3,657,005 |
import subprocess
import mimetypes
def get_content_type(file_resource):
"""Gets a file's MIME type.
Favors returning the result of `file -b --mime ...` if the command is
available and users have enabled it. Otherwise, it returns a type based on the
file's extension.
Args:
file_resource (resource_reference.FileObjectResource): The file to return a
type for.
Returns:
A MIME type (str).
If a type cannot be guessed, request_config_factory.DEFAULT_CONTENT_TYPE is
returned.
"""
if file_resource.storage_url.is_pipe:
return request_config_factory.DEFAULT_CONTENT_TYPE
path = file_resource.storage_url.object_name
# Some common extensions are not recognized by the mimetypes library and
# "file" command, so we'll hard-code support for them.
for extension, content_type in COMMON_EXTENSION_RULES.items():
if path.endswith(extension):
return content_type
if (not platforms.OperatingSystem.IsWindows() and
properties.VALUES.storage.use_magicfile.GetBool()):
output = subprocess.run(['file', '-b', '--mime', path],
check=True,
stdout=subprocess.PIPE,
universal_newlines=True)
content_type = output.stdout.strip()
else:
content_type, _ = mimetypes.guess_type(path)
if content_type:
return content_type
return request_config_factory.DEFAULT_CONTENT_TYPE | d58e60262cd762ae412580effc022f643132cb69 | 3,657,006 |
def graph(task_id):
"""Return the graph.json results"""
return get_file(task_id, "graph.json") | 4d8728d3b61cf62057525054d8eafa127b1c48ff | 3,657,007 |
def parse_components_from_aminochange(aminochange):
""" Returns a dictionary containing (if possible) 'ref', 'pos', and 'alt'
characteristics of the supplied aminochange string.
If aminochange does not parse, returns None.
:param aminochange: (str) describing amino acid change
:return: dict or None
"""
match = re_aminochange_comp_long.match(aminochange)
if match:
# reverse long-form amino strings to short-form.
stuff = match.groupdict()
return {'ref': amino_acid_map[stuff['ref']],
'pos': stuff['pos'],
'alt': amino_acid_map[stuff['alt']],
}
    else:
        match = re_aminochange_comp_short.match(aminochange)
        if match:
            return match.groupdict()
    return None
import random
import string
import csv
def get_logs_csv():
"""
get target's logs through the API in JSON type
Returns:
an array with JSON events
"""
api_key_is_valid(app, flask_request)
target = get_value(flask_request, "target")
data = logs_to_report_json(target)
keys = data[0].keys()
filename = "report-" + now(
model="%Y_%m_%d_%H_%M_%S"
) + "".join(
random.choice(
string.ascii_lowercase
) for _ in range(10)
)
with open(filename, "w") as report_path_filename:
dict_writer = csv.DictWriter(
report_path_filename,
fieldnames=keys,
quoting=csv.QUOTE_ALL
)
dict_writer.writeheader()
for event in data:
dict_writer.writerow(
{
key: value for key, value in event.items() if key in keys
}
)
with open(filename, 'r') as report_path_filename:
reader = report_path_filename.read()
return Response(
reader, mimetype='text/csv',
headers={
'Content-Disposition': 'attachment;filename=' + filename + '.csv'
}
) | f9296cfc7c6559ebccbfa29268e3b22875fb9fed | 3,657,009 |
def _cache_key_format(lang_code, request_path, qs_hash=None):
"""
função que retorna o string que será a chave no cache.
formata o string usando os parâmetros da função:
- lang_code: código do idioma: [pt_BR|es|en]
- request_path: o path do request
- qs_hash: o hash gerado a partir dos parametros da querystring (se não for None)
"""
cache_key = "/LANG=%s/PATH=%s" % (lang_code, request_path)
if qs_hash is not None:
cache_key = "%s?QS=%s" % (cache_key, qs_hash)
return cache_key | 365b1ff144f802e024da5d6d5b25b015463da8b3 | 3,657,010 |
from typing import Iterable
from pathlib import Path
from typing import Callable
from typing import Any
from typing import List
from typing import Tuple
def select_from(paths: Iterable[Path],
filter_func: Callable[[Any], bool] = default_filter,
transform: Callable[[Path], Any] = None,
order_func: Callable[[Any], Any] = None,
order_asc: bool = True,
fn_base: int = 10,
                limit: int = None) -> Tuple[List[Any], List[Path]]:
"""Filter, order, and truncate the given paths based on the filter and
other parameters.
:param paths: A list of paths to filter, order, and limit.
:param transform: Function to apply to each path before applying filters
or ordering. The filter and order functions should expect the type
returned by this.
:param filter_func: A function that takes a directory, and returns whether
to include that directory. True -> include, False -> exclude
:param order_func: A function that returns a comparable value for sorting,
as per the list.sort keys argument. Items for which this returns
None are removed.
:param order_asc: Whether to sort in ascending or descending order.
:param fn_base: Number base for file names. 10 by default, ensure dir name
is a valid integer.
:param limit: The max items to return. None denotes return all.
:returns: A filtered, ordered list of transformed objects, and the list
of untransformed paths.
"""
if transform is None:
transform = lambda v: v
selected = []
for path in paths:
if not path.is_dir():
continue
try:
int(path.name, fn_base)
except ValueError:
continue
try:
item = transform(path)
except ValueError:
continue
if not filter_func(item):
continue
if order_func is not None and order_func(item) is None:
continue
selected.append((item, path))
if order_func is not None:
selected.sort(key=lambda d: order_func(d[0]), reverse=not order_asc)
return SelectItems(
[item[0] for item in selected][:limit],
[item[1] for item in selected][:limit]) | d952d7d81932c5f6d206c39a5ac12aae1e940431 | 3,657,011 |
import torch
from collections import Counter
from sklearn.cluster import DBSCAN
def dbscan(data:torch.Tensor, epsilon:float, **kwargs) -> torch.Tensor:
"""
Generate mask using DBSCAN.
Note, data in the largest cluster have True values.
Parameters
----------
data: torch.Tensor
input data with shape (n_samples, n_features)
epsilon: float
DBSCAN epsilon
**kwargs:
passed to DBSCAN()
Returns
-------
mask (torch.Tensor)
"""
group = DBSCAN(eps=epsilon, **kwargs).fit(data.cpu().numpy())
label = Counter(group.labels_)
label = max(label, key=label.get)
return torch.tensor(group.labels_ == label).to(data.device) | 0121b8b9dceaf9fc8399ffd75667afa6d34f66e1 | 3,657,012 |
import copy
import numpy as np
from numpy.random import default_rng
def simulate_multivariate_ts(mu, alpha, beta, num_of_nodes=-1,\
Thorizon = 60, seed=None, output_rejected_data=False):
"""
Inputs:
mu: baseline intesnities M X 1 array
alpha: excitiation rates of multivariate kernel pf HP M X M array
beta: decay rates of kernel of multivariate HP
node: k-th node of multivariate HP
"""
#################
# Initialisation
#################
if num_of_nodes < 0:
num_of_nodes = np.shape(mu)[0]
rng = default_rng(seed) # get instance of random generator
    ts = [np.array([]) for _ in range(num_of_nodes)] # create M empty arrays to store the ordered timestamps of each node
    t = 0 # initialise current time to be 0
    num_of_events = np.zeros(num_of_nodes) # set event counter to be 0 for all nodes
    epsilon = 10**(-10) # small offset commonly used in HP implementations
    M_star = copy.copy(mu) # upper bound at current time t = 0
    accepted_event_intensity = []
    rejected_points = []; rpy = [] # containers for rejected time points and their corresponding intensities
    M_x = []; M_y = [] # M_y stores the upper bound at current times while M_x stores their x-values
#################
# Begin loop
#################
while(t < Thorizon):
previous_M_star = M_star; previous_t = t
M_star = np.sum(multiv_cif(t+epsilon, ts, mu, alpha, beta)) # compute upper bound of intensity using conditional intensity function
u = rng.uniform(0,1) # draw a uniform random number between interval (0,1)
tau = -np.log(u)/M_star # sample inter-arrival time
t = t + tau # update current time by adding tau to current time (hence t is the candidate point)
M_x += [previous_t,t]
M_y += [previous_M_star]
s = rng.uniform(0,1) # draw another standard uniform random number
M_t = np.sum(multiv_cif(t, ts, mu, alpha, beta)) # compute intensity function at current time t
if t <= Thorizon:
##########################
## Rejection Sampling test where probability of acceptance: M_t/M_star
if s <= M_t/M_star:
k = 0 # initialise k to be the first node '0'
# Search for node k such that the 'while condition' below is satisfied
                while s*M_star > np.sum(multiv_cif(t, ts, mu, alpha, beta)[0:k+1]):
k += 1
num_of_events[k] += 1 # update number of points in node k
ts[k] = np.append(ts[k], float(t)) # accept candidate point t in node k
accepted_event_intensity.append(M_t)
else:
rejected_points += [t]
rpy += [M_t]
else:
break
if output_rejected_data:
return ts, num_of_events, accepted_event_intensity, rejected_points, rpy
else:
return ts, num_of_events | 85ab71fa3f2b16cbe296d21d6bc43c15c94aa40a | 3,657,013 |
import base64
from secrets import token_bytes  # stdlib helper supplying the random bytes used below
def token_urlsafe(nbytes):
"""Return a random URL-safe text string, in Base64 encoding.
The string has *nbytes* random bytes. If *nbytes* is ``None``
or not supplied, a reasonable default is used.
>>> token_urlsafe(16) #doctest:+SKIP
'Drmhze6EPcv0fN_81Bj-nA'
"""
tok = token_bytes(nbytes)
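    # Base64url-encode and strip the '=' padding so the token stays URL-safe and compact.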
return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') | 1855dc44cec1ddd0c6c83d0f765c15fd98d1ec98 | 3,657,014 |
def sha206a_get_pk_useflag_count(pk_avail_count):
"""
    Calculates the available Parent Key use counts.
    Args:
        pk_avail_count      receives the number of available use-flag bits
                            counted as 1 (AtcaReference)
Returns:
Status Code
"""
if not isinstance(pk_avail_count, AtcaReference):
status = Status.ATCA_BAD_PARAM
else:
c_pk_avail_count = c_uint8(pk_avail_count.value)
status = get_cryptoauthlib().sha206a_get_pk_useflag_count(byref(c_pk_avail_count))
pk_avail_count.value = c_pk_avail_count.value
return status | 389174a21efe1ca78037b479895035b4bdd66b87 | 3,657,015 |
from typing import Tuple
import tensorflow as tf
def rotate_points_around_origin(
x: tf.Tensor,
y: tf.Tensor,
angle: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Rotates points around the origin.
Args:
x: Tensor of shape [batch_size, ...].
y: Tensor of shape [batch_size, ...].
angle: Tensor of shape [batch_size, ...].
Returns:
Rotated x, y, each with shape [batch_size, ...].
"""
tx = tf.cos(angle) * x - tf.sin(angle) * y
ty = tf.sin(angle) * x + tf.cos(angle) * y
return tx, ty | 8d4bf5f94964271f640def7d7e2b4242fbfe8e7b | 3,657,016 |
import inspect
def form_of(state):
"""Return the form of the given state."""
if hasattr(state, "__form__"):
if callable(state.__form__) and not inspect.isclass(state.__form__):
return state.__form__()
else:
return state.__form__
else:
raise ValueError(f"{state} has no form") | e39aa7db7b324ab38b65232b34b987b862812c54 | 3,657,017 |
import geopandas as gpd
def poly_to_geopandas(polys, columns):
"""
Converts a GeoViews Paths or Polygons type to a geopandas dataframe.
Parameters
----------
polys : gv.Path or gv.Polygons
GeoViews element
columns: list(str)
List of columns
Returns
-------
gdf : Geopandas dataframe
"""
rows = []
for g in polys.geom():
rows.append(dict({c: '' for c in columns}, geometry=g))
return gpd.GeoDataFrame(rows, columns=columns+['geometry']) | 889fc5b1bf5bf15cd9612c40e7bf14b1c05043f6 | 3,657,018 |
def get_sequences(query_file=None, query_ids=None):
"""Convenience function to get dictionary of query sequences from file or IDs.
Parameters:
query_file (str): Path to FASTA file containing query protein sequences.
query_ids (list): NCBI sequence accessions.
Raises:
ValueError: Did not receive values for query_file or query_ids.
Returns:
sequences (dict): Dictionary of query sequences keyed on accession.
"""
if query_file and not query_ids:
with open(query_file) as query:
sequences = parse_fasta(query)
elif query_ids:
sequences = efetch_sequences(query_ids)
else:
raise ValueError("Expected 'query_file' or 'query_ids'")
return sequences | 8056ce1c98b7a4faa4bb5a02505d527df31c7c8b | 3,657,019 |
import os
def _get_tickets(manifest, container_dir):
"""Get tickets."""
principals = set(manifest.get('tickets', []))
if not principals:
return False
tkts_spool_dir = os.path.join(
container_dir, 'root', 'var', 'spool', 'tickets')
try:
tickets.request_tickets(
context.GLOBAL.zk.conn,
manifest['name'],
tkts_spool_dir,
principals
)
except Exception:
_LOGGER.exception('Exception processing tickets.')
raise exc.ContainerSetupError('Get tickets error',
app_abort.AbortedReason.TICKETS)
# Check that all requested tickets are valid.
for princ in principals:
krbcc_file = os.path.join(tkts_spool_dir, princ)
if not tickets.krbcc_ok(krbcc_file):
_LOGGER.error('Missing or expired tickets: %s, %s',
princ, krbcc_file)
raise exc.ContainerSetupError(princ,
app_abort.AbortedReason.TICKETS)
else:
_LOGGER.info('Ticket ok: %s, %s', princ, krbcc_file)
return True | 39d73322620ea9a6f1da4bfb693336dfc68748bb | 3,657,020 |
def random_show_date(database_connection: mysql.connector.connect) -> str:
"""Return a random show date from the ww_shows table"""
database_connection.reconnect()
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT s.showdate FROM ww_shows s "
"WHERE s.showdate <= NOW() "
"ORDER BY RAND() "
"LIMIT 1;")
cursor.execute(query)
result = cursor.fetchone()
cursor.close()
if not result:
return None
return result["showdate"].isoformat() | e3afdf9aa1fe9a02adab72c424caa80d60280699 | 3,657,021 |
import numpy as np
def get_output_tensor(interpreter, index):
"""Returns the output tensor at the given index."""
output_details = interpreter.get_output_details()[index]
tensor = np.squeeze(interpreter.get_tensor(output_details["index"]))
return tensor | 158db3fc7ba13ee44d422248a9b96b7738a486e3 | 3,657,022 |
import numpy as np
def make_d_mappings(n_dir, chain_opts):
"""Generate direction to solution interval mapping."""
# Get direction dependence for all terms.
dd_terms = [dd for _, dd in yield_from(chain_opts, "direction_dependent")]
    # Generate a mapping between model directions and gain directions.
d_map_arr = (np.arange(n_dir, dtype=np.int32)[:, None] * dd_terms).T
return d_map_arr | fd9eddf81b4388e3fa40c9b65a591af9aabf9014 | 3,657,023 |
from typing import cast
from typing import Dict
import os
import cmd
import traceback
import pprint
from urllib.parse import urlparse
def main():
"""Loop to test the postgres generation with REPL"""
envs = cast(Dict[str, str], os.environ)
if "HAYSTACK_DB" not in envs:
envs["HAYSTACK_DB"] = "sqlite3:///:memory:"
provider = get_provider("shaystack.providers.sql", envs)
conn = cast(SQLProvider, provider).get_connect()
scheme = urlparse(envs["HAYSTACK_DB"]).scheme
# noinspection PyMethodMayBeStatic
class HaystackRequest(cmd.Cmd):
""" Haystack REPL interface """
__slots__ = ("conn",)
# noinspection PyShadowingNames
def __init__(self, conn):
super().__init__()
self.conn = conn
def do_python(self, arg: str) -> None: # pylint: disable=no-self-use
# noinspection PyBroadException
try:
_, python_code = _filter_to_python(arg)
print(python_code)
print()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
def do_pg(self, arg: str) -> None:
# noinspection PyBroadException
try:
sql_request = pg_sql_filter("haystack", arg, FAKE_NOW, 1, "customer")
print(sql_request)
print()
if scheme.startswith("postgres"):
cursor = self.conn.cursor()
cursor.execute(sql_request)
cursor.close()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
conn.rollback()
def do_mysql(self, arg: str) -> None:
# noinspection PyBroadException
try:
sql_request = mysql_sql_filter("haystack", arg, FAKE_NOW, 1, "customer")
print(sql_request)
print()
if scheme.startswith("mysql"):
cursor = self.conn.cursor()
cursor.execute(sql_request)
cursor.close()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
conn.rollback()
def do_sqlite(self, arg: str) -> None:
# noinspection PyBroadException
try:
sql_request = sqlite_sql_filter("haystack", arg, FAKE_NOW, 1, "customer")
print(sql_request)
print()
if scheme.startswith("sqlite"):
cursor = self.conn.cursor()
cursor.execute(sql_request)
cursor.close()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
conn.rollback()
def do_mongo(self, arg: str) -> None: # pylint: disable=no-self-use
# noinspection PyBroadException
try:
mongo_request = _mongo_filter(arg, FAKE_NOW, 1, "customer")
pprint.PrettyPrinter(indent=4).pprint(mongo_request)
print()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
conn.rollback()
def do_bye(self, _: str) -> bool: # pylint: disable=unused-argument,no-self-use
return True
try:
HaystackRequest(conn).cmdloop()
except KeyboardInterrupt:
return 0
return 0 | caaaaafd9407417e4be334963c72a2b5d0c970fb | 3,657,024 |
import numpy as np
import vtk
import vtk.util.numpy_support as npsup
# Note: vtkTools is assumed to be provided by the surrounding package.
def _calculateVolumeByBoolean(vtkDataSet1,vtkDataSet2,iV):
"""
Function to calculate the volumes of a cell intersecting a mesh.
Uses a boolean polydata filter to calculate the intersection,
a general implementation but slow.
"""
# Triangulate polygon and calc normals
baseC = vtkTools.dataset.getCell2vtp(vtkDataSet2,iV)
baseVol = vtkTools.polydata.calculateVolume(baseC)
# print iV, baseVol
# Extract cells from the first mesh that intersect the base cell
extractCells = vtkTools.extraction.extractDataSetWithPolygon(vtkDataSet1,baseC,extInside=True,extBoundaryCells=True,extractBounds=True)
extInd = npsup.vtk_to_numpy(extractCells.GetCellData().GetArray('id'))
# print extInd
    # Assert if there are no cells cut
assert extractCells.GetNumberOfCells() > 0, 'No cells in the clip, cell id {:d}'.format(iV)
# Calculate the volumes of the clipped cells and insert to the matrix
volL = []
for nrCC,iR in enumerate(extInd):
tempCell = vtkTools.dataset.thresholdCellId2vtp(extractCells,iR)
# Find the intersection of the 2 cells
boolFilt = vtk.vtkBooleanOperationPolyDataFilter()
boolFilt.SetInputData(0,tempCell)
boolFilt.SetInputData(1,baseC)
boolFilt.SetOperationToIntersection()
        boolFilt.Update()  # run the filter so its output is populated before inspecting it
        # If they intersect, calculate the volumes
        if boolFilt.GetOutput().GetNumberOfPoints() > 0:
cleanInt = vtkTools.polydata.cleanPolyData(boolFilt.GetOutputPort())
del3dFilt = vtk.vtkDelaunay3D()
del3dFilt.SetInputData(cleanInt)
del3dFilt.Update()
# Get the output
intC = vtkTools.extraction.vtu2vtp(del3dFilt.GetOutput())
intVol = vtkTools.polydata.calculateVolume(tempCell)
# Calculate the volume
volVal = intVol/baseVol
# print iR, intVol, volVal
# Insert the value
if volVal > 0.0:
volL.append(volVal)
return extInd,np.array(volL) | a2c30133973527fb339c9d1e33cc2a937b35d958 | 3,657,025 |
def WebChecks(input_api, output_api):
"""Run checks on the web/ directory."""
if input_api.is_committing:
error_type = output_api.PresubmitError
else:
error_type = output_api.PresubmitPromptWarning
output = []
output += input_api.RunTests([input_api.Command(
name='web presubmit',
cmd=[
input_api.python_executable,
input_api.os_path.join('web', 'web.py'),
'presubmit',
],
kwargs={},
message=error_type,
)])
return output | 5fb828cc98da71bd231423223336ec81e02505ff | 3,657,026 |
from HUGS.Util import load_hugs_json
def synonyms(species: str) -> str:
"""
Check to see if there are other names that we should be using for
a particular input. E.g. If CFC-11 or CFC11 was input, go on to use cfc-11,
as this is used in species_info.json
Args:
species (str): Input string that you're trying to match
Returns:
str: Matched species string
"""
# Load in the species data
species_data = load_hugs_json(filename="acrg_species_info.json")
    # First test whether the species matches keys (case insensitive)
matched_strings = [k for k in species_data if k.upper() == species.upper()]
# Used to access the alternative names in species_data
alt_label = "alt"
# If not found, search synonyms
if not matched_strings:
for key in species_data:
# Iterate over the alternative labels and check for a match
matched_strings = [s for s in species_data[key][alt_label] if s.upper() == species.upper()]
if matched_strings:
matched_strings = [key]
break
if matched_strings:
updated_species = matched_strings[0]
return updated_species
else:
raise ValueError(f"Unable to find synonym for species {species}") | 31013464ce728cc3ed93b1a9318af3dbcf3f65ec | 3,657,027 |
def _blkid_output(out):
"""
Parse blkid output.
"""
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type", None) == "xfs":
dev["label"] = dev.get("label")
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in mounts:
if data.get(device):
data[device].update(mounts[device])
return data | 2cbcbb3ec9b732c3c02183f43ca5a5d5e876af71 | 3,657,028 |
def as_iso_datetime(qdatetime):
""" Convert a QDateTime object into an iso datetime string.
"""
return qdatetime.toString(Qt.ISODate) | 8dba5d1d6efc0dc17adc26a5687923e067ca3c29 | 3,657,029 |
import tensorflow as tf
def spec_means_and_magnitudes(action_spec):
"""Get the center and magnitude of the ranges in action spec."""
action_means = tf.nest.map_structure(
lambda spec: (spec.maximum + spec.minimum) / 2.0, action_spec)
action_magnitudes = tf.nest.map_structure(
lambda spec: (spec.maximum - spec.minimum) / 2.0, action_spec)
return tf.cast(
action_means, dtype=tf.float32), tf.cast(
action_magnitudes, dtype=tf.float32) | 119054966a483bb60e80941a6bf9dc5a4a0778f6 | 3,657,030 |
import pandas as pd
def clean_data(df):
    """
    Clean Data:
    1. Clean and transform category columns from the categories csv
    2. Drop duplicates
    3. Remove any missing values
    Args:
        INPUT - df - merged Dataframe from load_data function
        OUTPUT - Returns df - cleaned Dataframe
    """
# Split categories into separate category columns
categories = df['categories'].str.split(';', expand=True)
row = categories.iloc[0]
# Get new column names from category columns
category_colnames = row.apply(lambda x: x.rstrip('- 0 1'))
categories.columns = category_colnames
# Convert category values to 0 or 1
categories = categories.applymap(lambda s: int(s[-1]))
# Drop the original categories column from Dataframe
df.drop('categories', axis=1, inplace=True)
# Concatenate the original dataframe with the new `categories` dataframe
df_final = pd.concat([df, categories], axis=1)
#Drop missing values and duplicates from the dataframe
df_final.drop_duplicates(subset='message', inplace=True)
df_final.dropna(subset=category_colnames, inplace=True)
#Refer ETL Pipeline preparation Notebook to understand why these columns are dropped
df_final = df_final[df_final.related != 2]
df_final = df_final.drop('child_alone', axis=1)
return df_final | 752d675d8ac5e27c61c9b8c90acee4cdab8c08fc | 3,657,031 |
def commitFile(file: str = None, message: str = None, debug: bool = False) -> bool:
"""Commit a file when it is changed.
:param file: The name of the file we want to commit.
:type file: str
:param message: The commit message we want to use.
:type message: str
:param debug: If we want debug logging enabled.
:type debug: Bool
:rtype: bool
:return: When committed (True), or no commit has been made (False)
"""
changelogdUpdated = ["git", "status", "|", "grep", file, "|", "wc", "-l"]
changelogdUpdatedOutput = int(generic.executeCommand(command=changelogdUpdated))
if changelogdUpdatedOutput >= 1:
# gitCommitCommand = ["git", "commit", "-m", {m}, {f}.format(m=message, f=file)]
gitCommitCommand = ["git", "commit", "-m", message, file]
generic.executeCommand(command=gitCommitCommand, shell=False, debug=debug)
return True
return False | 2821da94cf727ee4d5098ccacc78c8368e7899aa | 3,657,032 |
def all_pairs_shortest_path_length(G,cutoff=None):
""" Compute the shortest path lengths between all nodes in G.
Parameters
----------
G : NetworkX graph
cutoff : integer, optional
depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dictionary
Dictionary of shortest path lengths keyed by source and target.
Notes
-----
The dictionary returned only has keys for reachable node pairs.
Examples
--------
>>> G=nx.path_graph(5)
>>> length=nx.all_pairs_shortest_path_length(G)
>>> print(length[1][4])
3
>>> length[1]
{0: 1, 1: 0, 2: 1, 3: 2, 4: 3}
"""
paths={}
for n in G:
paths[n]=single_source_shortest_path_length(G,n,cutoff=cutoff)
return paths | 1d312a71bd97d4f1a51a8b1e24331d54055bc156 | 3,657,033 |
def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None):
""" Figure out based on the possible columns inputs which columns to keep.
Args:
gctoo (GCToo object):
cid (list of strings):
col_bool (boolean array):
cidx (list of integers):
exclude_cid (list of strings):
Returns:
cols_to_keep (list of strings): col ids to be kept
"""
# Use cid if provided
if cid is not None:
assert type(cid) == list, "cid must be a list. cid: {}".format(cid)
cols_to_keep = [gctoo_col for gctoo_col in gctoo.meth_df.columns if gctoo_col in cid]
# Tell user if some cids not found
num_missing_cids = len(cid) - len(cols_to_keep)
if num_missing_cids != 0:
logger.info("{} cids were not found in the GCT.".format(num_missing_cids))
# Use col_bool if provided
elif col_bool is not None:
assert len(col_bool) == gctoo.meth_df.shape[1], (
"col_bool must have length equal to gctoo.meth_df.shape[1]. " +
"len(col_bool): {}, gctoo.meth_df.shape[1]: {}".format(
len(col_bool), gctoo.meth_df.shape[1]))
cols_to_keep = gctoo.meth_df.columns[col_bool].values
# Use cidx if provided
elif cidx is not None:
assert type(cidx[0]) is int, (
"cidx must be a list of integers. cidx[0]: {}, " +
"type(cidx[0]): {}").format(cidx[0], type(cidx[0]))
assert max(cidx) <= gctoo.meth_df.shape[1], (
"cidx contains an integer larger than the number of columns in " +
"the GCToo. max(cidx): {}, gctoo.meth_df.shape[1]: {}").format(
max(cidx), gctoo.meth_df.shape[1])
cols_to_keep = gctoo.meth_df.columns[cidx].values
# If cid, col_bool, and cidx are all None, return all columns
else:
cols_to_keep = gctoo.meth_df.columns.values
# Use exclude_cid if provided
if exclude_cid is not None:
# Keep only those columns that are not in exclude_cid
cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid]
return cols_to_keep | 1215a392ecb068e2d004c64cf56f2483c722f3f6 | 3,657,034 |
import shutil
def check_zenity():
""" Check if zenity is installed """
warning = '''zenity was not found in your $PATH
Installation is recommended because zenity is used to
indicate that protonfixes is doing work while waiting
for a game to launch. To install zenity use your system's
package manager.
'''
if not shutil.which('zenity'):
log.warn(warning)
return False
return True | decea9be11e0eb1d866ed295cb33a06aa663a432 | 3,657,035 |
def get_auth_token():
"""
Return the zerotier auth token for accessing its API.
"""
with open("/var/snap/zerotier-one/common/authtoken.secret", "r") as source:
return source.read().strip() | bd74fde05fbb375f8899d4e5d552ad84bcd80573 | 3,657,036 |
import numpy as np
import tensorflow as tf
def sph_harm_transform(f, mode='DH', harmonics=None):
""" Project spherical function into the spherical harmonics basis. """
assert f.shape[0] == f.shape[1]
if isinstance(f, tf.Tensor):
sumfun = tf.reduce_sum
def conjfun(x): return tf.conj(x)
n = f.shape[0].value
else:
sumfun = np.sum
conjfun = np.conj
n = f.shape[0]
assert np.log2(n).is_integer()
if harmonics is None:
harmonics = sph_harm_all(n)
a = DHaj(n, mode)
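    # Apply the Driscoll-Healy quadrature weights (a_j) to the samples before summing.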
f = f*np.array(a)[np.newaxis, :]
real = is_real_sft(harmonics)
coeffs = []
for l in range(n // 2):
row = []
minl = 0 if real else -l
for m in range(minl, l+1):
# WARNING: results are off by this factor, when using driscoll1994computing formulas
factor = 2*np.sqrt(np.pi)
row.append(sumfun(factor * np.sqrt(2*np.pi)/n *
f * conjfun(harmonics[l][m-minl])))
coeffs.append(row)
return coeffs | a88f9a71fa19a57441fdfe88e8b0632cc08fb413 | 3,657,037 |
def create_model(experiment_settings:ExperimentSettings) -> OuterModel:
"""
function creates an OuterModel with provided settings.
Args:
        experiment_settings: an instance of ExperimentSettings whose
            outer_settings are used to build and compile the OuterModel
"""
model = OuterModel(experiment_settings.outer_settings)
model.compile(
loss= experiment_settings.outer_settings.loss,
optimizer=experiment_settings.outer_settings.optimizer,
metrics=experiment_settings.outer_settings.metrics,
)
return model | e6af03c5afd53a39e6929dba71990f91ff8ffbb3 | 3,657,038 |
import pickle
def LoadTrainingTime(stateNum):
"""
Load the number of seconds spent training
"""
filename = 'time_' + str(stateNum) + '.pth'
    try:
        with open(GetModelPath() + filename, "rb") as f:
            timeVals = pickle.load(f)
        return timeVals["trainingTime"]
    except Exception:
        print("ERROR: Failed to load training times! Returning 0")
        return 0
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 15 epochs"""
lr = args.lr * (0.1 ** (epoch // args.lr_epochs))
print('Learning rate:', lr)
for param_group in optimizer.param_groups:
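        # Keep mask parameters frozen (lr = 0) unless they belong to the current pruning target.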
if args.retrain and ('mask' in param_group['key']): # retraining
param_group['lr'] = 0.0
elif args.prune_target and ('mask' in param_group['key']):
if args.prune_target in param_group['key']:
param_group['lr'] = lr
else:
param_group['lr'] = 0.0
else:
param_group['lr'] = lr
return lr | dc08034b0176ac0062d6fc7640a115f916a663a8 | 3,657,040 |
def disk_status(hardware, disk, dgtype):
"""
Status disk
"""
value = int(float(disk['used']) / float(disk['total']) * 100.0)
if value >= 90:
level = DiagnosticStatus.ERROR
elif value >= 70:
level = DiagnosticStatus.WARN
else:
level = DiagnosticStatus.OK
# Make board diagnostic status
d_board = DiagnosticStatus(
level=level,
name='jetson_stats {type} disk'.format(type=dgtype),
message="{0:2.1f}GB/{1:2.1f}GB".format(disk['used'], disk['total']),
hardware_id=hardware,
values=[
KeyValue(key="Used", value=str(disk['used'])),
KeyValue(key="Total", value=str(disk['total'])),
KeyValue(key="Unit", value="GB")])
return d_board | f248ccb0ba07106c3ed923f9ac7bc2e85d9b5e63 | 3,657,041 |
def hr_admin(request):
""" Views for HR2 Admin page """
user = request.user
# extra_info = ExtraInfo.objects.select_related().get(user=user)
designat = HoldsDesignation.objects.select_related().get(user=user)
if designat.designation.name =='hradmin':
template = 'hr2Module/hradmin.html'
# searched employee
query = request.GET.get('search')
if(request.method == "GET"):
if(query != None):
emp = ExtraInfo.objects.filter(
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)|
Q(id__icontains=query)
).distinct()
emp = emp.filter(user_type="faculty")
else:
emp = ExtraInfo.objects.all()
emp = emp.filter(user_type="faculty")
else:
emp = ExtraInfo.objects.all()
emp = emp.filter(user_type="faculty")
context = {'emps': emp}
return render(request, template, context)
else:
return HttpResponse('Unauthorized', status=401) | 1b2c1027f8f4caf716019d9e5500223f76119a0b | 3,657,042 |
import six
def test_extra():
"""Returns dict of extrapolation testing modules."""
return {name: module.test_extra() for name, module in six.iteritems(all_)} | 5538f81891c0388ae0f5e312cb6c521ee19d18a5 | 3,657,043 |
import torch
def _switch_component(
x: torch.Tensor, ones: torch.Tensor, zeros: torch.Tensor
) -> torch.Tensor:
"""
Basic component of switching functions.
Args:
x (torch.Tensor): Switch functions.
ones (torch.Tensor): Tensor with ones.
zeros (torch.Tensor): Zero tensor
Returns:
torch.Tensor: Output tensor.
"""
x_ = torch.where(x <= 0, ones, x)
return torch.where(x <= 0, zeros, torch.exp(-ones / x_)) | 8d60c09428440be704e8ced9b8ac19219a0d0b04 | 3,657,044 |
def get_vector(x_array, y_array, pair):
"""This function is for calculating a vector of a bone from the openpose skelleton"""
x = x_array[:,pair[0]]-x_array[:,pair[1]]
y = y_array[:,pair[0]]-y_array[:,pair[1]]
return [x, y] | e2bfcce3952c6b0a2c8cd9c67c4cd7b52547694d | 3,657,045 |
def update_bar(tweets_json, handle):
"""
Pull data from signal and updates aggregate bar graph
This is using thresholds that combine toxicity and severe toxicity models
suggested by Lucas.
"""
if not tweets_json:
raise PreventUpdate('no data yet!')
tweets_df = pd.read_json(tweets_json, orient='split')
low_count = tweets_df['LOW_LEVEL'].value_counts().get(True, 0)
med_count = tweets_df['MED_LEVEL'].value_counts().get(True, 0)
hi_count = tweets_df['HI_LEVEL'].value_counts().get(True, 0)
begin_date = tweets_df['display_time'].iloc[-1]
end_date = tweets_df['display_time'].iloc[0]
title = f"tweets at {handle}: {begin_date} – {end_date} (UTC)"
data = dict(
type='bar',
x=['Low', 'Medium', 'High'],
y=[low_count, med_count, hi_count],
marker=dict(
color=[colors['low'],
colors['medium'],
colors['high']])
)
return {
'data': [data],
'layout': dict(
type='layout',
title=title,
xaxis={'title': 'toxicity level'},
yaxis={'title': 'count'},
)
} | 1c523a455393ce211b8ef6483ee25b981e028bd0 | 3,657,046 |
import argparse
def defineConsole():
"""
defines the program console line commands
"""
parser = argparse.ArgumentParser(description="SBML to BNGL translator")
parser.add_argument(
"-f1", "--file1", type=str, help="reference file", required=True
)
parser.add_argument(
"-f2", "--file2", type=str, help="comparison file", required=True
)
# parser.add_argument('-o', '--output', type=str, help='output file', required=True)
return parser | 77f403040cf250810c5b4098c6b9818e5f17117e | 3,657,047 |
from typing import List
from typing import Dict
def render_foreign_derivation(tpl: str, parts: List[str], data: Dict[str, str]) -> str:
"""
>>> render_foreign_derivation("bor", ["en", "ar", "الْعِرَاق", "", "Iraq"], defaultdict(str))
'Arabic <i>الْعِرَاق</i> (<i>ālʿrāq</i>, “Iraq”)'
>>> render_foreign_derivation("der", ["en", "fro", "-"], defaultdict(str))
'Old French'
>>> render_foreign_derivation("etyl", ["enm", "en"], defaultdict(str))
'Middle English'
>>> render_foreign_derivation("etyl", ["grc"], defaultdict(str))
'Ancient Greek'
>>> render_foreign_derivation("inh", ["en", "enm", "water"], defaultdict(str))
'Middle English <i>water</i>'
>>> render_foreign_derivation("inh", ["en", "ang", "wæter", "", "water"], defaultdict(str))
'Old English <i>wæter</i> (“water”)'
>>> render_foreign_derivation("inh", ["en", "ang", "etan"], defaultdict(str, {"t":"to eat"}))
'Old English <i>etan</i> (“to eat”)'
>>> render_foreign_derivation("inh", ["en", "ine-pro", "*werdʰh₁om", "*wr̥dʰh₁om"], defaultdict(str))
'Proto-Indo-European <i>*wr̥dʰh₁om</i>'
>>> render_foreign_derivation("noncog", ["fro", "livret"], defaultdict(str, {"t":"book, booklet"}))
'Old French <i>livret</i> (“book, booklet”)'
>>> render_foreign_derivation("noncog", ["xta", "I̱ta Ita"], defaultdict(str, {"lit":"flower river"})) #xochopa
'Alcozauca Mixtec <i>I̱ta Ita</i> (literally “flower river”)'
>>> render_foreign_derivation("noncog", ["egy", "ḫt n ꜥnḫ", "", "grain, food"], defaultdict(str, {"lit":"wood/stick of life"}))
'Egyptian <i>ḫt n ꜥnḫ</i> (“grain, food”, literally “wood/stick of life”)'
>>> render_foreign_derivation("cal", ["fr" , "en", "light year"], defaultdict(str, {"alt":"alt", "tr":"tr", "t":"t", "g":"m", "pos":"pos", "lit":"lit"}))
'Calque of English <i>alt</i> <i>m</i> (<i>tr</i>, “t”, pos, literally “lit”)'
>>> render_foreign_derivation("pcal", ["en" , "de", "Leberwurst"], defaultdict(str, {"nocap":"1"}))
'partial calque of German <i>Leberwurst</i>'
>>> render_foreign_derivation("sl", ["en", "ru", "пле́нум", "", "plenary session"], defaultdict(str, {"nocap":"1"}))
'semantic loan of Russian <i>пле́нум</i> (<i>plenum</i>, “plenary session”)'
>>> render_foreign_derivation("learned borrowing", ["en", "la", "consanguineus"], defaultdict(str))
'Learned borrowing from Latin <i>consanguineus</i>'
>>> render_foreign_derivation("learned borrowing", ["en", "LL.", "trapezium"], defaultdict(str, {"notext":"1"}))
'Late Latin <i>trapezium</i>'
>>> render_foreign_derivation("slbor", ["en", "fr", "mauvaise foi"], defaultdict(str, {"nocap":"1"}))
'semi-learned borrowing from French <i>mauvaise foi</i>'
>>> render_foreign_derivation("obor", ["en", "ru", "СССР"], defaultdict(str))
'Orthographic borrowing from Russian <i>СССР</i> (<i>SSSR</i>)'
>>> render_foreign_derivation("unadapted borrowing", ["en", "ar", "قِيَاس", "", "measurement, analogy"], defaultdict(str))
'Unadapted borrowing from Arabic <i>قِيَاس</i> (<i>qīās</i>, “measurement, analogy”)'
>>> render_foreign_derivation("psm", ["en", "yue", "-"], defaultdict(str))
'Phono-semantic matching of Cantonese'
>>> render_foreign_derivation("translit", ["en", "ar", "عَالِيَة"], defaultdict(str))
'Transliteration of Arabic <i>عَالِيَة</i> (<i>ʿālī</i>)'
>>> render_foreign_derivation("back-form", ["en", "zero derivation"], defaultdict(str, {"nocap":"1"}))
'back-formation from <i>zero derivation</i>'
>>> render_foreign_derivation("bf", ["en"], defaultdict(str))
'Back-formation'
>>> render_foreign_derivation("l", ["cs", "háček"], defaultdict(str))
'háček'
>>> render_foreign_derivation("l", ["en", "go", "went"], defaultdict(str))
'went'
>>> render_foreign_derivation("l", ["en", "God be with you"], defaultdict(str))
'God be with you'
>>> render_foreign_derivation("l", ["la", "similis"], defaultdict(str, {"t":"like"}))
'similis (“like”)'
>>> render_foreign_derivation("l", ["la", "similis", "", "like"], defaultdict(str))
'similis (“like”)'
>>> render_foreign_derivation("l", ["mul", "☧", ""], defaultdict(str))
'☧'
>>> render_foreign_derivation("l", ["ru", "ру́сский", "", "Russian"], defaultdict(str, {"g":"m"}))
'ру́сский <i>m</i> (<i>russkij</i>, “Russian”)'
>>> render_foreign_derivation("link", ["en", "water vapour"], defaultdict(str))
'water vapour'
>>> render_foreign_derivation("ll", ["en", "cod"], defaultdict(str))
'cod'
>>> render_foreign_derivation("m", ["en", "more"], defaultdict(str))
'<b>more</b>'
>>> render_foreign_derivation("m", ["enm", "us"], defaultdict(str))
'<i>us</i>'
>>> render_foreign_derivation("m", ["ine-pro", "*h₁ed-"], defaultdict(str, {"t":"to eat"}))
'<i>*h₁ed-</i> (“to eat”)'
>>> render_foreign_derivation("m", ["ar", "عِرْق", "", "root"], defaultdict(str))
'<i>عِرْق</i> (<i>ʿrq</i>, “root”)'
>>> render_foreign_derivation("m", ["pal"], defaultdict(str, {"tr":"ˀl'k'", "ts":"erāg", "t":"lowlands"}))
"(<i>ˀl'k'</i> /erāg/, “lowlands”)"
>>> render_foreign_derivation("m", ["ar", "عَرِيق", "", "deep-rooted"], defaultdict(str))
'<i>عَرِيق</i> (<i>ʿrīq</i>, “deep-rooted”)'
>>> render_foreign_derivation("langname-mention", ["en", "-"], defaultdict(str))
'English'
>>> render_foreign_derivation("m+", ["en", "-"], defaultdict(str))
'English'
>>> render_foreign_derivation("m+", ["ja", "力車"], defaultdict(str, {"tr":"rikisha"}))
'Japanese <i>力車</i> (<i>rikisha</i>)'
""" # noqa
# Short path for the {{m|en|WORD}} template
if tpl == "m" and len(parts) == 2 and parts[0] == "en" and not data:
return strong(parts[1])
mentions = (
"back-formation",
"back-form",
"bf",
"l",
"link",
"ll",
"mention",
"m",
)
dest_lang_ignore = (
"cog",
"cognate",
"etyl",
"langname-mention",
"m+",
"nc",
"ncog",
"noncog",
"noncognate",
*mentions,
)
if tpl not in dest_lang_ignore:
parts.pop(0) # Remove the destination language
dst_locale = parts.pop(0)
if tpl == "etyl" and parts:
parts.pop(0)
phrase = ""
starter = ""
word = ""
if data["notext"] != "1":
if tpl in ("calque", "cal", "clq"):
starter = "calque of "
elif tpl in ("partial calque", "pcal"):
starter = "partial calque of "
elif tpl in ("semantic loan", "sl"):
starter = "semantic loan of "
elif tpl in ("learned borrowing", "lbor"):
starter = "learned borrowing from "
elif tpl in ("semi-learned borrowing", "slbor"):
starter = "semi-learned borrowing from "
elif tpl in ("orthographic borrowing", "obor"):
starter = "orthographic borrowing from "
elif tpl in ("unadapted borrowing", "ubor"):
starter = "unadapted borrowing from "
elif tpl in ("phono-semantic matching", "psm"):
starter = "phono-semantic matching of "
elif tpl in ("transliteration", "translit"):
starter = "transliteration of "
elif tpl in ("back-formation", "back-form", "bf"):
starter = "back-formation"
if parts:
starter += " from"
phrase = starter if data["nocap"] == "1" else starter.capitalize()
lang = langs.get(dst_locale, "")
phrase += lang if tpl not in mentions else ""
if parts:
word = parts.pop(0)
if word == "-":
return phrase
word = data["alt"] or word
gloss = data["t"] or data["gloss"]
if parts:
word = parts.pop(0) or word # 4, alt=
if tpl in ("l", "link", "ll"):
phrase += f" {word}"
elif word:
phrase += f" {italic(word)}"
if data["g"]:
phrase += f' {italic(data["g"])}'
trans = ""
if not data["tr"]:
trans = transliterate(dst_locale, word)
if parts:
gloss = parts.pop(0) # 5, t=, gloss=
phrase += gloss_tr_poss(data, gloss, trans)
return phrase.lstrip() | af3c37664e683d9bff610ad1fa53a167f5390988 | 3,657,048 |
import numpy as np
def create_from_ray(ray):
"""Converts a ray to a line.
The line will extend from 'ray origin -> ray origin + ray direction'.
:param numpy.array ray: The ray to convert.
:rtype: numpy.array
:return: A line beginning at the ray start and extending for 1 unit
in the direction of the ray.
"""
# convert ray relative direction to absolute
# position
return np.array([ray[0], ray[0] + ray[1]], dtype=ray.dtype) | 6d0429abbacd235f95636369985bea8a17117409 | 3,657,049 |
from typing import List
import numpy as np
from torch import nn, stack
from sklearn.cluster import KMeans
def cluster_sampling(sents: List[Sentence], tag_type: str, **kwargs) -> List[int]:
"""Cluster sampling.
We create cluster sampling as a kind of diversity sampling method.
Different with most of sampling methods that are based on sentence level,
Cluster sampling method is implemented on entity level.
Cluster sampling classify all entity into cluster, and find the centen in each cluster.
We calculate the similarity between center and entity in the same cluster,
the low similarity pair means high diversity.
Args:
sents (List[Sentence]): [description]
tag_type (str): [description]
Returns:
List[int]: [description]
"""
label_names = kwargs["label_names"]
if "O" in label_names:
label_names.remove("O")
embeddings = kwargs["embeddings"]
embedding_dim = None
# Get entities in each class, each entity has {sent_idx, token_idx, token_text, token_embedding}
label_entity_list = []
for sent_idx, sent in enumerate(sents):
if len(sent.get_spans("ner")) != 0:
embeddings.embed(sent)
for token_idx, token in enumerate(sent):
tag = token.get_tag("ner")
if (
tag.value == "O"
): # Skip if the "O" label. tag.value is the label name
continue
tag_info = {
"sent_idx": sent_idx,
"token_idx": token_idx,
"token_text": token.text,
"token_embedding": token.embedding,
}
if embedding_dim is None:
embedding_dim = len(token.embedding.shape) - 1
label_entity_list.append(tag_info)
# Get all entity embedding matrix
entity_embedding_matrix = [tag["token_embedding"] for tag in label_entity_list]
if entity_embedding_matrix == []:
return random_sampling(sents)
else:
entity_embedding_matrix = stack(entity_embedding_matrix)
# Clustering
kmeans = KMeans(n_clusters=len(label_names))
kmeans.fit(entity_embedding_matrix)
cluster_centers_matrix = kmeans.cluster_centers_
entity_labels = kmeans.labels_
# Find the center in matrix
center_cluster_num = {} # {center_num_in_cluster: center_index_in_matrix}
    for i, token_matrix in enumerate(entity_embedding_matrix):
        for center_matrix in cluster_centers_matrix:
            # Element-wise "==" between arrays is ambiguous in a boolean context,
            # so compare the full vectors explicitly.
            if np.allclose(center_matrix, token_matrix.cpu().numpy()):
                center_num_in_cluster = entity_labels[i]
                center_cluster_num[center_num_in_cluster] = i
# Find the entity in each cluster
label_entity_cluster = {
cluster_num: {"cluster_center_idx": 0, "cluster_member_idx": []}
for cluster_num in center_cluster_num.keys()
}
    for cluster_num in label_entity_cluster.keys():
        label_entity_cluster[cluster_num]["cluster_center_idx"] = center_cluster_num[
            cluster_num
        ]
for i, entity_cluster_num in enumerate(entity_labels):
if entity_cluster_num == cluster_num:
label_entity_cluster[cluster_num]["cluster_member_idx"].append(i)
# Calculate each the similarity between center and entities
for cluster_num, cluster_info in label_entity_cluster.items():
center_idx = cluster_info["cluster_center_idx"]
scores = []
for member_idx in cluster_info["cluster_member_idx"]:
cos = nn.CosineSimilarity(dim=embedding_dim)
cosine_score = cos(
entity_embedding_matrix[center_idx], entity_embedding_matrix[member_idx]
)
scores.append(cosine_score)
label_entity_cluster["sim_scores"] = scores
    # Sort the members of each cluster by their similarity score (ascending).
    for cluster_num, cluster_info in label_entity_cluster.items():
        cluster_member_idx = cluster_info["cluster_member_idx"]
        sim_scores = cluster_info["sim_scores"]
        cluster_info["cluster_member_idx"] = [
            x for _, x in sorted(zip(sim_scores, cluster_member_idx))
        ]
        cluster_info["sim_scores"] = sorted(sim_scores)
# Flat the entity score
entity_scores = [0] * len(label_entity_list)
for cluster_num, cluster_info in label_entity_cluster.items():
for i, member_idx in enumerate(cluster_info["cluster_member_idx"]):
entity_scores[member_idx] += cluster_info["sim_scores"][i]
# Reorder the sentence index
sentence_scores = [99] * len(sents)
for entity_idx, entity_info in enumerate(label_entity_list):
sent_idx = entity_info["sent_idx"]
sentence_scores[sent_idx] += entity_scores[entity_idx]
ascend_indices = np.argsort(sentence_scores)
return ascend_indices | a953ec5eced13e626a3b00769a7e5d505fcb1692 | 3,657,050 |
import os
def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str]
"""Returns a list of directories extracted from the given list of paths."""
dir_names = set()
for path in paths:
while True:
path = os.path.dirname(path)
if not path or path == os.path.sep:
break
dir_names.add(path + os.path.sep)
return sorted(dir_names) | 3472093ffb4870082e7d198410118169683ed786 | 3,657,051 |
def opts2dict(opts):
"""Converts options returned from an OptionParser into a dict"""
ret = {}
for k in dir(opts):
if callable(getattr(opts, k)):
continue
if k.startswith('_'):
continue
ret[k] = getattr(opts, k)
return ret | cfa828f0248ff7565aabbb5c37a7bc6fa38c6450 | 3,657,052 |
def combined_directions(a_list, b_list):
"""
Takes two NoteList objects.
Returns a list of (3)tuples each of the form:
(
int: a dir,
int: b dir,
(int: bar #, float: beat #)
)
"""
onsets = note_onsets(a_list, b_list)
a_dirs = directions(a_list)
b_dirs = directions(b_list)
dirs = {}
for time in onsets:
dirs[time] = (0, 0)
for dir, time in a_dirs:
dirs[time] = (dir, dirs[time][1])
for dir, time in b_dirs:
dirs[time] = (dirs[time][0], dir)
return [
(dirs[time][0], dirs[time][1], time)
for time in onsets
] | 8b66d4de725c51b1abdedb8a8e4c48e78f4ca953 | 3,657,053 |
import numpy as np
from scipy.special import lpmv, factorial
def _naive_csh_seismology(l, m, theta, phi):
"""
Compute the spherical harmonics according to the seismology convention, in a naive way.
This appears to be equal to the sph_harm function in scipy.special.
"""
return (lpmv(m, l, np.cos(theta)) * np.exp(1j * m * phi) *
np.sqrt(((2 * l + 1) * factorial(l - m))
/
(4 * np.pi * factorial(l + m)))) | ba2a17f0dfa6035a05d16c8af79310657fe6ecd7 | 3,657,054 |
from collections import defaultdict
def is_room_valid(room):
"""Check if room is valid."""
_, names, checksum = room
letters = defaultdict(int)
complete_name = ''.join(names)
for letter in complete_name:
letters[letter] += 1
sorted_alphabetic = sorted(letters)
sorted_by_occurrences = sorted(
sorted_alphabetic, key=letters.__getitem__, reverse=True)
return ''.join(sorted_by_occurrences).startswith(checksum) | b893cf97ee28b033741e4b2797b2a4aef485324f | 3,657,055 |
from typing import Dict
def _get_attributes_entropy(dataset: FingerprintDataset,
attributes: AttributeSet
) -> Dict[Attribute, float]:
"""Give a dictionary with the entropy of each attribute.
Args:
dataset: The fingerprint dataset used to compute the entropy.
attributes: The attributes for which we compute the entropy.
Raises:
ValueError: There are attributes and the fingerprint dataset is empty.
KeyError: An attribute is not in the fingerprint dataset.
Returns:
A dictionary with each attribute (Attribute) and its entropy.
"""
# Some checks before starting the exploration
if attributes and dataset.dataframe.empty:
raise ValueError('Cannot compute the entropy on an empty dataset.')
for attribute in attributes:
if attribute not in dataset.candidate_attributes:
raise KeyError(f'The attribute {attribute} is not in the dataset.')
# We will work on a dataset with only a fingerprint per browser to avoid
# overcounting effects
df_one_fp_per_browser = dataset.get_df_w_one_fp_per_browser()
# If we execute on a single process
if not params.getboolean('Multiprocessing', 'explorations'):
logger.debug('Measuring the attributes entropy on a single process...')
return _compute_attribute_entropy(df_one_fp_per_browser, attributes)
# The dictionary to update when using multiprocessing
logger.debug('Measuring the attributes entropy using multiprocessing...')
attributes_entropy = {}
# Infer the number of cores to use
free_cores = params.getint('Multiprocessing', 'free_cores')
nb_cores = max(cpu_count() - free_cores, 1)
attributes_per_core = int(ceil(len(attributes)/nb_cores))
logger.debug(f'Sharing {len(attributes)} attributes over '
f'{nb_cores}(+{free_cores}) cores, hence '
f'{attributes_per_core} attributes per core.')
def update_attributes_entropy(attrs_entropy: Dict[Attribute, float]):
"""Update the complete dictionary attributes_entropy.
Args:
            attrs_entropy: The dictionary containing the subset of the results
                computed by a process.
Note: This is executed by the main thread and does not pose any
concurrency or synchronization problem.
"""
for attribute, attribute_entropy in attrs_entropy.items():
attributes_entropy[attribute] = attribute_entropy
# Spawn a number of processes equal to the number of cores
attributes_list = list(attributes)
async_results = []
with Pool(processes=nb_cores) as pool:
for process_id in range(nb_cores):
# Generate the candidate attributes for this process
start_id = process_id * attributes_per_core
end_id = (process_id + 1) * attributes_per_core
attributes_subset = AttributeSet(attributes_list[start_id:end_id])
async_result = pool.apply_async(
_compute_attribute_entropy,
args=(df_one_fp_per_browser, attributes_subset),
callback=update_attributes_entropy)
async_results.append(async_result)
# Wait for all the processes to finish (otherwise we would exit
# before collecting their result)
for async_result in async_results:
async_result.wait()
return attributes_entropy | 616abbd292f10d0a01d7d56ab5636ac5883fa230 | 3,657,056 |
def _mag_shrink_hard(x, r, t):
""" x is the input, r is the magnitude and t is the threshold
"""
gain = (r >= t).float()
return x * gain | da795bcfc2a6e4bfa3e54d1334c9d8865141a4f1 | 3,657,057 |
from sys import base_prefix
def is_macports_env():
"""
Check if Python interpreter was installed via Macports command 'port'.
    :return: True if Macports, False otherwise.
"""
# Python path prefix should start with Macports prefix.
env_prefix = get_macports_prefix()
if env_prefix and base_prefix.startswith(env_prefix):
return True
return False | b90c43f7ef267ab237e8f6c205eb2a62969b5539 | 3,657,058 |
def wiki_data(request, pro_id):
""" 文章标题展示 """
data = models.Wiki.objects.filter(project_id=pro_id).values('id', 'title', 'parent_id').order_by('deepth')
return JsonResponse({'status': True, 'data': list(data)}) | 6dfbb79b78133935356bd87cc24a294ed0001b73 | 3,657,059 |
import os
import json
def create_task_spec_def():
"""Returns the a :class:`TaskSpecDef` based on the environment variables for distributed training.
References
----------
- `ML-engine trainer considerations <https://cloud.google.com/ml-engine/docs/trainer-considerations#use_tf_config>`__
- `TensorPort Distributed Computing <https://www.tensorport.com/documentation/code-details/>`__
"""
if 'TF_CONFIG' in os.environ:
# TF_CONFIG is used in ML-engine
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
cluster_data = env.get('cluster', None) or {'ps': None, 'worker': None, 'master': None}
return TaskSpecDef(
task_type=task_data['type'],
index=task_data['index'],
trial=task_data['trial'] if 'trial' in task_data else None,
ps_hosts=cluster_data['ps'],
worker_hosts=cluster_data['worker'],
master=cluster_data['master'] if 'master' in cluster_data else None)
elif 'JOB_NAME' in os.environ:
# JOB_NAME, TASK_INDEX, PS_HOSTS, WORKER_HOSTS and MASTER_HOST are used in TensorPort
return TaskSpecDef(
task_type=os.environ['JOB_NAME'],
index=os.environ['TASK_INDEX'],
ps_hosts=os.environ.get('PS_HOSTS', None),
worker_hosts=os.environ.get('WORKER_HOSTS', None),
master=os.environ.get('MASTER_HOST', None))
else:
raise Exception('You need to setup TF_CONFIG or JOB_NAME to define the task.') | fdf1680e41f072ebf0c9b2b228095fba91d5af09 | 3,657,060 |
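# For reference, a TF_CONFIG value that the first branch would parse; hostnames and ports
# are placeholders, and TaskSpecDef comes from the surrounding module:
import json
import os

os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'ps': ['ps0.example.com:2222'],
        'worker': ['worker0.example.com:2222', 'worker1.example.com:2222'],
        'master': ['master0.example.com:2222'],
    },
    'task': {'type': 'worker', 'index': 1},
})
task_spec = create_task_spec_def()  # TaskSpecDef(task_type='worker', index=1, ...)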
def many_capitalized_words(s):
"""Returns a function to check percentage of capitalized words.
    The function returns 1 if the percentage is greater than 66% and 0 otherwise.
"""
return 1 if capitalized_words_percent(s) > 66 else 0 | cc82a2708defd545a1170bfeabb5848e3092fc39 | 3,657,061 |
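# capitalized_words_percent() is not defined above; one plausible (hypothetical) implementation:
def capitalized_words_percent(s):
    # Percentage of whitespace-separated tokens that start with an uppercase letter.
    words = [w for w in s.split() if w]
    if not words:
        return 0
    capitalized = sum(1 for w in words if w[0].isupper())
    return 100.0 * capitalized / len(words)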
def cmd_te_solution_build(abs_filename,wait=False,print_output=False,clear_output=False):
"""ソリューションをビルドする(テキストエディタ向け)
ファイルが含まれるVisual Studioを探し出してソリューションをビルドする。
VisualStudioの「メニュー -> ビルド -> ソリューションのビルド」と同じ動作。
abs_filename- ファイル名の絶対パス
(Ex.) c:/project/my_app/src/main.cpp
wait - True ビルド終了まで待つ(完了復帰)
False 即時復帰
print_output- True コンパイル結果をコンソールへ表示
False 何もしない
clear_output- True VisualStudioの出力ウインドウをクリアする
False 何もしない
"""
return _te_main(cmd_solution_build, abs_filename,wait,print_output,clear_output) | db48988d483da6ae9a012460e0d5fdd326d5ae40 | 3,657,062 |
def log_ratio_measure(
segmented_topics, accumulator, normalize=False, with_std=False, with_support=False):
"""
If normalize=False:
Popularly known as PMI.
This function calculates the log-ratio-measure which is used by
coherence measures such as c_v.
This is defined as: m_lr(S_i) = log[(P(W', W*) + e) / (P(W') * P(W*))]
If normalize=True:
        This function calculates the normalized-log-ratio-measure, popularly known as
        NPMI, which is used by coherence measures such as c_v.
This is defined as: m_nlr(S_i) = m_lr(S_i) / -log[P(W', W*) + e]
Args:
segmented_topics (list): Output from the segmentation module of the segmented
topics. Is a list of list of tuples.
accumulator: word occurrence accumulator from probability_estimation.
with_std (bool): True to also include standard deviation across topic segment
sets in addition to the mean coherence for each topic; default is False.
with_support (bool): True to also include support across topic segments. The
support is defined as the number of pairwise similarity comparisons were
used to compute the overall topic coherence.
Returns:
list : of log ratio measure for each topic.
"""
topic_coherences = []
num_docs = float(accumulator.num_docs)
for s_i in segmented_topics:
segment_sims = []
for w_prime, w_star in s_i:
w_prime_count = accumulator[w_prime]
w_star_count = accumulator[w_star]
co_occur_count = accumulator[w_prime, w_star]
if normalize:
# For normalized log ratio measure
numerator = log_ratio_measure([[(w_prime, w_star)]], accumulator)[0]
co_doc_prob = co_occur_count / num_docs
m_lr_i = numerator / (-np.log(co_doc_prob + EPSILON))
else:
# For log ratio measure without normalization
numerator = (co_occur_count / num_docs) + EPSILON
denominator = (w_prime_count / num_docs) * (w_star_count / num_docs)
m_lr_i = np.log(numerator / denominator)
segment_sims.append(m_lr_i)
topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))
return topic_coherences | 73fec59f84402066ccbbcd25d30cc69698f6b721 | 3,657,063 |
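# Toy numeric illustration of the two measures, using raw counts directly instead of
# gensim's accumulator object (counts below are made up for the example):
import numpy as np

EPSILON = 1e-12
num_docs = 100.0
w_prime_count, w_star_count, co_occur_count = 40.0, 25.0, 15.0

pmi = np.log(((co_occur_count / num_docs) + EPSILON) /
             ((w_prime_count / num_docs) * (w_star_count / num_docs)))   # ~0.405
npmi = pmi / -np.log((co_occur_count / num_docs) + EPSILON)              # ~0.214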
def _calculate_monthly_anomaly(data, apply_filter=False, base_period=None,
lat_name=None, lon_name=None, time_name=None):
"""Calculate monthly anomalies at each grid point."""
# Ensure that the data provided is a data array
data = rdu.ensure_data_array(data)
# Get coordinate names
lat_name = lat_name if lat_name is not None else rdu.get_lat_name(data)
lon_name = lon_name if lon_name is not None else rdu.get_lon_name(data)
time_name = time_name if time_name is not None else rdu.get_time_name(data)
# Get subset of data to use for computing anomalies
base_period = rdu.check_base_period(
data, base_period=base_period, time_name=time_name)
input_frequency = rdu.detect_frequency(data, time_name=time_name)
if input_frequency not in ('daily', 'monthly'):
raise RuntimeError(
'Can only calculate anomalies for daily or monthly data')
if input_frequency == 'daily':
data = data.resample({time_name: '1MS'}).mean()
base_period_data = data.where(
(data[time_name] >= base_period[0]) &
(data[time_name] <= base_period[1]), drop=True)
monthly_clim = base_period_data.groupby(
base_period_data[time_name].dt.month).mean(time_name)
monthly_anom = data.groupby(data[time_name].dt.month) - monthly_clim
if apply_filter:
monthly_anom = monthly_anom.rolling(
{time_name: 3}).mean().dropna(time_name, how='all')
# Approximate sampling frequency
seconds_per_day = 60 * 60 * 24.0
fs = 1.0 / (seconds_per_day * 30)
# Remove all modes with period greater than 7 years
fmin = 1.0 / (seconds_per_day * 365.25 * 7)
monthly_anom = _apply_fft_high_pass_filter(
monthly_anom, fmin=fmin, fs=fs, detrend=True,
time_name=time_name)
return monthly_anom | 397bffb8f22ae26cf2c41cd8c056951ef55d692d | 3,657,064 |
def process_song(song_id):
"""
    Song id, song title, singer id, album id, lyrics, comment count
    Process song information
    :param song_id: song id
    :return: processing status (True or False)
"""
log("正在处理歌曲:{}".format(song_id))
if db.hexists("song:" + song_id, "id"):
log("有缓存(已做过处理),歌曲id:{}".format(song_id))
return True
else:
song_url = url_prefix + "song?id={}".format(song_id)
song_html = process_url(song_url)
song_content = pq(song_html)
head_data = song_content(".cnt")
song_name = head_data(".tit").text()
        # TODO: add element selection for multiple singers
sid = head_data("p:nth-child(2) a").attr("href").split("=")[1]
album_id = head_data("p:nth-child(3) a").attr("href").split("=")[1]
lyric = process_lyric_from_html(song_content)
comment_count = head_data("#cnt_comment_count").text()
data = {
"id": song_id,
"name": song_name,
"singer_id": sid,
"album_id": album_id,
"lyric": lyric,
"comment_count": comment_count
}
try:
db.hmset("song:" + song_id, data)
except Exception as e:
log("song存入Redis时发生错误:{}".format(e))
return False
log("歌曲{}({})处理完毕".format(song_id, song_name))
return True | 148953bd42ce8aba3bf6b90aed7a5276dd0794c3 | 3,657,065 |
import os
def expand_path(path):
"""
Convert a path to an absolute path. This does home directory expansion,
meaning a leading ~ or ~user is translated to the current or given user's
home directory. Relative paths are relative to the current working
directory.
:param path: Relative or absolute path of file.
:return: Absolute path
"""
return os.path.abspath(os.path.expanduser(path)) | dc73eb377fd5b16091596f4345ee024c3d42e5bc | 3,657,066 |
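# Example calls (the expanded home directory is machine-dependent):
expand_path('~/data/config.yml')   # e.g. '/home/alice/data/config.yml'
expand_path('../logs/app.log')     # resolved against the current working directory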
from pprint import pprint
from requests import exceptions, packages, put
def oxe_system_alaw_to_mulaw(host, token, mode):
"""Summary
Args:
host (TYPE): Description
token (TYPE): Description
mode (TYPE): Description
Returns:
TYPE: Description
"""
payload = {
'T0_Mu_Law': mode
}
packages.urllib3.disable_warnings(packages.urllib3.exceptions.InsecureRequestWarning)
try:
modification = put(
'https://' + host +
'/api/mgt/1.0/Node/1/System_Parameters/1/System_Parameters_2/1/System_/T0_Mu_Law',
json=payload,
headers=oxe_set_headers(token, 'PUT'),
verify=False)
    except exceptions.RequestException as e:
        pprint(e)
        return None
    return modification.status_code | 19bb98f8326e84cde83691028a2fc2585a7abe6e | 3,657,067
import numpy as np
def update_weights(comment_weights, comment_usage):
"""Updates the weights used to upvote comments so that the actual voting
power usage is equal to the estimated usage.
"""
desired_usage = 1.0 - VP_COMMENTS / 100.0
actual_usage = 1.0 - comment_usage / 100.0
scaler = np.log(desired_usage) / np.log(actual_usage)
for category in comment_weights.keys():
comment_weights[category] *= scaler
return comment_weights | 19d2f0a9ec790c26000946c0b91ef3bc00f36905 | 3,657,068 |
import math
def smaller2k(n):
"""
Returns power of 2 which is smaller than n. Handles negative numbers.
"""
if n == 0: return 0
if n < 0:
return -2**math.ceil(math.log2(-n))
else:
return 2**math.floor(math.log2(n)) | 0d0bbbf95cb22bf1b9ffb29012075534bcc9646d | 3,657,069 |
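# Quick examples:
smaller2k(10)    # 8
smaller2k(-10)   # -16
smaller2k(1)     # 1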
import numpy as np
from PIL import Image
from lxml import etree
import sys
import os.path
import pickle
import time
import urllib.request
import io
import requests
import tensorflow as tf
from mynet import CaffeNet
def create_anime_image_data(anime):
"""Create (or load) a dict for each anime that has a high level CNN
representation of the associated MAL image.
Parameters:
-----------
anime : Pandas dataframe
the dataframe corresponding to the list of all anime in the dataset.
Returns:
--------
image_data : dict
A dict where each title is a key and the CNN representation of its MAL
image is the value.
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
fname = dir_path + '/../data/image_data.p'
if os.path.isfile(fname):
print('Using cached image data.')
return pickle.load(open(fname, 'rb'))
# To import mynet from a directory below, I must add that directory to path
sys.path.insert(0, dir_path + '/../')
#MAL credentials
username = 'username'; password = 'password'
#Get the tensorflow model started
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = CaffeNet({'data':images})
sesh = tf.Session()
sesh.run(tf.global_variables_initializer())
# Load the data
net.load('mynet.npy', sesh)
image_data = {}
width, height = (225, 350) #all MAL images are this size
new_width, new_height = (224, 224)
left = int((width - new_width)/2)
top = int((height - new_height)/2)
right = (left+new_width)
bottom = (top + new_height)
# Now to actually construct the dataset
for name in anime.name:
#First, get the full anime XML from MAL's search query
title = "+".join(name.split() )
        query = 'https://%s:%s@myanimelist.net/api/anime/search.xml?q=%s' \
% (username, password, title)
r = requests.get(query)
#Make sure that the request goes through
while r.status_code != requests.codes.ok:
r = requests.get(query)
time.sleep(1.0) # don't overload their server...
#From the XML file, pull all images that fit the query
doc = etree.fromstring(r.content)
image = doc.xpath('.//image/text()')
''' For sake of simplicity, I assume that the first image,
corresponding to the first matching response to the query, is what
we want. This isn't strictly correct, but for my goals here it's
good enough.'''
URL = image[0]
with urllib.request.urlopen(URL) as url:
f = io.BytesIO(url.read())
img = Image.open(f, 'r')
#Center crop image so it's 225x225x3, and convert to numpy.
img = np.array(img.crop((left, top, right, bottom)))
#Now use the Illustration2Vec pre-trained model to extract features.
output = sesh.run(net.get_output(), feed_dict={images: img[None,:]})
image_data[name] = output
        print('Finished with ' + name)
    pickle.dump(image_data, open(fname, 'wb'))
    sesh.close()
    return image_data | fbe6fc4bbfd3623c40bf78c8c33bc960bea307d2 | 3,657,070
def deferred_bots_for_alias(alias):
"""Returns a dict where the keys are bot names whose commands have an alias
that conflicts with the provided alias, and the values are a list of
prefixes that would cause that conflict."""
return {
# TODO Support more prefixes than one
config['name']: [config['prefix']]
for config in CONFIG['deferral']
if alias.lower() in config['commands']
} | 338776546622ed0bb6290b2d93ddb3129e764d02 | 3,657,071 |
def modeify(intcode, i):
    """Apply a mode to a parameter.
    Relies on an external ``opcode()`` helper that returns a dict with
    'param_count' and 'modes' for the opcode at position ``i``.
    """
j = i + 1
_opcode = opcode(intcode[i])
params = intcode[j: j + _opcode['param_count']]
modes = _opcode['modes']
mode_covert = {
0: lambda x: intcode[x], # position mode
1: lambda x: x # immediate mode
}
output = [mode_covert[mode](param) for mode, param in zip(modes, params)]
return output | 230fb2e43c33558d94a7d60c6dd16978098421aa | 3,657,072 |
def unwind(g, num):
"""Return <num> first elements from iterator <g> as array."""
return [next(g) for _ in range(num)] | 59b724ca27729b4fc20d19a40f95d590025307c4 | 3,657,073 |
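# For example:
g = iter(range(10))
unwind(g, 3)   # [0, 1, 2]
unwind(g, 2)   # [3, 4] -- the iterator keeps its position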
def find_best_control(db, input_features, max_distance=200.0, debug=False, control_cache=None):
"""
Search all controls with AST vector magnitudes within max_distance and find the best hit (lowest product of AST*call distance)
against suitable controls. Does not currently use literal distance for the calculation. Could be improved.... returns up to two hits
representing the best and next best hits (although the latter may be None).
"""
assert db is not None
origin_url = input_features.get('url', input_features.get('id')) # LEGACY: url field used to be named id field
cited_on = input_features.get('origin', None) # report owning HTML page also if possible (useful for data analysis)
origin_js_id = input_features.get("js_id", None) # ensure we can find the script directly without URL lookup
if isinstance(origin_js_id, tuple) or isinstance(origin_js_id, list): # BUG FIXME: should not be a tuple but is... where is that coming from??? so...
origin_js_id = origin_js_id[0]
assert isinstance(origin_js_id, str) and len(origin_js_id) == 24
best_distance = float('Inf')
input_ast_vector, ast_sum = calculate_ast_vector(input_features['statements_by_count']) # NB: UNweighted vector
fcall_sum = sum(input_features['calls_by_count'].values())
best_control = BestControl(control_url='', origin_url=origin_url, cited_on=cited_on,
sha256_matched=False,
ast_dist=float('Inf'),
function_dist=float('Inf'),
literal_dist=0.0,
diff_functions='',
origin_js_id=origin_js_id)
second_best_control = None
# we open the distance to explore "near by" a little bit... but the scoring for these hits is unchanged
if debug:
print("find_best_control({})".format(origin_url))
plausible_controls = find_plausible_controls(db, ast_sum, fcall_sum, max_distance=max_distance)
feasible_controls = find_feasible_controls(db, plausible_controls, debug=debug, control_cache=control_cache)
for fc_tuple in feasible_controls:
control, control_ast_sum, control_ast_vector, control_call_vector = fc_tuple # NB: unweighted ast vector
assert isinstance(control, dict)
assert control_ast_sum > 0
assert isinstance(control_ast_vector, list)
control_url = control.get('origin')
# compute what we can for now and if we can update it later we will. Otherwise the second_best control may have some fields not-computed
new_distance, ast_dist, call_dist, diff_functions = distance(input_ast_vector, control_ast_vector,
input_features['calls_by_count'], control_call_vector, debug=debug)
if call_dist < 5.0 and new_distance > max_distance:
print("WARNING: rejecting possibly feasible control due to bad total distance: {} {} {} {} {}".format(new_distance, ast_dist, call_dist, control_url, origin_url))
if new_distance < best_distance and new_distance <= max_distance:
if debug:
print("Got good distance {} for {} (was {}, max={})".format(new_distance, control_url, best_distance, max_distance))
new_control = BestControl(control_url=control_url, # control artefact from CDN (ground truth)
origin_url=origin_url, # JS at spidered site
origin_js_id=origin_js_id,
cited_on=cited_on,
sha256_matched=False,
ast_dist=ast_dist,
function_dist=call_dist,
literal_dist=0.0,
diff_functions=' '.join(diff_functions))
# NB: look at product of two distances before deciding to update best_* - hopefully this results in a lower false positive rate
# (with accidental ast hits) as the number of controls in the database increases
if best_control.is_better(new_control, max_distance=max_distance):
second_dist = second_best_control.distance() if second_best_control is not None else 0.0
if second_best_control is None or second_dist > new_control.distance():
if debug:
print("NOTE: improved second_best control was {} now is {}".format(second_best_control, new_control))
second_best_control = new_control
                    # NB: don't update best_* since we don't consider this hit a replacement for current best_control
else:
best_distance = new_distance
second_best_control = best_control
best_control = new_control
if best_distance < 0.00001: # small distance means we can try for a hash match against control?
assert control_url == best_control.control_url
hash_match = (control['sha256'] == input_features['sha256'])
best_control.sha256_matched = hash_match
break # save time since we've likely found the best control but this may mean next_best_control is not second best in rare cases
else:
if debug:
print("Rejecting control {} ast_dist={} fcall_dist={} total={}".format(control['origin'], ast_dist, call_dist, new_distance))
# NB: literal fields in best_control/next_best_control are updated elsewhere... not here
return (best_control, second_best_control) | 21589a3070f59f556a7cc540b5d69839fbb95327 | 3,657,074 |
import re
def CPPComments(text):
"""Remove all C-comments and replace with C++ comments."""
# Keep the copyright header style.
line_list = text.splitlines(True)
copyright_list = line_list[0:10]
code_list = line_list[10:]
copy_text = ''.join(copyright_list)
code_text = ''.join(code_list)
# Remove */ for C-comments, don't care about trailing blanks.
comment_end = re.compile(r'\n[ ]*\*/[ ]*')
code_text = re.sub(comment_end, '', code_text)
comment_end = re.compile(r'\*/')
code_text = re.sub(comment_end, '', code_text)
# Remove comment lines in the middle of comments, replace with C++ comments.
comment_star = re.compile(r'(?<=\n)[ ]*(?!\*\w)\*[ ]*')
code_text = re.sub(comment_star, r'// ', code_text)
# Remove start of C comment and replace with C++ comment.
comment_start = re.compile(r'/\*[ ]*\n')
code_text = re.sub(comment_start, '', code_text)
comment_start = re.compile(r'/\*[ ]*(.)')
code_text = re.sub(comment_start, r'// \1', code_text)
# Add copyright info.
return copy_text + code_text | 0dd490f5497c073534abc30944bd49d0a3cf7e3e | 3,657,075 |
def get_bulk_statement(
stmt_type, table_name, column_names, dicts=True, value_string="%s", odku=False
):
"""Get a SQL statement suitable for use with bulk execute functions
Parameters
----------
stmt_type : str
One of REPLACE, INSERT, or INSERT IGNORE. **Note:** Backend support for
this varies.
table_name : str
Name of SQL table to use in statement
column_names : list
A list of column names to load
dicts : bool, optional
If true, assume the data will be a list of dict rows
value_string : str, optional
        The parameter replacement string used by the underlying DB API
odku : bool or list, optional
If true, add ON DUPLICATE KEY UPDATE clause for all columns. If a list
then only add it for the specified columns. **Note:** Backend support for
this varies.
Returns
-------
sql : str
The sql query string to use with bulk execute functions
"""
if not stmt_type.lower() in ("replace", "insert", "insert ignore"):
raise AssertionError("Invalid statement type: %s" % stmt_type)
columns_clause = ", ".join(["`%s`" % c for c in column_names])
if dicts:
values_clause = ", ".join(["%%(%s)s" % c for c in column_names])
else:
values_clause = ", ".join(["%s" % value_string for c in column_names])
sql = "%s INTO %s (%s) VALUES (%s)" % (
stmt_type,
table_name,
columns_clause,
values_clause,
)
if odku:
odku_cols = column_names
if isinstance(odku, (list, tuple)):
odku_cols = odku
odku_clause = ", ".join(["%s=VALUES(%s)" % (col, col) for col in odku_cols])
sql = sql + " ON DUPLICATE KEY UPDATE %s" % odku_clause
return escape_string(sql) | ba2277fc6f84d79a97d70cf98d2e26f308b8fa82 | 3,657,076 |
def map_remove_by_value_range(bin_name, value_start, value_end, return_type, inverted=False):
"""Creates a map_remove_by_value_range operation to be used with operate or operate_ordered
The operation removes items, with values between value_start(inclusive) and
value_end(exclusive) from the map
Args:
bin_name (str): The name of the bin containing the map.
value_start: The start of the range of values to be removed. (Inclusive)
value_end: The end of the range of values to be removed. (Exclusive)
return_type (int): Value specifying what should be returned from the operation.
This should be one of the aerospike.MAP_RETURN_* values.
inverted (bool): If True, values outside of the specified range will be removed, and
values inside of the range will be kept. Default: False
Returns:
A dictionary usable in operate or operate_ordered. The format of the dictionary
should be considered an internal detail, and subject to change.
"""
op_dict = {
OP_KEY: aerospike.OP_MAP_REMOVE_BY_VALUE_RANGE,
BIN_KEY: bin_name,
VALUE_KEY: value_start,
RANGE_KEY: value_end,
RETURN_TYPE_KEY: return_type,
INVERTED_KEY: inverted
}
return op_dict | 42a49aefb92f61a3064e532390bdcf26b6266f40 | 3,657,077 |
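# Hedged usage sketch with the Aerospike client; host, key, bin name and range values
# are placeholders:
import aerospike

client = aerospike.client({'hosts': [('127.0.0.1', 3000)]}).connect()
key = ('test', 'demo', 'user1')
ops = [
    map_remove_by_value_range('scores', 0, 50, aerospike.MAP_RETURN_COUNT),
]
_, _, bins = client.operate(key, ops)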
import numpy as np
from collections import defaultdict
def rationalApproximation(points, N, tol=1e-3, lowest_order_only=True):
"""
Return rational approximations for a set of 2D points.
For a set of points :math:`(x,y)` where :math:`0 < x,y \\leq1`, return all
possible rational approximations :math:`(a,b,c) \\; a,b,c \\in \\mathbb{Z}`
such that :math:`(x,y) \\approx (a/c, b/c)`.
Arguments:
points: 2D (L x 2) points to approximate
N: max order
Returns:
        ``dict``: Dictionary with point indices (rows of ``points``) as *keys* and the
        corresponding ``set`` of tuples ``(a,b,c)`` as values.
"""
L,_ = points.shape
# since this solutions assumes a>0, a 'quick' hack to also obtain solutions
# with a < 0 is to flip the dimensions of the points and explore those
# solutions as well
points = np.vstack((points, np.fliplr(points)))
solutions = defaultdict(set)
sequences = {1: set(fareySequence(1))}
for n in range(2, N+1):
sequences[n] = set(fareySequence(n)) - sequences[n-1]
for h,k in fareySequence(N,1):
if 0 in (h,k):
continue
# print h,k
for x,y in resonanceSequence(N, k):
# avoid 0-solutions
if 0 in (x,y):
continue
norm = np.sqrt(x**2+y**2)
n = np.array([ y/norm, x/norm]) * np.ones_like(points)
n[points[:,0] < h/k, 0] *= -1 # points approaching from the left
# nomenclature inspired in http://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Vector_formulation
ap = np.array([h/k, 0]) - points
apn = np.zeros((1,L))
d = np.zeros_like(points)
apn = np.sum(n*ap, 1, keepdims=True)
d = ap - apn*n
## DON'T RETURN IMMEDIATELY; THERE MIGHT BE OTHER SOLUTIONS OF THE SAME ORDER
indices, = np.nonzero(np.sqrt(np.sum(d*d,1)) <= tol)
for i in indices:
# print "h/k:", h , "/", k
# print "point:", points[i,:]
if points[i,0] >= h/k:
if i<L:
# print "non-flipped >= h/k"
solutions[i].add((x,-y, h*x/k))
# print i, (x,-y, h*x/k)
elif x*(-y)<0: # only consider solutions where (a,b) have different sign for the "flipped" points (the other solutions should have already been found for the non-flipped points)
# print "flipped >= h/k"
solutions[i-L].add((-y, x, h*x/k))
# print i-L, (-y, x, h*x/k)
else:
if i<L:
# print "non-flipped < h/k"
solutions[i].add((x, y, h*x/k))
# print i, (x, y, h*x/k)
elif x*y>0: # only consider solutions where (a,b) have different sign for the "flipped" points (the other solutions should have already been found for the non-flipped points)
# print "flipped < h/k"
solutions[i-L].add((y, x, h*x/k))
# print i-L, (y, x, h*x/k)
if lowest_order_only:
# removed = 0
for k in solutions:
# keep lowest order solutions only
lowest_order = 2*N
s = set([])
for sol in solutions[k]:
K = abs(sol[0])+abs(sol[1])+abs(sol[2])
if K == lowest_order:
s.add(sol)
elif K < lowest_order:
lowest_order = K
# if len(s) > 0:
# print("point: ({},{}) -> removing {} for {}".format(points[k,0], points[k,1], s, sol))
# removed += len(s)
s = set([sol])
solutions[k] = s
# print("Removed {} solutions".format(removed))
return solutions | 614c230ad7fd68cb60d0203cba2bd15e30f3f36a | 3,657,078 |
import subprocess
from time import sleep
def get_notebook_server_instance(try_use_existing=False):
"""Create a notebook server instance to use. Optionally attempting to re-use existing
instances.
"""
pid = get_cache_pid()
servers = list_running_servers()
# If we already have a server, use that
for server in servers:
if server["pid"] == pid:
return (server, None)
# Otherwise, if we are allowed, try to piggyback on another session
if try_use_existing and servers:
return (servers[0], None)
# Fine, I'll make my own server, with blackjack, and userhooks!
try:
server_process = subprocess.Popen(["jupyter", "notebook", "--no-browser"])
except OSError as err:
raise RuntimeError("Failed to start server: {}".format(err))
print("Started Jupyter Notebook server pid {}".format(server_process.pid))
# wait for 1 second for server to come up
sleep(1)
server = None
for retry in range(5):
try:
server = {s["pid"]: s for s in list_running_servers()}[server_process.pid]
break
except KeyError:
# Sleep for increasing times to give server a chance to come up
sleep(5)
if server:
return (server, server_process)
# Don't leave orphans!
server_process.kill()
raise RuntimeError("Failed to acquire server instance after 25s") | b11932b2be3319be388913427ef7623690fa11f1 | 3,657,079 |
def to_dict(doc, fields):
"""Warning: Using this convenience fn is probably not as efficient as the
plain old manually building up a dict.
"""
def map_field(prop):
val = getattr(doc, prop)
if isinstance(val, list):
return [(e.to_dict() if hasattr(e, 'to_dict') else e) for e in val]
else:
return val.to_dict() if hasattr(val, 'to_dict') else val
return {f: map_field(f) for f in fields} | cb51e3dfdf8c313f218e38d8693af9e7c6bf5045 | 3,657,080 |
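# Small self-contained illustration:
class Tag:
    def __init__(self, name):
        self.name = name
    def to_dict(self):
        return {'name': self.name}

class Doc:
    def __init__(self):
        self.title = 'hello'
        self.tags = [Tag('a'), Tag('b')]

to_dict(Doc(), ['title', 'tags'])
# {'title': 'hello', 'tags': [{'name': 'a'}, {'name': 'b'}]}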
import time
def _auto_wrap_external(real_env_creator):
"""Wrap an environment in the ExternalEnv interface if needed.
Args:
real_env_creator (fn): Create an env given the env_config.
"""
def wrapped_creator(env_config):
real_env = real_env_creator(env_config)
if not isinstance(real_env, (ExternalEnv, ExternalMultiAgentEnv)):
logger.info(
"The env you specified is not a supported (sub-)type of "
"ExternalEnv. Attempting to convert it automatically to "
"ExternalEnv."
)
if isinstance(real_env, MultiAgentEnv):
external_cls = ExternalMultiAgentEnv
else:
external_cls = ExternalEnv
class ExternalEnvWrapper(external_cls):
def __init__(self, real_env):
super().__init__(
observation_space=real_env.observation_space,
action_space=real_env.action_space,
)
def run(self):
# Since we are calling methods on this class in the
# client, run doesn't need to do anything.
time.sleep(999999)
return ExternalEnvWrapper(real_env)
return real_env
return wrapped_creator | ef7f0c7ecdf3eea61a4e9dc0ad709e80d8a09e08 | 3,657,081 |
def _get_binary_link_deps(
base_path,
name,
linker_flags = (),
allocator = "malloc",
default_deps = True):
"""
Return a list of dependencies that should apply to *all* binary rules that link C/C++ code.
This also creates a sanitizer configuration rule if necessary, so this function
should not be called more than once for a given rule.
Args:
base_path: The package path
name: The name of the rule
linker_flags: If provided, flags to pass to allocator/converage/sanitizers to
make sure proper dependent rules are generated.
allocator: The allocator to use. This is generally set by a configuration option
            and retrieved in allocators.bzl
default_deps: If set, add in a list of "default deps", dependencies that
should generally be added to make sure binaries work consistently.
e.g. common/init
Returns:
A list of `RuleTarget` structs that should be added as dependencies.
"""
deps = []
# If we're not using a sanitizer add allocator deps.
if sanitizers.get_sanitizer() == None:
deps.extend(allocators.get_allocator_deps(allocator))
# Add in any dependencies required for sanitizers.
deps.extend(sanitizers.get_sanitizer_binary_deps())
deps.append(
_create_sanitizer_configuration(
base_path,
name,
linker_flags,
),
)
# Add in any dependencies required for code coverage
if coverage.get_coverage():
deps.extend(coverage.get_coverage_binary_deps())
# We link in our own implementation of `kill` to binaries (S110576).
if default_deps:
deps.append(_COMMON_INIT_KILL)
return deps | 06a52934a0c121b606c79a6f5ae58863645bba34 | 3,657,082 |
from rdkit import Chem
def create_dummy_ligand(ligand, cut_idx=None):
"""
Takes mol object and splits it based on a primary amine such that the frags can connect to
the tertiary amine on the Mo core.
Args:
        ligand (mol): molecule to split
        cut_idx (int): index of the primary amine nitrogen to cut at
Returns:
ligands List(mol) :
"""
# TODO AllChem.ReplaceCore() could be used here instead
# Initialize dummy mol
dummy = Chem.MolFromSmiles("*")
# Create explicit hydrogens
ligand = Chem.AddHs(ligand)
# Get the neigbouring bonds to the amine given by cut_idx
atom = ligand.GetAtomWithIdx(cut_idx)
    # Create list of tuples that contain the amine idx and the idx of the neighbor.
indices = [
(cut_idx, x.GetIdx()) for x in atom.GetNeighbors() if x.GetAtomicNum() != 1
][0]
# Get the bonds to the neighbors.
bond = []
bond.append(ligand.GetBondBetweenAtoms(indices[0], indices[1]).GetIdx())
# Get the two fragments, the ligand and the NH2
frag = Chem.FragmentOnBonds(ligand, bond, addDummies=True, dummyLabels=[(1, 1)])
frags = Chem.GetMolFrags(frag, asMols=True, sanitizeFrags=False)
# Pattern for NH2+dummy
smart = "[1*][N]([H])([H])"
patt = Chem.MolFromSmarts(smart)
# Get the ligand that is not NH2
ligands = [struct for struct in frags if len(struct.GetSubstructMatches(patt)) == 0]
return ligands[0] | b74bc21003c33234d310121331ab61887536709e | 3,657,083 |
import numpy as np
def double2pointerToArray(ptr, n, m_sizes):
""" Converts ctypes 2D array into a 2D numpy array.
Arguments:
ptr: [ctypes double pointer]
n: [int] number of cameras
m_sizes: [list] number of measurements for each camera
Return:
arr_list: [list of ndarrays] list of numpy arrays, each list entry containing data for individual
cameras
"""
arr_list = []
# Go through every camera
for i in range(n):
# Init a new empty data array
arr = np.zeros(shape=(m_sizes[i]))
# Go through ctypes array and extract data for this camera
for j in range(m_sizes[i]):
arr[j] = ptr[i][j]
# Add the data for this camera to the final list
arr_list.append(arr)
return arr_list | f556c5a36f645c6047c3b487b7cd865edc3b76db | 3,657,084 |
def read_varint(stream: bytes):
"""
读取 varint。
Args:
stream (bytes): 字节流。
Returns:
tuple[int, int],真实值和占用长度。
"""
value = 0
position = 0
shift = 0
while True:
if position >= len(stream):
break
byte = stream[position]
value += (byte & 0b01111111) << shift
if byte & 0b10000000 == 0:
break
position += 1
shift += 7
return value, position + 1 | 58c8187501dc08b37f777256474f95412649bf04 | 3,657,085 |
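# For example, the varint encoding of 300 is the two bytes 0xAC 0x02:
read_varint(bytes([0b10101100, 0b00000010]))   # -> (300, 2)
read_varint(bytes([0b00000001]))               # -> (1, 1)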
import argparse
def get_arguments():
"""
get commandline arguments
"""
# Parse command line arguments
parser = argparse.ArgumentParser(description="P1 reader interface")
parser.add_argument("--config-file",
default=__file__.replace('.py', '.yml').replace('/bin/', '/etc/'),
help="P1 config file, default %(default)s",
metavar='FILE'
)
parser.add_argument("--log",
help="Set log level (default info)",
choices=['debug', 'info', 'warning', 'error', 'critical'],
default="info"
)
parser.add_argument("--debug",
action='store_true',
help="debug mode"
)
parser.add_argument('--version',
action='version',
version=__version__
)
arguments = parser.parse_args()
return arguments | f35a364c96705c764064c536519dc9d3730d9310 | 3,657,086 |
def any(array, mapFunc):
"""
    Checks whether any element of the array returns true when the given boolean-returning function is applied to it.
    :param array: The array whose elements will be checked against the function. \t
    :type array: [mixed] \n
    :param mapFunc: The function that returns a boolean when applied to an element of the array. \t
    :type mapFunc: function \n
    :returns: Whether any element of the array returned true. \t
    :rtype: bool \n
"""
for elem in array:
if mapFunc(elem):
return True
return False | 1e635da691fd1c2fc9d99e15fd7fa0461a7bdf0e | 3,657,087 |
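# Note that this shadows the builtin any(); usage:
any([1, 2, 3], lambda x: x > 2)   # True
any([1, 2, 3], lambda x: x > 5)   # False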
def qt_point_to_point(qt_point, unit=None):
"""Create a Point from a QPoint or QPointF
Args:
qt_point (QPoint or QPointF): The source point
unit (Unit): An optional unit to convert
values to in the output `Point`. If omitted, values
in the output `Point` will be plain `int` or `float` values.
Returns: Point
"""
if unit:
return Point(qt_point.x(), qt_point.y()).to_unit(unit)
else:
return Point(qt_point.x(), qt_point.y()) | 595dacc2d39d126822bf680e1ed1784c05deb6d7 | 3,657,088 |
import requests
import json
def apiRequest(method, payload=None):
"""
    Send a request to the VK API server
    :param method: name of the vkApi method
:param payload: parameters for vkApi
:return: answer from vkApi
"""
if payload is None:
payload = {}
if not ('access_token' in payload):
payload.update({'access_token': GROUP_TOKEN, 'v': V})
response = requests.post(BASE_URL + method, payload)
data = json.loads(response.text)
return data | b60c77aec5ae500b9d5e9901216c7ff7c93676ad | 3,657,089 |
def page_required_no_auth(f):
"""Full page, requires user to be logged out to access, otherwise redirects to main page."""
@wraps(f)
def wrapper(*args, **kwargs):
if "username" in session:
return redirect("/")
else:
return f(*args, **kwargs)
return wrapper | 7d7d314e10dcaf1d81ca5c713afd3da6a021247d | 3,657,090 |
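# Hedged usage sketch (assumes a Flask app and a login template; names are placeholders):
from flask import Flask, render_template
app = Flask(__name__)

@app.route('/login')
@page_required_no_auth
def login():
    return render_template('login.html')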
import argparse
import re
def parse_arguments(args):
"""
Parse all given arguments.
:param args: list
:return: argparse.Namespace
"""
parser = argparse.ArgumentParser(
description=__description__,
epilog="Example-usage in apache-config:\n"
'CustomLog "| /path/to/anonip.py '
'[OPTIONS] --output /path/to/log" '
"combined\n ",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"-4",
"--ipv4mask",
metavar="INTEGER",
help="truncate the last n bits (default: %(default)s)",
type=lambda x: _validate_ipmask(x, 32),
)
parser.set_defaults(ipv4mask=12)
parser.add_argument(
"-6",
"--ipv6mask",
type=lambda x: _validate_ipmask(x, 128),
metavar="INTEGER",
help="truncate the last n bits (default: %(default)s)",
)
parser.set_defaults(ipv6mask=84)
parser.add_argument(
"-i",
"--increment",
metavar="INTEGER",
type=lambda x: _validate_integer_ht_0(x),
help="increment the IP address by n (default: %(default)s)",
)
parser.set_defaults(increment=0)
parser.add_argument("-o", "--output", metavar="FILE", help="file to write to")
parser.add_argument(
"--input", metavar="FILE", help="File or FIFO to read from (default: stdin)"
)
parser.add_argument(
"-c",
"--column",
metavar="INTEGER",
dest="columns",
nargs="+",
type=lambda x: _validate_integer_ht_0(x),
help="assume IP address is in column n (1-based indexed; default: 1)",
)
parser.add_argument(
"-l",
"--delimiter",
metavar="STRING",
type=str,
help='log delimiter (default: " ")',
)
parser.add_argument(
"--regex",
metavar="STRING",
nargs="+",
help="regex for detecting IP addresses (use optionally instead of -c)",
type=regex_arg_type,
)
parser.add_argument(
"-r",
"--replace",
metavar="STRING",
help="replacement string in case address parsing fails (Example: 0.0.0.0)",
)
parser.add_argument(
"-p",
"--skip-private",
dest="skip_private",
action="store_true",
help="do not mask addresses in private ranges. "
"See IANA Special-Purpose Address Registry.",
)
parser.add_argument(
"-d", "--debug", action="store_true", help="print debug messages"
)
parser.add_argument("-v", "--version", action="version", version=__version__)
args = parser.parse_args(args)
if args.regex and (args.columns is not None or args.delimiter is not None):
raise parser.error(
'Ambiguous arguments: When using "--regex", "-c" and "-l" can\'t be used.'
)
if not args.regex and args.columns is None:
args.columns = [1]
if not args.regex and args.delimiter is None:
args.delimiter = " "
if args.regex:
try:
args.regex = re.compile(r"|".join(args.regex))
except re.error: # pragma: no cover
raise argparse.ArgumentTypeError("Failed to compile concatenated regex!")
return args | 7d24618fc40835488a7d05a748f462826311a30a | 3,657,091 |
import sympy
def generate_forward():
"""
Generate dataset with forward method
    It tries to integrate a random function.
    The integral may not be symbolically possible, or may contain invalid operators.
In those cases, it returns None.
"""
formula = symbolic.fixed_init(15)
integrated = sympy.integrate(formula, symbolic.x, meijerg=False)
if symbolic.is_integral_valid(integrated):
return (formula, integrated)
else:
return None | 91a91e5b23f3f59b49d8f7102585ff7fbfbbf6c4 | 3,657,092 |
import pickle
def load_agent(agent_args, domain_settings, experiment_settings):
"""
This function loads the agent from the results directory results/env_name/method_name/filename
Args:
experiment_settings
Return:
sarsa_lambda agent
"""
with open('results/' + experiment_settings['env'] + '/sarsa_lambda/agents/' + experiment_settings['filename'] + '.pkl', 'rb') as input:
my_agent = pickle.load(input)
return my_agent, None | a5769c952d9fcc583b8fb909e6e772c83b7126ca | 3,657,093 |
from io import BytesIO
def unpickle_robust(bytestr):
""" robust unpickle of one byte string """
fin = BytesIO(bytestr)
unpickler = robust_unpickler(fin)
return unpickler.load() | 42fee03886b36aef5ab517e0abcb2cc2ecfd6a8b | 3,657,094 |
def build_ins_embed_branch(cfg, input_shape):
"""
Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
"""
name = cfg.MODEL.INS_EMBED_HEAD.NAME
return INS_EMBED_BRANCHES_REGISTRY.get(name)(cfg, input_shape) | 4d8242614426a13f9e93a241184bd3d8f57ef648 | 3,657,095 |
def atl03sp(ipx_region, parm, asset=icesat2.DEFAULT_ASSET):
"""
Performs ATL03 subsetting in parallel on ATL03 data and returns photon segment data.
See the `atl03sp <../api_reference/icesat2.html#atl03sp>`_ function for more details.
Parameters
----------
ipx_region: Query
icepyx region object defining the query of granules to be processed
parms: dict
parameters used to configure ATL03 subsetting (see `Parameters <../user_guide/ICESat-2.html#parameters>`_)
asset: str
data source asset (see `Assets <../user_guide/ICESat-2.html#assets>`_)
Returns
-------
list
ATL03 segments (see `Photon Segments <../user_guide/ICESat-2.html#photon-segments>`_)
"""
try:
version = ipx_region.product_version
resources = ipx_region.avail_granules(ids=True)[0]
except:
logger.critical("must supply an icepyx query as region")
return icesat2.__emptyframe()
# try to get the subsetting region
if ipx_region.extent_type in ('bbox','polygon'):
parm.update({'poly': to_region(ipx_region)})
return icesat2.atl03sp(parm, asset, version=version, resources=resources) | 8c822af0d2f9b6e42bd6a1efeb29249a04079e66 | 3,657,096 |
def get_sample_activity_from_batch(activity_batch, idx=0):
"""Return layer activity for sample ``idx`` of an ``activity_batch``.
"""
return [(layer_act[0][idx], layer_act[1]) for layer_act in activity_batch] | 0302fdf215e63d6cbcd5dafc1bd36ae3d27712f2 | 3,657,097 |
import pandas as pd
def _reorder_for_qbb_experiment(df: pd.DataFrame) -> pd.DataFrame:
"""By default the entries are ordered alphabetically. We want SPOTA, EPOpt, PPO"""
print("Changed the order")
return df.iloc[[2, 0, 1]] | beccd22a765eb526ed855fd34dde4a05e2b394f2 | 3,657,098 |
def get_field(self, *args, is_squeeze=False, node=None, is_rthetaz=False):
"""Get the value of variables stored in Solution.
Parameters
----------
self : SolutionData
an SolutionData object
*args: list of strings
List of axes requested by the user, their units and values (optional)
Returns
-------
field: array
an array of field values
"""
axname, _ = self.get_axes_list()
symbol = self.field.symbol
if len(args) == 0:
field_dict = self.field.get_along(tuple(axname), is_squeeze=is_squeeze)
else:
field_dict = self.field.get_along(*args, is_squeeze=is_squeeze)
field = field_dict[symbol]
return field | e93455cbc4b306762336fd13603342e9d92badd1 | 3,657,099 |