content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
import json
import psycopg2
from flask import jsonify, request
from werkzeug.security import check_password_hash
def login():
"""
login an existing user
"""
try:
username = json.loads(request.data.decode())['username'].replace(" ", "")
password = json.loads(request.data.decode())['password'].replace(" ", "")
user = User(username, "", "")
user = user.exists()
if check_password_hash(user.password_hash, password):
"""token if password is correct"""
token = auth_encode(user.user_id)
if token:
response = {'response': 'login successful', 'token': token.decode()}
return jsonify(response), 200
else:
return jsonify({'response': 'invalid username/password'}), 422
except (KeyError, ValueError) as ex:
print('error in login', ex)
return jsonify({'response': 'json body must contain username and password'}), 400
except (psycopg2.DatabaseError, psycopg2.IntegrityError, Exception) as ex:
print('error in login', ex)
return jsonify({'response': 'user not found'}), 404 | 8e09725c37ac897efefd3cd546ce929cdf799716 | 3,656,978 |
def soma_radius(morph):
"""Get the radius of a morphology's soma."""
return morph.soma.radius | 2f9991a2f9240965bdb69a1a14814ed99bf60f86 | 3,656,979 |
async def async_get_authorization_server(hass: HomeAssistant) -> AuthorizationServer:
"""Return authorization server."""
return AuthorizationServer(
authorize_url=AUTHORIZATION_ENDPOINT,
token_url=TOKEN_ENDPOINT,
) | 99d7c0d25168d07d0d27ee95e6ee0b59cb6d48c0 | 3,656,980 |
from typing import Optional
def check_proposal_functions(
model: Model, state: Optional[flow.SamplingState] = None, observed: Optional[dict] = None,
) -> bool:
"""
Check for the non-default proposal generation functions
Parameters
----------
model : pymc4.Model
Model to sample posterior for
state : Optional[flow.SamplingState]
Current state
observed : Optional[Dict[str, Any]]
Observed values (optional)
"""
(_, state, _, _, continuous_distrs, discrete_distrs) = initialize_state(
model, observed=observed, state=state
)
init = state.all_unobserved_values
init_state = list(init.values())
init_keys = list(init.keys())
for i, state_part in enumerate(init_state):
untrs_var, unscoped_tr_var = scope_remove_transformed_part_if_required(
init_keys[i], state.transformed_values
)
# get the distribution for the random variable name
distr = continuous_distrs.get(untrs_var, None)
if distr is None:
distr = discrete_distrs[untrs_var]
func = distr._default_new_state_part
if callable(func):
return True
return False | 3d0d14f800f3d499de0c823dd2df8b852573c56f | 3,656,981 |
def smaller_n(n1, n2):
""" Compare two N_Numbers and returns smaller one. """
p1, s1 = n1
p2, s2 = n2
p1l = len(str(p1)) + s1
p2l = len(str(p2)) + s2
if p1l < p2l:
return n1
elif p1l > p2l:
return n2
p1 = p1.ljust(36, '9')
p2 = p2.ljust(36, '9')
if p1 <= p2:
return n1
else:
return n2 | 1f5922b74bdb8e5ee4dba7a85a9a70efdb024c59 | 3,656,982 |
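A quick sanity check of the comparison above; the (digit-prefix, exponent) reading of an N_Number is an assumption inferred from the len(str(p1)) + s1 and ljust logic.
n_a = ("123", 2)  # assumed to mean digits "123..." with 5 digits total
n_b = ("99", 3)   # also 5 digits total, so the tie is broken lexicographically
print(smaller_n(n_a, n_b))  # ('123', 2), since "123999..." < "999999..."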
def sortDict(dictionary: dict):
"""Lambdas made some cringe and stupid thing some times, so this dirty thing was developed"""
sortedDictionary = {}
keys = list(dictionary.keys())
keys.sort()
for key in keys:
sortedDictionary[key] = dictionary[key]
return sortedDictionary | ed61adf95f2b8c1c4414f97d84b8863596681478 | 3,656,983 |
def elina_linexpr0_alloc(lin_discr, size):
"""
Allocate a linear expression with coefficients of type ElinaScalar (c_double) by default.
If sparse representation, corresponding new dimensions are initialized with ELINA_DIM_MAX.
Parameters
----------
lin_discr : c_uint
Enum of type ElinaLinexprDiscr that defines the representation (sparse or dense).
size : c_size_t
Size of the internal array.
Returns
-------
linexpr : ElinaLinexpr0Ptr
Pointer to the newly allocated ElinaLinexpr0
"""
linexpr = None
try:
elina_linexpr0_alloc_c = elina_auxiliary_api.elina_linexpr0_alloc
elina_linexpr0_alloc_c.restype = ElinaLinexpr0Ptr
elina_linexpr0_alloc_c.argtypes = [c_uint, c_size_t]
linexpr = elina_linexpr0_alloc_c(lin_discr, size)
except:
print('Problem with loading/calling "elina_linexpr0_alloc" from "libelinaux.so"')
print('Make sure you are passing c_uint, c_size_t to the function')
return linexpr | 56bbaa01ba3b9bbe657240abdf8fb92daa527f29 | 3,656,984 |
def FrameTag_get_tag():
"""FrameTag_get_tag() -> std::string"""
return _RMF.FrameTag_get_tag() | 21392f22a0b67f86c5a3842ab6befc4b1e3938c6 | 3,656,985 |
def noise4(x: float, y: float, z: float, w: float) -> float:
"""
Generate 4D OpenSimplex noise from X,Y,Z,W coordinates.
"""
return _default.noise4(x, y, z, w) | 75b5911e9b8b4a08abba9540992e812d2a1dee83 | 3,656,986 |
def damerau_levenshtein_distance(word1: str, word2: str) -> int:
"""Calculates the distance between two words."""
inf = len(word1) + len(word2)
table = [[inf for _ in range(len(word1) + 2)] for _ in range(len(word2) + 2)]
for i in range(1, len(word1) + 2):
table[1][i] = i - 1
for i in range(1, len(word2) + 2):
table[i][1] = i - 1
da = {}
for col, c1 in enumerate(word1, 2):
last_row = 0
for row, c2 in enumerate(word2, 2):
last_col = da.get(c2, 0)
addition = table[row - 1][col] + 1
deletion = table[row][col - 1] + 1
substitution = table[row - 1][col - 1] + (0 if c1 == c2 else 1)
transposition = (
table[last_row - 1][last_col - 1]
+ (col - last_col - 1)
+ (row - last_row - 1)
+ 1
)
table[row][col] = min(addition, deletion, substitution, transposition)
if c1 == c2:
last_row = row
da[c1] = col
return table[len(word2) + 1][len(word1) + 1] | 7b75bb94fe66897c1807ac185d8602ea2b3ebd67 | 3,656,987 |
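A few sanity checks of the distance function above; these values follow from the standard Damerau-Levenshtein definition.
assert damerau_levenshtein_distance("ab", "ba") == 1       # one adjacent transposition
assert damerau_levenshtein_distance("kitten", "sitting") == 3
assert damerau_levenshtein_distance("", "abc") == 3        # pure insertions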
from typing import Any
def ga_validator(value: Any) -> str | int:
"""Validate that value is parsable as GroupAddress or InternalGroupAddress."""
if isinstance(value, (str, int)):
try:
parse_device_group_address(value)
return value
except CouldNotParseAddress:
pass
raise vol.Invalid(
f"value '{value}' is not a valid KNX group address '<main>/<middle>/<sub>', '<main>/<sub>' "
"or '<free>' (eg.'1/2/3', '9/234', '123'), nor xknx internal address 'i-<string>'."
) | 84845c9dbf5db041e243bf462dec4533ff7e0e3e | 3,656,988 |
import time
import re
from datetime import datetime
def getTime(sim):
"""
Get the network time
@param sim: the SIM serial handle
"""
sim.write(b'AT+CCLK?\n')
line = sim.readline()
res = None
while not line.endswith(b'OK\r\n'):
time.sleep(0.5)
matcher = re.match(br'^\+CCLK: "([^+]+)\+[0-9]+"\r\n', line)
if matcher:
ts = matcher.group(1).decode('ascii')
res = datetime.strptime(ts[:ts.find('+')], "%y/%m/%d,%H:%M:%S")
line = sim.readline()
return res | 77c889a41b214046a5965126927ca7e7ee043129 | 3,656,989 |
import makehuman
def defaultTargetLicense():
"""
Default license for targets, shared for all targets that do not specify
their own custom license, which is useful for saving storage space as this
license is globally referenced by and applies to the majority of targets.
"""
return makehuman.getAssetLicense( {"license": "AGPL3",
"author": "MakeHuman",
"copyright": "2016 Data Collection AB, Joel Palmius, Jonas Hauquier"} ) | a638129f1674b14fbf0d72e5323c1725f6fb5035 | 3,656,990 |
import json
def get_repo_info(main_path):
""" Get the info of repo.
Args:
main_path: the file store location.
Return:
A json object.
"""
with open(main_path + '/repo_info.json') as read_file:
repo_info = json.load(read_file)
return repo_info | f4a538819add0a102f6cbe50be70f2c9a0f969b6 | 3,656,991 |
import yaml
def parse_settings(settings_file: str) -> dict:
"""
The function parses a settings file into a dict.
Parameters
----------
settings_file : str
File with the model settings, must be in yaml.
Returns
-------
ydict : dict
Parsed settings used for modeling.
"""
with open(settings_file, 'r') as fstream:
ydict = yaml.safe_load(fstream)
return ydict | 1aec2a8be51376209db81d60115814ddefca7ea6 | 3,656,992 |
def get_mac_address(path):
"""
input: path to the file with the location of the mac address
output: A string containing a mac address
Possible exceptions:
FileNotFoundError - when the file is not found
PermissionError - in the absence of access rights to the file
TypeError - If the function argument is not a string.
"""
if type(path) is not str:
raise TypeError("The path must be a string value")
try:
file = open(path)
except FileNotFoundError as e:
raise e
except PermissionError as e:
raise e
return file.readline().strip().upper() | 814a530b63896103adcb8fbc84d17939644b9bbe | 3,656,993 |
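Hypothetical usage on Linux, where sysfs exposes an interface's MAC address; the interface name below is an assumption for illustration only.
try:
    mac = get_mac_address("/sys/class/net/eth0/address")  # hypothetical interface name
    print(mac)  # e.g. "AA:BB:CC:DD:EE:FF"
except (FileNotFoundError, PermissionError) as err:
    print("could not read MAC:", err)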
def jwt_get_username_from_payload_handler(payload):
"""
Override this function if username is formatted differently in payload
"""
return payload.get('name') | 92d60ce714632571346e93459729dcf1d764617b | 3,656,994 |
import shlex
def grr_uname(line):
"""Returns certain system infornamtion.
Args:
line: A string representing arguments passed to the magic command.
Returns:
String representing some system information.
Raises:
NoClientSelectedError: Client is not selected to perform this operation.
"""
args = grr_uname.parser.parse_args(shlex.split(line))
return magics_impl.grr_uname_impl(args.machine, args.kernel_release) | 5e671fcffe415397edc3b7c6011cc4e21b72cb5a | 3,656,995 |
import requests
import warnings
import pandas as pd
from io import BytesIO
def stock_szse_summary(date: str = "20200619") -> pd.DataFrame:
"""
Shenzhen Stock Exchange - market overview - statistics by security type
http://www.szse.cn/market/overview/index.html
:param date: the most recent completed trading day
:type date: str
:return: statistics by security type
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.get(url, params=params)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
temp_df = pd.read_excel(BytesIO(r.content), engine="openpyxl")
temp_df["证券类别"] = temp_df["证券类别"].str.strip()
temp_df.iloc[:, 2:] = temp_df.iloc[:, 2:].applymap(lambda x: x.replace(",", ""))
temp_df.columns = ["证券类别", "数量", "成交金额", "总市值", "流通市值"]
temp_df["数量"] = pd.to_numeric(temp_df["数量"])
temp_df["成交金额"] = pd.to_numeric(temp_df["成交金额"])
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"], errors="coerce")
temp_df["流通市值"] = pd.to_numeric(temp_df["流通市值"], errors="coerce")
return temp_df | 6544b0d78baa76858c13a001287b35d2a0faf7ba | 3,656,996 |
def find_all_movies_shows(pms): # pragma: no cover
""" Helper of get all the shows on a server.
Args:
func (callable): Run this function in a threadpool.
Returns: List
"""
all_shows = []
for section in pms.library.sections():
if section.TYPE in ('movie', 'show'):
all_shows += section.all()
return all_shows | ca4a8a5f4b2c1632ea6e427c748ef790c896b3ba | 3,656,997 |
def parse_vars(vars):
"""
Transform a list of NAME=value environment variables into a dict
"""
retval = {}
for var in vars:
key, value = var.split("=", 1)
retval[key] = value
return retval | e2c6ae05cdf0151caaf8589eb7d7df90dcdd99a1 | 3,656,998 |
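A small check of the parser above; note that values may themselves contain '=' because of the maxsplit of 1.
assert parse_vars(["FOO=1", "BAR=a=b"]) == {"FOO": "1", "BAR": "a=b"}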
from typing import List
import collections
def find_dup_items(values: List) -> List:
"""Find duplicate items in a list
Arguments:
values {List} -- A list of items
Returns:
List -- A list of duplicated items
"""
dup = [t for t, c in collections.Counter(values).items() if c > 1]
return dup | 3a84c2f3b723bed9b7a82dc5f0cfd81d99c2bf48 | 3,656,999 |
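A short example of the duplicate finder above; the result order follows first appearance in the input because Counter preserves insertion order.
assert find_dup_items([1, 2, 2, 3, 3, 3, 4]) == [2, 3]
assert find_dup_items(["a", "b", "c"]) == []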
def circle_location_Pass(circle_, image_, margin=0.15):
"""
Check that circle_ does not overlap the margin region
of image_; returns False if it does.
"""
cy, cx, rad, accum = circle_
image_sizeY_, image_sizeX_ = image_.shape[0], image_.shape[1]
margin_min_x = int(image_sizeX_ * margin)
margin_max_x = int(image_sizeX_ * (1 - margin))
margin_min_y = int(image_sizeY_ * margin)
margin_max_y = int(image_sizeY_ * (1 - margin))
margin_min_xh = int(image_sizeX_ * margin/2.)
margin_max_xh = int(image_sizeX_ * (1 - margin/2.))
margin_min_yh = int(image_sizeY_ * margin/2.)
margin_max_yh = int(image_sizeY_ * (1 - margin/2.))
if cy<margin_min_y or cy>margin_max_y:
return False
if cx<margin_min_x or cx>margin_max_x:
return False
if cy-rad<margin_min_yh or cy+rad>margin_max_yh:
return False
if cx-rad<margin_min_xh or cx+rad>margin_max_xh:
return False
return True | 4ad94552bc1bf06282a691edede89a65f8b9c328 | 3,657,000 |
def calculate_molecular_mass(symbols):
"""
Calculate the mass of a molecule.
Parameters
----------
symbols : list
A list of elements.
Returns
-------
mass : float
The mass of the molecule
"""
mass = 0
for i in range(len(symbols)):
mass = mass + atomic_weights[symbols[i]]
return mass | 7ac18cffc02652428b51009d2bf304301def96dd | 3,657,002 |
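A minimal sketch of how the function above might be used; atomic_weights is assumed to be a module-level dict keyed by element symbol (the two values below are standard atomic weights).
atomic_weights = {"H": 1.008, "O": 15.999}  # assumed global used by calculate_molecular_mass
print(calculate_molecular_mass(["H", "H", "O"]))  # 18.015 for water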
def _color_str(string, color):
"""Simple color formatter for logging formatter"""
# For bold add 1; after "["
start_seq = '\033[{:d}m'.format(COLOR_DICT[color])
return start_seq + string + '\033[0m' | 715b0b597885f1cffa352cc01bdb743c3ed23dd4 | 3,657,003 |
def parser_tool_main(args):
"""Main function for the **parser** tool.
This method will parse a JSON formatted Facebook conversation,
reports informations and retrieve data from it, depending on the
arguments passed.
Parameters
----------
args : Namespace (dict-like)
Arguments passed by the `ArgumentParser`.
See Also
--------
FBParser: Class used for the **parser** tool.
main : method used for parsing arguments
"""
with args.cookie as f:
user_raw_data = f.read()
print("[+] - Parsing JSON for {} files".format(len(args.infile)))
data_formatted = build_fmt_str_from_enum(args.data)
print("[+] - Parsing JSON to retrieve {}".format(data_formatted))
fb_parser = FBParser(user_raw_data,
infile_json=args.infile, mode=args.mode,
data=args.data, output=args.output,
threads=args.threads)
fb_parser.parse(to_stdout=True, verbose=args.verbose)
print("[+] - JSON parsed succesfully, saving results "
"inside folder '" + str(args.output) + "'")
return 0 | 1e07a60e78b042c6c229410e5d1aaf306e692f61 | 3,657,004 |
from functools import reduce
def merge(from_args):
"""Merge a sequence of operations into a cross-product tree.
from_args: A dictionary mapping a unique string id to a
raco.algebra.Operation instance.
Returns: a single raco.algebra.Operation instance and an opaque
data structure suitable for passing to the rewrite_refs function.
"""
assert len(from_args) > 0
def cross(x, y):
return algebra.CrossProduct(x, y)
from_ops = from_args.values()
op = reduce(cross, from_ops)
return (op, __calculate_offsets(from_args)) | e3690a26fc9e3e604984aab827617ffc535f63d3 | 3,657,005 |
def graph(task_id):
"""Return the graph.json results"""
return get_file(task_id, "graph.json") | 4d8728d3b61cf62057525054d8eafa127b1c48ff | 3,657,007 |
def parse_components_from_aminochange(aminochange):
""" Returns a dictionary containing (if possible) 'ref', 'pos', and 'alt'
characteristics of the supplied aminochange string.
If aminochange does not parse, returns None.
:param aminochange: (str) describing amino acid change
:return: dict or None
"""
match = re_aminochange_comp_long.match(aminochange)
if match:
# reverse long-form amino strings to short-form.
stuff = match.groupdict()
return {'ref': amino_acid_map[stuff['ref']],
'pos': stuff['pos'],
'alt': amino_acid_map[stuff['alt']],
}
else:
match = re_aminochange_comp_short.match(aminochange)
if match:
return match.groupdict()
return None
import random
import string
import csv
def get_logs_csv():
"""
get a target's logs through the API and return them as a CSV attachment
Returns:
a Flask Response streaming the logs as a CSV file
"""
api_key_is_valid(app, flask_request)
target = get_value(flask_request, "target")
data = logs_to_report_json(target)
keys = data[0].keys()
filename = "report-" + now(
model="%Y_%m_%d_%H_%M_%S"
) + "".join(
random.choice(
string.ascii_lowercase
) for _ in range(10)
)
with open(filename, "w") as report_path_filename:
dict_writer = csv.DictWriter(
report_path_filename,
fieldnames=keys,
quoting=csv.QUOTE_ALL
)
dict_writer.writeheader()
for event in data:
dict_writer.writerow(
{
key: value for key, value in event.items() if key in keys
}
)
with open(filename, 'r') as report_path_filename:
reader = report_path_filename.read()
return Response(
reader, mimetype='text/csv',
headers={
'Content-Disposition': 'attachment;filename=' + filename + '.csv'
}
) | f9296cfc7c6559ebccbfa29268e3b22875fb9fed | 3,657,009 |
def _cache_key_format(lang_code, request_path, qs_hash=None):
"""
Return the string that will be used as the cache key.
The string is built from the function parameters:
- lang_code: language code: [pt_BR|es|en]
- request_path: the request path
- qs_hash: hash generated from the querystring parameters (if not None)
"""
cache_key = "/LANG=%s/PATH=%s" % (lang_code, request_path)
if qs_hash is not None:
cache_key = "%s?QS=%s" % (cache_key, qs_hash)
return cache_key | 365b1ff144f802e024da5d6d5b25b015463da8b3 | 3,657,010 |
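Example keys produced by the helper above:
assert _cache_key_format("pt_BR", "/articles/") == "/LANG=pt_BR/PATH=/articles/"
assert _cache_key_format("en", "/search", qs_hash="ab12") == "/LANG=en/PATH=/search?QS=ab12"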
from typing import Iterable
from pathlib import Path
from typing import Callable
from typing import Any
from typing import List
def select_from(paths: Iterable[Path],
filter_func: Callable[[Any], bool] = default_filter,
transform: Callable[[Path], Any] = None,
order_func: Callable[[Any], Any] = None,
order_asc: bool = True,
fn_base: int = 10,
limit: int = None) -> (List[Any], List[Path]):
"""Filter, order, and truncate the given paths based on the filter and
other parameters.
:param paths: A list of paths to filter, order, and limit.
:param transform: Function to apply to each path before applying filters
or ordering. The filter and order functions should expect the type
returned by this.
:param filter_func: A function that takes a directory, and returns whether
to include that directory. True -> include, False -> exclude
:param order_func: A function that returns a comparable value for sorting,
as per the list.sort keys argument. Items for which this returns
None are removed.
:param order_asc: Whether to sort in ascending or descending order.
:param fn_base: Number base for file names. 10 by default, ensure dir name
is a valid integer.
:param limit: The max items to return. None denotes return all.
:returns: A filtered, ordered list of transformed objects, and the list
of untransformed paths.
"""
if transform is None:
transform = lambda v: v
selected = []
for path in paths:
if not path.is_dir():
continue
try:
int(path.name, fn_base)
except ValueError:
continue
try:
item = transform(path)
except ValueError:
continue
if not filter_func(item):
continue
if order_func is not None and order_func(item) is None:
continue
selected.append((item, path))
if order_func is not None:
selected.sort(key=lambda d: order_func(d[0]), reverse=not order_asc)
return SelectItems(
[item[0] for item in selected][:limit],
[item[1] for item in selected][:limit]) | d952d7d81932c5f6d206c39a5ac12aae1e940431 | 3,657,011 |
import torch
from collections import Counter
from sklearn.cluster import DBSCAN
def dbscan(data:torch.Tensor, epsilon:float, **kwargs) -> torch.Tensor:
"""
Generate mask using DBSCAN.
Note, data in the largest cluster have True values.
Parameters
----------
data: torch.Tensor
input data with shape (n_samples, n_features)
epsilon: float
DBSCAN epsilon
**kwargs:
passed to DBSCAN()
Returns
-------
mask (torch.Tensor)
"""
group = DBSCAN(eps=epsilon, **kwargs).fit(data.cpu().numpy())
label = Counter(group.labels_)
label = max(label, key=label.get)
return torch.tensor(group.labels_ == label).to(data.device) | 0121b8b9dceaf9fc8399ffd75667afa6d34f66e1 | 3,657,012 |
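A minimal sketch of the mask helper above, assuming the DBSCAN it calls is sklearn.cluster.DBSCAN; two well-separated clusters are built and only the larger one ends up True in the mask.
import torch

pts = torch.cat([torch.zeros(10, 2), torch.full((4, 2), 10.0)])  # 10-point and 4-point clusters
mask = dbscan(pts, epsilon=1.0, min_samples=2)
print(int(mask.sum()))  # 10 -- only the larger cluster is kept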
import copy
import numpy as np
from numpy.random import default_rng
def simulate_multivariate_ts(mu, alpha, beta, num_of_nodes=-1,\
Thorizon = 60, seed=None, output_rejected_data=False):
"""
Inputs:
mu: baseline intensities, M x 1 array
alpha: excitation rates of the multivariate HP kernel, M x M array
beta: decay rates of the multivariate HP kernel
"""
#################
# Initialisation
#################
if num_of_nodes < 0:
num_of_nodes = np.shape(mu)[0]
rng = default_rng(seed) # get instance of random generator
ts = [np.array([]) for _ in range(num_of_nodes)] # create M empty arrays to store the ordered timestamps of each node
t = 0 # initialise current time to be 0
num_of_events = np.zeros(num_of_nodes) # set event counter to be 0 for all nodes
epsilon = 10**(-10) # This was used in many HP code
M_star = copy.copy(mu) # upper bound at current time t = 0
accepted_event_intensity = []
rejected_points = []; rpy = [] # containers for rejected time points and their corresponding intensities
M_x = []; M_y = [] # M_y stores the upper bound over each interval while M_x stores the interval endpoints
#################
# Begin loop
#################
while(t < Thorizon):
previous_M_star = M_star; previous_t = t
M_star = np.sum(multiv_cif(t+epsilon, ts, mu, alpha, beta)) # compute upper bound of intensity using conditional intensity function
u = rng.uniform(0,1) # draw a uniform random number between interval (0,1)
tau = -np.log(u)/M_star # sample inter-arrival time
t = t + tau # update current time by adding tau to current time (hence t is the candidate point)
M_x += [previous_t,t]
M_y += [previous_M_star]
s = rng.uniform(0,1) # draw another standard uniform random number
M_t = np.sum(multiv_cif(t, ts, mu, alpha, beta)) # compute intensity function at current time t
if t <= Thorizon:
##########################
## Rejection Sampling test where probability of acceptance: M_t/M_star
if s <= M_t/M_star:
k = 0 # initialise k to be the first node '0'
# Search for node k such that the 'while condition' below is satisfied
while s*M_star <= np.sum(multiv_cif(t, ts, mu, alpha, beta)[0:k+1]):
k += 1
num_of_events[k] += 1 # update number of points in node k
ts[k] = np.append(ts[k], float(t)) # accept candidate point t in node k
accepted_event_intensity.append(M_t)
else:
rejected_points += [t]
rpy += [M_t]
else:
break
if output_rejected_data:
return ts, num_of_events, accepted_event_intensity, rejected_points, rpy
else:
return ts, num_of_events | 85ab71fa3f2b16cbe296d21d6bc43c15c94aa40a | 3,657,013 |
import base64
def token_urlsafe(nbytes):
"""Return a random URL-safe text string, in Base64 encoding.
The string has *nbytes* random bytes. If *nbytes* is ``None``
or not supplied, a reasonable default is used.
>>> token_urlsafe(16) #doctest:+SKIP
'Drmhze6EPcv0fN_81Bj-nA'
"""
tok = token_bytes(nbytes)
return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') | 1855dc44cec1ddd0c6c83d0f765c15fd98d1ec98 | 3,657,014 |
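The helper above relies on a token_bytes function that is not shown; a minimal stand-in based on os.urandom (an assumption, not necessarily the original implementation) makes the sketch self-contained.
import os

def token_bytes(nbytes=None):
    # assumed stand-in: return nbytes random bytes, defaulting to 32
    return os.urandom(32 if nbytes is None else nbytes)

print(token_urlsafe(16))  # 22 URL-safe characters, e.g. 'Drmhze6EPcv0fN_81Bj-nA'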
def sha206a_get_pk_useflag_count(pk_avail_count):
"""
calculates available Parent Key use counts
Args:
pk_avail_count: count of available bits set to 1 (AtcaReference)
Returns:
Status Code
"""
if not isinstance(pk_avail_count, AtcaReference):
status = Status.ATCA_BAD_PARAM
else:
c_pk_avail_count = c_uint8(pk_avail_count.value)
status = get_cryptoauthlib().sha206a_get_pk_useflag_count(byref(c_pk_avail_count))
pk_avail_count.value = c_pk_avail_count.value
return status | 389174a21efe1ca78037b479895035b4bdd66b87 | 3,657,015 |
from typing import Tuple
def rotate_points_around_origin(
x: tf.Tensor,
y: tf.Tensor,
angle: tf.Tensor,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Rotates points around the origin.
Args:
x: Tensor of shape [batch_size, ...].
y: Tensor of shape [batch_size, ...].
angle: Tensor of shape [batch_size, ...].
Returns:
Rotated x, y, each with shape [batch_size, ...].
"""
tx = tf.cos(angle) * x - tf.sin(angle) * y
ty = tf.sin(angle) * x + tf.cos(angle) * y
return tx, ty | 8d4bf5f94964271f640def7d7e2b4242fbfe8e7b | 3,657,016 |
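A quick check of the rotation above (assuming TensorFlow 2 eager execution): rotating the point (1, 0) by 90 degrees counter-clockwise gives approximately (0, 1).
import math
import tensorflow as tf

x = tf.constant([[1.0]])
y = tf.constant([[0.0]])
angle = tf.constant([[math.pi / 2]])
tx, ty = rotate_points_around_origin(x, y, angle)
print(tx.numpy(), ty.numpy())  # approximately [[0.]] [[1.]]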
import inspect
def form_of(state):
"""Return the form of the given state."""
if hasattr(state, "__form__"):
if callable(state.__form__) and not inspect.isclass(state.__form__):
return state.__form__()
else:
return state.__form__
else:
raise ValueError(f"{state} has no form") | e39aa7db7b324ab38b65232b34b987b862812c54 | 3,657,017 |
import geopandas as gpd
def poly_to_geopandas(polys, columns):
"""
Converts a GeoViews Paths or Polygons type to a geopandas dataframe.
Parameters
----------
polys : gv.Path or gv.Polygons
GeoViews element
columns: list(str)
List of columns
Returns
-------
gdf : Geopandas dataframe
"""
rows = []
for g in polys.geom():
rows.append(dict({c: '' for c in columns}, geometry=g))
return gpd.GeoDataFrame(rows, columns=columns+['geometry']) | 889fc5b1bf5bf15cd9612c40e7bf14b1c05043f6 | 3,657,018 |
def get_sequences(query_file=None, query_ids=None):
"""Convenience function to get dictionary of query sequences from file or IDs.
Parameters:
query_file (str): Path to FASTA file containing query protein sequences.
query_ids (list): NCBI sequence accessions.
Raises:
ValueError: Did not receive values for query_file or query_ids.
Returns:
sequences (dict): Dictionary of query sequences keyed on accession.
"""
if query_file and not query_ids:
with open(query_file) as query:
sequences = parse_fasta(query)
elif query_ids:
sequences = efetch_sequences(query_ids)
else:
raise ValueError("Expected 'query_file' or 'query_ids'")
return sequences | 8056ce1c98b7a4faa4bb5a02505d527df31c7c8b | 3,657,019 |
def random_show_date(database_connection: mysql.connector.connect) -> str:
"""Return a random show date from the ww_shows table"""
database_connection.reconnect()
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT s.showdate FROM ww_shows s "
"WHERE s.showdate <= NOW() "
"ORDER BY RAND() "
"LIMIT 1;")
cursor.execute(query)
result = cursor.fetchone()
cursor.close()
if not result:
return None
return result["showdate"].isoformat() | e3afdf9aa1fe9a02adab72c424caa80d60280699 | 3,657,021 |
import numpy as np
def get_output_tensor(interpreter, index):
"""Returns the output tensor at the given index."""
output_details = interpreter.get_output_details()[index]
tensor = np.squeeze(interpreter.get_tensor(output_details["index"]))
return tensor | 158db3fc7ba13ee44d422248a9b96b7738a486e3 | 3,657,022 |
def make_d_mappings(n_dir, chain_opts):
"""Generate direction to solution interval mapping."""
# Get direction dependence for all terms.
dd_terms = [dd for _, dd in yield_from(chain_opts, "direction_dependent")]
# Generate a mapping between model directions gain directions.
d_map_arr = (np.arange(n_dir, dtype=np.int32)[:, None] * dd_terms).T
return d_map_arr | fd9eddf81b4388e3fa40c9b65a591af9aabf9014 | 3,657,023 |
def _calculateVolumeByBoolean(vtkDataSet1,vtkDataSet2,iV):
"""
Function to calculate the volumes of a cell intersecting a mesh.
Uses a boolean polydata filter to calculate the intersection,
a general implementation but slow.
"""
# Triangulate polygon and calc normals
baseC = vtkTools.dataset.getCell2vtp(vtkDataSet2,iV)
baseVol = vtkTools.polydata.calculateVolume(baseC)
# print iV, baseVol
# Extract cells from the first mesh that intersect the base cell
extractCells = vtkTools.extraction.extractDataSetWithPolygon(vtkDataSet1,baseC,extInside=True,extBoundaryCells=True,extractBounds=True)
extInd = npsup.vtk_to_numpy(extractCells.GetCellData().GetArray('id'))
# print extInd
# Assert that at least one cell was cut
assert extractCells.GetNumberOfCells() > 0, 'No cells in the clip, cell id {:d}'.format(iV)
# Calculate the volumes of the clipped cells and insert to the matrix
volL = []
for nrCC,iR in enumerate(extInd):
tempCell = vtkTools.dataset.thresholdCellId2vtp(extractCells,iR)
# Find the intersection of the 2 cells
boolFilt = vtk.vtkBooleanOperationPolyDataFilter()
boolFilt.SetInputData(0,tempCell)
boolFilt.SetInputData(1,baseC)
boolFilt.SetOperationToIntersection()
# If they intersect, calculate the volumes
if boolFilt.GetOutput().GetNumberOfPoints() > 0:
cleanInt = vtkTools.polydata.cleanPolyData(boolFilt.GetOutputPort())
del3dFilt = vtk.vtkDelaunay3D()
del3dFilt.SetInputData(cleanInt)
del3dFilt.Update()
# Get the output
intC = vtkTools.extraction.vtu2vtp(del3dFilt.GetOutput())
intVol = vtkTools.polydata.calculateVolume(tempCell)
# Calculate the volume
volVal = intVol/baseVol
# print iR, intVol, volVal
# Insert the value
if volVal > 0.0:
volL.append(volVal)
return extInd,np.array(volL) | a2c30133973527fb339c9d1e33cc2a937b35d958 | 3,657,025 |
def WebChecks(input_api, output_api):
"""Run checks on the web/ directory."""
if input_api.is_committing:
error_type = output_api.PresubmitError
else:
error_type = output_api.PresubmitPromptWarning
output = []
output += input_api.RunTests([input_api.Command(
name='web presubmit',
cmd=[
input_api.python_executable,
input_api.os_path.join('web', 'web.py'),
'presubmit',
],
kwargs={},
message=error_type,
)])
return output | 5fb828cc98da71bd231423223336ec81e02505ff | 3,657,026 |
from HUGS.Util import load_hugs_json
def synonyms(species: str) -> str:
"""
Check to see if there are other names that we should be using for
a particular input. E.g. If CFC-11 or CFC11 was input, go on to use cfc-11,
as this is used in species_info.json
Args:
species (str): Input string that you're trying to match
Returns:
str: Matched species string
"""
# Load in the species data
species_data = load_hugs_json(filename="acrg_species_info.json")
# First test whether site matches keys (case insensitive)
matched_strings = [k for k in species_data if k.upper() == species.upper()]
# Used to access the alternative names in species_data
alt_label = "alt"
# If not found, search synonyms
if not matched_strings:
for key in species_data:
# Iterate over the alternative labels and check for a match
matched_strings = [s for s in species_data[key][alt_label] if s.upper() == species.upper()]
if matched_strings:
matched_strings = [key]
break
if matched_strings:
updated_species = matched_strings[0]
return updated_species
else:
raise ValueError(f"Unable to find synonym for species {species}") | 31013464ce728cc3ed93b1a9318af3dbcf3f65ec | 3,657,027 |
def _blkid_output(out):
"""
Parse blkid output.
"""
flt = lambda data: [el for el in data if el.strip()]
data = {}
for dev_meta in flt(out.split("\n\n")):
dev = {}
for items in flt(dev_meta.strip().split("\n")):
key, val = items.split("=", 1)
dev[key.lower()] = val
if dev.pop("type", None) == "xfs":
dev["label"] = dev.get("label")
data[dev.pop("devname")] = dev
mounts = _get_mounts()
for device in mounts:
if data.get(device):
data[device].update(mounts[device])
return data | 2cbcbb3ec9b732c3c02183f43ca5a5d5e876af71 | 3,657,028 |
def as_iso_datetime(qdatetime):
""" Convert a QDateTime object into an iso datetime string.
"""
return qdatetime.toString(Qt.ISODate) | 8dba5d1d6efc0dc17adc26a5687923e067ca3c29 | 3,657,029 |
def spec_means_and_magnitudes(action_spec):
"""Get the center and magnitude of the ranges in action spec."""
action_means = tf.nest.map_structure(
lambda spec: (spec.maximum + spec.minimum) / 2.0, action_spec)
action_magnitudes = tf.nest.map_structure(
lambda spec: (spec.maximum - spec.minimum) / 2.0, action_spec)
return tf.cast(
action_means, dtype=tf.float32), tf.cast(
action_magnitudes, dtype=tf.float32) | 119054966a483bb60e80941a6bf9dc5a4a0778f6 | 3,657,030 |
def clean_data(df):
"""
Clean Data:
1. Clean and transform category columns from the categories csv
2. Drop duplicates
3. Remove any missing values
Args:
INPUT - df - merged Dataframe from load_data function
OUTPUT - Returns df - cleaned Dataframe
"""
# Split categories into separate category columns
categories = df['categories'].str.split(';', expand=True)
row = categories.iloc[0]
# Get new column names from category columns
category_colnames = row.apply(lambda x: x.rstrip('- 0 1'))
categories.columns = category_colnames
# Convert category values to 0 or 1
categories = categories.applymap(lambda s: int(s[-1]))
# Drop the original categories column from Dataframe
df.drop('categories', axis=1, inplace=True)
# Concatenate the original dataframe with the new `categories` dataframe
df_final = pd.concat([df, categories], axis=1)
#Drop missing values and duplicates from the dataframe
df_final.drop_duplicates(subset='message', inplace=True)
df_final.dropna(subset=category_colnames, inplace=True)
#Refer ETL Pipeline preparation Notebook to understand why these columns are dropped
df_final = df_final[df_final.related != 2]
df_final = df_final.drop('child_alone', axis=1)
return df_final | 752d675d8ac5e27c61c9b8c90acee4cdab8c08fc | 3,657,031 |
def all_pairs_shortest_path_length(G,cutoff=None):
""" Compute the shortest path lengths between all nodes in G.
Parameters
----------
G : NetworkX graph
cutoff : integer, optional
depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dictionary
Dictionary of shortest path lengths keyed by source and target.
Notes
-----
The dictionary returned only has keys for reachable node pairs.
Examples
--------
>>> G=nx.path_graph(5)
>>> length=nx.all_pairs_shortest_path_length(G)
>>> print(length[1][4])
3
>>> length[1]
{0: 1, 1: 0, 2: 1, 3: 2, 4: 3}
"""
paths={}
for n in G:
paths[n]=single_source_shortest_path_length(G,n,cutoff=cutoff)
return paths | 1d312a71bd97d4f1a51a8b1e24331d54055bc156 | 3,657,033 |
def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None):
""" Figure out based on the possible columns inputs which columns to keep.
Args:
gctoo (GCToo object):
cid (list of strings):
col_bool (boolean array):
cidx (list of integers):
exclude_cid (list of strings):
Returns:
cols_to_keep (list of strings): col ids to be kept
"""
# Use cid if provided
if cid is not None:
assert type(cid) == list, "cid must be a list. cid: {}".format(cid)
cols_to_keep = [gctoo_col for gctoo_col in gctoo.meth_df.columns if gctoo_col in cid]
# Tell user if some cids not found
num_missing_cids = len(cid) - len(cols_to_keep)
if num_missing_cids != 0:
logger.info("{} cids were not found in the GCT.".format(num_missing_cids))
# Use col_bool if provided
elif col_bool is not None:
assert len(col_bool) == gctoo.meth_df.shape[1], (
"col_bool must have length equal to gctoo.meth_df.shape[1]. " +
"len(col_bool): {}, gctoo.meth_df.shape[1]: {}".format(
len(col_bool), gctoo.meth_df.shape[1]))
cols_to_keep = gctoo.meth_df.columns[col_bool].values
# Use cidx if provided
elif cidx is not None:
assert type(cidx[0]) is int, (
"cidx must be a list of integers. cidx[0]: {}, " +
"type(cidx[0]): {}").format(cidx[0], type(cidx[0]))
assert max(cidx) <= gctoo.meth_df.shape[1], (
"cidx contains an integer larger than the number of columns in " +
"the GCToo. max(cidx): {}, gctoo.meth_df.shape[1]: {}").format(
max(cidx), gctoo.meth_df.shape[1])
cols_to_keep = gctoo.meth_df.columns[cidx].values
# If cid, col_bool, and cidx are all None, return all columns
else:
cols_to_keep = gctoo.meth_df.columns.values
# Use exclude_cid if provided
if exclude_cid is not None:
# Keep only those columns that are not in exclude_cid
cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid]
return cols_to_keep | 1215a392ecb068e2d004c64cf56f2483c722f3f6 | 3,657,034 |
import shutil
def check_zenity():
""" Check if zenity is installed """
warning = '''zenity was not found in your $PATH
Installation is recommended because zenity is used to
indicate that protonfixes is doing work while waiting
for a game to launch. To install zenity use your system's
package manager.
'''
if not shutil.which('zenity'):
log.warn(warning)
return False
return True | decea9be11e0eb1d866ed295cb33a06aa663a432 | 3,657,035 |
def get_auth_token():
"""
Return the zerotier auth token for accessing its API.
"""
with open("/var/snap/zerotier-one/common/authtoken.secret", "r") as source:
return source.read().strip() | bd74fde05fbb375f8899d4e5d552ad84bcd80573 | 3,657,036 |
def sph_harm_transform(f, mode='DH', harmonics=None):
""" Project spherical function into the spherical harmonics basis. """
assert f.shape[0] == f.shape[1]
if isinstance(f, tf.Tensor):
sumfun = tf.reduce_sum
def conjfun(x): return tf.conj(x)
n = f.shape[0].value
else:
sumfun = np.sum
conjfun = np.conj
n = f.shape[0]
assert np.log2(n).is_integer()
if harmonics is None:
harmonics = sph_harm_all(n)
a = DHaj(n, mode)
f = f*np.array(a)[np.newaxis, :]
real = is_real_sft(harmonics)
coeffs = []
for l in range(n // 2):
row = []
minl = 0 if real else -l
for m in range(minl, l+1):
# WARNING: results are off by this factor, when using driscoll1994computing formulas
factor = 2*np.sqrt(np.pi)
row.append(sumfun(factor * np.sqrt(2*np.pi)/n *
f * conjfun(harmonics[l][m-minl])))
coeffs.append(row)
return coeffs | a88f9a71fa19a57441fdfe88e8b0632cc08fb413 | 3,657,037 |
def create_model(experiment_settings:ExperimentSettings) -> OuterModel:
"""
Create an OuterModel with the provided settings.
Args:
experiment_settings: an instance of ExperimentSettings holding the outer model settings
"""
model = OuterModel(experiment_settings.outer_settings)
model.compile(
loss= experiment_settings.outer_settings.loss,
optimizer=experiment_settings.outer_settings.optimizer,
metrics=experiment_settings.outer_settings.metrics,
)
return model | e6af03c5afd53a39e6929dba71990f91ff8ffbb3 | 3,657,038 |
import pickle
def LoadTrainingTime(stateNum):
"""
Load the number of seconds spent training
"""
filename = 'time_' + str(stateNum) + '.pth'
try:
timeVals = pickle.load( open(GetModelPath() + filename, "rb"))
return timeVals["trainingTime"]
except:
print("ERROR: Failed to load training times! Returning 0")
return 0 | 1db59103bf3e31360237951241b90b3a85dae2bc | 3,657,039 |
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 15 epochs"""
lr = args.lr * (0.1 ** (epoch // args.lr_epochs))
print('Learning rate:', lr)
for param_group in optimizer.param_groups:
if args.retrain and ('mask' in param_group['key']): # retraining
param_group['lr'] = 0.0
elif args.prune_target and ('mask' in param_group['key']):
if args.prune_target in param_group['key']:
param_group['lr'] = lr
else:
param_group['lr'] = 0.0
else:
param_group['lr'] = lr
return lr | dc08034b0176ac0062d6fc7640a115f916a663a8 | 3,657,040 |
def disk_status(hardware, disk, dgtype):
"""
Status disk
"""
value = int(float(disk['used']) / float(disk['total']) * 100.0)
if value >= 90:
level = DiagnosticStatus.ERROR
elif value >= 70:
level = DiagnosticStatus.WARN
else:
level = DiagnosticStatus.OK
# Make board diagnostic status
d_board = DiagnosticStatus(
level=level,
name='jetson_stats {type} disk'.format(type=dgtype),
message="{0:2.1f}GB/{1:2.1f}GB".format(disk['used'], disk['total']),
hardware_id=hardware,
values=[
KeyValue(key="Used", value=str(disk['used'])),
KeyValue(key="Total", value=str(disk['total'])),
KeyValue(key="Unit", value="GB")])
return d_board | f248ccb0ba07106c3ed923f9ac7bc2e85d9b5e63 | 3,657,041 |
def hr_admin(request):
""" Views for HR2 Admin page """
user = request.user
# extra_info = ExtraInfo.objects.select_related().get(user=user)
designat = HoldsDesignation.objects.select_related().get(user=user)
if designat.designation.name =='hradmin':
template = 'hr2Module/hradmin.html'
# searched employee
query = request.GET.get('search')
if(request.method == "GET"):
if(query != None):
emp = ExtraInfo.objects.filter(
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)|
Q(id__icontains=query)
).distinct()
emp = emp.filter(user_type="faculty")
else:
emp = ExtraInfo.objects.all()
emp = emp.filter(user_type="faculty")
else:
emp = ExtraInfo.objects.all()
emp = emp.filter(user_type="faculty")
context = {'emps': emp}
return render(request, template, context)
else:
return HttpResponse('Unauthorized', status=401) | 1b2c1027f8f4caf716019d9e5500223f76119a0b | 3,657,042 |
import six
def test_extra():
"""Returns dict of extrapolation testing modules."""
return {name: module.test_extra() for name, module in six.iteritems(all_)} | 5538f81891c0388ae0f5e312cb6c521ee19d18a5 | 3,657,043 |
import torch
def _switch_component(
x: torch.Tensor, ones: torch.Tensor, zeros: torch.Tensor
) -> torch.Tensor:
"""
Basic component of switching functions.
Args:
x (torch.Tensor): Switch functions.
ones (torch.Tensor): Tensor with ones.
zeros (torch.Tensor): Zero tensor
Returns:
torch.Tensor: Output tensor.
"""
x_ = torch.where(x <= 0, ones, x)
return torch.where(x <= 0, zeros, torch.exp(-ones / x_)) | 8d60c09428440be704e8ced9b8ac19219a0d0b04 | 3,657,044 |
def get_vector(x_array, y_array, pair):
"""This function is for calculating a vector of a bone from the openpose skelleton"""
x = x_array[:,pair[0]]-x_array[:,pair[1]]
y = y_array[:,pair[0]]-y_array[:,pair[1]]
return [x, y] | e2bfcce3952c6b0a2c8cd9c67c4cd7b52547694d | 3,657,045 |
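A small numpy example of the bone-vector helper above; rows are frames and columns are joint indices (an assumption based on the [:, pair[i]] slicing).
import numpy as np

x = np.array([[0.0, 3.0], [1.0, 4.0]])
y = np.array([[0.0, 4.0], [1.0, 5.0]])
vx, vy = get_vector(x, y, pair=(0, 1))
print(vx, vy)  # [-3. -3.] [-4. -4.]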
def update_bar(tweets_json, handle):
"""
Pull data from signal and updates aggregate bar graph
This is using thresholds that combine toxicity and severe toxicity models
suggested by Lucas.
"""
if not tweets_json:
raise PreventUpdate('no data yet!')
tweets_df = pd.read_json(tweets_json, orient='split')
low_count = tweets_df['LOW_LEVEL'].value_counts().get(True, 0)
med_count = tweets_df['MED_LEVEL'].value_counts().get(True, 0)
hi_count = tweets_df['HI_LEVEL'].value_counts().get(True, 0)
begin_date = tweets_df['display_time'].iloc[-1]
end_date = tweets_df['display_time'].iloc[0]
title = f"tweets at {handle}: {begin_date} – {end_date} (UTC)"
data = dict(
type='bar',
x=['Low', 'Medium', 'High'],
y=[low_count, med_count, hi_count],
marker=dict(
color=[colors['low'],
colors['medium'],
colors['high']])
)
return {
'data': [data],
'layout': dict(
type='layout',
title=title,
xaxis={'title': 'toxicity level'},
yaxis={'title': 'count'},
)
} | 1c523a455393ce211b8ef6483ee25b981e028bd0 | 3,657,046 |
from typing import List
from typing import Dict
def render_foreign_derivation(tpl: str, parts: List[str], data: Dict[str, str]) -> str:
"""
>>> render_foreign_derivation("bor", ["en", "ar", "الْعِرَاق", "", "Iraq"], defaultdict(str))
'Arabic <i>الْعِرَاق</i> (<i>ālʿrāq</i>, “Iraq”)'
>>> render_foreign_derivation("der", ["en", "fro", "-"], defaultdict(str))
'Old French'
>>> render_foreign_derivation("etyl", ["enm", "en"], defaultdict(str))
'Middle English'
>>> render_foreign_derivation("etyl", ["grc"], defaultdict(str))
'Ancient Greek'
>>> render_foreign_derivation("inh", ["en", "enm", "water"], defaultdict(str))
'Middle English <i>water</i>'
>>> render_foreign_derivation("inh", ["en", "ang", "wæter", "", "water"], defaultdict(str))
'Old English <i>wæter</i> (“water”)'
>>> render_foreign_derivation("inh", ["en", "ang", "etan"], defaultdict(str, {"t":"to eat"}))
'Old English <i>etan</i> (“to eat”)'
>>> render_foreign_derivation("inh", ["en", "ine-pro", "*werdʰh₁om", "*wr̥dʰh₁om"], defaultdict(str))
'Proto-Indo-European <i>*wr̥dʰh₁om</i>'
>>> render_foreign_derivation("noncog", ["fro", "livret"], defaultdict(str, {"t":"book, booklet"}))
'Old French <i>livret</i> (“book, booklet”)'
>>> render_foreign_derivation("noncog", ["xta", "I̱ta Ita"], defaultdict(str, {"lit":"flower river"})) #xochopa
'Alcozauca Mixtec <i>I̱ta Ita</i> (literally “flower river”)'
>>> render_foreign_derivation("noncog", ["egy", "ḫt n ꜥnḫ", "", "grain, food"], defaultdict(str, {"lit":"wood/stick of life"}))
'Egyptian <i>ḫt n ꜥnḫ</i> (“grain, food”, literally “wood/stick of life”)'
>>> render_foreign_derivation("cal", ["fr" , "en", "light year"], defaultdict(str, {"alt":"alt", "tr":"tr", "t":"t", "g":"m", "pos":"pos", "lit":"lit"}))
'Calque of English <i>alt</i> <i>m</i> (<i>tr</i>, “t”, pos, literally “lit”)'
>>> render_foreign_derivation("pcal", ["en" , "de", "Leberwurst"], defaultdict(str, {"nocap":"1"}))
'partial calque of German <i>Leberwurst</i>'
>>> render_foreign_derivation("sl", ["en", "ru", "пле́нум", "", "plenary session"], defaultdict(str, {"nocap":"1"}))
'semantic loan of Russian <i>пле́нум</i> (<i>plenum</i>, “plenary session”)'
>>> render_foreign_derivation("learned borrowing", ["en", "la", "consanguineus"], defaultdict(str))
'Learned borrowing from Latin <i>consanguineus</i>'
>>> render_foreign_derivation("learned borrowing", ["en", "LL.", "trapezium"], defaultdict(str, {"notext":"1"}))
'Late Latin <i>trapezium</i>'
>>> render_foreign_derivation("slbor", ["en", "fr", "mauvaise foi"], defaultdict(str, {"nocap":"1"}))
'semi-learned borrowing from French <i>mauvaise foi</i>'
>>> render_foreign_derivation("obor", ["en", "ru", "СССР"], defaultdict(str))
'Orthographic borrowing from Russian <i>СССР</i> (<i>SSSR</i>)'
>>> render_foreign_derivation("unadapted borrowing", ["en", "ar", "قِيَاس", "", "measurement, analogy"], defaultdict(str))
'Unadapted borrowing from Arabic <i>قِيَاس</i> (<i>qīās</i>, “measurement, analogy”)'
>>> render_foreign_derivation("psm", ["en", "yue", "-"], defaultdict(str))
'Phono-semantic matching of Cantonese'
>>> render_foreign_derivation("translit", ["en", "ar", "عَالِيَة"], defaultdict(str))
'Transliteration of Arabic <i>عَالِيَة</i> (<i>ʿālī</i>)'
>>> render_foreign_derivation("back-form", ["en", "zero derivation"], defaultdict(str, {"nocap":"1"}))
'back-formation from <i>zero derivation</i>'
>>> render_foreign_derivation("bf", ["en"], defaultdict(str))
'Back-formation'
>>> render_foreign_derivation("l", ["cs", "háček"], defaultdict(str))
'háček'
>>> render_foreign_derivation("l", ["en", "go", "went"], defaultdict(str))
'went'
>>> render_foreign_derivation("l", ["en", "God be with you"], defaultdict(str))
'God be with you'
>>> render_foreign_derivation("l", ["la", "similis"], defaultdict(str, {"t":"like"}))
'similis (“like”)'
>>> render_foreign_derivation("l", ["la", "similis", "", "like"], defaultdict(str))
'similis (“like”)'
>>> render_foreign_derivation("l", ["mul", "☧", ""], defaultdict(str))
'☧'
>>> render_foreign_derivation("l", ["ru", "ру́сский", "", "Russian"], defaultdict(str, {"g":"m"}))
'ру́сский <i>m</i> (<i>russkij</i>, “Russian”)'
>>> render_foreign_derivation("link", ["en", "water vapour"], defaultdict(str))
'water vapour'
>>> render_foreign_derivation("ll", ["en", "cod"], defaultdict(str))
'cod'
>>> render_foreign_derivation("m", ["en", "more"], defaultdict(str))
'<b>more</b>'
>>> render_foreign_derivation("m", ["enm", "us"], defaultdict(str))
'<i>us</i>'
>>> render_foreign_derivation("m", ["ine-pro", "*h₁ed-"], defaultdict(str, {"t":"to eat"}))
'<i>*h₁ed-</i> (“to eat”)'
>>> render_foreign_derivation("m", ["ar", "عِرْق", "", "root"], defaultdict(str))
'<i>عِرْق</i> (<i>ʿrq</i>, “root”)'
>>> render_foreign_derivation("m", ["pal"], defaultdict(str, {"tr":"ˀl'k'", "ts":"erāg", "t":"lowlands"}))
"(<i>ˀl'k'</i> /erāg/, “lowlands”)"
>>> render_foreign_derivation("m", ["ar", "عَرِيق", "", "deep-rooted"], defaultdict(str))
'<i>عَرِيق</i> (<i>ʿrīq</i>, “deep-rooted”)'
>>> render_foreign_derivation("langname-mention", ["en", "-"], defaultdict(str))
'English'
>>> render_foreign_derivation("m+", ["en", "-"], defaultdict(str))
'English'
>>> render_foreign_derivation("m+", ["ja", "力車"], defaultdict(str, {"tr":"rikisha"}))
'Japanese <i>力車</i> (<i>rikisha</i>)'
""" # noqa
# Short path for the {{m|en|WORD}} template
if tpl == "m" and len(parts) == 2 and parts[0] == "en" and not data:
return strong(parts[1])
mentions = (
"back-formation",
"back-form",
"bf",
"l",
"link",
"ll",
"mention",
"m",
)
dest_lang_ignore = (
"cog",
"cognate",
"etyl",
"langname-mention",
"m+",
"nc",
"ncog",
"noncog",
"noncognate",
*mentions,
)
if tpl not in dest_lang_ignore:
parts.pop(0) # Remove the destination language
dst_locale = parts.pop(0)
if tpl == "etyl" and parts:
parts.pop(0)
phrase = ""
starter = ""
word = ""
if data["notext"] != "1":
if tpl in ("calque", "cal", "clq"):
starter = "calque of "
elif tpl in ("partial calque", "pcal"):
starter = "partial calque of "
elif tpl in ("semantic loan", "sl"):
starter = "semantic loan of "
elif tpl in ("learned borrowing", "lbor"):
starter = "learned borrowing from "
elif tpl in ("semi-learned borrowing", "slbor"):
starter = "semi-learned borrowing from "
elif tpl in ("orthographic borrowing", "obor"):
starter = "orthographic borrowing from "
elif tpl in ("unadapted borrowing", "ubor"):
starter = "unadapted borrowing from "
elif tpl in ("phono-semantic matching", "psm"):
starter = "phono-semantic matching of "
elif tpl in ("transliteration", "translit"):
starter = "transliteration of "
elif tpl in ("back-formation", "back-form", "bf"):
starter = "back-formation"
if parts:
starter += " from"
phrase = starter if data["nocap"] == "1" else starter.capitalize()
lang = langs.get(dst_locale, "")
phrase += lang if tpl not in mentions else ""
if parts:
word = parts.pop(0)
if word == "-":
return phrase
word = data["alt"] or word
gloss = data["t"] or data["gloss"]
if parts:
word = parts.pop(0) or word # 4, alt=
if tpl in ("l", "link", "ll"):
phrase += f" {word}"
elif word:
phrase += f" {italic(word)}"
if data["g"]:
phrase += f' {italic(data["g"])}'
trans = ""
if not data["tr"]:
trans = transliterate(dst_locale, word)
if parts:
gloss = parts.pop(0) # 5, t=, gloss=
phrase += gloss_tr_poss(data, gloss, trans)
return phrase.lstrip() | af3c37664e683d9bff610ad1fa53a167f5390988 | 3,657,048 |
def create_from_ray(ray):
"""Converts a ray to a line.
The line will extend from 'ray origin -> ray origin + ray direction'.
:param numpy.array ray: The ray to convert.
:rtype: numpy.array
:return: A line beginning at the ray start and extending for 1 unit
in the direction of the ray.
"""
# convert ray relative direction to absolute
# position
return np.array([ray[0], ray[0] + ray[1]], dtype=ray.dtype) | 6d0429abbacd235f95636369985bea8a17117409 | 3,657,049 |
def opts2dict(opts):
"""Converts options returned from an OptionParser into a dict"""
ret = {}
for k in dir(opts):
if callable(getattr(opts, k)):
continue
if k.startswith('_'):
continue
ret[k] = getattr(opts, k)
return ret | cfa828f0248ff7565aabbb5c37a7bc6fa38c6450 | 3,657,052 |
def combined_directions(a_list, b_list):
"""
Takes two NoteList objects.
Returns a list of (3)tuples each of the form:
(
int: a dir,
int: b dir,
(int: bar #, float: beat #)
)
"""
onsets = note_onsets(a_list, b_list)
a_dirs = directions(a_list)
b_dirs = directions(b_list)
dirs = {}
for time in onsets:
dirs[time] = (0, 0)
for dir, time in a_dirs:
dirs[time] = (dir, dirs[time][1])
for dir, time in b_dirs:
dirs[time] = (dirs[time][0], dir)
return [
(dirs[time][0], dirs[time][1], time)
for time in onsets
] | 8b66d4de725c51b1abdedb8a8e4c48e78f4ca953 | 3,657,053 |
def _naive_csh_seismology(l, m, theta, phi):
"""
Compute the spherical harmonics according to the seismology convention, in a naive way.
This appears to be equal to the sph_harm function in scipy.special.
"""
return (lpmv(m, l, np.cos(theta)) * np.exp(1j * m * phi) *
np.sqrt(((2 * l + 1) * factorial(l - m))
/
(4 * np.pi * factorial(l + m)))) | ba2a17f0dfa6035a05d16c8af79310657fe6ecd7 | 3,657,054 |
def is_room_valid(room):
"""Check if room is valid."""
_, names, checksum = room
letters = defaultdict(int)
complete_name = ''.join(names)
for letter in complete_name:
letters[letter] += 1
sorted_alphabetic = sorted(letters)
sorted_by_occurrences = sorted(
sorted_alphabetic, key=letters.__getitem__, reverse=True)
return ''.join(sorted_by_occurrences).startswith(checksum) | b893cf97ee28b033741e4b2797b2a4aef485324f | 3,657,055 |
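Two checks in the spirit of the Advent of Code 2016 puzzle this resembles (that provenance is an assumption); a room tuple here is (sector, name parts, checksum).
assert is_room_valid((404, ["not", "a", "real", "room"], "oarel"))
assert not is_room_valid((200, ["totally", "real", "room"], "decoy"))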
def _mag_shrink_hard(x, r, t):
""" x is the input, r is the magnitude and t is the threshold
"""
gain = (r >= t).float()
return x * gain | da795bcfc2a6e4bfa3e54d1334c9d8865141a4f1 | 3,657,057 |
def wiki_data(request, pro_id):
""" 文章标题展示 """
data = models.Wiki.objects.filter(project_id=pro_id).values('id', 'title', 'parent_id').order_by('deepth')
return JsonResponse({'status': True, 'data': list(data)}) | 6dfbb79b78133935356bd87cc24a294ed0001b73 | 3,657,059 |
def many_capitalized_words(s):
"""Returns a function to check percentage of capitalized words.
The function returns 1 if percentage greater then 65% and 0 otherwise.
"""
return 1 if capitalized_words_percent(s) > 66 else 0 | cc82a2708defd545a1170bfeabb5848e3092fc39 | 3,657,061 |
def cmd_te_solution_build(abs_filename,wait=False,print_output=False,clear_output=False):
"""ソリューションをビルドする(テキストエディタ向け)
ファイルが含まれるVisual Studioを探し出してソリューションをビルドする。
VisualStudioの「メニュー -> ビルド -> ソリューションのビルド」と同じ動作。
abs_filename- ファイル名の絶対パス
(Ex.) c:/project/my_app/src/main.cpp
wait - True ビルド終了まで待つ(完了復帰)
False 即時復帰
print_output- True コンパイル結果をコンソールへ表示
False 何もしない
clear_output- True VisualStudioの出力ウインドウをクリアする
False 何もしない
"""
return _te_main(cmd_solution_build, abs_filename,wait,print_output,clear_output) | db48988d483da6ae9a012460e0d5fdd326d5ae40 | 3,657,062 |
def log_ratio_measure(
segmented_topics, accumulator, normalize=False, with_std=False, with_support=False):
"""
If normalize=False:
Popularly known as PMI.
This function calculates the log-ratio-measure which is used by
coherence measures such as c_v.
This is defined as: m_lr(S_i) = log[(P(W', W*) + e) / (P(W') * P(W*))]
If normalize=True:
This function calculates the normalized log ratio measure, popularly known as
NPMI, which is used by coherence measures such as c_v.
This is defined as: m_nlr(S_i) = m_lr(S_i) / -log[P(W', W*) + e]
Args:
segmented_topics (list): Output from the segmentation module of the segmented
topics. Is a list of list of tuples.
accumulator: word occurrence accumulator from probability_estimation.
with_std (bool): True to also include standard deviation across topic segment
sets in addition to the mean coherence for each topic; default is False.
with_support (bool): True to also include support across topic segments. The
support is defined as the number of pairwise similarity comparisons were
used to compute the overall topic coherence.
Returns:
list : of log ratio measure for each topic.
"""
topic_coherences = []
num_docs = float(accumulator.num_docs)
for s_i in segmented_topics:
segment_sims = []
for w_prime, w_star in s_i:
w_prime_count = accumulator[w_prime]
w_star_count = accumulator[w_star]
co_occur_count = accumulator[w_prime, w_star]
if normalize:
# For normalized log ratio measure
numerator = log_ratio_measure([[(w_prime, w_star)]], accumulator)[0]
co_doc_prob = co_occur_count / num_docs
m_lr_i = numerator / (-np.log(co_doc_prob + EPSILON))
else:
# For log ratio measure without normalization
numerator = (co_occur_count / num_docs) + EPSILON
denominator = (w_prime_count / num_docs) * (w_star_count / num_docs)
m_lr_i = np.log(numerator / denominator)
segment_sims.append(m_lr_i)
topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support))
return topic_coherences | 73fec59f84402066ccbbcd25d30cc69698f6b721 | 3,657,063 |
def _calculate_monthly_anomaly(data, apply_filter=False, base_period=None,
lat_name=None, lon_name=None, time_name=None):
"""Calculate monthly anomalies at each grid point."""
# Ensure that the data provided is a data array
data = rdu.ensure_data_array(data)
# Get coordinate names
lat_name = lat_name if lat_name is not None else rdu.get_lat_name(data)
lon_name = lon_name if lon_name is not None else rdu.get_lon_name(data)
time_name = time_name if time_name is not None else rdu.get_time_name(data)
# Get subset of data to use for computing anomalies
base_period = rdu.check_base_period(
data, base_period=base_period, time_name=time_name)
input_frequency = rdu.detect_frequency(data, time_name=time_name)
if input_frequency not in ('daily', 'monthly'):
raise RuntimeError(
'Can only calculate anomalies for daily or monthly data')
if input_frequency == 'daily':
data = data.resample({time_name: '1MS'}).mean()
base_period_data = data.where(
(data[time_name] >= base_period[0]) &
(data[time_name] <= base_period[1]), drop=True)
monthly_clim = base_period_data.groupby(
base_period_data[time_name].dt.month).mean(time_name)
monthly_anom = data.groupby(data[time_name].dt.month) - monthly_clim
if apply_filter:
monthly_anom = monthly_anom.rolling(
{time_name: 3}).mean().dropna(time_name, how='all')
# Approximate sampling frequency
seconds_per_day = 60 * 60 * 24.0
fs = 1.0 / (seconds_per_day * 30)
# Remove all modes with period greater than 7 years
fmin = 1.0 / (seconds_per_day * 365.25 * 7)
monthly_anom = _apply_fft_high_pass_filter(
monthly_anom, fmin=fmin, fs=fs, detrend=True,
time_name=time_name)
return monthly_anom | 397bffb8f22ae26cf2c41cd8c056951ef55d692d | 3,657,064 |
import pprint
def oxe_system_alaw_to_mulaw(host, token, mode):
"""Summary
Args:
host (TYPE): Description
token (TYPE): Description
mode (TYPE): Description
Returns:
TYPE: Description
"""
payload = {
'T0_Mu_Law': mode
}
packages.urllib3.disable_warnings(packages.urllib3.exceptions.InsecureRequestWarning)
try:
modification = put(
'https://' + host +
'/api/mgt/1.0/Node/1/System_Parameters/1/System_Parameters_2/1/System_/T0_Mu_Law',
json=payload,
headers=oxe_set_headers(token, 'PUT'),
verify=False)
except exceptions.RequestException as e:
pprint(e)
return modification.status_code | 19bb98f8326e84cde83691028a2fc2585a7abe6e | 3,657,067 |
def update_weights(comment_weights, comment_usage):
"""Updates the weights used to upvote comments so that the actual voting
power usage is equal to the estimated usage.
"""
desired_usage = 1.0 - VP_COMMENTS / 100.0
actual_usage = 1.0 - comment_usage / 100.0
scaler = np.log(desired_usage) / np.log(actual_usage)
for category in comment_weights.keys():
comment_weights[category] *= scaler
return comment_weights | 19d2f0a9ec790c26000946c0b91ef3bc00f36905 | 3,657,068 |
import math
def smaller2k(n):
"""
Returns power of 2 which is smaller than n. Handles negative numbers.
"""
if n == 0: return 0
if n < 0:
return -2**math.ceil(math.log2(-n))
else:
return 2**math.floor(math.log2(n)) | 0d0bbbf95cb22bf1b9ffb29012075534bcc9646d | 3,657,069 |
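A few checks of the rounding helper above:
assert smaller2k(10) == 8     # 2**3 is the largest power of two <= 10
assert smaller2k(8) == 8
assert smaller2k(-10) == -16  # for negatives the magnitude is rounded up
assert smaller2k(0) == 0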
def modeify(intcode, i):
    """Apply parameter modes to the parameters of the instruction at index i.
    Assumes an external helper ``opcode()`` (defined elsewhere in the program)
    that returns a dict with the keys 'param_count' and 'modes' for an opcode value.
    """
    j = i + 1
    _opcode = opcode(intcode[i])
    params = intcode[j: j + _opcode['param_count']]
    modes = _opcode['modes']
    mode_convert = {
        0: lambda x: intcode[x],  # position mode: parameter is an address
        1: lambda x: x            # immediate mode: parameter is a literal value
    }
    output = [mode_convert[mode](param) for mode, param in zip(modes, params)]
    return output | 230fb2e43c33558d94a7d60c6dd16978098421aa | 3,657,072
def unwind(g, num):
"""Return <num> first elements from iterator <g> as array."""
return [next(g) for _ in range(num)] | 59b724ca27729b4fc20d19a40f95d590025307c4 | 3,657,073 |
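A quick usage example.
gen = (i * i for i in range(10))
print(unwind(gen, 4))  # [0, 1, 4, 9]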
import re
def CPPComments(text):
"""Remove all C-comments and replace with C++ comments."""
# Keep the copyright header style.
line_list = text.splitlines(True)
copyright_list = line_list[0:10]
code_list = line_list[10:]
copy_text = ''.join(copyright_list)
code_text = ''.join(code_list)
# Remove */ for C-comments, don't care about trailing blanks.
comment_end = re.compile(r'\n[ ]*\*/[ ]*')
code_text = re.sub(comment_end, '', code_text)
comment_end = re.compile(r'\*/')
code_text = re.sub(comment_end, '', code_text)
# Remove comment lines in the middle of comments, replace with C++ comments.
comment_star = re.compile(r'(?<=\n)[ ]*(?!\*\w)\*[ ]*')
code_text = re.sub(comment_star, r'// ', code_text)
# Remove start of C comment and replace with C++ comment.
comment_start = re.compile(r'/\*[ ]*\n')
code_text = re.sub(comment_start, '', code_text)
comment_start = re.compile(r'/\*[ ]*(.)')
code_text = re.sub(comment_start, r'// \1', code_text)
# Add copyright info.
return copy_text + code_text | 0dd490f5497c073534abc30944bd49d0a3cf7e3e | 3,657,075 |
def get_bulk_statement(
stmt_type, table_name, column_names, dicts=True, value_string="%s", odku=False
):
"""Get a SQL statement suitable for use with bulk execute functions
Parameters
----------
stmt_type : str
One of REPLACE, INSERT, or INSERT IGNORE. **Note:** Backend support for
this varies.
table_name : str
Name of SQL table to use in statement
column_names : list
A list of column names to load
dicts : bool, optional
If true, assume the data will be a list of dict rows
value_string : str, optional
        The parameter replacement string used by the underlying DB API
odku : bool or list, optional
If true, add ON DUPLICATE KEY UPDATE clause for all columns. If a list
then only add it for the specified columns. **Note:** Backend support for
this varies.
Returns
-------
sql : str
The sql query string to use with bulk execute functions
"""
if not stmt_type.lower() in ("replace", "insert", "insert ignore"):
raise AssertionError("Invalid statement type: %s" % stmt_type)
columns_clause = ", ".join(["`%s`" % c for c in column_names])
if dicts:
values_clause = ", ".join(["%%(%s)s" % c for c in column_names])
else:
values_clause = ", ".join(["%s" % value_string for c in column_names])
sql = "%s INTO %s (%s) VALUES (%s)" % (
stmt_type,
table_name,
columns_clause,
values_clause,
)
if odku:
odku_cols = column_names
if isinstance(odku, (list, tuple)):
odku_cols = odku
odku_clause = ", ".join(["%s=VALUES(%s)" % (col, col) for col in odku_cols])
sql = sql + " ON DUPLICATE KEY UPDATE %s" % odku_clause
return escape_string(sql) | ba2277fc6f84d79a97d70cf98d2e26f308b8fa82 | 3,657,076 |
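A usage sketch; `escape_string` is an external helper in the original module, so a pass-through stand-in is assumed here just to make the call runnable.
def escape_string(sql):  # stand-in for the module's real helper (assumption)
    return sql
sql = get_bulk_statement("INSERT", "users", ["id", "name"], dicts=True, odku=["name"])
print(sql)
# INSERT INTO users (`id`, `name`) VALUES (%(id)s, %(name)s) ON DUPLICATE KEY UPDATE name=VALUES(name)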
def map_remove_by_value_range(bin_name, value_start, value_end, return_type, inverted=False):
"""Creates a map_remove_by_value_range operation to be used with operate or operate_ordered
The operation removes items, with values between value_start(inclusive) and
value_end(exclusive) from the map
Args:
bin_name (str): The name of the bin containing the map.
value_start: The start of the range of values to be removed. (Inclusive)
value_end: The end of the range of values to be removed. (Exclusive)
return_type (int): Value specifying what should be returned from the operation.
This should be one of the aerospike.MAP_RETURN_* values.
inverted (bool): If True, values outside of the specified range will be removed, and
values inside of the range will be kept. Default: False
Returns:
A dictionary usable in operate or operate_ordered. The format of the dictionary
should be considered an internal detail, and subject to change.
"""
op_dict = {
OP_KEY: aerospike.OP_MAP_REMOVE_BY_VALUE_RANGE,
BIN_KEY: bin_name,
VALUE_KEY: value_start,
RANGE_KEY: value_end,
RETURN_TYPE_KEY: return_type,
INVERTED_KEY: inverted
}
return op_dict | 42a49aefb92f61a3064e532390bdcf26b6266f40 | 3,657,077 |
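A hedged usage sketch; it assumes the key-name constants (OP_KEY, BIN_KEY, ...) defined elsewhere in the original helper module, and a reachable Aerospike server for the commented-out lines.
import aerospike
ops = [map_remove_by_value_range("scores", 0, 50, aerospike.MAP_RETURN_COUNT)]
# client = aerospike.client({"hosts": [("127.0.0.1", 3000)]}).connect()
# key = ("test", "demo", "user1")
# _, _, bins = client.operate(key, ops)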
from collections import defaultdict
import numpy as np
def rationalApproximation(points, N, tol=1e-3, lowest_order_only=True):
"""
Return rational approximations for a set of 2D points.
For a set of points :math:`(x,y)` where :math:`0 < x,y \\leq1`, return all
possible rational approximations :math:`(a,b,c) \\; a,b,c \\in \\mathbb{Z}`
such that :math:`(x,y) \\approx (a/c, b/c)`.
Arguments:
points: 2D (L x 2) points to approximate
N: max order
Returns:
``dict``: Dictionary with ``points`` as *keys* and the corresponding
``set`` of tuples ``(a,b,c)`` as values.
"""
L,_ = points.shape
# since this solutions assumes a>0, a 'quick' hack to also obtain solutions
# with a < 0 is to flip the dimensions of the points and explore those
# solutions as well
points = np.vstack((points, np.fliplr(points)))
solutions = defaultdict(set)
sequences = {1: set(fareySequence(1))}
for n in range(2, N+1):
sequences[n] = set(fareySequence(n)) - sequences[n-1]
for h,k in fareySequence(N,1):
if 0 in (h,k):
continue
# print h,k
for x,y in resonanceSequence(N, k):
# avoid 0-solutions
if 0 in (x,y):
continue
norm = np.sqrt(x**2+y**2)
n = np.array([ y/norm, x/norm]) * np.ones_like(points)
n[points[:,0] < h/k, 0] *= -1 # points approaching from the left
# nomenclature inspired in http://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Vector_formulation
ap = np.array([h/k, 0]) - points
apn = np.zeros((1,L))
d = np.zeros_like(points)
apn = np.sum(n*ap, 1, keepdims=True)
d = ap - apn*n
## DON'T RETURN IMMEDIATELY; THERE MIGHT BE OTHER SOLUTIONS OF THE SAME ORDER
indices, = np.nonzero(np.sqrt(np.sum(d*d,1)) <= tol)
for i in indices:
# print "h/k:", h , "/", k
# print "point:", points[i,:]
if points[i,0] >= h/k:
if i<L:
# print "non-flipped >= h/k"
solutions[i].add((x,-y, h*x/k))
# print i, (x,-y, h*x/k)
elif x*(-y)<0: # only consider solutions where (a,b) have different sign for the "flipped" points (the other solutions should have already been found for the non-flipped points)
# print "flipped >= h/k"
solutions[i-L].add((-y, x, h*x/k))
# print i-L, (-y, x, h*x/k)
else:
if i<L:
# print "non-flipped < h/k"
solutions[i].add((x, y, h*x/k))
# print i, (x, y, h*x/k)
elif x*y>0: # only consider solutions where (a,b) have different sign for the "flipped" points (the other solutions should have already been found for the non-flipped points)
# print "flipped < h/k"
solutions[i-L].add((y, x, h*x/k))
# print i-L, (y, x, h*x/k)
if lowest_order_only:
# removed = 0
for k in solutions:
# keep lowest order solutions only
lowest_order = 2*N
s = set([])
for sol in solutions[k]:
K = abs(sol[0])+abs(sol[1])+abs(sol[2])
if K == lowest_order:
s.add(sol)
elif K < lowest_order:
lowest_order = K
# if len(s) > 0:
# print("point: ({},{}) -> removing {} for {}".format(points[k,0], points[k,1], s, sol))
# removed += len(s)
s = set([sol])
solutions[k] = s
# print("Removed {} solutions".format(removed))
return solutions | 614c230ad7fd68cb60d0203cba2bd15e30f3f36a | 3,657,078 |
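A brief usage sketch (not standalone: it assumes the `fareySequence` and `resonanceSequence` helpers from the same module are available).
points = np.array([[0.5, 0.5],
                   [1.0 / 3.0, 2.0 / 3.0]])
solutions = rationalApproximation(points, N=5, tol=1e-3)
for idx, triples in solutions.items():
    print(points[idx], sorted(triples))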
def to_dict(doc, fields):
"""Warning: Using this convenience fn is probably not as efficient as the
plain old manually building up a dict.
"""
def map_field(prop):
val = getattr(doc, prop)
if isinstance(val, list):
return [(e.to_dict() if hasattr(e, 'to_dict') else e) for e in val]
else:
return val.to_dict() if hasattr(val, 'to_dict') else val
return {f: map_field(f) for f in fields} | cb51e3dfdf8c313f218e38d8693af9e7c6bf5045 | 3,657,080 |
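A small usage example with a nested object that exposes its own `to_dict`.
class Address:
    def __init__(self, city):
        self.city = city
    def to_dict(self):
        return {"city": self.city}
class Person:
    def __init__(self):
        self.name = "Ada"
        self.addresses = [Address("London"), Address("Turin")]
print(to_dict(Person(), ["name", "addresses"]))
# {'name': 'Ada', 'addresses': [{'city': 'London'}, {'city': 'Turin'}]}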
import time
def _auto_wrap_external(real_env_creator):
"""Wrap an environment in the ExternalEnv interface if needed.
Args:
real_env_creator (fn): Create an env given the env_config.
"""
def wrapped_creator(env_config):
real_env = real_env_creator(env_config)
if not isinstance(real_env, (ExternalEnv, ExternalMultiAgentEnv)):
logger.info(
"The env you specified is not a supported (sub-)type of "
"ExternalEnv. Attempting to convert it automatically to "
"ExternalEnv."
)
if isinstance(real_env, MultiAgentEnv):
external_cls = ExternalMultiAgentEnv
else:
external_cls = ExternalEnv
class ExternalEnvWrapper(external_cls):
def __init__(self, real_env):
super().__init__(
observation_space=real_env.observation_space,
action_space=real_env.action_space,
)
def run(self):
# Since we are calling methods on this class in the
# client, run doesn't need to do anything.
time.sleep(999999)
return ExternalEnvWrapper(real_env)
return real_env
return wrapped_creator | ef7f0c7ecdf3eea61a4e9dc0ad709e80d8a09e08 | 3,657,081 |
def _get_binary_link_deps(
base_path,
name,
linker_flags = (),
allocator = "malloc",
default_deps = True):
"""
Return a list of dependencies that should apply to *all* binary rules that link C/C++ code.
This also creates a sanitizer configuration rule if necessary, so this function
should not be called more than once for a given rule.
Args:
base_path: The package path
name: The name of the rule
        linker_flags: If provided, flags to pass to allocator/coverage/sanitizers to
            make sure proper dependent rules are generated.
        allocator: The allocator to use. This is generally set by a configuration option
            and retrieved in allocators.bzl
default_deps: If set, add in a list of "default deps", dependencies that
should generally be added to make sure binaries work consistently.
e.g. common/init
Returns:
A list of `RuleTarget` structs that should be added as dependencies.
"""
deps = []
# If we're not using a sanitizer add allocator deps.
if sanitizers.get_sanitizer() == None:
deps.extend(allocators.get_allocator_deps(allocator))
# Add in any dependencies required for sanitizers.
deps.extend(sanitizers.get_sanitizer_binary_deps())
deps.append(
_create_sanitizer_configuration(
base_path,
name,
linker_flags,
),
)
# Add in any dependencies required for code coverage
if coverage.get_coverage():
deps.extend(coverage.get_coverage_binary_deps())
# We link in our own implementation of `kill` to binaries (S110576).
if default_deps:
deps.append(_COMMON_INIT_KILL)
return deps | 06a52934a0c121b606c79a6f5ae58863645bba34 | 3,657,082 |
import numpy as np
def double2pointerToArray(ptr, n, m_sizes):
""" Converts ctypes 2D array into a 2D numpy array.
Arguments:
ptr: [ctypes double pointer]
n: [int] number of cameras
m_sizes: [list] number of measurements for each camera
Return:
arr_list: [list of ndarrays] list of numpy arrays, each list entry containing data for individual
cameras
"""
arr_list = []
# Go through every camera
for i in range(n):
# Init a new empty data array
arr = np.zeros(shape=(m_sizes[i]))
# Go through ctypes array and extract data for this camera
for j in range(m_sizes[i]):
arr[j] = ptr[i][j]
# Add the data for this camera to the final list
arr_list.append(arr)
return arr_list | f556c5a36f645c6047c3b487b7cd865edc3b76db | 3,657,084 |
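A usage sketch that builds a ctypes double** for two cameras with different measurement counts.
import ctypes
m_sizes = [2, 3]
rows = [(ctypes.c_double * m)(*[float(j) for j in range(m)]) for m in m_sizes]
ptr = (ctypes.POINTER(ctypes.c_double) * len(rows))(
    *[ctypes.cast(r, ctypes.POINTER(ctypes.c_double)) for r in rows]
)
arrays = double2pointerToArray(ptr, len(m_sizes), m_sizes)
print(arrays)  # [array([0., 1.]), array([0., 1., 2.])]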
def read_varint(stream: bytes):
"""
读取 varint。
Args:
stream (bytes): 字节流。
Returns:
tuple[int, int],真实值和占用长度。
"""
value = 0
position = 0
shift = 0
while True:
if position >= len(stream):
break
byte = stream[position]
value += (byte & 0b01111111) << shift
if byte & 0b10000000 == 0:
break
position += 1
shift += 7
return value, position + 1 | 58c8187501dc08b37f777256474f95412649bf04 | 3,657,085 |
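A classic protobuf-style check: 300 encodes as the two bytes 0xAC 0x02.
value, consumed = read_varint(b"\xac\x02")
print(value, consumed)  # 300 2
value, consumed = read_varint(b"\x05")
print(value, consumed)  # 5 1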
def any(array, mapFunc):
"""
Checks if any of the elements of array returns true, when applied on a function that returns a boolean.
:param array: The array that will be checked, for if any of the elements returns true, when applied on the function. \t
:type array: [mixed] \n
:param mapFunc: The function that gives a boolean value, when applied on the element of the array. \t
:type mapFunc: function \n
:returns: Whether any of the elements of the array, returned true or not. \t
:rtype: : bool \n
"""
for elem in array:
if mapFunc(elem):
return True
return False | 1e635da691fd1c2fc9d99e15fd7fa0461a7bdf0e | 3,657,087 |
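A quick usage example; note that defining this helper at module level shadows Python's built-in `any`.
print(any([1, 3, 5, 8], lambda x: x % 2 == 0))  # True
print(any([1, 3, 5], lambda x: x % 2 == 0))     # False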
def qt_point_to_point(qt_point, unit=None):
"""Create a Point from a QPoint or QPointF
Args:
qt_point (QPoint or QPointF): The source point
unit (Unit): An optional unit to convert
values to in the output `Point`. If omitted, values
in the output `Point` will be plain `int` or `float` values.
Returns: Point
"""
if unit:
return Point(qt_point.x(), qt_point.y()).to_unit(unit)
else:
return Point(qt_point.x(), qt_point.y()) | 595dacc2d39d126822bf680e1ed1784c05deb6d7 | 3,657,088 |
import requests
import json
def apiRequest(method, payload=None):
"""
Get request from vk server
:param get: method for vkApi
:param payload: parameters for vkApi
:return: answer from vkApi
"""
if payload is None:
payload = {}
if not ('access_token' in payload):
payload.update({'access_token': GROUP_TOKEN, 'v': V})
response = requests.post(BASE_URL + method, payload)
data = json.loads(response.text)
return data | b60c77aec5ae500b9d5e9901216c7ff7c93676ad | 3,657,089 |
from functools import wraps
from flask import redirect, session
def page_required_no_auth(f):
"""Full page, requires user to be logged out to access, otherwise redirects to main page."""
@wraps(f)
def wrapper(*args, **kwargs):
if "username" in session:
return redirect("/")
else:
return f(*args, **kwargs)
return wrapper | 7d7d314e10dcaf1d81ca5c713afd3da6a021247d | 3,657,090 |
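A usage sketch in a minimal Flask app; the route and template names are assumptions.
from flask import Flask, render_template
app = Flask(__name__)
app.secret_key = "change-me"  # required for session support
@app.route("/login")
@page_required_no_auth
def login_page():
    # Only reachable when no "username" is in the session
    return render_template("login.html")  # hypothetical template name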
import sympy
def generate_forward():
"""
Generate dataset with forward method
    It tries to integrate a random function.
    The integral may not be symbolically possible, or may contain invalid operators.
In those cases, it returns None.
"""
formula = symbolic.fixed_init(15)
integrated = sympy.integrate(formula, symbolic.x, meijerg=False)
if symbolic.is_integral_valid(integrated):
return (formula, integrated)
else:
return None | 91a91e5b23f3f59b49d8f7102585ff7fbfbbf6c4 | 3,657,092 |
import pickle
def load_agent(agent_args, domain_settings, experiment_settings):
"""
    This function loads a pickled agent from results/<env>/sarsa_lambda/agents/<filename>.pkl
    Args:
        agent_args: unused here, kept for a consistent loader interface
        domain_settings: unused here, kept for a consistent loader interface
        experiment_settings: dict containing at least the 'env' and 'filename' keys
    Returns:
        tuple: the loaded sarsa_lambda agent and None
"""
with open('results/' + experiment_settings['env'] + '/sarsa_lambda/agents/' + experiment_settings['filename'] + '.pkl', 'rb') as input:
my_agent = pickle.load(input)
return my_agent, None | a5769c952d9fcc583b8fb909e6e772c83b7126ca | 3,657,093 |
def unpickle_robust(bytestr):
""" robust unpickle of one byte string """
fin = BytesIO(bytestr)
unpickler = robust_unpickler(fin)
return unpickler.load() | 42fee03886b36aef5ab517e0abcb2cc2ecfd6a8b | 3,657,094 |
def build_ins_embed_branch(cfg, input_shape):
"""
Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
"""
name = cfg.MODEL.INS_EMBED_HEAD.NAME
return INS_EMBED_BRANCHES_REGISTRY.get(name)(cfg, input_shape) | 4d8242614426a13f9e93a241184bd3d8f57ef648 | 3,657,095 |
def atl03sp(ipx_region, parm, asset=icesat2.DEFAULT_ASSET):
"""
Performs ATL03 subsetting in parallel on ATL03 data and returns photon segment data.
See the `atl03sp <../api_reference/icesat2.html#atl03sp>`_ function for more details.
Parameters
----------
ipx_region: Query
icepyx region object defining the query of granules to be processed
    parm: dict
parameters used to configure ATL03 subsetting (see `Parameters <../user_guide/ICESat-2.html#parameters>`_)
asset: str
data source asset (see `Assets <../user_guide/ICESat-2.html#assets>`_)
Returns
-------
list
ATL03 segments (see `Photon Segments <../user_guide/ICESat-2.html#photon-segments>`_)
"""
try:
version = ipx_region.product_version
resources = ipx_region.avail_granules(ids=True)[0]
    except Exception:
logger.critical("must supply an icepyx query as region")
return icesat2.__emptyframe()
# try to get the subsetting region
if ipx_region.extent_type in ('bbox','polygon'):
parm.update({'poly': to_region(ipx_region)})
return icesat2.atl03sp(parm, asset, version=version, resources=resources) | 8c822af0d2f9b6e42bd6a1efeb29249a04079e66 | 3,657,096 |
def get_sample_activity_from_batch(activity_batch, idx=0):
"""Return layer activity for sample ``idx`` of an ``activity_batch``.
"""
return [(layer_act[0][idx], layer_act[1]) for layer_act in activity_batch] | 0302fdf215e63d6cbcd5dafc1bd36ae3d27712f2 | 3,657,097 |
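A small usage example where each entry pairs a batched activation array with a layer label.
import numpy as np
activity_batch = [
    (np.arange(6).reshape(3, 2), "dense_1"),   # batch of 3 samples, 2 units
    (np.arange(12).reshape(3, 4), "dense_2"),  # batch of 3 samples, 4 units
]
print(get_sample_activity_from_batch(activity_batch, idx=1))
# [(array([2, 3]), 'dense_1'), (array([4, 5, 6, 7]), 'dense_2')]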