content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64 0 to 3.66M)
---|---|---|
from typing import Tuple
def get_byte_range_bounds(byte_range_str: str, total_size: int) -> Tuple[int, int]:
"""Return the start and end byte of a byte range string."""
byte_range_str = byte_range_str.replace("bytes=", "")
segments = byte_range_str.split("-")
start_byte = int(segments[0])
# chrome does not send end_byte but safari does
# we need to handle this case and generate an end_byte if not provided
end_byte = min(
int(segments[-1]) if segments[-1] else start_byte + MAX_CHUNK_SIZE,
total_size,
)
return start_byte, end_byte | f376f5af0771901d9850e06f08ebd32b13243176 | 3,650,212 |
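A quick usage sketch for the record above (illustrative only; MAX_CHUNK_SIZE is assumed to be a module-level constant, e.g. 1 MiB, and is not part of the record):

MAX_CHUNK_SIZE = 1024 * 1024  # assumed value

# Safari-style header with an explicit end byte:
get_byte_range_bounds("bytes=0-499", total_size=10_000)   # -> (0, 499)
# Chrome-style open-ended header; the end byte is capped at total_size:
get_byte_range_bounds("bytes=500-", total_size=10_000)    # -> (500, 10_000)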
def privmsg(recipient, s, prefix='', msg=None):
"""Returns a PRIVMSG to recipient with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert (areReceivers(recipient)), repr(recipient)
assert s, 's must not be empty.'
if minisix.PY2 and isinstance(s, unicode):
s = s.encode('utf8')
assert isinstance(s, str)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PRIVMSG',
args=(recipient, s), msg=msg) | 1ff0087794732a06c7e2151ad6c255ca863ffb37 | 3,650,213 |
def char_decoding(value):
""" Decode from 'UTF-8' string to unicode.
:param value:
:return:
"""
if isinstance(value, bytes):
return value.decode('utf-8')
    # Return the value directly if it is already unicode.
return value | b8054b4a5012a6e23e2c08b6ff063cf3f71d6863 | 3,650,214 |
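For reference, the function above is a no-op on already-decoded input:

char_decoding(b'caf\xc3\xa9')   # -> 'café' (bytes decoded as UTF-8)
char_decoding('café')           # -> 'café' (str passes through unchanged)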
import numpy as np
def inv2(x: np.ndarray) -> np.ndarray:
"""矩阵求逆"""
# np.matrix()废弃
return np.matrix(x).I | 9b1b18dc0cbd248c977fd0ae0c35a65b4cd5b797 | 3,650,215 |
def clean_remaining_artifacts(image):
"""
    Method still under development. Use at your own risk!
    Remove remaining artifacts from the image.
    :param image: Path to image or 3D matrix representing an RGB image
    :return: Tuple of (inpainted RGB image, Otsu mask)
"""
img, *_ = __image__(image)
blur = cv2.GaussianBlur(img, (3, 3), 0)
# convert to hsv and get saturation channel
sat = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)[:, :, 1]
# threshold saturation channel
thresh = cv2.threshold(sat, 50, 255, cv2.THRESH_BINARY)[1]
# apply morphology close and open to make mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
mask = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel, iterations=1)
# do OTSU threshold to get melanoma image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
otsu = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
pre_otsu = otsu.copy()
otsu = cv2.dilate(otsu, kernel)
otsu = cv2.erode(otsu, kernel)
inv_otsu = otsu.copy()
inv_otsu[otsu == 255] = 0
inv_otsu[otsu == 0] = 255
inpaint = mask - inv_otsu
img_result = cv2.inpaint(img, inpaint, 100, cv2.INPAINT_TELEA)
return cv2.cvtColor(img_result, cv2.COLOR_BGR2RGB), otsu | 54473dcce5eb5764e80304836f0ca16ce4d82a77 | 3,650,216 |
def max_simple_dividers(a):
"""
    :param a: an integer from 1 to 1000
    :return: the largest prime divisor of the number
"""
return max(simple_dividers(a)) | 3fc2fb51e2940ed07db97285886e7cb30e99d5a0 | 3,650,217 |
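The record relies on a simple_dividers() helper that is not included; a minimal sketch of what it presumably does (prime factorisation), plus an example:

def simple_dividers(a):
    """Assumed helper: yield the prime factors of a."""
    d = 2
    while d * d <= a:
        while a % d == 0:
            yield d
            a //= d
        d += 1
    if a > 1:
        yield a

# With that helper, max_simple_dividers(84) returns 7, since 84 = 2 * 2 * 3 * 7.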
def to_light_low_sat(img, new_dims, new_scale, interp_order=1 ):
"""
Turn an image into lightness
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a lightness version of the original image
"""
img = skimage.img_as_float( img )
img = np.clip(img, 0.2, 0.8)
img = resize_image( img, new_dims, interp_order )
img = skimage.color.rgb2lab(img)[:,:,0]
img = rescale_image( img, new_scale, current_scale=[0,100])
return np.expand_dims(img,2) | 2aca96378551a2a083f1762f30582efe1560f2fb | 3,650,218 |
def get_slug_blacklist(lang=None, variant=None):
"""
Returns a list of KA slugs to skip when creating the channel.
Combines the "global" slug blacklist that applies for all channels, and
additional customization for specific languages or curriculum variants.
"""
    SLUG_BLACKLIST = list(GLOBAL_SLUG_BLACKLIST)  # copy so the module-level list is not mutated
if variant and (lang, variant) in SLUG_BLACKLIST_PER_LANG:
SLUG_BLACKLIST.extend(SLUG_BLACKLIST_PER_LANG[(lang, variant)])
elif lang in SLUG_BLACKLIST_PER_LANG:
SLUG_BLACKLIST.extend(SLUG_BLACKLIST_PER_LANG[lang])
else:
LOGGER.warning('No slugs for lang=' + lang + ' variant=' + str(variant))
return SLUG_BLACKLIST | e06dd582812ddd8d2d2e929c733dc957abb748d6 | 3,650,219 |
def get_level_rise(station):
"""For a MonitoringStation object (station), returns a the rate of water level rise, specifically
the average value over the last 2 days"""
#Fetch data (if no data available, return None)
times, values = fetch_measure_levels(station.measure_id, timedelta(days=2))
#Only continue if data available, otherwise return None
if times and values and (None in times or None in values) == False:
        #Get polynomial approximation of the water level data
poly, d0 = polyfit(times, values, p=4)
#Find derivative polynomial
level_der = np.polyder(poly)
#Obtain list of gradients over last 2 days using the derivative polynomial
grads = []
for t in times:
grads.append(level_der(date.date2num(t) - d0))
#Return average of gradient values
return np.average(grads)
else:
return None | 59de80a5bdb5b24711f45395ec9cbc282ad6ad44 | 3,650,220 |
def get_mask_indices(path):
"""Helper function to get raster mask for NYC
Returns:
list: returns list of tuples (row, column) that represent area of interest
"""
raster = tiff_to_array(path)
indices = []
it = np.nditer(raster, flags=['multi_index'])
while not it.finished:
if it[0] == 1:
r, c = it.multi_index
indices.append((r, c))
it.iternext()
return indices | ff957ffac0f79635e729f8880457d8aa33379185 | 3,650,221 |
def about(isin:str):
"""
Get company description.
Parameters
----------
isin : str
Desired company ISIN. ISIN must be of type EQUITY or BOND, see instrument_information() -> instrumentTypeKey
Returns
-------
TYPE
Dict with description.
"""
params = {'isin': isin}
return _data_request('about_the_company', params) | 86554c393087670637859d991bd2ae740ea4ff86 | 3,650,222 |
def aesEncrypt(message):
"""
Encrypts a message with a fresh key using AES-GCM.
Returns: (key, ciphertext)
"""
key = get_random_bytes(symmetricKeySizeBytes)
cipher = AES.new(key, AES.MODE_GCM)
ctext, tag = cipher.encrypt_and_digest(message)
# Concatenate (nonce, tag, ctext) and return with key
return key, (cipher.nonce + tag + ctext) | 06a13bb605f9038d8096f73681932a09022107b1 | 3,650,223 |
def getType(o):
"""There could be only return o.__class__.__name__"""
if isinstance(o, LispObj):
return o.type
return o.__class__.__name__ | a3602469d8a7d5f6372c6e74d868a903651f85f7 | 3,650,227 |
def failed_revisions_for_case_study(
case_study: CaseStudy, result_file_type: MetaReport
) -> tp.List[str]:
"""
Computes all revisions of this case study that have failed.
Args:
case_study: to work on
result_file_type: report type of the result files
Returns:
a list of failed revisions
"""
total_failed_revisions = set(
get_failed_revisions(case_study.project_name, result_file_type)
)
return [
rev for rev in case_study.revisions
if rev[:10] in total_failed_revisions
] | 2e9d5ab3818343e7a07fc3ff7242206f4b231e89 | 3,650,229 |
def bytes_load(path):
"""Load bytest from a file."""
with open(path, 'rb') as f:
return f.read() | ebbeb4bfcecfb94a1fa1ef8640f4e749bfa0dfcb | 3,650,231 |
def get_relationship_length_fam_mean(data):
"""Calculate mean length of relationship for families DataDef 43
Arguments:
data - data frames to fulfill definiton id
Modifies:
Nothing
Returns: added_members
mean_relationship_length - mean relationship length of families
"""
families = data[1]
return families['max_days_since_first_service'].mean() | 4d9b76c4dca3e1f09e7dd2684bd96e25792177fd | 3,650,232 |
def convert_hapmap(input_dataframe, recode=False, index_col=0):
""" Specifically deals with hapmap and 23anMe Output
"""
complement = {'G/T': 'C/A', 'C/T': 'G/A', "G/A" : "G/A", "C/A": "C/A", "A/G" : "A/G",
"A/C": "A/C"}
dataframe = input_dataframe.copy()
if recode:
        # pandas removed DataFrame.ix; positional indexing via iloc is assumed here
        recode = dataframe.iloc[:, index_col].apply(lambda x: complement[x])
        dataframe.iloc[:, 0] = recode
new_dataframe = dataframe.apply(_single_column_allele, axis=1)
return new_dataframe | c328cfe41e25f7fd3ff2274861fb6fc89effa181 | 3,650,233 |
def to_base64(message):
"""
Returns the base64 representation of a string or bytes.
"""
return b64encode(to_bytes(message)).decode('ascii') | d3f091f7dbf04850e8c40bca8e7fb0ec06f2848f | 3,650,234 |
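to_bytes() is not part of the record; assuming it UTF-8-encodes str input and passes bytes through, the behaviour is:

from base64 import b64encode

def to_bytes(message):
    # Assumed helper: encode str as UTF-8, pass bytes through unchanged.
    return message.encode('utf-8') if isinstance(message, str) else bytes(message)

to_base64('hello')   # -> 'aGVsbG8='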
import traceback
def create_alerts():
"""
Function to create alerts.
"""
try:
# validate post json data
content = request.json
print(content)
if not content: raise ValueError("Empty value")
if not 'timestamp' in content or not 'camera_id' in content or not 'class_id' in content: raise KeyError("Invalid dictionary keys")
if not isinstance(content.get('timestamp'), int): raise TypeError("Timestamp must be in int64 type")
if not isinstance(content.get('camera_id'), int): raise TypeError("Camera_id must be in int32 type")
class_id = content.get('class_id')
if not isinstance(class_id, list): raise TypeError("Class_id must be an array")
for val in class_id:
if not isinstance(val, int): raise TypeError("Array class_id values must be in int32 type")
except (ValueError, KeyError, TypeError) as e:
traceback.print_exc()
        resp = Response('{"error": "Json format error"}', status=400, mimetype='application/json')
return resp
try:
record_created = db.alerts.insert_one(content)
return jsonify(id=str(record_created.inserted_id)), 201
    except Exception:
#traceback.print_exc()
return jsonify(error="Internal server error"), 500 | ca824f0f356cf2b42e7a598810dc89f7121664ba | 3,650,235 |
import numpy
def load_catalog_npy(catalog_path):
"""
Load a numpy catalog (extension ".npy")
@param catalog_path: str
@return record array
"""
return numpy.load(catalog_path) | 912281ad17b043c6912075144e6a2ff3d849a391 | 3,650,238 |
def pgd(fname, n_gg=20, n_mm=20, n_kk=20, n_scale=1001):
"""
:param fname: data file name
:param n_gg: outer iterations
:param n_mm: intermediate iterations
:param n_kk: inner iterations
:param n_scale: number of discretized points, arbitrary
:return:
"""
n_buses, Qmax, Qmin, Y, V_mod, P_pq, Q_pq, P_pv, I0_pq, n_pv, n_pq = read_grid_data(fname)
SSk, SSp, SSq = init_apparent_powers_decomposition(n_buses, n_scale, P_pq, Q_pq, Qmin, Qmax)
VVk, VVp, VVq = init_voltages_decomposition(n_mm, n_buses, n_scale)
IIk, IIp, IIq = init_currents_decomposition(n_gg, n_mm, n_buses, n_scale)
n_max = n_gg * n_mm * n_kk
iter_count = 1
idx_i = 0
idx_v = 1
for gg in range(n_gg): # outer loop: iterate on γ to solve the power flow as such
for mm in range(n_mm): # intermediate loop: iterate on i to find the superposition of terms of the I tensor.
# define the new C
CCk, CCp, CCq, Nc, Nv, n = fun_C(SSk, SSp, SSq,
VVk, VVp, VVq,
IIk, IIp, IIq,
idx_i, idx_v,
n_buses, n_scale)
# initialize the residues we have to find
IIk1 = (np.random.rand(n_buses) - np.random.rand(n_buses)) * 1 # could also try to set IIk1 = VVk1
IIp1 = (np.random.rand(n_buses) - np.random.rand(n_buses)) * 1
IIq1 = (np.random.rand(n_scale) - np.random.rand(n_scale)) * 1
for kk in range(n_kk): # inner loop: iterate on Γ to find the residues.
# compute IIk1 (residues on Ik)
RHSk = np.zeros(n_buses, dtype=complex)
for ii in range(Nc):
prodRK = np.dot(IIp1, CCp[ii]) * np.dot(IIq1, CCq[ii])
RHSk += prodRK * CCk[ii]
LHSk = np.zeros(n_buses, dtype=complex)
for ii in range(Nv):
prodLK = np.dot(IIp1, VVp[ii] * IIp1) * np.dot(IIq1, VVq[ii] * IIq1)
LHSk += prodLK * VVk[ii]
IIk1 = RHSk / LHSk
# compute IIp1 (residues on Ip)
RHSp = np.zeros(n_buses, dtype=complex)
for ii in range(Nc):
prodRP = np.dot(IIk1, CCk[ii]) * np.dot(IIq1, CCq[ii])
RHSp += prodRP * CCp[ii]
LHSp = np.zeros(n_buses, dtype=complex)
for ii in range(Nv):
prodLP = np.dot(IIk1, VVk[ii] * IIk1) * np.dot(IIq1, VVq[ii] * IIq1)
LHSp += prodLP * VVp[ii]
IIp1 = RHSp / LHSp
# compute IIq1 (residues on Iq)
RHSq = np.zeros(n_scale, dtype=complex)
for ii in range(Nc):
prodRQ = np.dot(IIk1, CCk[ii]) * np.dot(IIp1, CCp[ii])
RHSq += prodRQ * CCq[ii]
LHSq = np.zeros(n_scale, dtype=complex)
for ii in range(Nv):
prodLQ = np.dot(IIk1, VVk[ii] * IIk1) * np.dot(IIp1, VVp[ii] * IIp1)
LHSq += prodLQ * VVq[ii]
IIq1 = RHSq / LHSq
progress_bar(iter_count, n_max, 50) # display the inner operations
iter_count += 1
IIk[idx_i, :] = IIk1
IIp[idx_i, :] = IIp1
IIq[idx_i, :] = IIq1
idx_i += 1
for ii in range(n_mm):
VVk[ii, :] = np.conj(sp_linalg.spsolve(Y, IIk[ii]))
VVp[ii, :] = IIp[ii]
VVq[ii, :] = IIq[ii]
# try to add I0 this way:
VVk[n_mm, :] = np.conj(sp_linalg.spsolve(Y, I0_pq))
VVp[n_mm, :] = np.ones(n_buses)
VVq[n_mm, :] = np.ones(n_scale)
idx_v = n_mm + 1
# VVk: size (n_mm + 1, nbus)
# VVp: size (n_mm + 1, nbus)
# VVq: size (n_mm + 1, n_scale)
v_map = build_map(VVk, VVp, VVq)
# SSk: size (2, nbus)
# SSp: size (2, nbus)
# SSq: size (2, n_scale)
s_map = build_map(SSk, SSp, SSq)
# IIk: size (n_gg * n_mm, nbus)
# IIp: size (n_gg * n_mm, nbus)
# IIq: size (n_gg * n_mm, n_scale)
i_map = build_map(IIk, IIp, IIq)
# the size of the maps is nbus, nbus, n_scale
return v_map, s_map, i_map | 5a00b6992ecf5c4f3b89fcf5151cae71c4a36298 | 3,650,239 |
def find_first_empty(rect):
"""
Scan a rectangle and find first open square
@param {Array} rect Board layout (rectangle)
@return {tuple} x & y coordinates of the leftmost top blank square
"""
return _find_first_empty_wrapped(len(rect[0]))(rect) | d266805761cda903733cef7704baff6d38576b04 | 3,650,240 |
import re
def parseArticle(text: str) -> str:
"""
Parses and filters an article. It uses the `wikitextparser` and custom logic.
"""
# clear the image attachments and links
text = re.sub("\[\[Податотека:.+\]\][ \n]", '', text)
text = wikipedia.filtering.clearCurlyBrackets(text)
# replace everything after "Надворешни врски"
links_location = re.search("[\=]+[ ]+(Поврзано|Наводи|Надворешни врски)[ ]+[\=]+", text)
if links_location != None:
text = text[:links_location.span()[0]]
# remove headings and break lines
text = re.sub("([\=]+.+[\=]+.+\n)|(<br />)", '\n', text)
# parse the file using the wikitextparser
parsed = wtp.parse(text)
return parsed.plain_text() | 7a17e31ec960b568debb9c6e7ccb8018bba19218 | 3,650,241 |
import torch
def exp2(input, *args, **kwargs):
"""
Computes the base two exponential function of ``input``.
Examples::
>>> import torch
>>> import treetensor.torch as ttorch
>>> ttorch.exp2(ttorch.tensor([-4.0, -1.0, 0, 2.0, 4.8, 8.0]))
tensor([6.2500e-02, 5.0000e-01, 1.0000e+00, 4.0000e+00, 2.7858e+01, 2.5600e+02])
>>> ttorch.exp2(ttorch.tensor({
... 'a': [-4.0, -1.0, 0, 2.0, 4.8, 8.0],
... 'b': {'x': [[-2.0, 1.2, 0.25],
... [16.0, 3.75, -2.34]]},
... }))
<Tensor 0x7ff90a4c3af0>
├── a --> tensor([6.2500e-02, 5.0000e-01, 1.0000e+00, 4.0000e+00, 2.7858e+01, 2.5600e+02])
└── b --> <Tensor 0x7ff90a4c3be0>
└── x --> tensor([[2.5000e-01, 2.2974e+00, 1.1892e+00],
[6.5536e+04, 1.3454e+01, 1.9751e-01]])
"""
return torch.exp2(input, *args, **kwargs) | 17cbc0917acf19932ec4d3a89de8d78545d02e31 | 3,650,242 |
def list_default_storage_policy_of_datastore(
datastore,
host=None,
vcenter=None,
username=None,
password=None,
protocol=None,
port=None,
verify_ssl=True,
):
"""
Returns a list of datastores assign the storage policies.
datastore
Name of the datastore to assign.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1
"""
    log.trace("Listing the default storage policy of datastore '{}'".format(datastore))
if salt.utils.platform.is_proxy():
details = __salt__["vmware_info.get_proxy_connection_details"]()
else:
details = __salt__["vmware_info.get_connection_details"](
host=host,
vcenter=vcenter,
username=username,
password=password,
protocol=protocol,
port=port,
verify_ssl=verify_ssl,
)
service_instance = saltext.vmware.utils.vmware.get_service_instance(**details)
# Find datastore
target_ref = __salt__["vmware_info.get_proxy_target"](service_instance)
ds_refs = saltext.vmware.utils.vmware.get_datastores(
service_instance, target_ref, datastore_names=[datastore]
)
if not ds_refs:
raise VMwareObjectRetrievalError("Datastore '{}' was not " "found".format(datastore))
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy = salt.utils.pbm.get_default_storage_policy_of_datastore(profile_manager, ds_refs[0])
return saltext.vmware.utils.get_policy_dict(policy) | caf729c258a4ece895d14fdd93ba1f23879453e8 | 3,650,243 |
import numpy
from itertools import product
def action_stats(env, md_action, cont_action):
"""
Get information on `env`'s action space.
Parameters
----------
md_action : bool
Whether the `env`'s action space is multidimensional.
cont_action : bool
Whether the `env`'s action space is continuous.
Returns
-------
n_actions_per_dim : list of length (action_dim,)
The number of possible actions for each dimension of the action space.
action_ids : list or None
A list of all valid actions within the space. If `cont_action` is
True, this value will be None.
action_dim : int or None
The number of dimensions in a single action.
"""
if cont_action:
action_dim = 1
action_ids = None
n_actions_per_dim = [numpy.inf]
if md_action:
action_dim = env.action_space.shape[0]
n_actions_per_dim = [numpy.inf for _ in range(action_dim)]
else:
if md_action:
n_actions_per_dim = [
space.n if hasattr(space, "n") else numpy.inf
for space in env.action_space.spaces
]
action_ids = (
None
if numpy.inf in n_actions_per_dim
else list(product(*[range(i) for i in n_actions_per_dim]))
)
action_dim = len(n_actions_per_dim)
else:
action_dim = 1
n_actions_per_dim = [env.action_space.n]
action_ids = list(range(n_actions_per_dim[0]))
return n_actions_per_dim, action_ids, action_dim | 8a08b3fe5be20f274680fe33df9ca02456bb511f | 3,650,244 |
def count(pred: Pred, seq: Seq) -> int:
"""
Count the number of occurrences in which predicate is true.
"""
pred = to_callable(pred)
return sum(1 for x in seq if pred(x)) | 09726935174d7590030331da62322da870e216aa | 3,650,245 |
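Assuming to_callable() passes plain callables through unchanged, the function lazily counts matching items:

count(lambda x: x % 2 == 0, [1, 2, 3, 4, 5])   # -> 2 (the even numbers)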
def lambda_k(W, Z, k):
"""Coulomb function $\lambda_k$ as per Behrens et al.
:param W: Total electron energy in units of its rest mass
:param Z: Proton number of daughter
:param k: absolute value of kappa
"""
#return 1.
gammak = np.sqrt(k**2.0-(ALPHA*Z)**2.0)
gamma1 = np.sqrt(1.-(ALPHA*Z)**2.0)
R = 1.2e-15*(2.5*Z)**(1./3.)/NATURALLENGTH
return generalizedFermiFunction(W, Z, R, k)/generalizedFermiFunction(W, Z, R, 1)*(k+gammak)/(k*(1+gamma1)) | 9f6a59bd730460c09a3c7f855550254ffb5cdc66 | 3,650,246 |
import pprint
import json
def tryJsonOrPlain(text):
"""Return json formatted, if possible. Otherwise just return."""
try:
return pprint.pformat( json.loads( text ), indent=1 )
    except Exception:
return text | 2431479abf6ab3c17ea63356ec740840d2d18a74 | 3,650,247 |
import aiohttp
import websockets
import random
def create_signaling(args):
"""
Create a signaling method based on command-line arguments.
"""
if args.signaling == "apprtc":
if aiohttp is None or websockets is None: # pragma: no cover
raise Exception("Please install aiohttp and websockets to use appr.tc")
if not args.signaling_room:
args.signaling_room = "".join(
[random.choice("0123456789") for x in range(10)]
)
return ApprtcSignaling(args.signaling_room)
elif args.signaling == "tcp-socket":
return TcpSocketSignaling(args.signaling_host, args.signaling_port)
elif args.signaling == "unix-socket":
return UnixSocketSignaling(args.signaling_path)
else:
return CopyAndPasteSignaling() | f9fe4ed35555381468dbf15379ab70a4af289ac9 | 3,650,248 |
def get_corpus_gene_adjacency(corpus_id):
"""Generate a nugget table."""
corpus = get_corpus(corpus_id)
data = get_gene_adjacency(corpus)
return jsonify(data), 200 | 9dd86e11eba5e5bc5094d89bd15dd9315df40480 | 3,650,249 |
def get_pool_health(pool):
""" Get ZFS list info. """
pool_name = pool.split()[0]
pool_capacity = pool.split()[6]
pool_health = pool.split()[9]
return pool_name, pool_capacity, pool_health | 1a9dbb8477d8735b225afc2bdd683f550602b36e | 3,650,250 |
def resize_short(img, target_size):
""" resize_short """
percent = float(target_size) / min(img.shape[0], img.shape[1])
resized_width = int(round(img.shape[1] * percent))
resized_height = int(round(img.shape[0] * percent))
resized_width = normwidth(resized_width)
resized_height = normwidth(resized_height)
resized = cv2.resize(img, (resized_width, resized_height))
return resized | ca9a71dc97a57c5739419c284514466e86dc3fa1 | 3,650,251 |
def _scale(aesthetic, name=None, breaks=None, labels=None, limits=None, expand=None, na_value=None, guide=None,
trans=None, **other):
"""
Create a scale (discrete or continuous)
:param aesthetic
The name of the aesthetic that this scale works with
:param name
The name of the scale - used as the axis label or the legend title
:param breaks
A numeric vector of positions (of ticks)
:param labels
A vector of labels (on ticks)
:param limits
A numeric vector of length two providing limits of the scale.
:param expand
A numeric vector of length two giving multiplicative and additive expansion constants.
:param na_value
Value to use for missing values
:param guide
Type of legend. Use 'colorbar' for continuous color bar, or 'legend' for discrete values.
:param trans
Name of built-in transformation. ('identity', 'log10', 'sqrt', 'reverse')
:return:
"""
# flatten the 'other' sub-dictionary
args = locals().copy()
args.pop('other')
return FeatureSpec('scale', **args, **other) | c8d98a52f2b87340e0a1df46b9f36ca811c18d5d | 3,650,252 |
def logsubexp(x, y):
"""
Helper function to compute the exponential
of a difference between two numbers
Computes: ``x + np.log1p(-np.exp(y-x))``
Parameters
----------
x, y : float or array_like
Inputs
"""
if np.any(x < y):
raise RuntimeError('cannot take log of negative number '
f'{str(x)!s} - {str(y)!s}')
return x + np.log1p(-np.exp(y - x)) | a0b0434fb2714f3d1dec24b88ce0fa9ff0110bc0 | 3,650,253 |
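A small numerical check (assuming numpy is imported as np, as the body implies): log(5 - 3) computed entirely in log-space.

import numpy as np

np.isclose(logsubexp(np.log(5.0), np.log(3.0)), np.log(2.0))   # -> True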
def is_sequence_of_list(items):
"""Verify that the sequence contains only items of type list.
Parameters
----------
items : sequence
The items.
Returns
-------
bool
True if all items in the sequence are of type list.
False otherwise.
Examples
--------
>>> is_sequence_of_list([[1], [1], [1]])
True
"""
return all(isinstance(item, list) for item in items) | e53e5d31e1c4f5649b2f03edf792f810bc398446 | 3,650,254 |
def sum_fib_dp(m, n):
"""
A dynamic programming version.
"""
if m > n: m, n = n, m
large, small = 1, 0
    # a running sum of the Fibonacci numbers F[m] .. F[n]
running = 0
# dynamically update the two variables
for i in range(n):
large, small = large + small, large
# note that (i + 1) -> small is basically mapping m -> F[m]
if m <= i + 1 <= n:
running += small
return running | 5be6e57ddf54d185ca6d17adebd847d0bc2f56fc | 3,650,255 |
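Illustrative values, using the indexing implied by the comments above (F(1) = F(2) = 1):

sum_fib_dp(1, 5)   # -> 12, i.e. 1 + 1 + 2 + 3 + 5
sum_fib_dp(5, 1)   # -> 12, arguments may be given in either order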
def fibo_dyn2(n):
"""
    Return the n-th Fibonacci number.
"""
if n < 2:
return 1
else:
a, b = 1, 1
for _ in range(1,n):
a, b = b, a+b
return b | e8483e672914e20c6e7b892f3dab8fb299bac6fc | 3,650,256 |
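For reference, the sequence produced by this indexing convention starts at 1:

[fibo_dyn2(n) for n in range(6)]   # -> [1, 1, 2, 3, 5, 8]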
import time
import math
def build_all(box, request_list):
"""
    box is [handle, left, top, bottom] \n
    request_list is an array of dicts \n
    ****** Attention
    Before running this function, you should be on the index (main) screen.
    After build_all, the function will close the train-troop window.
"""
# get the box of windows
left = box[1]
top = box[2]
positions = init_pos_army()
# get the information about request
request = request_deal(request_list[0]['str'])
num_army = int(request_list[0]['army']['max'])
num_spells = int(request_list[0]['spells']['max'])
num_devices = int(request_list[0]['device']['max'])
num_army_fill_in = int(request_list[0]['army']['fill_in'])
num_spells_fill_in = int(request_list[0]['spells']['fill_in'])
num_device_fill_in = int(request_list[0]['device']['fill_in'])
# open army
time.sleep(0.2)
Click(left + positions['army'][0], top + positions['army'][1])
# select dragon
if request[0] != None:
# open train troops
time.sleep(0.2)
Click(left + positions['train_troops'][0], top + positions['train_troops'][1])
if ( num_army - num_army_fill_in ) >= num_housing_space[request[0]]:
for index in range( math.floor( ( num_army - num_army_fill_in ) / num_housing_space[request[0]] ) ):
time.sleep(0.2)
Click(left + positions[request[0]][0], top + positions[request[0]][1])
# select speed increase
if request[1] != None:
# open brew spells
time.sleep(0.2)
Click(left + positions['Brew_spells'][0], top + positions['Brew_spells'][1])
if ( num_spells - num_spells_fill_in ) >= num_housing_space[request[1]]:
for index in range( math.floor( ( num_spells - num_spells_fill_in ) / num_housing_space[request[1]] ) ):
time.sleep(0.2)
Click(left + positions[request[1]][0], top + positions[request[1]][1])
# select device
# if request[2] != None:
# open brew spells
##
# close the army
time.sleep(0.2)
Click(left + positions['close_army'][0], top + positions['close_army'][1])
print('close the army')
return True | 59db695f802867e85a5e920f2efa8f652ac12823 | 3,650,257 |
def select_results(results):
"""Select relevant images from results
Selects most recent image for location, and results with positive fit index.
"""
# Select results with positive bestFitIndex
results = [x for x in results['items'] if x['bestFitIndex'] > 0]
# counter_dict schema:
# counter_dict = {
# bounds: {
# 'dateCreated': date,
# 'downloadURL'
# }
# }
counter_dict = {}
for result in results:
bounds = result_to_bounds(result)
# does something already exist with these bounds?
existing = counter_dict.get(bounds)
# If exists, check if newer
if existing is not None:
existing_date = existing['dateCreated']
this_date = date_parse(result['dateCreated'])
if this_date < existing_date:
continue
# Doesn't exist yet or is newer, so add to dict
counter_dict[bounds] = {
'dateCreated': date_parse(result['dateCreated']),
'downloadURL': result['downloadURL']}
return [x['downloadURL'] for x in counter_dict.values()] | 85940fe93b33d79ca0a799ca52a9e68439e7e822 | 3,650,258 |
def dc_session(virtual_smoothie_env, monkeypatch):
"""
Mock session manager for deck calibation
"""
ses = endpoints.SessionManager()
monkeypatch.setattr(endpoints, 'session', ses)
return ses | 06876e5ce2d599086efcb37688f5181f32392068 | 3,650,259 |
def is_available(_cache={}):
"""Return version tuple and None if OmnisciDB server is accessible or
recent enough. Otherwise return None and the reason about
unavailability.
"""
if not _cache:
omnisci = next(global_omnisci_singleton)
try:
version = omnisci.version
except Exception as msg:
_cache['reason'] = 'failed to get OmniSci version: %s' % (msg)
else:
print(' OmnisciDB version', version)
if version[:2] >= (4, 6):
_cache['version'] = version
else:
_cache['reason'] = (
'expected OmniSci version 4.6 or greater, got %s'
% (version,))
return _cache.get('version', ()), _cache.get('reason', '') | 0edcbfcd1ecb6a56b4b4a6f55907271c9094b8d8 | 3,650,260 |
import copy
def pitch_info_from_pitch_string(pitch_str: str) -> PitchInfo:
"""
Parse a pitch string representation. E.g. C#4, A#5, Gb8
"""
parts = tuple((c for c in pitch_str))
size = len(parts)
pitch_class = register = accidental = None
if size == 1:
(pitch_class,) = parts
elif size == 2:
(pitch_class, register) = parts
elif size >= 3:
(pitch_class, accidental, register) = parts[:3]
accidental = Accidental.SHARP if accidental == '#' \
else Accidental.FLAT if accidental == 'b' \
else Accidental.NATURAL
register = int(register)
pitch_info = PitchInfo(pitch_class=pitch_class, accidental=accidental)
matching_chromatic_pitch_info, _ = next(
matching_pitch_info_generator(pitch_info, CHROMATIC_PITCHES_INFO)
)
final_pitch_info = copy.deepcopy(matching_chromatic_pitch_info)
final_pitch_info.register = register
if is_enharmonic_match(pitch_info, matching_chromatic_pitch_info):
final_pitch_info.swap_enharmonic()
return final_pitch_info | cc1b6c0fe64834fe3fdc5249078e10e8f3af1434 | 3,650,261 |
def determine_word_type(tag):
"""
Determines the word type by checking the tag returned by the nltk.pos_tag(arr[str]) function.
Each word in the array is marked with a special tag which can be used to find the correct type of a word.
A selection is given in the dictionaries.
Args:
tag : String tag from the nltk.pos_tag(str) function that classified the particular word with a tag
Returns:
str: Word type as a string
"""
types = {
"noun" : {"NN", "NNS", "NNPS", "FW"},
"adjective" : {"JJ", "JJR", "JJS"},
"verb" : {"VB", "VBD", "VBG", "VBN", "VBP", "VBZ"},
"adverb" : {"RB", "RBR"}
}
    for type_, set_ in types.items():
if tag in set_:
return type_
return "noun" | 4505d2cf69f961ecdec4e3c693ea85b916acce96 | 3,650,262 |
def get_normalized_map_from_google(normalization_type, connection=None, n_header_lines=0):
"""
get normalized voci or titoli mapping from gdoc spreadsheets
:param: normalization_type (t|v)
:param: connection - (optional) a connection to the google account (singleton)
:param: n_header_lines - (optional) n. of lines to ignore
:ret: a dict, containing the consuntivo and preventivo sheets
"""
# get all gdocs keys
gdoc_keys = settings.GDOC_KEYS
if normalization_type == 't':
gdoc_key = gdoc_keys['titoli_map']
elif normalization_type == 'v':
gdoc_key = gdoc_keys['voci_map']
else:
raise Exception("normalization_type arg accepts 't' or 'v' as possible values")
if connection is None:
connection = get_connection()
# open the list worksheet
list_sheet = None
try:
list_sheet = connection.open_by_key(gdoc_key)
except exceptions.SpreadsheetNotFound:
raise Exception("Error: gdoc url not found: {0}".format(
gdoc_key
))
logger.info("normalized mapping gdoc read. key: {0}".format(
gdoc_key
))
# put the mapping into the voci_map dict
# preventivo and consuntivo sheets are appended in a single list
# the first two rows are removed (labels)
try:
logger.info("reading preventivo ...")
voci_map_preventivo = list_sheet.worksheet("preventivo").get_all_values()[n_header_lines:]
logger.info("reading consuntivo ...")
voci_map_consuntivo = list_sheet.worksheet("consuntivo").get_all_values()[n_header_lines:]
except URLError:
raise Exception("Connection error to Gdrive")
logger.info("done with reading the mapping list.")
return {
'preventivo': voci_map_preventivo,
'consuntivo': voci_map_consuntivo,
} | 61056536cae2da9053f21d2739488c5512546a68 | 3,650,263 |
import io
def parse_file(fname, is_true=True):
"""Parse file to get labels."""
labels = []
    with io.open(fname, "r", encoding="utf-8", errors="ignore") as fin:
for line in fin:
label = line.strip().split()[0]
if is_true:
assert label[:9] == "__label__"
label = label[9:]
labels.append(label)
return labels | ea6cbd4b1a272f472f8a75e1cc87a2209e439205 | 3,650,264 |
def make_mesh(object_name, object_colour=(0.25, 0.25, 0.25, 1.0), collection="Collection"):
"""
Create a mesh then return the object reference and the mesh object
:param object_name: Name of the object
:type object_name: str
:param object_colour: RGBA colour of the object, defaults to a shade of grey
:type object_colour: (float, float, float, float)
:param collection: Where you want the objected to be added, defaults to Collection
:type collection: str
:return: Object reference and mesh reference
"""
# Make the block
mesh = bpy.data.meshes.new(object_name) # add the new mesh
obj = bpy.data.objects.new(mesh.name, mesh)
create_emission_node(obj, object_colour)
col = bpy.data.collections.get(collection)
col.objects.link(obj)
bpy.context.view_layer.objects.active = obj
return obj, mesh | 73fd8d13d471c55258a06feb71eec51ca51f23f9 | 3,650,266 |
import optparse
def _OptionParser():
"""Returns the options parser for run-bisect-perf-regression.py."""
usage = ('%prog [options] [-- chromium-options]\n'
'Used by a try bot to run the bisection script using the parameters'
' provided in the auto_bisect/bisect.cfg file.')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-w', '--working_directory',
type='str',
help='A working directory to supply to the bisection '
'script, which will use it as the location to checkout '
'a copy of the chromium depot.')
parser.add_option('-p', '--path_to_goma',
type='str',
help='Path to goma directory. If this is supplied, goma '
'builds will be enabled.')
parser.add_option('--path_to_config',
type='str',
help='Path to the config file to use. If this is supplied, '
'the bisect script will use this to override the default '
'config file path. The script will attempt to load it '
'as a bisect config first, then a perf config.')
parser.add_option('--extra_src',
type='str',
help='Path to extra source file. If this is supplied, '
'bisect script will use this to override default behavior.')
parser.add_option('--dry_run',
action="store_true",
help='The script will perform the full bisect, but '
'without syncing, building, or running the performance '
'tests.')
return parser | 7485db294d89732c2c5223a3e3fe0b7773444b49 | 3,650,267 |
def calc_radiance(wavel, Temp):
"""
Calculate the blackbody radiance
Parameters
----------
wavel: float or array
wavelength (meters)
Temp: float
temperature (K)
Returns
-------
Llambda: float or arr
monochromatic radiance (W/m^2/m/sr)
"""
Llambda_val = c1 / (wavel**5. * (np.exp(c2 / (wavel * Temp)) - 1))
return Llambda_val | 9a957b42e0e92614709f7157765f0185e1dd532a | 3,650,268 |
def _JMS_to_Fierz_III_IV_V(C, qqqq):
"""From JMS to 4-quark Fierz basis for Classes III, IV and V.
`qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc."""
#case dduu
classIII = ['sbuc', 'sbcu', 'dbuc', 'dbcu', 'dsuc', 'dscu']
classVdduu = ['sbuu' , 'dbuu', 'dsuu', 'sbcc' , 'dbcc', 'dscc']
if qqqq in classIII + classVdduu:
f1 = dflav[qqqq[0]]
f2 = dflav[qqqq[1]]
f3 = uflav[qqqq[2]]
f4 = uflav[qqqq[3]]
return {
'F' + qqqq + '1' : C["V1udLL"][f3, f4, f1, f2]
- C["V8udLL"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '2' : C["V8udLL"][f3, f4, f1, f2] / 2,
'F' + qqqq + '3' : C["V1duLR"][f1, f2, f3, f4]
- C["V8duLR"][f1, f2, f3, f4] / (2 * Nc),
'F' + qqqq + '4' : C["V8duLR"][f1, f2, f3, f4] / 2,
'F' + qqqq + '5' : C["S1udRR"][f3, f4, f1, f2]
- C["S8udduRR"][f3, f2, f1, f4] / 4
- C["S8udRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '6' : -C["S1udduRR"][f3, f2, f1, f4] / 2
+ C["S8udduRR"][f3, f2, f1, f4] /(4 * Nc)
+ C["S8udRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '7' : -C["V8udduLR"][f4, f1, f2, f3].conj(),
'F' + qqqq + '8' : -2 * C["V1udduLR"][f4, f1, f2, f3].conj()
+ C["V8udduLR"][f4, f1, f2, f3].conj() / Nc,
'F' + qqqq + '9' : -C["S8udduRR"][f3, f2, f1, f4] / 16,
'F' + qqqq + '10' : -C["S1udduRR"][f3, f2, f1, f4] / 8
+ C["S8udduRR"][f3, f2, f1, f4] / (16 * Nc),
'F' + qqqq + '1p' : C["V1udRR"][f3, f4, f1, f2]
- C["V8udRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '2p' : C["V8udRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '3p' : C["V1udLR"][f3, f4, f1, f2]
- C["V8udLR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '4p' : C["V8udLR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '5p' : C["S1udRR"][f4, f3, f2, f1].conj() -
C["S8udduRR"][f4, f1, f2, f3].conj() / 4
- C["S8udRR"][f4, f3, f2, f1].conj() / (2 * Nc),
'F' + qqqq + '6p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 2 +
C["S8udduRR"][f4, f1, f2, f3].conj()/(4 * Nc)
+ C["S8udRR"][f4, f3, f2, f1].conj() / 2,
'F' + qqqq + '7p' : -C["V8udduLR"][f3, f2, f1, f4],
'F' + qqqq + '8p' : - 2 * C["V1udduLR"][f3, f2, f1, f4]
+ C["V8udduLR"][f3, f2, f1, f4] / Nc,
'F' + qqqq + '9p' : -C["S8udduRR"][f4, f1, f2, f3].conj() / 16,
'F' + qqqq + '10p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 8
+ C["S8udduRR"][f4, f1, f2, f3].conj() / 16 / Nc
}
classVuudd = ['ucdd', 'ucss', 'ucbb']
if qqqq in classVuudd:
f3 = uflav[qqqq[0]]
f4 = uflav[qqqq[1]]
f1 = dflav[qqqq[2]]
f2 = dflav[qqqq[3]]
return {
'F' + qqqq + '1' : C["V1udLL"][f3, f4, f1, f2]
- C["V8udLL"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '2' : C["V8udLL"][f3, f4, f1, f2] / 2,
'F' + qqqq + '3p' : C["V1duLR"][f1, f2, f3, f4]
- C["V8duLR"][f1, f2, f3, f4] / (2 * Nc),
'F' + qqqq + '4p' : C["V8duLR"][f1, f2, f3, f4] / 2,
'F' + qqqq + '5' : C["S1udRR"][f3, f4, f1, f2]
- C["S8udduRR"][f3, f2, f1, f4] / 4
- C["S8udRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '6' : -C["S1udduRR"][f3, f2, f1, f4] / 2
+ C["S8udduRR"][f3, f2, f1, f4] /(4 * Nc)
+ C["S8udRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '7p' : -C["V8udduLR"][f4, f1, f2, f3].conj(),
'F' + qqqq + '8p' : -2 * C["V1udduLR"][f4, f1, f2, f3].conj()
+ C["V8udduLR"][f4, f1, f2, f3].conj() / Nc,
'F' + qqqq + '9' : -C["S8udduRR"][f3, f2, f1, f4] / 16,
'F' + qqqq + '10' : -C["S1udduRR"][f3, f2, f1, f4] / 8
+ C["S8udduRR"][f3, f2, f1, f4] / (16 * Nc),
'F' + qqqq + '1p' : C["V1udRR"][f3, f4, f1, f2]
- C["V8udRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '2p' : C["V8udRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '3' : C["V1udLR"][f3, f4, f1, f2]
- C["V8udLR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '4' : C["V8udLR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '5p' : C["S1udRR"][f4, f3, f2, f1].conj() -
C["S8udduRR"][f4, f1, f2, f3].conj() / 4
- C["S8udRR"][f4, f3, f2, f1].conj() / (2 * Nc),
'F' + qqqq + '6p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 2 +
C["S8udduRR"][f4, f1, f2, f3].conj()/(4 * Nc)
+ C["S8udRR"][f4, f3, f2, f1].conj() / 2,
'F' + qqqq + '7' : -C["V8udduLR"][f3, f2, f1, f4],
'F' + qqqq + '8' : - 2 * C["V1udduLR"][f3, f2, f1, f4]
+ C["V8udduLR"][f3, f2, f1, f4] / Nc,
'F' + qqqq + '9p' : -C["S8udduRR"][f4, f1, f2, f3].conj() / 16,
'F' + qqqq + '10p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 8
+ C["S8udduRR"][f4, f1, f2, f3].conj() / 16 / Nc
}
#case dddd
classIV = ['sbsd', 'dbds', 'bsbd']
classVdddd = ['sbss', 'dbdd', 'dsdd', 'sbbb', 'dbbb', 'dsss']
classVddddind = ['sbdd', 'dsbb', 'dbss']
if qqqq in classIV + classVdddd + classVddddind:
f1 = dflav[qqqq[0]]
f2 = dflav[qqqq[1]]
f3 = dflav[qqqq[2]]
f4 = dflav[qqqq[3]]
return {
'F'+ qqqq +'1' : C["VddLL"][f3, f4, f1, f2],
'F'+ qqqq +'2' : C["VddLL"][f1, f4, f3, f2],
'F'+ qqqq +'3' : C["V1ddLR"][f1, f2, f3, f4]
- C["V8ddLR"][f1, f2, f3, f4]/(2 * Nc),
'F'+ qqqq +'4' : C["V8ddLR"][f1, f2, f3, f4] / 2,
'F'+ qqqq +'5' : C["S1ddRR"][f3, f4, f1, f2]
- C["S8ddRR"][f3, f2, f1,f4] / 4
- C["S8ddRR"][f3, f4, f1, f2] / (2 * Nc),
'F'+ qqqq +'6' : -C["S1ddRR"][f1, f4, f3, f2] / 2
+ C["S8ddRR"][f3, f2, f1, f4] / (4 * Nc)
+ C["S8ddRR"][f3, f4, f1, f2] / 2,
'F'+ qqqq +'7' : -C["V8ddLR"][f1, f4, f3, f2],
'F'+ qqqq +'8' : -2 * C["V1ddLR"][f1, f4, f3, f2]
+ C["V8ddLR"][f1, f4, f3, f2] / Nc,
'F'+ qqqq +'9' : -C["S8ddRR"][f3, f2, f1, f4] / 16,
'F'+ qqqq +'10' : -C["S1ddRR"][f1, f4, f3, f2] / 8
+ C["S8ddRR"][f3, f2, f1, f4] / (16 * Nc),
'F'+ qqqq +'1p' : C["VddRR"][f3, f4, f1, f2],
'F'+ qqqq +'2p' : C["VddRR"][f1, f4, f3, f2],
'F'+ qqqq +'3p' : C["V1ddLR"][f3, f4, f1, f2]
- C["V8ddLR"][f3, f4, f1,f2] / (2 * Nc),
'F'+ qqqq +'4p' : C["V8ddLR"][f3, f4, f1, f2] / 2,
'F'+ qqqq +'5p' : C["S1ddRR"][f4, f3, f2, f1].conj() -
C["S8ddRR"][f4, f1, f2, f3].conj() / 4
-C["S8ddRR"][f4, f3, f2, f1].conj() / 2 / Nc,
'F'+ qqqq +'6p' : -C["S1ddRR"][f4, f1, f2, f3].conj() / 2 +
C["S8ddRR"][f4, f1, f2, f3].conj() / 4 / Nc
+ C["S8ddRR"][f4, f3, f2, f1].conj() / 2,
'F'+ qqqq +'7p' : -C["V8ddLR"][f3, f2, f1, f4],
'F'+ qqqq +'8p' : -2 * C["V1ddLR"][f3, f2, f1, f4]
+ C["V8ddLR"][f3, f2, f1, f4] / Nc,
'F'+ qqqq +'9p' : -C["S8ddRR"][f4, f1, f2, f3].conj() / 16,
'F'+ qqqq +'10p' : -C["S1ddRR"][f4, f1, f2, f3].conj() / 8 +
C["S8ddRR"][f4, f1, f2, f3].conj() / 16 / Nc
}
#case uuuu
classVuuuu = ['ucuu', 'cucc', 'cuuu', 'uccc']
if qqqq in classVuuuu:
f1 = uflav[qqqq[0]]
f2 = uflav[qqqq[1]]
f3 = uflav[qqqq[2]]
f4 = uflav[qqqq[3]]
return {
'F' + qqqq + '1' : C["VuuLL"][f3, f4, f1, f2],
'F' + qqqq + '2' : C["VuuLL"][f1, f4, f3, f2],
'F' + qqqq + '3' : C["V1uuLR"][f1, f2, f3, f4]
- C["V8uuLR"][f1, f2, f3, f4] / (2 * Nc),
'F' + qqqq + '4' : C["V8uuLR"][f1, f2, f3, f4] / 2,
'F' + qqqq + '5' : C["S1uuRR"][f3, f4, f1, f2]
- C["S8uuRR"][f3, f2, f1, f4] / 4
- C["S8uuRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '6' : -C["S1uuRR"][f1, f4, f3, f2] / 2
+ C["S8uuRR"][f3, f2, f1, f4] / (4 * Nc)
+ C["S8uuRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '7' : -C["V8uuLR"][f1, f4, f3, f2],
'F' + qqqq + '8' : -2 * C["V1uuLR"][f1, f4, f3, f2]
+ C["V8uuLR"][f1, f4, f3, f2] / Nc,
'F' + qqqq + '9' : -C["S8uuRR"][f3, f2, f1, f4] / 16,
'F' + qqqq + '10' : -C["S1uuRR"][f1, f4, f3, f2] / 8
+ C["S8uuRR"][f3, f2, f1, f4] / (16 * Nc),
'F'+ qqqq + '1p': C["VuuRR"][f3, f4, f1, f2],
'F' + qqqq + '2p': C["VuuRR"][f1, f3, f4, f2],
'F' + qqqq + '3p' : C["V1uuLR"][f3, f4, f1, f2]
- C["V8uuLR"][f3, f4, f1,f2] / (2 * Nc),
'F' + qqqq + '4p' : C["V8uuLR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '5p' : C["S1uuRR"][f4, f3, f2, f1].conj() -
C["S8uuRR"][f4, f1, f2, f3].conj() / 4 -
C["S8uuRR"][f4, f3, f2, f1].conj() / 2 / Nc,
'F' + qqqq + '6p' : -C["S1uuRR"][f4, f1, f2, f3].conj() / 2 +
C["S8uuRR"][f4, f1, f2, f3].conj() / 4 / Nc
+ C["S8uuRR"][f4, f3, f2, f1].conj() / 2,
'F' + qqqq + '7p' : -C["V8uuLR"][f3, f2, f1, f4],
'F' + qqqq + '8p' : -2 * C["V1uuLR"][f3, f2, f1, f4]
+ C["V8uuLR"][f3, f2, f1, f4] / Nc,
'F' + qqqq + '9p' : -C["S8uuRR"][f4, f1, f2, f3].conj() / 16,
'F' + qqqq + '10p' : -C["S1uuRR"][f4, f1, f2, f3].conj() / 8 +
C["S8uuRR"][f4, f1, f2, f3].conj() / 16 / Nc
}
else:
raise ValueError(f"Case not implemented: {qqqq}") | 5629a4e88996bd3ba30bd68cc8758b3d55abd093 | 3,650,270 |
from typing import Any
def get_parsed_args() -> Any:
"""Return Porcupine's arguments as returned by :func:`argparse.parse_args`."""
assert _parsed_args is not None
return _parsed_args | 9e4dc1eadc3c68d8a8e5a5a885fecf7c0ec89856 | 3,650,271 |
def get_license_description(license_code):
"""
Gets the description of the given license code. For example, license code '1002' results in 'Accessory Garage'
:param license_code: The license code
:return: The license description
"""
global _cached_license_desc
return _cached_license_desc[license_code] | 3de38be73d303036872b285c2dd7c0048ba5660f | 3,650,272 |
import pickle
import getpass
def db_keys_unlock(passphrase) -> bool:
"""Unlock secret key with pass phrase"""
global _secretkeyfile
try:
with open(_secretkeyfile, "rb") as f:
secretkey = pickle.load(f)
if not secretkey["locked"]:
print("Secret key file is already unlocked")
return True
if passphrase:
usepass = passphrase
else:
            usepass = getpass.getpass("Enter pass phrase: ")
print("")
if usepass:
if secretkey["hash"] == blake2b(str.encode(usepass)).hexdigest():
k = Fernet(password_to_key(usepass))
secretkey["secret"] = k.decrypt(str.encode(secretkey["secret"])).decode()
secretkey["locked"] = False
db_keys_set(secretkey, False)
else:
print("Pass phrase did not match, secret key remains locked")
return False
except Exception:
print("Error locking secret key content")
return False
print("Secret key successfully unlocked")
return True | 4d1af86143384ff6228ad086d92f797b7529c73e | 3,650,273 |
def list_domains():
"""
Return a list of the salt_id names of all available Vagrant VMs on
this host without regard to the path where they are defined.
CLI Example:
.. code-block:: bash
salt '*' vagrant.list_domains --log-level=info
The log shows information about all known Vagrant environments
on this machine. This data is cached and may not be completely
up-to-date.
"""
vms = []
cmd = 'vagrant global-status'
reply = __salt__['cmd.shell'](cmd)
log.info('--->\n%s', reply)
for line in reply.split('\n'): # build a list of the text reply
tokens = line.strip().split()
try:
_ = int(tokens[0], 16) # valid id numbers are hexadecimal
except (ValueError, IndexError):
continue # skip lines without valid id numbers
machine = tokens[1]
cwd = tokens[-1]
name = get_machine_id(machine, cwd)
if name:
vms.append(name)
return vms | af04859c5d6e0cd2edb3d3cec88ebebd777c93d6 | 3,650,274 |
def get_old_options(cli, image):
""" Returns Dockerfile values for CMD and Entrypoint
"""
return {
'cmd': dockerapi.inspect_config(cli, image, 'Cmd'),
'entrypoint': dockerapi.inspect_config(cli, image, 'Entrypoint'),
} | eed75800ae3afdc99fdcd5c0f5dc36504d5db96c | 3,650,275 |
def line_crops_and_labels(iam: IAM, split: str):
"""Load IAM line labels and regions, and load line image crops."""
crops = []
labels = []
for filename in iam.form_filenames:
if not iam.split_by_id[filename.stem] == split:
continue
image = util.read_image_pil(filename)
image = ImageOps.grayscale(image)
image = ImageOps.invert(image)
labels += iam.line_strings_by_id[filename.stem]
crops += [
image.crop([region[_] for _ in ["x1", "y1", "x2", "y2"]])
for region in iam.line_regions_by_id[filename.stem]
]
assert len(crops) == len(labels)
return crops, labels | f223ded3c2dc9254985ad450995f8dc598dc5411 | 3,650,276 |
def convert(chinese):
"""converts Chinese numbers to int
in: string
out: string
"""
numbers = {'零':0, '一':1, '二':2, '三':3, '四':4, '五':5, '六':6, '七':7, '八':8, '九':9, '壹':1, '贰':2, '叁':3, '肆':4, '伍':5, '陆':6, '柒':7, '捌':8, '玖':9, '两':2, '廿':20, '卅':30, '卌':40, '虚':50, '圆':60, '近':70, '枯':80, '无':90}
units = {'个':1, '十':10, '百':100, '千':1000, '万':10000, '亿':100000000,'万亿':1000000000000, '拾':10, '佰':100, '仟':1000}
number, pureNumber = 0, True
for i in range(len(chinese)):
if chinese[i] in units or chinese[i] in ['廿', '卅', '卌', '虚', '圆', '近', '枯', '无']:
pureNumber = False
break
if chinese[i] in numbers:
number = number * 10 + numbers[chinese[i]]
if pureNumber:
return number
number = 0
for i in range(len(chinese)):
if chinese[i] in numbers or chinese[i] == '十' and (i == 0 or chinese[i - 1] not in numbers or chinese[i - 1] == '零'):
base, currentUnit = 10 if chinese[i] == '十' and (i == 0 or chinese[i] == '十' and chinese[i - 1] not in numbers or chinese[i - 1] == '零') else numbers[chinese[i]], '个'
for j in range(i + 1, len(chinese)):
if chinese[j] in units:
if units[chinese[j]] >= units[currentUnit]:
base, currentUnit = base * units[chinese[j]], chinese[j]
number = number + base
return number | cf2ece895698e2d99fde815efa0339687eadda97 | 3,650,277 |
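Two illustrative conversions that exercise the unit-handling branch above:

convert('三十五')       # -> 35
convert('一百二十三')   # -> 123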
def computeZvector(idata, hue, control, features_to_eval):
"""
:param all_data: dataframe
:return:
"""
all_data = idata.copy()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
m_indexes = list(all_data[hue].unique().astype('str'))
query_one = ""
for el in control:
if el in m_indexes:
query_one = query_one + hue + "==\'" + str(el) + "\'|"
else:
break
query_one = query_one[:-1] # remove last character
df_q = all_data.query(query_one).copy()
eps = 1e-15
# Compute average for each feature, per each treatment
avg_vec = pd.DataFrame()
for el in m_indexes:
data_calc = all_data.query(hue + "==\'" + str(el) + "\'").copy()
for col in data_calc.select_dtypes(include=numerics):
if col in features_to_eval:
avg_vec.loc[el, col] = data_calc[col].mean()
# Compute length of vector
all_data.loc[:, 'length'] = 0
for feature in features_to_eval:
all_data['length'] = all_data['length'] + all_data[feature] ** 2
all_data['length'] = np.sqrt(all_data['length'])
# Compute cosine
# Dot product of each vector per each mean v*w
all_data.loc[:, 'cosine'] = 0
for el in m_indexes:
for feature in features_to_eval:
all_data.loc[all_data['Gene'] == el, 'cosine'] = all_data.loc[all_data['Gene'] == el, 'cosine'] + \
all_data[all_data['Gene'] == el][feature] * avg_vec.loc[
el, feature]
# Norm of avg_vec
v_avg_norm = np.sqrt(np.sum(avg_vec ** 2, axis=1))
for el in m_indexes:
all_data.loc[all_data['Gene'] == el, 'cosine'] = all_data.loc[all_data['Gene'] == el, 'cosine'] / (
all_data.loc[all_data['Gene'] == el, 'length'] * v_avg_norm[el])
all_data['projection'] = all_data['length'] * all_data['cosine']
return all_data | d80e6a9fe754cb8558598c72ab2f076fab329750 | 3,650,278 |
def getjflag(job):
"""Returns flag if job in finished state"""
return 1 if job['jobstatus'] in ('finished', 'failed', 'cancelled', 'closed') else 0 | bf0c0a85cb1af954d25f4350e55b9e3604cf7c79 | 3,650,279 |
import copy
def json_parse(ddict):
"""
https://github.com/arita37/mlmodels/blob/dev/mlmodels/dataset/test_json/test_functions.json
https://github.com/arita37/mlmodels/blob/dev/mlmodels/dataset/json/benchmark_timeseries/gluonts_m5.json
"deepar": {
"model_pars": {
"model_uri" : "model_gluon.gluonts_model",
"model_name" : "deepar",
"model_pars" : {
"prediction_length": 12,
"freq": "D",
"distr_output" : {"uri" : "gluonts.distribution.neg_binomial:NegativeBinomialOutput"},
"distr_output" : "uri::gluonts.distribution.neg_binomial:NegativeBinomialOutput",
"""
js = ddict
js2 = copy.deepcopy(js)
def parse2(d2):
if "uri" in d2:
# Be careful not to include heavy compute
return json_to_object(d2)
else:
return json_norm(d2)
for k, val in js.items():
if isinstance(val, dict):
js2[k] = parse2(val)
elif "uri::" in val: # Shortcut when nor argument
js2[k] = json_to_object({"uri": val.split("uri::")[-1]})
else:
js2[k] = json_norm_val(val)
return js2 | 30b037531ac129a2a597b146c935fe344566a547 | 3,650,280 |
def read_viz_icons(style='icomoon', fname='infinity.png'):
""" Read specific icon from specific style
Parameters
----------
style : str
Current icon style. Default is icomoon.
fname : str
Filename of icon. This should be found in folder HOME/.dipy/style/.
Default is infinity.png.
Returns
--------
path : str
Complete path of icon.
"""
folder = pjoin(dipy_home, 'icons', style)
return pjoin(folder, fname) | 8d97ebb450b94dce5c4feb3f631cd9deebcdb1c1 | 3,650,281 |
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="12345",
domain=DOMAIN,
data={CONF_API_KEY: "tskey-MOCK", CONF_SYSTEM_ID: 12345},
unique_id="12345",
) | cb2a5b8d7e84d1b825e79a9b4e3aebc8f8c60783 | 3,650,282 |
import datasets
def get_mnist_loader(batch_size, train, perm=0., Nparts=1, part=0, seed=0, taskid=0, pre_processed=True, **loader_kwargs):
"""Builds and returns Dataloader for MNIST and SVHN dataset."""
transform = transforms.Compose([
transforms.Grayscale(),
transforms.ToTensor(),
transforms.Normalize((0.0,), (1.0,)),
transforms.Lambda(lambda x: x.view([28,28]))])
dataset = datasets.MNIST(root='./data', download=True, transform=transform, train = train)
if perm>0:
permute_dataset(dataset, perm, seed=seed)
if Nparts>1:
partition_dataset(dataset, Nparts,part)
if pre_processed:
dataset = preprocess_dataset(dataset)
DL = DataLoaderPreProcessed
else:
DL = DataLoader
loader = DL(dataset=dataset,
batch_size=batch_size,
shuffle=train,
**loader_kwargs)
loader.taskid = taskid
loader.name = 'MNIST_{}'.format(taskid,part)
loader.short_name = 'MNIST'
return loader | 2181ffa0abd4f1357ec7cc8cdf52b0eb86a2d13c | 3,650,283 |
def read_data(filename):
"""
Reads orbital map file into a list
"""
data = []
f = open(filename, 'r')
for line in f:
data += line.strip().split('\n')
f.close()
return data | 7c2f5669735e39352b0b655425a70993baae32ef | 3,650,284 |
from typing import Union
def _form_factor_pipi(
self, s: Union[float, npt.NDArray[np.float64]], imode: int = 1
) -> Union[complex, npt.NDArray[np.complex128]]:
"""
Compute the pi-pi-V form factor.
Parameters
----------
s: Union[float,npt.NDArray[np.float64]
Square of the center-of-mass energy in MeV.
imode: Optional[int]
Iso-spin channel. Default is 1.
Returns
-------
ff: Union[complex,npt.NDArray[np.complex128]]
Form factor from pi-pi-V.
"""
return __ff_pipi(
s * 1e-6, # Convert to GeV
self._ff_pipi_params,
self._gvuu,
self._gvdd,
) | 92438400debb52bff6480791631e2b60043c758f | 3,650,285 |
import re
import click
def string_to_epoch(s):
"""
Convert argument string to epoch if possible
If argument looks like int + s,h,md (ie, 30d), we'll pass as-is
since pushshift can accept this. Per docs, pushshift supports:
Epoch value or Integer + "s,m,h,d" (i.e. 30d for 30 days)
:param s: str
:return: int | str
"""
if s is not None:
s = s.strip()
if re.search('^[0-9]+[smhd]$', s):
return s
try:
s = dp.parse(s).timestamp()
s = int(s)
except ValueError:
raise click.BadParameter("could not convert argument to "
"a datetime: {}".format(s))
return s | b45db1d589cbe71eec5f11d53d2851dee258da8f | 3,650,287 |
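Example behaviour (the epoch value depends on the local timezone used by dateutil):

string_to_epoch("30d")          # -> "30d", passed through as-is for pushshift
string_to_epoch("2020-01-01")   # -> e.g. 1577836800 when the local timezone is UTC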
from numpy import array
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
"""
intype = Iin.dtype.char
hcol = array([1.0,4.0,1.0],'f')/6.0
if intype in ['F','D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real,lmbda)
cki = cspline2d(Iin.imag,lmbda)
outr = sepfir2d(ckr,hcol,hcol)
outi = sepfir2d(cki,hcol,hcol)
out = (outr + 1j*outi).astype(intype)
elif intype in ['f','d']:
ckr = cspline2d(Iin,lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out | ee606bdceaf974671ccf36b9d498204613c12f51 | 3,650,288 |
def _construct_aline_collections(alines, dtix=None):
"""construct arbitrary line collections
Parameters
----------
alines : sequence
sequences of segments, which are sequences of lines,
which are sequences of two or more points ( date[time], price ) or (x,y)
date[time] may be (a) pandas.to_datetime parseable string,
(b) pandas Timestamp, or
(c) python datetime.datetime or datetime.date
alines may also be a dict, containing
the following keys:
'alines' : the same as defined above: sequence of price, or dates, or segments
'colors' : colors for the above alines
'linestyle' : line types for the above alines
'linewidths' : line types for the above alines
dtix: date index for the x-axis, used for converting the dates when
x-values are 'evenly spaced integers' (as when skipping non-trading days)
Returns
-------
ret : list
lines collections
"""
if alines is None:
return None
if isinstance(alines,dict):
aconfig = _process_kwargs(alines, _valid_lines_kwargs())
alines = aconfig['alines']
else:
aconfig = _process_kwargs({}, _valid_lines_kwargs())
#print('aconfig=',aconfig)
#print('alines=',alines)
alines = _alines_validator(alines, returnStandardizedValue=True)
if alines is None:
raise ValueError('Unable to standardize alines value: '+str(alines))
alines = _convert_segment_dates(alines,dtix)
lw = aconfig['linewidths']
co = aconfig['colors']
ls = aconfig['linestyle']
al = aconfig['alpha']
lcollection = LineCollection(alines,colors=co,linewidths=lw,linestyles=ls,antialiaseds=(0,),alpha=al)
return lcollection | 863fd8c6e8d0b1a39c5a79bca7a0eaa5b2204aea | 3,650,289 |
def is_mergeable(*ts_or_tsn):
"""Check if all objects(FermionTensor or FermionTensorNetwork)
are part of the same FermionSpace
"""
if isinstance(ts_or_tsn, (FermionTensor, FermionTensorNetwork)):
return True
fs_lst = []
site_lst = []
for obj in ts_or_tsn:
if isinstance(obj, FermionTensor):
if obj.fermion_owner is None:
return False
hashval, fsobj, tid = obj.fermion_owner
fs_lst.append(hashval)
site_lst.append(fsobj()[tid][1])
elif isinstance(obj, FermionTensorNetwork):
fs_lst.append(hash(obj.fermion_space))
site_lst.extend(obj.filled_sites)
else:
raise TypeError("unable to find fermionspace")
return all([fs==fs_lst[0] for fs in fs_lst]) and len(set(site_lst)) == len(site_lst) | de5f4fc47874e328bcdd078e2bdf8d6f53d6d4e6 | 3,650,290 |
def query_for_account(account_rec, region):
""" Performs the public ip query for the given account
    :param account_rec: Account record dict to query
    :param region: Region to query
    :return: updated ip_data list
"""
ip_data = []
session = boto3.session.Session(region_name=region)
assume = rolesession.assume_crossact_audit_role(
session, account_rec['accountNum'], region)
if assume:
for ip_addr in assume.client('ec2').describe_addresses()['Addresses']:
ip_data.append(
dict(PublicIP=(ip_addr.get('PublicIp')),
InstanceId=(ip_addr.get('InstanceId')), # Prevents a crash
PrivateIP=(ip_addr.get('PrivateIpAddress')),
NetworkInterface=(ip_addr.get('NetworkInterfaceId')),
AccountNum=account_rec['accountNum'],
AccountAlias=(account_rec['alias'])))
for instance in assume.resource('ec2').instances.filter():
if instance.public_ip_address:
ip_data.append(
dict(InstanceId=(instance.instance_id),
PublicIP=(instance.public_ip_address),
PrivateIP=(instance.private_ip_address),
AccountNum=account_rec['accountNum'],
AccountAlias=(account_rec['alias'])))
else:
pass
return ip_data | 61055939990175c6e2cb850ede7d448a261ccdff | 3,650,292 |
from typing import List
def filter_list_of_dicts(list_of_dicts: list, **filters) -> List[dict]:
"""Filter a list of dicts by any given key-value pair.
Support simple logical operators like: '<,>,<=,>=,!'. Supports
filtering by providing a list value i.e. openJobsCount=[0, 1, 2].
"""
for key, value in filters.items():
filter_function = make_dict_filter(key, value)
list_of_dicts = list(filter(filter_function, list_of_dicts))
return list_of_dicts | f926b7c478400d3804d048ced823003e48fd5ef1 | 3,650,293 |
def construct_pos_line(elem, coor, tags):
"""
Do the opposite of the parse_pos_line
"""
line = "{elem} {x:.10f} {y:.10f} {z:.10f} {tags}"
return line.format(elem=elem, x=coor[0], y=coor[1], z=coor[2], tags=tags) | 21ca509131c85a2c7bc24d00a28e7d4ea580a49a | 3,650,294 |
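For example (the element and tag string here are purely illustrative):

construct_pos_line('Si', (0.0, 0.5, 0.25), 'F F T')
# -> 'Si 0.0000000000 0.5000000000 0.2500000000 F F T'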
def compute_pcs(predicts, labels, label_mapper, dataset):
"""
compute correctly predicted full spans. If cues and scopes are predicted jointly, convert cue labels to I/O labels depending on the
annotation scheme for the considered dataset
:param predicts:
:param labels:
:return:
"""
def trim_and_convert(predict, label, label_mapper, dataset):
temp_1 = []
temp_2 = []
for j, m in enumerate(predict):
if label_mapper[label[j]] != 'X' and label_mapper[label[j]] != 'CLS' and label_mapper[label[j]] != 'SEP':
temp_1.append(label_mapper[label[j]])
temp_2.append(label_mapper[m])
if 'joint' in dataset:
if cue_in_scope[dataset] is True:
replacement= 'I'
else: replacement = 'O'
for j, m in enumerate(temp_1):
if m == 'C':
temp_1[j] = replacement
for j, m in enumerate(temp_2):
if m == 'C':
temp_2[j] = replacement
return temp_2, temp_1
tp = 0.
for predict, label in zip(predicts, labels):
predict, label = trim_and_convert(predict, label, label_mapper,dataset)
if predict == label:
tp += 1
return tp/len(predicts) | 5f046c1599617ad7620ea9a618f85f02dd93e28c | 3,650,295 |
def pentomino():
"""
Main pentomino routine
@return {string} solution as rectangles separated by a blank line
"""
return _stringify(
_pent_wrapper1(tree_main_builder())(rect_gen_boards())) | 07e448efdbfe5cb43ce943f33f24a7887878001f | 3,650,296 |
from pyramid.httpexceptions import HTTPForbidden
from pyramid.security import remember

def do_login(request, username, password):
""" Check credentials and log in """
if request.access.verify_user(username, password):
request.response.headers.extend(remember(request, username))
return {"next": request.app_url()}
else:
return HTTPForbidden() | f7c076c6f4a6ac51bf5a3ea39116166002ce1833 | 3,650,297 |
from tokenize import Token
import re
def _interpolate(format1):
"""
Takes a format1 string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
def matchorfail(text, pos):
tokenprog = re.compile(Token)
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format1.find("$", pos)
if dollar < 0:
break
nextchar = format1[dollar + 1]
if nextchar == "{":
chunks.append((0, format1[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format1, pos)
tstart, tend = match.regs[3]
token = format1[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format1[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format1[pos:dollar]))
match, pos = matchorfail(format1, dollar + 1)
while pos < len(format1):
if format1[pos] == "." and \
pos + 1 < len(format1) and format1[pos + 1] in namechars:
match, pos = matchorfail(format1, pos + 1)
elif format1[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format1, pos)
tstart, tend = match.regs[3]
token = format1[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format1[dollar + 1:pos]))
else:
chunks.append((0, format1[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format1):
chunks.append((0, format1[pos:]))
return chunks | 9af06b91f0ad2e15fd7479ac2e1dedc5443b6e34 | 3,650,298 |
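A small illustration of the chunk structure this recipe is expected to produce, assuming the tokenize module still exposes the Token pattern this code imports:

chunks = _interpolate("Hi $name, total ${x + 1}")
# expected: [(0, 'Hi '), (1, 'name'), (0, ', total '), (1, 'x + 1')]
print(chunks)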
def approxIndex(iterable, item, threshold):
"""Same as the python index() function but with a threshold from wich values are considerated equal."""
for i, iterableItem in rev_enumerate(iterable):
if abs(iterableItem - item) < threshold:
return i
return None | 45ec7b816674231a5efa8a559e9f9416a81987f5 | 3,650,299 |
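rev_enumerate is not defined in this entry; the sketch below assumes it enumerates the iterable in reverse while keeping the original indices, so approxIndex returns the last index whose value lies within the threshold.

def rev_enumerate(iterable):
    # assumed helper: enumerate from the end, preserving original indices
    items = list(iterable)
    for i in range(len(items) - 1, -1, -1):
        yield i, items[i]

print(approxIndex([1.0, 2.0, 2.04, 3.0], 2.0, 0.1))  # 2, the last value within 0.1 of 2.0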
import random
def delete_important_words(word_list, replace=''):
"""
    randomly delete an important word (one not in QUERY_SMALL_CHANGE_SETS) from the query, or replace it with `replace`
"""
# replace can be [MASK]
    important_word_list = list(set(word_list) - set(QUERY_SMALL_CHANGE_SETS))
target = random.sample(important_word_list, 1)[0]
if replace:
new_word_list = [item if item!=target else item.replace(target, replace) for item in word_list]
else:
new_word_list = [item for item in word_list if item!=target]
return new_word_list | 336518cb1c52f896fc9878e1c11b3f0e72c4f36a | 3,650,300 |
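A sketch of a call, assuming QUERY_SMALL_CHANGE_SETS is a module-level collection of low-content words (stopwords) that should never be targeted; because the target is sampled randomly, the output varies between runs.

QUERY_SMALL_CHANGE_SETS = {"the", "a", "of", "in"}  # assumed stopword set

words = ["the", "best", "pizza", "in", "rome"]
print(delete_important_words(words))                    # e.g. ['the', 'pizza', 'in', 'rome']
print(delete_important_words(words, replace="[MASK]"))  # e.g. ['the', 'best', '[MASK]', 'in', 'rome']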
import numpy as np
def prot(vsini, st_rad):
"""
Function to convert stellar rotation velocity vsini in km/s to rotation period in days.
Parameters:
----------
vsini: Rotation velocity of star in km/s.
st_rad: Stellar radius in units of solar radii
Returns
------
Prot: Period of rotation of the star in days.
"""
vsini=np.array(vsini)
prot=(2*np.pi*st_rad*rsun)/(vsini*24*60*60)
return prot | db2ef4648c5142a996e4a700aee0c7df0f02a394 | 3,650,301 |
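rsun is not defined in this entry; assuming it is the solar radius in kilometres, a Sun-like star (st_rad = 1) rotating at vsini = 2 km/s gives a period of roughly 25 days.

rsun = 6.957e5  # assumed: solar radius in km

print(prot(2.0, 1.0))  # ~25.3 days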
def dialog_sleep():
"""Return the time to sleep as set by the --exopy-sleep option.
"""
return DIALOG_SLEEP | cc40ffa09c83bd095f685b3b1d237545b8d7dd34 | 3,650,302 |
def required_overtime (db, user, frm) :
""" If required_overtime flag is set for overtime_period of dynamic
user record at frm, we return the overtime_period belonging to
this dyn user record. Otherwise return None.
"""
dyn = get_user_dynamic (db, user, frm)
if dyn and dyn.overtime_period :
otp = db.overtime_period.getnode (dyn.overtime_period)
if otp.required_overtime :
return otp
return None | 052e1289a0d7110100b3a1ea0ad90fa7bd000cce | 3,650,303 |
def get_best_fit_member(*args):
"""
get_best_fit_member(sptr, offset) -> member_t
Get member that is most likely referenced by the specified offset.
Useful for offsets > sizeof(struct).
@param sptr (C++: const struc_t *)
@param offset (C++: asize_t)
"""
return _ida_struct.get_best_fit_member(*args) | 7d4032d5cedb789d495e658eda939c36591f3506 | 3,650,304 |
def convert_time(time):
"""Convert given time to srt format."""
stime = '%(hours)02d:%(minutes)02d:%(seconds)02d,%(milliseconds)03d' % \
{'hours': time / 3600,
'minutes': (time % 3600) / 60,
'seconds': time % 60,
'milliseconds': (time % 1) * 1000}
return stime | 948e6567c8bc17ccb5f98cf8c8eaf8fe6e8d0bec | 3,650,305 |
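A quick check of the SRT formatting, assuming the input time is given in (possibly fractional) seconds:

print(convert_time(3661.5))  # 01:01:01,500
print(convert_time(59.25))   # 00:00:59,250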
def Returns1(target_bitrate, result):
"""Score function that returns a constant value."""
# pylint: disable=W0613
return 1.0 | 727e58e0d6d596cf4833ca3ca1cbcec6b9eedced | 3,650,306 |
import pytest

def test_abstract_guessing():
"""Test abstract guessing property."""
class _CustomPsychometric(DiscriminationMethod):
def psychometric_function(self, d):
return 0.5
with pytest.raises(TypeError, match="abstract method"):
_CustomPsychometric() | 996f6fb4d6b819e15fb3a931c0dc2a1f211e3d58 | 3,650,307 |
import re
def remove_repeats(msg):
"""
This function removes repeated characters from text.
:param/return msg: String
"""
# twitter specific repeats
msg = re.sub(r"(.)\1{2,}", r"\1\1\1", msg) # characters repeated 3 or more times
# laughs
msg = re.sub(r"(ja|Ja)(ja|Ja)+(j)?", r"jaja", msg) # spanish
msg = re.sub(r"(rs|Rs)(Rs|rs)+(r)?", r"rsrs", msg) # portugese
msg = re.sub(r"(ha|Ha)(Ha|ha)+(h)?", r"haha", msg) # english
return msg | 590ab42f74deaa9f8dc1eb9c8b11d81622db2e6d | 3,650,308 |
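Two examples covering the character-run rule and the laugh patterns:

print(remove_repeats("that was soooooo good"))  # 'that was sooo good'
print(remove_repeats("jajajajaja hahahaha"))    # 'jaja haha'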
def _legend_main_get(project, row):
"""
    builds the legend for the main series of the graph
    input
        project: the project tag of the selected project
            in the file XYplus_parameters.f_xml (in XYplus_main.py)
        row: the active row returned by select_master(), from which
            the graph title is extracted
    return
        a str with the legend of the main point of the graph
"""
legend_master = project.find('graph/legend_master').text.strip()
columns_master = project.findall('graph/legend_master/column')
if len(columns_master) == 0:
return legend_master
subs = [row[int(col1.text)-1] for col1 in columns_master]
return legend_master.format(*subs) | 3938d723bd44a67313b86f956464fd186ef25386 | 3,650,309 |
from collections import defaultdict
from itertools import chain

def ordinal_mapper(fh, coords, idmap, fmt=None, n=1000000, th=0.8,
prefix=False):
"""Read an alignment file and match reads and genes in an ordinal system.
Parameters
----------
fh : file handle
Alignment file to parse.
coords : dict of list
Gene coordinates table.
idmap : dict of list
Gene identifiers.
fmt : str, optional
Alignment file format.
n : int, optional
Number of lines per chunk.
th : float
Minimum threshold of overlap length : alignment length for a match.
prefix : bool
Prefix gene IDs with nucleotide IDs.
See Also
--------
align.plain_mapper
Yields
------
tuple of str
Query queue.
dict of set of str
Subject(s) queue.
"""
# determine file format
fmt, head = (fmt, []) if fmt else infer_align_format(fh)
# assign parser for given format
parser = assign_parser(fmt, ext=True)
# cached list of query Ids for reverse look-up
# gene Ids are unique, but read Ids can have duplicates (i.e., one read is
# mapped to multiple loci on a genome), therefore an incremental integer
    # here replaces the original read Id as its identifier
rids = []
rid_append = rids.append
# cached map of read to coordinates
locmap = defaultdict(list)
def flush():
"""Match reads in current chunk with genes from all nucleotides.
Returns
-------
tuple of str
Query queue.
dict of set of str
Subject(s) queue.
"""
# master read-to-gene(s) map
res = defaultdict(set)
# iterate over nucleotides
for nucl, locs in locmap.items():
# it's possible that no gene was annotated on the nucleotide
try:
glocs = coords[nucl]
except KeyError:
continue
# get reference to gene identifiers
gids = idmap[nucl]
# append prefix if needed
pfx = nucl + '_' if prefix else ''
# execute ordinal algorithm when reads are many
# 8 (5+ reads) is an empirically determined cutoff
if len(locs) > 8:
# merge and sort coordinates
# question is to add unsorted read coordinates into pre-sorted
# gene coordinates
# Python's Timsort algorithm is efficient for this task
queue = sorted(chain(glocs, locs))
# map reads to genes using the core algorithm
for read, gene in match_read_gene(queue):
# add read-gene pairs to the master map
res[rids[read]].add(pfx + gids[gene])
# execute naive algorithm when reads are few
else:
for read, gene in match_read_gene_quart(glocs, locs):
res[rids[read]].add(pfx + gids[gene])
# return matching read Ids and gene Ids
return res.keys(), res.values()
this = None # current query Id
target = n # target line number at end of current chunk
# parse alignment file
for i, row in enumerate(parser(chain(iter(head), fh))):
query, subject, _, length, beg, end = row[:6]
# skip if length is not available or zero
if not length:
continue
        # when query Id changes and the chunk limit has been reached
if query != this and i >= target:
# flush: match currently cached reads with genes and yield
yield flush()
# re-initiate read Ids, length map and location map
rids = []
rid_append = rids.append
locmap = defaultdict(list)
# next target line number
target = i + n
# append read Id, alignment length and location
idx = len(rids)
rid_append(query)
# effective length = length * th
# -int(-x // 1) is equivalent to math.ceil(x) but faster
# this value must be >= 1
locmap[subject].extend((
(beg << 48) + (-int(-length * th // 1) << 31) + idx,
(end << 48) + idx))
this = query
# final flush
yield flush() | 955c411e608fdb3cf55d0c52350b38061f87cd3a | 3,650,311 |
def file_lines(filename):
"""
>>> file_lines('test/foo.txt')
['foo', 'bar']
"""
return text_file(filename).split() | b121ba549606adeac244b063ff679192951c2ff8 | 3,650,312 |
def repr_should_be_defined(obj):
"""Checks the obj.__repr__() method is properly defined"""
obj_repr = repr(obj)
assert isinstance(obj_repr, str)
assert obj_repr == obj.__repr__()
assert obj_repr.startswith("<")
assert obj_repr.endswith(">")
return obj_repr | 28537f4f48b402a2eba290d8ece9b765eeb9fdc3 | 3,650,313 |
from flask import render_template

def indexName():
"""Index start page."""
return render_template('index.html') | 0340e708a82052a98e6e9e92bfde2eb04128d354 | 3,650,317 |
def translate_http_code():
"""Print given code
:return:
"""
return make_http_code_translation(app) | c9b501b57323aeb765be47af134dd2de1c1d084e | 3,650,318 |
import multiprocessing as mp
import warnings

def parmap(f, X, nprocs=1):
"""
parmap_fun() and parmap() are adapted from klaus se's post
on stackoverflow. https://stackoverflow.com/a/16071616/4638182
parmap allows map on lambda and class static functions.
Fall back to serial map when nprocs=1.
"""
if nprocs < 1:
raise ValueError("nprocs should be >= 1. nprocs: {}".format(nprocs))
nprocs = min(int(nprocs), mp.cpu_count())
# exception handling f
# simply ignore all exceptions. If exception occurs in parallel queue, the
# process with exception will get stuck and not be able to process
# following requests.
def ehf(x):
try:
res = f(x)
except Exception as e:
res = e
return res
# fall back on serial
if nprocs == 1:
return list(map(ehf, X))
q_in = mp.Queue(1)
q_out = mp.Queue()
proc = [mp.Process(target=_parmap_fun, args=(ehf, q_in, q_out))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
# maintain the order of X
ordered_res = [x for i, x in sorted(res)]
for i, x in enumerate(ordered_res):
if isinstance(x, Exception):
warnings.warn("{} encountered in parmap {}th arg {}".format(
x, i, X[i]))
return ordered_res | 66a498966979ca00c9a7eedfc1113a07b9076245 | 3,650,320 |
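_parmap_fun is referenced but not included in this entry; the sketch below assumes the standard worker from the referenced Stack Overflow recipe. Note the recipe passes a closure to each process, so it relies on a fork-based multiprocessing start method.

def _parmap_fun(f, q_in, q_out):
    # assumed worker: consume (index, item) pairs until the (None, None) sentinel arrives
    while True:
        i, x = q_in.get()
        if i is None:
            break
        q_out.put((i, f(x)))

if __name__ == "__main__":
    print(parmap(lambda x: x * x, range(5), nprocs=2))  # [0, 1, 4, 9, 16]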
def is_char_token(c: str) -> bool:
"""Return true for single character tokens."""
return c in ["+", "-", "*", "/", "(", ")"] | 3d5691c8c1b9a592987cdba6dd4809cf2c410ee8 | 3,650,321 |
import numpy
def _float_arr_to_int_arr(float_arr):
"""Try to cast array to int64. Return original array if data is not representable."""
int_arr = float_arr.astype(numpy.int64)
if numpy.any(int_arr != float_arr):
# we either have a float that is too large or NaN
return float_arr
else:
return int_arr | 73643757b84ec28ed721608a2176b292d6e90837 | 3,650,322 |
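Behaviour sketch: exactly representable values are down-cast to int64, anything lossy is returned unchanged.

print(_float_arr_to_int_arr(numpy.array([1.0, 2.0])))  # [1 2] as int64
print(_float_arr_to_int_arr(numpy.array([1.5, 2.0])))  # [1.5 2. ] unchanged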
def latest(scores: Scores) -> int:
"""The last added score."""
return scores[-1] | 393c1d9a4b1852318d622a58803fff3286db98af | 3,650,323 |
def get_dp_2m(wrfin, timeidx=0, method="cat", squeeze=True,
cache=None, meta=True, _key=None, units="degC"):
"""Return the 2m dewpoint temperature.
This functions extracts the necessary variables from the NetCDF file
object in order to perform the calculation.
Args:
wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
iterable): WRF-ARW NetCDF
data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile`
or an iterable sequence of the aforementioned types.
timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The
desired time index. This value can be a positive integer,
negative integer, or
:data:`wrf.ALL_TIMES` (an alias for None) to return
all times in the file or sequence. The default is 0.
method (:obj:`str`, optional): The aggregation method to use for
sequences. Must be either 'cat' or 'join'.
'cat' combines the data along the Time dimension.
'join' creates a new dimension for the file index.
The default is 'cat'.
squeeze (:obj:`bool`, optional): Set to False to prevent dimensions
with a size of 1 from being automatically removed from the shape
of the output. Default is True.
cache (:obj:`dict`, optional): A dictionary of (varname, ndarray)
that can be used to supply pre-extracted NetCDF variables to the
computational routines. It is primarily used for internal
purposes, but can also be used to improve performance by
eliminating the need to repeatedly extract the same variables
used in multiple diagnostics calculations, particularly when using
large sequences of files.
Default is None.
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is True.
_key (:obj:`int`, optional): A caching key. This is used for internal
purposes only. Default is None.
units (:obj:`str`): The desired units. Refer to the :meth:`getvar`
product table for a list of available units for 'td2'. Default
is 'degC'.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`: The
2m dewpoint temperature.
If xarray is enabled and the *meta* parameter is True, then the result
will be a :class:`xarray.DataArray` object. Otherwise, the result will
be a :class:`numpy.ndarray` object with no metadata.
"""
varnames=("PSFC", "Q2")
ncvars = extract_vars(wrfin, timeidx, varnames, method, squeeze, cache,
meta=False, _key=_key)
# Algorithm requires hPa
psfc = .01*(ncvars["PSFC"])
# Copy needed for the mmap nonsense of scipy.io.netcdf, which seems to
# break with every release
q2 = ncvars["Q2"].copy()
q2[q2 < 0] = 0
td = _td(psfc, q2)
return td | e16a5a3951312254eb852a5e03987aab32a91373 | 3,650,324 |
import numpy as np

def fit_uncertainty(points, lower_wave, upper_wave, log_center_wave, filter_size):
"""Performs fitting many times to get an estimate of the uncertainty
"""
mock_points = []
for i in range(1, 100):
# First, fit the points
coeff = np.polyfit(np.log10(points['rest_wavelength']),
np.random.normal(points['f_lambda'], points['err_f_lambda']), deg=2) # , w=(1/points['err_f_lambda'])
# Get the polynomial
fit_func = np.poly1d(coeff)
# x-range over which we fit
fit_wavelengths = np.arange(
np.log10(lower_wave), np.log10(upper_wave), 0.001)
# Values of the points we fit
fit_points = fit_func(fit_wavelengths)
# Indexes of the values that lie in the mock filter
fit_idx = np.logical_and(fit_wavelengths > (log_center_wave -
filter_size), fit_wavelengths < (log_center_wave + filter_size))
# Average the values in the mock filter to get the mock point
mock_sed_point = np.mean(fit_points[fit_idx])
mock_points.append(mock_sed_point)
    # take the median as the point estimate and the 15.7/84.3 percentiles as the 1-sigma errors
mock_sed_point, l_err, u_err = np.percentile(mock_points, [50, 15.7, 84.3])
return mock_sed_point, u_err - mock_sed_point, mock_sed_point - l_err | cca5193e55d7aeef710a08fb16df8c896bbeef90 | 3,650,325 |
def from_dateutil_rruleset(rruleset):
"""
Convert a `dateutil.rrule.rruleset` instance to a `Recurrence`
instance.
:Returns:
A `Recurrence` instance.
"""
rrules = [from_dateutil_rrule(rrule) for rrule in rruleset._rrule]
exrules = [from_dateutil_rrule(exrule) for exrule in rruleset._exrule]
rdates = rruleset._rdate
exdates = rruleset._exdate
dts = [r._dtstart for r in rruleset._rrule] + rruleset._rdate
if len(dts) > 0:
dts.sort()
dtstart = dts[0]
else:
dtstart = None
return Recurrence(dtstart, rrules, exrules, rdates, exdates) | cd5ab771eebbf6f68ce70a8d100ad071561541de | 3,650,326 |
import re
def error_038_italic_tag(text):
"""Fix the error and return (new_text, replacements_count) tuple."""
backup = text
(text, count) = re.subn(r"<(i|em)>([^\n<>]+)</\1>", "''\\2''", text, flags=re.I)
if re.search(r"</?(?:i|em)>", text, flags=re.I):
return (backup, 0)
else:
return (text, count) | b0c2b571ade01cd483a3ffdc6f5c2bbb873cd13c | 3,650,327 |
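Two quick cases: a clean replacement, and the fall-back to the original text when an unmatched tag remains.

print(error_038_italic_tag("some <i>emphasised</i> text"))  # ("some ''emphasised'' text", 1)
print(error_038_italic_tag("broken <i>markup"))             # ('broken <i>markup', 0)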