content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
def find_or_create_role(name, desc):
""" Find existing role or create new role """
role = Role.query.filter(Role.name == name).first()
if not role:
role = Role(name=name, desc=desc)
return role
return role | 414b960488d55ea6c2cc41121132f06f0d677abd | 4,183 |
def parse_nrrdvector(inp):
"""Parse a vector from a nrrd header, return a list."""
assert inp[0] == '(', "Vector should be enclosed by parenthesis."
assert inp[-1] == ')', "Vector should be enclosed by parenthesis."
return [_to_reproducible_float(x) for x in inp[1:-1].split(',')] | 3e3c793d3ee53198c4cdb01832062be4f0c02876 | 4,185 |
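A brief usage sketch for parse_nrrdvector; it assumes the project helper _to_reproducible_float behaves like plain float parsing (the helper itself is not shown in this snippet):
# assuming _to_reproducible_float(x) ~ float(x)
vec = parse_nrrdvector("(0.0,1.5,-2.25)")
print(vec)                    # [0.0, 1.5, -2.25]
parse_nrrdvector("0.0,1.5")   # AssertionError: Vector should be enclosed by parenthesis.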
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
"""Estimate the spherical variance values.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
variances : array, shape (n_components,)
The variance values of each component.
"""
return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1) | 6f08d04528f5e515d5ae75d4dc47753cc4cebc7b | 4,186 |
def map_min(process):
"""
"""
param_dict = {'ignore_nodata': 'bool'}
return map_default(process, 'min', 'reduce', param_dict) | 33dcc2192fd8b979e7238c1fdbe5e9bec551dd3f | 4,189 |
def geoname_exhaustive_search(request, searchstring):
"""
List all children of a geoname filtered by a list of featurecodes
"""
if request.query_params.get('fcode'):
fcodes = [ s.upper() for s in request.query_params.get('fcode').split(',')]
else:
fcodes = []
limit = request.query_params.get('limit') or 50
if request.method == 'GET':
geonames = Geoname.objects \
.filter(
Q(englishname__startswith=searchstring) |
Q(alternatenames__alternatename__startswith=searchstring,
alternatenames__iscolloquial=0
)
) \
.order_by('-population','-fcode__searchorder_detail').distinct()
if len(fcodes) > 0:
geonames = geonames.filter(fcode__code__in=fcodes)
if limit:
geonames = geonames[:limit]
serializer = GeonameSearchSerializer(geonames,many=True)
return JsonResponse(serializer.data, safe=False) | 5a04a158a146e7e0ad3265d89520774b65c3780a | 4,190 |
def count_reads(regions_list, params):
""" Count reads from bam within regions (counts position of cutsite to prevent double-counting) """
bam_f = params.bam
read_shift = params.read_shift
bam_obj = pysam.AlignmentFile(bam_f, "rb")
log_q = params.log_q
logger = TobiasLogger("", params.verbosity, log_q) #sending all logger calls to log_q
#Count per region
read_count = 0
logger.spam("Started counting region_chunk ({0} -> {1})".format("_".join([str(element) for element in regions_list[0]]), "_".join([str(element) for element in regions_list[-1]])))
for region in regions_list:
read_lst = ReadList().from_bam(bam_obj, region)
for read in read_lst:
read.get_cutsite(read_shift)
if read.cutsite > region.start and read.cutsite < region.end: #only reads within borders
read_count += 1
logger.spam("Finished counting region_chunk ({0} -> {1})".format("_".join([str(element) for element in regions_list[0]]), "_".join([str(element) for element in regions_list[-1]])))
bam_obj.close()
return(read_count) | ffd8cc6afc6c0b5b92d82292ab9d4a54ef918641 | 4,192 |
def rgbImage2grayVector(img):
""" Turns a row and column rgb image into a 1D grayscale vector """
gray = []
for row_index in range(0, len(img)):
for pixel_index, pixel in enumerate(img[row_index]):
gray.append(rgbPixel2grayscaleValue(pixel))
return gray | a93bbb2dfa29cb3d4013334226e77f6beb526a13 | 4,193 |
def compute_MSE(predicted, observed):
""" predicted is scalar and observed as array"""
if len(observed) == 0:
return 0
err = 0
for o in observed:
err += (predicted - o)**2/predicted
return err/len(observed) | e2cc326dde2ece551f78cd842d1bf44707bfb6db | 4,194 |
def log_sum(log_u):
"""Compute `log(sum(exp(log_u)))`"""
if len(log_u) == 0:
return NEG_INF
maxi = np.argmax(log_u)
max = log_u[maxi]
if max == NEG_INF:
return max
else:
exp = log_u - max
np.exp(exp, out = exp)
return np.log1p(np.sum(exp[:maxi]) + np.sum(exp[maxi + 1:])) + max | f2c7917bc806dc7ec3fbbb1404725f590a82e194 | 4,195 |
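A short demonstration of why log_sum factors out the maximum (the log-sum-exp trick); NEG_INF = -np.inf is assumed here, matching the guard in the function:
import numpy as np
NEG_INF = -np.inf                      # assumed module-level constant
log_u = np.array([1000.0, 1000.0, 999.0])
print(log_sum(log_u))                  # ~1000.862, computed without overflow
print(np.log(np.sum(np.exp(log_u))))   # inf: the naive formula overflows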
def special_value_sub(lhs, rhs):
""" Subtraction between special values or between special values and
numbers """
if is_nan(lhs):
return FP_QNaN(lhs.precision)
elif is_nan(rhs):
return FP_QNaN(rhs.precision)
elif (is_plus_infty(lhs) and is_plus_infty(rhs)) or \
(is_minus_infty(lhs) and is_minus_infty(rhs)):
return FP_QNaN(lhs.precision)
elif is_plus_infty(lhs) and is_minus_infty(rhs):
return lhs
elif is_minus_infty(lhs) and is_plus_infty(rhs):
return lhs
elif is_infty(lhs) and is_zero(rhs):
return lhs
elif is_infty(lhs):
# invalid inf - inf excluded previously
return lhs
elif is_infty(rhs):
return -rhs
else:
return lhs + (-rhs) | df64cf6c306c3192ba28d08e878add7ce0f27a2c | 4,197 |
def parse_git_repo(git_repo):
"""Parse a git repository URL.
git-clone(1) lists these as examples of supported URLs:
- ssh://[user@]host.xz[:port]/path/to/repo.git/
- git://host.xz[:port]/path/to/repo.git/
- http[s]://host.xz[:port]/path/to/repo.git/
- ftp[s]://host.xz[:port]/path/to/repo.git/
- rsync://host.xz/path/to/repo.git/
- [user@]host.xz:path/to/repo.git/
- ssh://[user@]host.xz[:port]/~[user]/path/to/repo.git/
- git://host.xz[:port]/~[user]/path/to/repo.git/
- [user@]host.xz:/~[user]/path/to/repo.git/
- /path/to/repo.git/
- file:///path/to/repo.git/
This function doesn't support the <transport>::<address> syntax, and it
doesn't understand insteadOf shortcuts from ~/.gitconfig.
"""
if '://' in git_repo:
return urlparse.urlparse(git_repo)
if ':' in git_repo:
netloc, colon, path = git_repo.partition(':')
return urlparse.ParseResult('ssh', netloc, path, '', '', '')
else:
return urlparse.ParseResult('file', '', git_repo, '', '', '') | 5eddf3aa9016996fb8aa1720b506c2f86b2e9c14 | 4,198 |
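A usage sketch for parse_git_repo, assuming urlparse is bound to Python 3's urllib.parse (e.g. from urllib import parse as urlparse); each branch of the function is exercised:
parse_git_repo("https://host.xz/path/to/repo.git")
# ParseResult(scheme='https', netloc='host.xz', path='/path/to/repo.git', ...)
parse_git_repo("git@host.xz:path/to/repo.git")
# ParseResult(scheme='ssh', netloc='git@host.xz', path='path/to/repo.git', ...)
parse_git_repo("/path/to/repo.git")
# ParseResult(scheme='file', netloc='', path='/path/to/repo.git', ...)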
def make_wavefunction_list(circuit, include_initial_wavefunction=True):
""" simulate the circuit, keeping track of the state vectors at ench step"""
wavefunctions = []
simulator = cirq.Simulator()
for i, step in enumerate(simulator.simulate_moment_steps(circuit)):
wavefunction_scrambled = step.state_vector()
wavefunction = unscramble_wavefunction(wavefunction_scrambled)
wavefunctions.append(wavefunction)
if include_initial_wavefunction:
initial_wavefunction = wavefunctions[0]*0 # create a blank vector
initial_wavefunction[0] = 1
wavefunctions = [initial_wavefunction]+wavefunctions
return wavefunctions | af33d4a7be58ccfa7737deb289cbf5d581246e86 | 4,199 |
def if_else(cond, a, b):
"""Work around Python 2.4
"""
if cond: return a
else: return b | 4b11328dd20fbb1ca663f272ac8feae15a8b26d9 | 4,200 |
def _update_machine_metadata(esh_driver, esh_machine, data={}):
"""
NOTE: This will NOT WORK for TAGS until openstack
allows JSONArrays as values for metadata!
"""
if not hasattr(esh_driver._connection, 'ex_set_image_metadata'):
logger.info(
"EshDriver %s does not have function 'ex_set_image_metadata'" %
esh_driver._connection.__class__
)
return {}
try:
# Possible metadata that could be in 'data'
# * application uuid
# * application name
# * specific machine version
# TAGS must be converted from list --> String
logger.info("New metadata:%s" % data)
meta_response = esh_driver._connection.ex_set_image_metadata(
esh_machine, data
)
esh_machine.invalidate_machine_cache(esh_driver.provider, esh_machine)
return meta_response
except Exception as e:
logger.exception("Error updating machine metadata")
if 'incapable of performing the request' in e.message:
return {}
else:
raise | 2733a809ac8ca7d092001d2ce86a9597ef7c8860 | 4,201 |
import datetime
def timedelta_to_time(data: pd.Series) -> pd.Series:
"""Convert ``datetime.timedelta`` data in a series ``datetime.time`` data.
Parameters
----------
data : :class:`~pandas.Series`
series with data as :class:`datetime.timedelta`
Returns
-------
:class:`~pandas.Series`
series with data converted into :class:`datetime.time`
"""
data_cpy = data.copy()
# ensure pd.Timedelta
data = data + pd.Timedelta("0h")
# convert to datetime
data = datetime.datetime.min + data.dt.to_pytimedelta()
# convert to time
data = [d.time() if d is not pd.NaT else None for d in data]
data = pd.Series(np.array(data), index=data_cpy.index, name=data_cpy.name)
return data | d660e7fea7e9e97ae388f3968d776d9d08930beb | 4,202 |
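A minimal usage sketch for timedelta_to_time, assuming pandas and numpy are imported as pd/np as the function body implies:
import numpy as np
import pandas as pd
s = pd.Series(pd.to_timedelta(["07:30:00", "23:15:30"]), name="bedtime")
print(timedelta_to_time(s))
# 0    07:30:00
# 1    23:15:30
# Name: bedtime, dtype: object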
def nms(boxes, scores, iou_thresh, max_output_size):
"""
Input:
boxes: (N,4,2) [x,y]
scores: (N)
Return:
selected_indices: indices of the kept boxes (at most max_output_size)
"""
box_num = len(boxes)
output_size = min(max_output_size, box_num)
sorted_indices = sorted(range(len(scores)), key=lambda k: -scores[k])
selected = []
for i in range(box_num):
if len(selected) >= output_size:
break
should_select = True
for j in range(len(selected) - 1, -1, -1):
if (
polygon_iou(boxes[sorted_indices[i]], boxes[selected[j]])[0]
> iou_thresh
):
should_select = False
break
if should_select:
selected.append(sorted_indices[i])
return np.array(selected, dtype=np.int32) | c6fe28e44d4e4375af39c22edd440e8cdff44917 | 4,203 |
def get_db():
"""Connect to the application's configured database. The connection
is unique for each request and will be reused if this is called
again
"""
if 'db' not in g:
g.db = pymysql.connect(
host='localhost',
port=3306,
user='root',
password='',
database='qm',
charset='utf8'
)
return g.db | 8a8ce077f98e1e6927c6578e7eff82e183cea829 | 4,204 |
def read_stb(library, session):
"""Reads a status byte of the service request.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:return: Service request status byte.
"""
status = ViUInt16()
library.viReadSTB(session, byref(status))
return status.value | 96d74ae6909371f2349cc875bd5b893d41d550f8 | 4,205 |
def one_particle_quasilocal(sp, chli, chlo, Es=None):
"""
Calculate the one-particle irreducible T-matrix T(1).
Parameters
----------
sp : Setup
Setup object describing the setup
chli : int
Input channel
chlo : int
Output channel
Es : ndarray
List of particle energies
"""
# guess a suitable range of energies to probe if none was given
if Es is None:
    maxt = np.max(np.abs(sp.model.links)) + np.max(np.abs(sp.model.omegas)) + 1
    Es = np.linspace(-maxt, maxt, 1000)
T1 = np.zeros(Es.shape, dtype=np.complex128)
for i, E in enumerate(Es):
# single particle eigenenergies
E1, _, _ = sp.eigenbasis(1, E)
# numerators
num1 = sp.transition(0, chli, 1, E)
num2 = sp.transition(1, chlo, 0, E)
# initialize the matrix
# num = sp.gs[chli] * sp.gs[chlo] * num2.T * num1
num = num2.T * num1
for k in range(len(E1)):
T1[i] += num[k] / (E - E1[k])
return Es, T1 | 089204f48c9decb3bf58e909ec46e7ffb844ebf1 | 4,207 |
from typing import List
def get_spilled_samples(spills: List, train_dataset: Dataset):
"""
Returns the actual data that was spilled. Notice that it
returns everything that the __getitem__ returns ie. data and labels
and potentially other stuff. This is done to be more
general, not just work with datasets that return: (data, label),
but also for datasets with (data, label, third_thing) or similar.
Notice that the function only takes in one dataset but spill
is a tuple with indexes for two datasets (the other is ignored).
:param spills:
:param train_dataset:
:return: spilled_samples:
"""
spilled_samples = []
for spill in spills:
spill_inx = spill[0]
spilled_samples.append(train_dataset.__getitem__(spill_inx))
return spilled_samples | 04414e59ed43b6068188bc3bb0042c4278bd3124 | 4,209 |
from typing import List
from pathlib import Path
import logging
def reshard(
inputs: List[Path],
output: Path,
tmp: Path = None,
free_original: bool = False,
rm_original: bool = False,
) -> Path:
"""Read the given files and concatenate them to the output file.
Can remove original files on completion, or just write dummy content into them to free disk.
"""
if tmp is None:
tmp = _get_tmp(output)
logging.info(f"Resharding {inputs} to {tmp}, will move later to {output}")
jsonql.run_pipes(file=inputs, output=tmp)
tmp.replace(output)
tmp_index = get_index(tmp)
if tmp_index.exists():
tmp_index.replace(get_index(output))
if not (free_original or rm_original):
return output
for _input in inputs:
if rm_original:
_input.unlink()
elif free_original:
# Overwrite the previous file.
# This frees up disk space and allows doit to properly track the success.
_input.write_text(f"Resharded into {output}")
if get_index(_input).is_file():
get_index(_input).unlink()
return output | 8c90c2d0e42d4e6d1bbb59d541284450d59f0cd1 | 4,210 |
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "StartIntent":
'''if "attributes" in session.keys():
return answer_question(intent,session)
'''
return start_feedback(intent, session)
elif intent_name == "AnswerIntent":
return answer_question(intent, session)
elif intent_name == "AMAZON.ResumeIntent":
return resume_feedback(intent, session)
elif intent_name == "AMAZON.PauseIntent":
return pause_feedback(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request(session)
else:
raise ValueError("Invalid intent") | 040c0d80802503768969859a9fe6ccef8c9fdf06 | 4,211 |
def load_twitter(path, shuffle=True, rnd=1):
"""
load text files from twitter data
:param path: path of the root directory of the data
:param shuffle:
:param rnd: random seed value
:return: :raise ValueError:
"""
data = bunch.Bunch()
data = convert_tweet_2_data(path, rnd)
data = minimum_size_sraa(data)
if shuffle:
random_state = np.random.RandomState(rnd)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst
return data | 863f39faef6f4175dc18d076c3c9601d09331523 | 4,212 |
def parse_vtables(f):
"""
Parse a given file f and constructs or extend the vtable function dicts of the module specified in f.
:param f: file containing a description of the vtables in a module (*_vtables.txt file)
:return: the object representing the module specified in f
"""
marx_module = Module(f.readline().strip())
for line in f:
tokens = line.split()
vtable = marx_module.vtables[int(tokens.pop(0), 16)]
vtable.offset_to_top = int(tokens.pop(0))
index = 0
for target_address in tokens:
if index not in vtable.functions:
vtable.functions[index] = Addressable(int(target_address, 16), marx_module)
index += 1
return marx_module | 01786447982c15b55ee1411afe71990a1752dec3 | 4,213 |
import datasets
def getCifar10Dataset(root, isTrain=True):
"""Cifar-10 Dataset"""
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
if isTrain:
trans = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
])
else:
trans = transforms.Compose([
transforms.ToTensor(),
normalize,
])
return datasets.CIFAR10(root=root, train=isTrain, transform=trans, download=isTrain) | b5c103838e4c46c445e39526b61bb9c8fa3832ee | 4,214 |
import pyclesperanto_prototype as cle
def histogram(layer, num_bins : int = 256, minimum = None, maximum = None, use_cle=True):
"""
This function determines a histogram for a layer and caches it within the metadata of the layer. If the same
histogram is requested, it will be taken from the cache.
:return:
"""
if "bc_histogram_num_bins" in layer.metadata.keys() and "bc_histogram" in layer.metadata.keys():
if num_bins == layer.metadata["bc_histogram_num_bins"]:
return layer.metadata["bc_histogram"]
data = layer.data
if "dask" in str(type(data)): # ugh
data = np.asarray(data)
intensity_range = None
if minimum is not None and maximum is not None:
intensity_range = (minimum, maximum)
if use_cle:
try:
hist = np.asarray(cle.histogram(data, num_bins=num_bins, minimum_intensity=minimum, maximum_intensity=maximum, determine_min_max=False))
except ImportError:
use_cle = False
if not use_cle:
hist, _ = np.histogram(data, bins=num_bins, range=intensity_range)
# cache result
if hasattr(layer.data, "bc_histogram_num_bins") and hasattr(layer.data, "bc_histogram"):
if num_bins == layer.data.bc_histogram_num_bins:
return layer.data.bc_histogram
# delete cache when data is changed
def _refresh_data(event):
reset_histogram_cache(layer)
layer.events.data.disconnect(_refresh_data)
layer.events.data.connect(_refresh_data)
layer.metadata["bc_histogram_num_bins"] = num_bins
layer.metadata["bc_histogram"] = hist
return hist | 65197010bfaec66a58798772e61f4e1640b7ea89 | 4,215 |
import re
def get_tbl_type(num_tbl, num_cols, len_tr, content_tbl):
"""
obtain table type based on table features
"""
count_very_common = len([i for i, x in enumerate(content_tbl) if re.match(r'^very common',x) ])
count_common = len([i for i, x in enumerate(content_tbl) if re.match(r'^common',x) ])
count_uncommon = len([i for i, x in enumerate(content_tbl) if re.match(r'^uncommon',x) ])
count_rare = len([i for i, x in enumerate(content_tbl) if re.match(r'^rare',x) ])
count_very_rare = len([i for i, x in enumerate(content_tbl) if re.match(r'^very rare',x) ])
count_unknown = len([i for i, x in enumerate(content_tbl) if "known" in x])
count_feats = [count_very_common,count_common,count_uncommon,count_rare,count_very_rare,count_unknown]
if num_cols>3 and sum(count_feats) > num_cols+5:
tbl_type = 'table type: vertical'
elif ((all(i <2 for i in count_feats) and num_tbl<=5) or num_cols>4) and len_tr>2:
tbl_type = 'table type: horizontal'
else:
tbl_type = 'table type: vertical'
return tbl_type | 19c06766c932aab4385fa8b7b8cd3c56a2294c65 | 4,216 |
def decode_EAN13(codes):
"""
Return a string of digits decoded from the 0/1 string stored in codes, using the EAN-13 scheme.
Return an empty string if any of the following occurs (usually caused by the barcode reader misreading the 0s and 1s):
codes holds the wrong number of bits, or its pattern does not match the specification
some part of the code cannot be converted to a digit
the 13th digit that was read does not match the computed check digit
Note: the user may have scanned the barcode upside down; this function must handle that case as well.
Doctest :
>>> c = '10100100110011001010011100110110010111001001101010111001010111001001110110011011101001101100101'
>>> decode_EAN13(c)
'3210292045192'
>>> c = '10111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111101'
>>> decode_EAN13(c)
''
"""
result = ''
try:
if len(codes) != 95:
return ''
else:
number_group1 = digits_of(codes[3:45])
code_group1 = digits_from(codes[3:45])
number_group2 = digits_of(codes[50:-3])
code_group2 = digits_from(codes[50:-3])
if code_group2 == 'RRRRRR':
result = str(return_group1(code_group1)) + number_group1 + number_group2
elif code_group1 == 'RRRRRR':
result = str(return_group1(code_group1)) + number_group1 + number_group2
else:
# Support the case where the barcode was scanned upside down
reverse_codes = codes[::-1]
number_group1 = digits_of(reverse_codes[3:45])
code_group1 = digits_from(reverse_codes[3:45])
number_group2 = digits_of(reverse_codes[50:-3])
code_group2 = digits_from(reverse_codes[50:-3])
if code_group2 == 'RRRRRR':
result = str(return_group1(code_group1)) + number_group1 + number_group2
elif code_group1 == 'RRRRRR':
result = str(return_group1(code_group1)) + number_group1 + number_group2
return result
except:
return '' | 2f65b488c61cbafae0c441d68f01e261141569dd | 4,217 |
def GiveNewT2C(Hc, T2C):
""" The main routine, which computes T2C that diagonalized Hc, and is close to the previous
T2C transformation, if possible, and it makes the new orbitals Y_i = \sum_m T2C[i,m] Y_{lm} real,
if possible.
"""
ee = linalg.eigh(Hc)
Es = ee[0]
Us0= ee[1]
Us = matrix(ee[1])
#print 'In Eigensystem:'
#mprint(Us.H * Hc * Us)
# Us.H * Hc * Us === diagonal
print 'Eigenvalues=', Es.tolist()
print 'Starting with transformation in crystal harmonics='
mprint(Us)
print
# Finds if there are any degeneracies in eigenvalues.
deg = FindDegeneracies(Es)
print 'deg=', deg
for ig in deg:
if len(ig)>1:
# Two or more states are degenerate, we transform them with a unitary transformation,
# so that they are close to previous set of eigenvectors.
# This is not necessary, but convenient to keep the character similar to previous iteration. This is useful
# in particular when H has small off-diagonal elements, which we would like to eliminate, and we call this
# routine iteratively
Us = TransformToSimilar(ig, Us, Es)
print 'Next, the transformation in crystal harmonics='
mprint(Us)
print
final = array( Us.T*T2C )
print 'And the same transformation in spheric harmonics='
mprint( final )
# Here we will try to make the transformation real, so that ctqmc will have minimal sign problem even when Full is used.
for ig in deg:
final = TransformToReal(final, ig, Es)
# finally checking if all transformations are real
for ig in deg:
i0 = ig[0]
i2 = ig[-1]+1
#print 'Checking the set of orbitals:', Es[i0:i2]
UtU = ComputeUtU(final[i0:i2,:], ig)
if allclose( UtU, identity(len(ig)), rtol=1e-04, atol=1e-04 ):
print ':SUCCESS For orbital', ig, 'the final transformation is real'
else:
print """:WARNING: The set of rbitals """, ig, """ could not be made purely real. You should use only Coulomb='Ising' and avoid Coulomb='Full' """
print 'UtU=',
mprint(UtU)
print
return final | d4ba72e57079ab18f7e29b9d1b2ce68f2c112fb4 | 4,218 |
import unicodedata
def normalize(form, text):
"""Return the normal form form for the Unicode string unistr.
Valid values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'.
"""
return unicodedata.normalize(form, text) | 6d32604a951bb13ff649fd3e221c2e9b35d4f1a1 | 4,219 |
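A quick example of what the wrapper does: NFC composes characters while NFD decomposes them.
composed = normalize("NFC", "e\u0301")    # 'e' + combining accent -> single 'é'
decomposed = normalize("NFD", "\u00e9")   # single 'é' -> 'e' + combining accent
print(len(composed), len(decomposed))     # 1 2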
def handle_internal_validation_error(error):
"""
Error handler to use when a InternalValidationError is raised.
Alert message can be modified here as needed.
:param error: The error that is handled.
:return: an error view
"""
alert_message = format_alert_message(error.__class__.__name__, str(error))
return _handle_error(alert_message) | 996552cf26b01f5a4fd435a604fe1428669804bd | 4,220 |
def bbox_encode(bboxes, targets):
"""
:param bboxes: bboxes
:param targets: target ground truth boxes
:return: deltas
"""
bw = bboxes[:, 2] - bboxes[:, 0] + 1.0
bh = bboxes[:, 3] - bboxes[:, 1] + 1.0
bx = bboxes[:, 0] + 0.5 * bw
by = bboxes[:, 1] + 0.5 * bh
tw = targets[:, 2] - targets[:, 0] + 1.0
th = targets[:, 3] - targets[:, 1] + 1.0
tx = targets[:, 0] + 0.5 * tw
ty = targets[:, 1] + 0.5 * th
dx = (tx - bx) / bw
dy = (ty - by) / bh
dw = np.log(tw / bw)
dh = np.log(th / bh)
deltas = np.vstack((dx, dy, dw, dh)).transpose()
return deltas | 547c098963b9932aa518774e32ca4e42301aa564 | 4,221 |
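A small worked example for bbox_encode: a 100x100 box shifted by 10 px in each direction against an identically sized target gives centre offsets of 0.1 box widths and zero log size ratios.
import numpy as np
boxes = np.array([[0.0, 0.0, 99.0, 99.0]])
targets = np.array([[10.0, 10.0, 109.0, 109.0]])
print(bbox_encode(boxes, targets))   # [[0.1 0.1 0.  0. ]]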
def cipher(text: str, key: str, charset: str = DEFAULT_CHARSET) -> str:
""" Cipher given text using Vigenere method.
Be aware that different languages use different charsets. Default charset
is for english language, if you are using any other you should use a proper
dataset. For instance, if you are ciphering an spanish text, you should use
a charset with "ñ" character.
This module uses only lowercase charsets. That means that caps will be kept
but lowercase and uppercase will follow the same substitutions.
:param text: Text to be ciphered.
:param key: Secret key. Both ends should know this and
use the same one. The longer key you use the harder to break ciphered text.
:param charset: Charset used for Vigenere method. Both ends, ciphering
and deciphering, should use the same charset or original text won't be properly
recovered.
:return: Ciphered text.
"""
ciphered_text = _vigenere_offset(text, key, Vigenere.CIPHER, charset)
return ciphered_text | ef6ac915f0daf063efbc587b66de014af48fbec6 | 4,222 |
def product_delete(product_id):
"""
Delete product from database
"""
product_name = product_get_name(product_id)
res = False
# Delete product from database
if product_name:
mongo.db.products.delete_one({"_id": (ObjectId(product_id))})
flash(
product_name +
" succesfully deleted from products", "success")
res = True
return res | 600a4afa9aab54473cdeb423e12b779672f38186 | 4,223 |
def getBoxFolderPathName(annotationDict, newWidth, newHeight):
"""
getBoxFolderPathName returns the folder name which contains the
resized image files for an original image file.
Given image 'n02085620_7', you can find the resized images at:
'F:/dogs/images/n02085620-Chihuahua/boxes_64_64/'
input:
annotationDict: dictionary, contains filename
newWidth: int, the new width for the image
newHeight: int, the new height for the image
output:
returns a string, the folder path for the resized images
"""
folderName = getImageFolderPathName(annotationDict)
boxFolder = BOX_FOLDER + str(newWidth) + '_' + str(newHeight)
return IMAGE_PATH + folderName + '/' + boxFolder + '/' | 75d5470373e4c119e7bb8bd327f6d83884680a9d | 4,225 |
def _download_artifact_from_uri(artifact_uri, output_path=None):
"""
:param artifact_uri: The *absolute* URI of the artifact to download.
:param output_path: The local filesystem path to which to download the artifact. If unspecified,
a local output path will be created.
"""
store = _get_store(artifact_uri=artifact_uri)
artifact_path_module =\
get_artifact_repository(artifact_uri, store).get_path_module()
artifact_src_dir = artifact_path_module.dirname(artifact_uri)
artifact_src_relative_path = artifact_path_module.basename(artifact_uri)
artifact_repo = get_artifact_repository(
artifact_uri=artifact_src_dir, store=store)
return artifact_repo.download_artifacts(
artifact_path=artifact_src_relative_path, dst_path=output_path) | 219c4be3cd41741e6e93a1ca46696284c9ae1b80 | 4,226 |
import json
def api_get_project_members(request, key=None, hproPk=True):
"""Return the list of project members"""
if not check_api_key(request, key, hproPk):
return HttpResponseForbidden
if settings.PIAPI_STANDALONE:
if not settings.PIAPI_REALUSERS:
users = [generate_user(pk="-1"), generate_user(pk="-2"), generate_user(pk="-3")]
else:
users = DUser.objects.all()
else:
(_, _, hproject) = getPlugItObject(hproPk)
users = []
for u in hproject.getMembers():
u.ebuio_member = True
u.ebuio_admin = hproject.isMemberWrite(u)
u.subscription_labels = _get_subscription_labels(u, hproject)
users.append(u)
liste = []
for u in users:
retour = {}
for prop in settings.PIAPI_USERDATA:
if hasattr(u, prop):
retour[prop] = getattr(u, prop)
retour['id'] = str(retour['pk'])
liste.append(retour)
return HttpResponse(json.dumps({'members': liste}), content_type="application/json") | 653a60fccf7e81231790e4d00a1887c660f5ad4f | 4,227 |
import re
def clean_weight(v):
"""Clean the weight variable
Args:
v (pd.Series): Series containing all weight values
Returns:
v (pd.Series): Series containing all cleaned weight values
"""
# Filter out erroneous non-float values
indices = v.astype(str).apply(
lambda x: not re.match(reg_exps['re_lab_vals'], x))
v.loc[indices] = None
# Convert values to float
v = v.astype(float)
# Sometimes the value is given in grams -- convert to kg
indices_g = v > 100
v.loc[indices_g] = v[indices_g].apply(lambda x: x / 1000)
return v | 7bcfb05fdb765ad0cc7a6b3af48f8b6cfa2f709b | 4,230 |
def get_yield(category):
"""
Get the primitive yield node of a syntactic category.
"""
if isinstance(category, PrimitiveCategory):
return category
elif isinstance(category, FunctionalCategory):
return get_yield(category.res())
else:
raise ValueError("unknown category type with instance %r" % category) | 54fbbc67ea70b7b194f6abd2fd1286f7cd27b1f2 | 4,231 |
def box(type_):
"""Create a non-iterable box type for an object.
Parameters
----------
type_ : type
The type to create a box for.
Returns
-------
box : type
A type to box values of type ``type_``.
"""
class c(object):
__slots__ = 'value',
def __init__(self, value):
if not isinstance(value, type_):
raise TypeError(
"values must be of type '%s' (received '%s')" % (
type_.__name__, type(value).__name__,
),
)
self.value = value
c.__name__ = 'Boxed' + type_.__name__
return c | 5b4721ab78ce17f9eeb93abcc59bed046917d296 | 4,232 |
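Usage of the generated box type; constructing it with the wrong value type raises immediately:
IntBox = box(int)
b = IntBox(3)
print(b.value)   # 3
IntBox("3")      # TypeError: values must be of type 'int' (received 'str')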
def rssError(yArr, yHatArr):
"""
Desc:
Compute the size of the prediction error.
Args:
yArr: the true target values
yHatArr: the predicted estimates
Returns:
The sum of squared differences between the true values and the estimates.
"""
return ((yArr - yHatArr) ** 2).sum() | 429dd6b20c20e6559ce7d4e57deaea58a664d22b | 4,233 |
def initialize_vocabulary(vocabulary_file):
"""
Initialize vocabulary from file.
:param vocabulary_file: file containing vocabulary.
:return: vocabulary and reversed vocabulary
"""
if gfile.Exists(vocabulary_file):
rev_vocab = []
with gfile.GFile(vocabulary_file, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s doesn't exist.", vocabulary_file) | fcfbed41c5d1e34fcdcc998f52301bff0e1d5dd8 | 4,234 |
def mock_create_draft(mocker):
"""Mock the createDraft OpenAPI.
Arguments:
mocker: The mocker fixture.
Returns:
The patched mocker and response data.
"""
response_data = {"draftNumber": 1}
return (
mocker.patch(
f"{gas.__name__}.Client.open_api_do", return_value=mock_response(data=response_data)
),
response_data,
) | 43f5fdc7991eefbf965662ccd0fadaa5015c2e00 | 4,235 |
from tqdm import tqdm
def to_csv(data_path="data"):
"""Transform data and save as CSV.
Args:
data_path (str, optional): Path to dir holding JSON dumps. Defaults to "data".
save_path (str, optional): Path to save transformed CSV. Defaults to "data_transformed.csv".
"""
elements = []
for data in tqdm(list_data_dir(data_path)):
try:
data = load_json(data)
add_gw_and_download_time(
data["elements"], data["download_time"], get_game_week(data["events"])
)
add_unique_id(data["elements"])
elements.extend(data["elements"])
# Add transformations here
except TypeError:
print(f"Something is wrong in {data}")
return pd.DataFrame(elements) | a6a28c4d2e1e1dfad9b7286e49289d012de0a9b4 | 4,236 |
def get_language_file_path(language):
"""
:param language: string
:return: string: path to where the language file lies
"""
return "{lang}/localization_{lang}.json".format(lang=language) | 9be9ee9511e0c82772ab73d17f689c181d63e67c | 4,238 |
def evaluate(expn):
"""
Evaluate a simple mathematical expression.
@rtype: C{Decimal}
"""
try:
result, err = CalcGrammar(expn).apply('expn')
return result
except ParseError:
raise SyntaxError(u'Could not evaluate the provided mathematical expression') | 94518c3a225f62540e045336f230a5fb2d99dcaf | 4,242 |
from typing import Optional
def get_user(is_external: Optional[bool] = None,
name: Optional[str] = None,
username: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUserResult:
"""
Use this data source to retrieve information about a Rancher v2 user
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
foo = rancher2.get_user(username="foo")
```
:param bool is_external: Set to `true` if the user is external. Default: `false` (bool)
:param str name: The name of the user (string)
:param str username: The username of the user (string)
"""
__args__ = dict()
__args__['isExternal'] = is_external
__args__['name'] = name
__args__['username'] = username
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('rancher2:index/getUser:getUser', __args__, opts=opts, typ=GetUserResult).value
return AwaitableGetUserResult(
annotations=__ret__.annotations,
enabled=__ret__.enabled,
id=__ret__.id,
is_external=__ret__.is_external,
labels=__ret__.labels,
name=__ret__.name,
principal_ids=__ret__.principal_ids,
username=__ret__.username) | c15a13e6a0633bc46fd1bdc8a3d914bfbd1d6b34 | 4,244 |
import random
def randbytes(size) -> bytes:
"""Custom implementation of random.randbytes, since that's a Python 3.9 feature """
# sample with replacement over the full 0-255 range; random.sample would never repeat a byte and excludes 255
return bytes(random.choices(range(256), k=size)) | 0ea312376de3f90894befb29ec99d86cfc861910 | 4,245 |
def path_to_model(path):
"""Return model name from path."""
epoch = str(path).split("phase")[-1]
model = str(path).split("_dir/")[0].split("/")[-1]
return f"{model}_epoch{epoch}" | 78b10c8fb6f9821e6be6564738d40f822e675cb6 | 4,246 |
def _cpp_het_stat(amplitude_distribution, t_stop, rates, t_start=0.*pq.ms):
"""
Generate a Compound Poisson Process (CPP) with amplitude distribution
A and heterogeneous firing rates r=r[0], r[1], ..., r[-1].
Parameters
----------
amplitude_distribution : np.ndarray
CPP's amplitude distribution :math:`A`. `A[j]` represents the
probability of a synchronous event of size `j` among the generated
spike trains. The sum over all entries of :math:`A` must be equal to
one.
t_stop : pq.Quantity
The end time of the output spike trains
rates : pq.Quantity
Array of firing rates of each spike train generated with
t_start : pq.Quantity, optional
The start time of the output spike trains
Default: 0 pq.ms
Returns
-------
list of neo.SpikeTrain
List of neo.SpikeTrains with different firing rates, forming
a CPP with amplitude distribution `A`.
"""
# Computation of Parameters of the two CPPs that will be merged
# (uncorrelated with heterog. rates + correlated with homog. rates)
n_spiketrains = len(rates) # number of output spike trains
# amplitude expectation
expected_amplitude = np.dot(
amplitude_distribution, np.arange(n_spiketrains + 1))
r_sum = np.sum(rates) # sum of all output firing rates
r_min = np.min(rates) # minimum of the firing rates
# rate of the uncorrelated CPP
r_uncorrelated = r_sum - n_spiketrains * r_min
# rate of the correlated CPP
r_correlated = r_sum / expected_amplitude - r_uncorrelated
# rate of the hidden mother process
r_mother = r_uncorrelated + r_correlated
# Check the analytical constraint for the amplitude distribution
if amplitude_distribution[1] < (r_uncorrelated / r_mother).rescale(
pq.dimensionless).magnitude:
raise ValueError('A[1] too small / A[i], i>1 too high')
# Compute the amplitude distribution of the correlated CPP, and generate it
amplitude_distribution = \
amplitude_distribution * (r_mother / r_correlated).magnitude
amplitude_distribution[1] = \
amplitude_distribution[1] - r_uncorrelated / r_correlated
compound_poisson_spiketrains = _cpp_hom_stat(
amplitude_distribution, t_stop, r_min, t_start)
# Generate the independent heterogeneous Poisson processes
poisson_spiketrains = \
[StationaryPoissonProcess(
rate=rate - r_min, t_start=t_start, t_stop=t_stop
).generate_spiketrain()
for rate in rates]
# Pool the correlated CPP and the corresponding Poisson processes
return [_pool_two_spiketrains(compound_poisson_spiketrain,
poisson_spiketrain)
for compound_poisson_spiketrain, poisson_spiketrain
in zip(compound_poisson_spiketrains, poisson_spiketrains)] | fd62c73f5ef6464d61b96ebf69b868f12245c305 | 4,247 |
def validate_uncles(state, block):
"""Validate the uncles of this block."""
# Make sure hash matches up
if utils.sha3(rlp.encode(block.uncles)) != block.header.uncles_hash:
raise VerificationFailed("Uncle hash mismatch")
# Enforce maximum number of uncles
if len(block.uncles) > state.config['MAX_UNCLES']:
raise VerificationFailed("Too many uncles")
# Uncle must have a lower block number than the block
for uncle in block.uncles:
if uncle.number >= block.header.number:
raise VerificationFailed("Uncle number too high")
# Check uncle validity
MAX_UNCLE_DEPTH = state.config['MAX_UNCLE_DEPTH']
ancestor_chain = [block.header] + \
[a for a in state.prev_headers[:MAX_UNCLE_DEPTH + 1] if a]
# Uncles of this block cannot be direct ancestors and cannot also
# be uncles included 1-6 blocks ago
ineligible = [b.hash for b in ancestor_chain]
for blknum, uncles in state.recent_uncles.items():
if state.block_number > int(
blknum) >= state.block_number - MAX_UNCLE_DEPTH:
ineligible.extend([u for u in uncles])
eligible_ancestor_hashes = [x.hash for x in ancestor_chain[2:]]
for uncle in block.uncles:
if uncle.prevhash not in eligible_ancestor_hashes:
raise VerificationFailed("Uncle does not have a valid ancestor")
parent = [x for x in ancestor_chain if x.hash == uncle.prevhash][0]
if uncle.difficulty != calc_difficulty(
parent, uncle.timestamp, config=state.config):
raise VerificationFailed("Difficulty mismatch")
if uncle.number != parent.number + 1:
raise VerificationFailed("Number mismatch")
if uncle.timestamp < parent.timestamp:
raise VerificationFailed("Timestamp mismatch")
if uncle.hash in ineligible:
raise VerificationFailed("Duplicate uncle")
if uncle.gas_used > uncle.gas_limit:
raise VerificationFailed("Uncle used too much gas")
if not check_pow(state, uncle):
raise VerificationFailed('uncle pow mismatch')
ineligible.append(uncle.hash)
return True | 5a0d51caae5ca42a31644316008cd949862713cb | 4,248 |
def quote_index(q_t,tr_t):
"""Get start and end index of quote times in `q_t` with the same timestamp as trade times in `tr_t`."""
left, right = get_ind(q_t,tr_t)
right[left<right] -=1 # last quote cannot be traded on, so shift index
left -=1 # consider last quote from before the timestamp of the trade
left[left<0] = 0
return left, right | 2de223b3d08e99113db56c8e9522a1d0066c76b7 | 4,249 |
def calculate_learning_curves_train_test(K, y, train_indices, test_indices, sampled_order_train,
tau, stop_t=None):
"""Calculate learning curves (train, test) from running herding algorithm
Using the sampled order from the sampled_order indexing array
calculate the learning curves on the train set using GKRR. Note that we
pass K instead of calculating it on the fly, that's why we don't use
s2 explicitly, it's already used in calculating K.
:param K: (np.ndarray, (n, n)) full kernel matrix from dataset
:param y: (np.ndarray, (n, 1)) output array
:param train_indices: (np.ndarray, (n_train,)) train indices from the original dataset
:param test_indices: (np.ndarray, (n_train,)) test indices from the original dataset
:param sampled_order_train: (np.ndarray, (n_train,)) order of the sampled training indices
:param tau: (float) regularisation parameter used in GKRR
:param stop_t: (int) final step of calculations
:return learning_curve_train: (np.ndarray, (stop_t,)) array of mse for train set
:return learning_curve_test: (np.ndarray, (stop_t,)) array of mse for test set
"""
gaussian_kr = GaussianKernelRidgeRegression(
tau=tau, s2=None, precompute_K=True)
# Index K differently depending on what we do.
# When predicting, we need the kernel matrix to be
# K_mn, where m indexes the set to predict over and
# n indexes the set we train over
K_train = K[np.ix_(train_indices, train_indices)]
K_test = K[np.ix_(test_indices, test_indices)]
K_test_train = K[np.ix_(test_indices, train_indices)]
K_sampled_train = K_train[np.ix_(sampled_order_train, sampled_order_train)]
y_train = y[train_indices]
y_test = y[test_indices]
y_sampled_train = y_train[sampled_order_train]
n_train = K_train.shape[0]
n_test = K_test.shape[0]
if stop_t is None:
stop_t = n_train
learning_curve_train = np.zeros(stop_t)
learning_curve_test = np.zeros(stop_t)
for t in range(stop_t):
K_sampled_train_t = K_sampled_train[0:t+1, 0:t+1]
gaussian_kr.fit(X=K_sampled_train_t, y=y_sampled_train[:t+1])
# Predict for train set
K_xn_train = K_train[np.ix_(
np.arange(n_train), sampled_order_train[:t+1])]
y_train_ = gaussian_kr.predict(K_xn_train)
learning_curve_train[t] = mean_squared_error(y_train, y_train_)
# Then test set
K_xn_test = K_test_train[np.ix_(
np.arange(n_test), sampled_order_train[:t+1])]
y_test_ = gaussian_kr.predict(K_xn_test)
learning_curve_test[t] = mean_squared_error(y_test, y_test_)
return learning_curve_train, learning_curve_test | d7bf3484e95d7e97eac4ba8b68b485e634b2f7f2 | 4,250 |
def removeString2(string, removeLen):
"""骚操作 直接使用字符串替换"""
alphaNums = []
for c in string:
if c not in alphaNums:
alphaNums.append(c)
while True:
preLength = len(string)
for c in alphaNums:
replaceStr = c * removeLen
string = string.replace(replaceStr, '')
if preLength == len(string):
break
return string | 57d01d7c2a244b62a173fef35fd0acf1b622beed | 4,251 |
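A quick trace of removeString2: with removeLen=2, every run of two identical characters is stripped, and the outer loop repeats until a full pass removes nothing.
print(removeString2("aabbbacd", 2))   # 'bacd'  ('aa' removed, then one 'bb')
print(removeString2("abba", 2))       # ''      ('bb' removed, then the newly formed 'aa')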
from typing import Union
import datetime
import hashlib
def process_filing(client, file_path: str, filing_buffer: Union[str, bytes] = None, store_raw: bool = False,
store_text: bool = False):
"""
Process a filing from a path or filing buffer.
:param file_path: path to process; if filing_buffer is none, retrieved from here
:param filing_buffer: buffer; if not present, s3_path must be set
:param store_raw:
:param store_text:
:return:
"""
# Log entry
logger.info("Processing filing {0}...".format(file_path))
# Check for existing record first
try:
filing = Filing.objects.get(s3_path=file_path)
if filing is not None:
logger.error("Filing {0} has already been created in record {1}".format(file_path, filing))
return None
except Filing.DoesNotExist:
logger.info("No existing record found.")
except Filing.MultipleObjectsReturned:
logger.error("Multiple existing record found.")
return None
# Get buffer
if filing_buffer is None:
logger.info("Retrieving filing buffer from S3...")
filing_buffer = client.get_buffer(file_path)
# Get main filing data structure
filing_data = openedgar.parsers.edgar.parse_filing(filing_buffer, extract=store_text)
if filing_data["cik"] is None:
logger.error("Unable to parse CIK from filing {0}; assuming broken and halting...".format(file_path))
return None
try:
# Get company
company = Company.objects.get(cik=filing_data["cik"])
logger.info("Found existing company record.")
# Check if record exists for date
try:
_ = CompanyInfo.objects.get(company=company, date=filing_data["date_filed"])
logger.info("Found existing company info record.")
except CompanyInfo.DoesNotExist:
# Create company info record
company_info = CompanyInfo()
company_info.company = company
company_info.name = filing_data["company_name"]
company_info.sic = filing_data["sic"]
company_info.state_incorporation = filing_data["state_incorporation"]
company_info.state_location = filing_data["state_location"]
company_info.date = filing_data["date_filed"].date() if isinstance(filing_data["date_filed"],
datetime.datetime) else \
filing_data["date_filed"]
company_info.save()
logger.info("Created new company info record.")
except Company.DoesNotExist:
# Create company
company = Company()
company.cik = filing_data["cik"]
try:
# Catch race with another task/thread
company.save()
try:
_ = CompanyInfo.objects.get(company=company, date=filing_data["date_filed"])
except CompanyInfo.DoesNotExist:
# Create company info record
company_info = CompanyInfo()
company_info.company = company
company_info.name = filing_data["company_name"]
company_info.sic = filing_data["sic"]
company_info.state_incorporation = filing_data["state_incorporation"]
company_info.state_location = filing_data["state_location"]
company_info.date = filing_data["date_filed"]
company_info.save()
except django.db.utils.IntegrityError:
company = Company.objects.get(cik=filing_data["cik"])
logger.info("Created company and company info records.")
# Now create the filing record
try:
filing = Filing()
filing.form_type = filing_data["form_type"]
filing.accession_number = filing_data["accession_number"]
filing.date_filed = filing_data["date_filed"]
filing.document_count = filing_data["document_count"]
filing.company = company
filing.sha1 = hashlib.sha1(filing_buffer).hexdigest()
filing.s3_path = file_path
filing.is_processed = False
filing.is_error = True
filing.save()
except Exception as e: # pylint: disable=broad-except
logger.error("Unable to create filing record: {0}".format(e))
return None
# Create filing document records
try:
create_filing_documents(client, filing_data["documents"], filing, store_raw=store_raw, store_text=store_text)
filing.is_processed = True
filing.is_error = False
filing.save()
return filing
except Exception as e: # pylint: disable=broad-except
logger.error("Unable to create filing documents for {0}: {1}".format(filing, e))
return None | aafbe010615b6aeb1a21760a43a9680fa9c2a37f | 4,252 |
def _get_anchor_negative_triplet_mask(labels):
"""Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
Args:
labels: integer `Tensor` with shape [batch_size, label_dim]; the trailing label dimension is reduced by `.all(-1)`
Returns:
mask: boolean `Tensor` with shape [batch_size, batch_size]
"""
# Check if labels[i] != labels[k]
# Uses broadcasting where the 1st operand has shape (1, batch_size, label_dim) and the 2nd (batch_size, 1, label_dim)
return ~(labels.unsqueeze(0) == labels.unsqueeze(1)).all(-1) | 4c67bcc4e17e091c039c72a1324f501226561557 | 4,254 |
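A minimal PyTorch sketch; the labels carry a trailing label dimension, as the .all(-1) reduction expects:
import torch
labels = torch.tensor([[0], [1], [0]])   # shape [batch_size, 1]
mask = _get_anchor_negative_triplet_mask(labels)
print(mask)
# tensor([[False,  True, False],
#         [ True, False,  True],
#         [False,  True, False]])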
def random_k_edge_connected_graph(size, k, p=.1, rng=None):
"""
Super hacky way of getting a random k-connected graph
Example:
>>> from graphid import util
>>> size, k, p = 25, 3, .1
>>> rng = util.ensure_rng(0)
>>> gs = []
>>> for x in range(4):
>>> G = random_k_edge_connected_graph(size, k, p, rng)
>>> gs.append(G)
>>> # xdoc: +REQUIRES(--show)
>>> pnum_ = util.PlotNums(nRows=2, nSubplots=len(gs))
>>> fnum = 1
>>> for g in gs:
>>> util.show_nx(g, fnum=fnum, pnum=pnum_())
"""
for count in it.count(0):
seed = None if rng is None else rng.randint((2 ** 31 - 1))
# Randomly generate a graph
g = nx.fast_gnp_random_graph(size, p, seed=seed)
conn = nx.edge_connectivity(g)
# If it has exactly the desired connectivity we are one
if conn == k:
break
# If it has more, then we regenerate the graph with fewer edges
elif conn > k:
p = p / 2
# If it has less then we add a small set of edges to get there
elif conn < k:
# p = 2 * p - p ** 2
# if count == 2:
aug_edges = list(k_edge_augmentation(g, k))
g.add_edges_from(aug_edges)
break
return g | 98609e31790fc40229135f3a2c0dedd2eb123e9b | 4,255 |
from typing import Optional
from typing import Dict
from typing import Any
from typing import FrozenSet
import json
def _SendGerritJsonRequest(
host: str,
path: str,
reqtype: str = 'GET',
headers: Optional[Dict[str, str]] = None,
body: Any = None,
accept_statuses: FrozenSet[int] = frozenset([200]),
) -> Optional[Any]:
"""Send a request to Gerrit, expecting a JSON response."""
result = _SendGerritHttpRequest(
host, path, reqtype, headers, body, accept_statuses)
# The first line of the response should always be: )]}'
s = result.readline()
if s and s.rstrip() != ")]}'":
raise GerritError(200, 'Unexpected json output: %s' % s)
# Read the rest of the response.
s = result.read()
if not s:
return None
return json.loads(s) | 81bd115083c1d8ae4a705270394cf43ca1918862 | 4,257 |
def readTableRules(p4info_helper, sw, table):
"""
Reads the table entries from all tables on the switch.
:param p4info_helper: the P4Info helper
:param sw: the switch connection
"""
print '\n----- Reading table rules for %s -----' % sw.name
ReadTableEntries1 = {'table_entries': []}
ReadTableEntries2 = []
for response in sw.ReadTableEntries():
for entity in response.entities:
ReadTableEntry = {}
entry = entity.table_entry
table_name = p4info_helper.get_tables_name(entry.table_id)
if table==None or table==table_name:
# if table==None:
ReadTableEntry['table'] = table_name
print '%s: ' % table_name,
for m in entry.match:
print p4info_helper.get_match_field_name(table_name, m.field_id),
try:
print "\\x00"+"".join("\\x"+"{:02x}".format(ord(c)) for c in "".join([d for d in (p4info_helper.get_match_field_value(m))])),
except:
print '%r' % (p4info_helper.get_match_field_value(m),),
match_name = p4info_helper.get_match_field_name(table_name, m.field_id)
tmp_match_value = (p4info_helper.get_match_field_value(m),)
ReadTableEntry['match']={}
ReadTableEntry['match'][match_name] = tmp_match_value
action = entry.action.action
action_name = p4info_helper.get_actions_name(action.action_id)
ReadTableEntry['action_name'] = action_name
print '->', action_name,
for p in action.params:
print p4info_helper.get_action_param_name(action_name, p.param_id),
print '%r' % p.value,
action_params = p4info_helper.get_action_param_name(action_name, p.param_id)
tmp_action_value = p.value
### possibly needs bytify =>> struct. pack and unpack
ReadTableEntry['action_params'] = {}
ReadTableEntry['action_params'][action_params] = tmp_action_value
print
ReadTableEntries1.setdefault('table_entries',[]).append(ReadTableEntry)
ReadTableEntries2.append(ReadTableEntry)
return ReadTableEntries2 | 267bb6dcdf2d3adf37bcfa62f8d4581d9a9deda7 | 4,258 |
def binary_to_string(bin_string: str):
"""
>>> binary_to_string("01100001")
'a'
>>> binary_to_string("a")
Traceback (most recent call last):
...
ValueError: not a binary number
>>> binary_to_string("")
Traceback (most recent call last):
...
ValueError: no input was given
>>> binary_to_string("39")
Traceback (most recent call last):
...
ValueError: not a binary number
>>> binary_to_string(1010)
Traceback (most recent call last):
...
TypeError: not a string
"""
if not isinstance(bin_string, str):
    raise TypeError("not a string")
if not bin_string:
    raise ValueError("no input was given")
if not all(char in "01" for char in bin_string):
    raise ValueError("not a binary number")
return "".join([chr(int(i, 2)) for i in bin_string.split()]) | f22dd64027ee65acd4d782a6a1ce80520f016770 | 4,260 |
def load_texture_pair(filename):
"""Function what loads two verions of the texture for left/right movement"""
return [
arcade.load_texture(filename),
arcade.load_texture(filename, flipped_horizontally=True)
] | 782a79395fe25c8f391877dc9be52bf9d29f63f9 | 4,262 |
from typing import Tuple
def bytes2bson(val: bytes) -> Tuple[bytes, bytes]:
"""Encode bytes as BSON Binary / Generic."""
assert isinstance(val, (bytes, bytearray))
return BSON_BINARY, pack_i32(len(val)) + BSON_BINARY_GENERIC + val | 2c481d6585c12732f4b26a2c25f5738a5f8352bb | 4,263 |
def tabindex(field, index):
"""Set the tab index on the filtered field."""
field.field.widget.attrs["tabindex"] = index
return field | c42b64b3f94a2a8a35b8b0fa3f14fe6d44b2f755 | 4,264 |
def compute_GridData(xvals, yvals, f, ufunc=0, **keyw):
"""Evaluate a function of 2 variables and store the results in a GridData.
Computes a function 'f' of two variables on a rectangular grid
using 'tabulate_function', then store the results into a
'GridData' so that it can be plotted. After calculation the data
are written to a file; no copy is kept in memory. Note that this
is quite different than 'Func' (which tells gnuplot to evaluate
the function).
Arguments:
'xvals' -- a 1-d array with dimension 'numx'
'yvals' -- a 1-d array with dimension 'numy'
'f' -- the function to plot--a callable object for which
'f(x,y)' returns a number.
'ufunc=<bool>' -- evaluate 'f' as a ufunc?
Other keyword arguments are passed to the 'GridData' constructor.
'f' should be a callable object taking two arguments.
'f(x,y)' will be computed at all grid points obtained by
combining elements from 'xvals' and 'yvals'.
If called with 'ufunc=1', then 'f' should be a function that is
composed entirely of ufuncs, and it will be passed the 'xvals' and
'yvals' as rectangular matrices.
Thus if you have a function 'f' and two vectors 'xvals' and
'yvals' and a Gnuplot instance called 'g', you can plot the
function by typing 'g.splot(compute_GridData(f, xvals, yvals))'.
"""
xvals = utils.float_array(xvals)
yvals = utils.float_array(yvals)
# evaluate function:
data = tabulate_function(f, xvals, yvals, ufunc=ufunc)
return Gnuplot.GridData(data, xvals, yvals, **keyw) | 64c95b4ce6e7735869a31eb29e06c6e6c324a844 | 4,265 |
import functools
from typing import Any
import collections
def decorator_with_option(
decorator_fn,
):
"""Wraps a decorator to correctly forward decorator options.
`decorator_with_option` is applied on decorators. Usage:
```
@jax3d.utils.decorator_with_option
def my_decorator(fn, x=None, y=None):
...
```
The decorated decorator can then be used with or without options, or
called directly.
```
@my_decorator(x, y=y)
def fn():
...
@my_decorator
def fn():
...
fn = my_decorator(fn, x, y=y)
```
Args:
decorator_fn: The decorator with signature `(fn, *option, **option_kwargs)`
Returns:
The `decorator_fn` which now can be used as decorator with option.
"""
@functools.wraps(decorator_fn)
def decorated(*args: Any, **kwargs: Any) -> Any:
fn = args[0] if args else None
if not isinstance(fn, collections.abc.Callable):
def decorated_with_options(fn):
return decorator_fn(fn, *args, **kwargs)
return decorated_with_options
return decorator_fn(fn, *args[1:], **kwargs)
return decorated | 2e9a4a772d7c072b579ac49e183d6ca00e9feacf | 4,266 |
import math
def chebyshev_parameters(rxn_dstr, a_units='moles'):
""" Parses the data string for a reaction in the reactions block
for the lines containing the Chebyshev fitting parameters,
then reads the parameters from these lines.
:param rxn_dstr: data string for species in reaction block
:type rxn_dstr: str
:return params: Chebyshev fitting parameters
:rtype: dict[param: value]
"""
original_rxn_dstr = rxn_dstr
rxn_dstr = apf.remove(COMMENTS_PATTERN, rxn_dstr)
tcheb_pattern = (
'TCHEB' + app.zero_or_more(app.SPACE) + app.escape('/') +
app.zero_or_more(app.SPACE) + app.capturing(app.NUMBER) +
app.one_or_more(app.SPACE) + app.capturing(app.NUMBER) +
app.zero_or_more(app.SPACE) + app.escape('/')
)
pcheb_pattern = (
'PCHEB' + app.zero_or_more(app.SPACE) + app.escape('/') +
app.zero_or_more(app.SPACE) + app.capturing(app.NUMBER) +
app.one_or_more(app.SPACE) + app.capturing(app.NUMBER) +
app.zero_or_more(app.SPACE) + app.escape('/')
)
cheb_pattern = (
app.not_preceded_by(app.one_of_these(['T', 'P'])) +
'CHEB' + app.zero_or_more(app.SPACE) +
app.escape('/') + app.capturing(app.one_or_more(app.WILDCARD2)
) + app.escape('/')
)
cheb_params_raw = apf.all_captures(cheb_pattern, rxn_dstr)
if cheb_params_raw:
params = {}
# Get the temp and pressure limits;
# add the Chemkin default values if they don't exist
cheb_temps = apf.first_capture(tcheb_pattern, rxn_dstr)
cheb_pressures = apf.first_capture(pcheb_pattern, rxn_dstr)
if cheb_temps is None:
cheb_temps = ('300.00', '2500.00')
print(
'No Chebyshev temperature limits specified' +
' for the below reaction.' +
f' Assuming 300 and 2500 K. \n \n {original_rxn_dstr}\n')
if cheb_pressures is None:
cheb_pressures = ('0.001', '100.00')
print(
'No Chebyshev pressure limits specified' +
' for the below reaction.' +
f' Assuming 0.001 and 100 atm. \n \n {original_rxn_dstr}\n')
# Get all the numbers from the CHEB parameters
cheb_params = []
for cheb_line in cheb_params_raw:
cheb_params.extend(cheb_line.split())
# Get the cheb array dimensions N and M, which are the first two
# entries of the CHEB params
cheb_n = int(math.floor(float(cheb_params[0])))
cheb_m = int(math.floor(float(cheb_params[1])))
# Start on the third value (after N and M)
# and get all the polynomial coefficients
coeffs = []
for idx, coeff in enumerate(cheb_params[2:]):
# extra coefficients are allowed but ignored
if idx+1 > (cheb_n*cheb_m):
break
coeffs.append(coeff)
assert len(coeffs) == (cheb_n*cheb_m), (
f'For the below reaction, there should be {cheb_n*cheb_m}' +
' Chebyshev polynomial' +
f' coefficients, but there are only {len(coeffs)}.' +
f' \n \n {original_rxn_dstr}\n'
)
alpha = np.array(list(map(float, coeffs)))
params['t_limits'] = [float(val) for val in cheb_temps]
params['p_limits'] = [float(val) for val in cheb_pressures]
params['alpha_elm'] = alpha.reshape([cheb_n, cheb_m])
params['a_units'] = a_units
else:
params = None
return params | fb1ae476d17a7f6546f99c729139bf55b4834174 | 4,267 |
def _gen_matrix(n, *args):
"""Supports more matrix construction routines.
1. Usual contruction (from a 2d list or a single scalar).
2. From a 1-D array of n*n elements (glsl style).
3. From a list of n-D vectors (glsl style).
"""
if len(args) == n * n: # initialize with n*n scalars
data = [[args[k * n + i] for i in range(n)] for k in range(n)]
return ti.Matrix(data, float)
if len(args) == n: # initialize with n vectors
# Matrix.rows() will do implicit type inference
data = [list(x) for x in args]
return ti.Matrix(data, float)
if len(args) == 1: # initialize with a scalar, a matrix or a 1d list
x = args[0]
if isinstance(x, ti.Matrix):
return x
if hasattr(x, "__len__") and len(x) == n * n:
data = [[x[k * n + i] for i in range(n)] for k in range(n)]
return ti.Matrix(data, float)
return ti.types.matrix(n, n, float)(*args) | eea482c20dfa5c30c9077185eba81bc3bd1ba8ce | 4,268 |
import time
import math
def temperature(analog_pin, power_pin = None, ground_pin = None, R = 20000, n = 100):
"""Function for computing thermister temperature
Parameters
----------
analog_pin: :obj:'pyb.Pin'
Any pin connected to an analog to digital converter on a pyboard
power_pin: :obj:'pyb.Pin', optional
Used if a digital pin is to be used to power the thermistor. Note that
the thermistor may also be powered by the 3.3V pin. In that case, this
argument is not required.
ground_pin: :obj:'pyb.Pin', optional
Used if a digital pin is used to ground the thermistor. Note that
the thermistor may also be grounded by the GND pin. In that case, this
argument is not required.
R: float, optional
Value of the fixed resistor in the resistor divider. Default is 20,000 ohm
n: int, optional
Number of readings to make--returns average of middle two quartiles. Defaults to 100.
Returns
-------
Float
Temperature (Celsius degrees)
"""
#Define constants for conversion
A = 0.001125308852122
B = 0.000234711863267
C = 0.000000085663516
#Allocate array for storing temperature readings
T = arr.array('f',[0]*n)
    #Make sure the power pin starts in the off state (it is pulsed on inside the loop), then wait a moment
    if power_pin is not None: power_pin.off()
time.sleep_ms(1)
#Turn off the ground if necessary, then wait a moment
if ground_pin is not None: ground_pin.off()
time.sleep_ms(1)
#Loop through readings, computing thermistor resistance
#and temperature, then storing in array
for i in range(n):
#if possible, switch current on pins to ensure
#no net accumulation of charge if this is in parallel with pins that have a capacitance
if power_pin is not None:
power_pin.on()
ontick = time.ticks_us()
time.sleep_us(1000)
count = analog_pin.read()
power_pin.off()
offtick = time.ticks_us()
time_on = time.ticks_diff(offtick, ontick)
power_pin.off()
if ground_pin is not None:
ground_pin.on()
time.sleep_us(time_on)
ground_pin.off()
#calculate resistance and temperature, being careful not to cause an overload
if count>0:
if count < 4095:
R_t = ((count/4095)*R)/(1-count/4095)
T[i] = 1/((A+B*(math.log(R_t)))+C*((math.log(R_t))**3))-273.15
else:
T[i] = -55
else:
            T[i] = 150
#Turn the power back off if possible
if power_pin is not None: power_pin.off()
#Define and analyze the middle two quartiles
upper_index = math.ceil(3*n/4)
lower_index = math.floor(n/4)
sampled_length = (upper_index - lower_index)
T_mean_of_mid_quartiles = sum(sorted(T)[lower_index:upper_index])/sampled_length
return T_mean_of_mid_quartiles | a526595bcab6e824a82de7ec4d62fbfc12ee39f8 | 4,269 |
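# Illustrative usage on a pyboard (pin names below are assumptions, not from the source):
#   import pyb
#   adc = pyb.ADC(pyb.Pin('X11'))           # ADC pin wired to the thermistor divider
#   pwr = pyb.Pin('X12', pyb.Pin.OUT_PP)    # optional digital pin powering the divider
#   t_celsius = temperature(adc, power_pin=pwr, R=20000, n=100)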
import json
def submit_form():
""" Submits survey data to SQL database. """
if request.method == "POST":
data = request.data
if data:
data = json.loads(data.decode("utf-8").replace("'",'"'))
student, enrollments, tracks = processSurveyData(data)
insert = 'INSERT INTO student_info.student (net_id, student_name, pass_word, start_semester, current_semester,' \
'expected_graduation, total_semesters) VALUES("%s", "%s", "%s", "%s", "%s", "%s", %i)' % (student)
            insert += ' ON DUPLICATE KEY UPDATE student_name="%s", pass_word="%s", start_semester="%s", current_semester="%s", ' \
'expected_graduation="%s", total_semesters=%i' % tuple(student[1:])
engine.raw_operation(insert)
for e in enrollments:
insert = 'INSERT INTO student_info.enrollments VALUES("%s", "%s", "%s", "%s", %i)' % tuple(e)
                insert += ' ON DUPLICATE KEY UPDATE semester="%s", semester_taken="%s", rating=%i' % tuple(e[2:])
engine.raw_operation(insert)
for t in tracks:
insert = 'INSERT INTO student_info.track VALUES("%s", "%s", %i, "%s")' % tuple(t)
                insert += ' ON DUPLICATE KEY UPDATE interest = "{}", credit_hours={}'.format(t[1], t[2])
engine.raw_operation(insert)
if 'DeletedMajor' in data or 'DeletedMinor' in data:
tracks_to_delete = data['DeletedMajor'] + data['DeletedMinor']
for track in tracks_to_delete:
engine.drop_rows('DELETE FROM student_info.track WHERE \
track.net_id = "%s" AND track.field_name = "%s"' % (student[0], track))
return 'Successful submission'
return "Invalid input" | 2d5a10c5af906a7993559cf71f23eea1a35ec993 | 4,270 |
def get_target_external_resource_ids(relationship_type, ctx_instance):
"""Gets a list of target node ids connected via a relationship to a node.
:param relationship_type: A string representing the type of relationship.
:param ctx: The Cloudify ctx context.
:returns a list of security group ids.
"""
ids = []
if not getattr(ctx_instance, 'relationships', []):
ctx.logger.info('Skipping attaching relationships, '
'because none are attached to this node.')
return ids
for r in ctx_instance.relationships:
if relationship_type in r.type:
ids.append(
r.target.instance.runtime_properties[
constants.EXTERNAL_RESOURCE_ID])
return ids | 27077d3118ca10a6896ba856d9d261f1b8ab9d56 | 4,271 |
def split_multibody(beam, tstep, mb_data_dict, ts):
"""
split_multibody
This functions splits a structure at a certain time step in its different bodies
Args:
beam (:class:`~sharpy.structure.models.beam.Beam`): structural information of the multibody system
tstep (:class:`~sharpy.utils.datastructures.StructTimeStepInfo`): timestep information of the multibody system
mb_data_dict (dict): Dictionary including the multibody information
ts (int): time step number
Returns:
MB_beam (list(:class:`~sharpy.structure.models.beam.Beam`)): each entry represents a body
MB_tstep (list(:class:`~sharpy.utils.datastructures.StructTimeStepInfo`)): each entry represents a body
"""
MB_beam = []
MB_tstep = []
for ibody in range(beam.num_bodies):
ibody_beam = None
ibody_tstep = None
ibody_beam = beam.get_body(ibody = ibody)
ibody_tstep = tstep.get_body(beam, ibody_beam.num_dof, ibody = ibody)
ibody_beam.FoR_movement = mb_data_dict['body_%02d' % ibody]['FoR_movement']
if ts == 1:
ibody_beam.ini_info.pos_dot *= 0
ibody_beam.timestep_info.pos_dot *= 0
ibody_tstep.pos_dot *= 0
ibody_beam.ini_info.psi_dot *= 0
ibody_beam.timestep_info.psi_dot *= 0
ibody_tstep.psi_dot *= 0
MB_beam.append(ibody_beam)
MB_tstep.append(ibody_tstep)
return MB_beam, MB_tstep | 25d3d3496bdf0882e732ddfb14d3651d54167adc | 4,272 |
def qname_decode(ptr, message, raw=False):
"""Read a QNAME from pointer and respect labels."""
def _rec(name):
ret = []
while name and name[0] > 0:
length = int(name[0])
if (length & 0xC0) == 0xC0:
offset = (length & 0x03) << 8 | int(name[1])
comps, _ = _rec(message[offset:])
ret += comps
name = name[1:]
break
ret.append(name[1 : 1 + length])
name = name[length + 1 :]
return ret, name
name_components, rest = _rec(ptr)
if raw:
return name_components, rest[1:]
return ".".join([x.decode("utf-8") for x in name_components]), rest[1:] | d08a6c286e2520807a05cbd2c2aa5eb8ce7a7602 | 4,273 |
def coordinates_within_board(n: int, x: int, y: int) -> bool:
"""Are the given coordinates inside the board?"""
return x < n and y < n and x >= 0 and y >= 0 | 6359343bde0c9d7658a484c45d9cd07893b4e00d | 4,274 |
def hook_name_to_env_name(name, prefix='HOOKS'):
"""
    >>> hook_name_to_env_name('foo.bar_baz')
    'HOOKS_FOO_BAR_BAZ'
    >>> hook_name_to_env_name('foo.bar_baz', 'PREFIX')
    'PREFIX_FOO_BAR_BAZ'
"""
return '_'.join([prefix, name.upper().replace('.', '_')]) | b0dabce88da8ddf8695303ac4a22379baa4ddffa | 4,276 |
def get_coefficients():
"""
Returns the global scaling dictionary.
"""
global COEFFICIENTS
if COEFFICIENTS is None:
COEFFICIENTS = TransformedDict()
COEFFICIENTS["[length]"] = 1.0 * u.meter
COEFFICIENTS["[mass]"] = 1.0 * u.kilogram
COEFFICIENTS["[time]"] = 1.0 * u.year
COEFFICIENTS["[temperature]"] = 1.0 * u.degK
COEFFICIENTS["[substance]"] = 1.0 * u.mole
return COEFFICIENTS | 7acd77fdbb0604ff97aa24bb7ce4f29bb32889b0 | 4,277 |
def get_distance_from_guide_alignment(data, guide_data, reference_index_key="position", minus_strand=False):
"""Calculate the distance of input data alignment to the guide alignment.
:param data: input data with at least "raw_start", "raw_length", and reference_index_key fields
:param guide_data: guide alignmnet data
:param reference_index_key: key to grab reference index from data
:param minus_strand: boolean option if data is aligned to minus strand
:return: modified data with "guide_delta" field
"""
variant_data = data.sort_values(by=reference_index_key)
if minus_strand:
guide_data = guide_data[::-1]
distance_to_guide = []
variant_index = 0
len_variant_data = len(variant_data)
v_position = variant_data.iloc[variant_index][reference_index_key]
for i, guide in enumerate(guide_data.itertuples()):
if getattr(guide, "reference_index") >= v_position:
if getattr(guide, "reference_index") == v_position:
guide_index = i
else:
guide_index = i - 1
v_position_middle = (variant_data.iloc[variant_index]["raw_start"] +
(variant_data.iloc[variant_index]["raw_length"] / 2))
guide_middle_position = np.round(
(guide_data.iloc[guide_index]["raw_start"] + (guide_data.iloc[guide_index]["raw_length"] / 2)))
distance_to_guide.append(v_position_middle - guide_middle_position)
variant_index += 1
if variant_index < len_variant_data:
v_position = variant_data.iloc[variant_index][reference_index_key]
else:
break
distance = pd.DataFrame(distance_to_guide, columns=['guide_delta'])
final_data = pd.concat([variant_data, distance], axis=1)
return final_data | 1a1150a470c75e4e836278fafb263839a67f7da4 | 4,278 |
import pyfiglet
def get_info(font):
    """ currently wraps the infoFont call, but I would like to add a JSON representation of this data to
better display the individual details of the font."""
return pyfiglet.FigletFont.infoFont(font) | 40ae5238701754a12c4b1c75fff54412f7e33d4f | 4,279 |
def readline_skip_comments(f):
"""
Read a new line while skipping comments.
"""
l = f.readline().strip()
while len(l) > 0 and l[0] == '#':
l = f.readline().strip()
return l | c4b36af14cc48b1ed4cd72b06845e131015da6c6 | 4,280 |
def _verify_weight_parameters(weight_parameters):
"""Verifies that the format of the input `weight_parameters`.
Checks that the input parameters is a 2-tuple of tensors of equal shape.
Args:
weight_parameters: The parameters to check.
Raises:
RuntimeError: If the input is not a 2-tuple of tensors with equal shape.
Returns:
The input `weight_parameters`.
"""
if len(weight_parameters) != 2:
raise RuntimeError("Incorrect number of weight parameters. Expected "
"2 tensors, got {}".format(len(weight_parameters)))
if weight_parameters[0].shape != weight_parameters[1].shape:
raise RuntimeError("Expected theta and log alpha parameter tensor "
"to be same shape. Got shapes {} and {}"
.format(weight_parameters[0].get_shape().as_list(),
weight_parameters[1].get_shape().as_list()))
return weight_parameters | 9ec018c66d48e830250fd299cd50f370118132cc | 4,281 |
import requests
def send_notification(lira_url, auth_dict, notification):
"""Send a notification to a given Lira.
Args:
lira_url (str): A typical Lira url, e.g. https://pipelines.dev.data.humancellatlas.org/
auth_dict (dict): Dictionary contains credentials for authenticating with Lira.
It should have 'method' and 'value' as keys.
notification (dict): A dict of notification content.
Returns:
requests.Response: The response object returned by Lira.
"""
if auth_dict['method'] == 'token':
response = requests.post(
url=harmonize_url(lira_url) + 'notifications',
json=notification,
params={'auth': auth_dict['value']['auth_token']},
)
else:
auth = HTTPSignatureAuth(
key_id=auth_dict['value']['hmac_key_id'],
key=auth_dict['value']['hmac_key_value'].encode('utf-8'),
)
response = requests.post(
url=harmonize_url(lira_url) + 'notifications', json=notification, auth=auth
)
return response | 1f6c28f9b12458cd2af6c3f12f5a9a6df6e64f49 | 4,282 |
import numpy as np
from exojax.spec.molinfo import molmass
from exojax.utils.constants import kB, m_u
def calc_vfactor(atm="H2",LJPparam=None):
"""
Args:
       atm: molecule making up the atmosphere: "H2", "O2", or "N2"
LJPparam: Custom Lennard-Jones Potential Parameters (d (cm) and epsilon/kB)
Returns:
vfactor: dynamic viscosity factor for Rosner eta = viscosity*T**0.66
       applicable temperature range (K,K)
Note:
The dynamic viscosity is from the Rosner book (3-2-12) and caption in p106 Hirschfelder et al. (1954) within Trange.
"""
mu=molmass(atm)
if LJPparam is None:
LJPparam_d,LJPparam_epsilon_per_kB=get_LJPparam()
epsilon_per_kB=LJPparam_epsilon_per_kB[atm]
d=LJPparam_d[atm]
else:
epsilon_per_kB=LJPparam[0]
d=LJPparam[1]
vfactor=5.0/16.0*np.sqrt(np.pi*kB*mu*m_u)/(np.pi*d*d)/1.22*(1.0/epsilon_per_kB)**0.16
Trange=[3.0*epsilon_per_kB,200.0*epsilon_per_kB]
return vfactor, Trange | a48fe55216bcf9848eece92d45f03d2c1e3fdee3 | 4,283 |
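# Illustrative usage (not from the source): within the returned temperature range the
# dynamic viscosity follows eta ~ vfactor * T**0.66 (Rosner's fit quoted in the docstring).
#   vfac, (t_low, t_high) = calc_vfactor(atm="H2")
#   eta_300K = vfac * 300.0**0.66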
def _crc16_checksum(bytes):
"""Returns the CRC-16 checksum of bytearray bytes
Ported from Java implementation at: http://introcs.cs.princeton.edu/java/61data/CRC16CCITT.java.html
Initial value changed to 0x0000 to match Stellar configuration.
"""
crc = 0x0000
polynomial = 0x1021
for byte in bytes:
for i in range(8):
bit = (byte >> (7 - i) & 1) == 1
c15 = (crc >> 15 & 1) == 1
crc <<= 1
if c15 ^ bit:
crc ^= polynomial
return crc & 0xFFFF | 2b00d11f1b451f3b8a4d2f42180f6f68d2fbb615 | 4,284 |
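# Illustrative check (not from the source): with init 0x0000 and polynomial 0x1021 this
# routine reproduces the widely published CRC-16/XMODEM check value for b"123456789".
assert _crc16_checksum(b"123456789") == 0x31C3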
from typing import Callable
from typing import Dict
import math
from sqlalchemy import select, func  # assumption: select([...]) and func.count below are SQLAlchemy constructs
def method_get_func(model, fields="__all__", need_user=False, **kwargs)->Callable:
"""生成一个model的get访问"""
async def list(page: Dict[str, int] = Depends(paging_query_depend),
user: User = Depends(create_current_active_user(need_user))):
"""
get
:return:
"""
table = model.__table__
if fields == "__all__":
            query = table.select().offset((page['page_number'] - 1) * page['page_size']).limit(
                page['page_size'])  # offset/limit paging; defaults to the first page, page_size rows per page
else:
query = table.select([getattr(model.__table__.c, i) for i in fields]).offset(
(page['page_number'] - 1) * page['page_size']).limit(
                page['page_size'])  # offset/limit paging; defaults to the first page, page_size rows per page
paginate_obj = await AdminDatabase().database.fetch_all(query)
query2 = select([func.count(table.c.id)])
total_page = await AdminDatabase().database.fetch_val(query2)
print("注意需要考虑查询两次的两倍代价")
return {
"page_count": int(math.ceil(total_page * 1.0 / page['page_size'])),
"rows_total": total_page,
"page_number": page['page_number'],
"page_size": page['page_size'],
"data": paginate_obj
}
return list | 2a9b31aa6261d99da94f7a971a9096dfec291e86 | 4,286 |
def get_mysqlops_connections():
""" Get a connection to mysqlops for reporting
Returns:
A mysql connection
"""
(reporting_host, port, _, _) = get_mysql_connection('mysqlopsdb001')
reporting = HostAddr(''.join((reporting_host, ':', str(port))))
return connect_mysql(reporting, 'scriptrw') | 25fb3cba8db11d28450e142a35ff763313c4c360 | 4,287 |
def reverse(segment):
"""Reverses the track"""
return segment.reverse() | 74632b8a8a192970187a89744b2e8c6fa77fb2cf | 4,289 |
def rmse(y, y_pred):
"""Returns the root mean squared error between
ground truths and predictions.
"""
return np.sqrt(mse(y, y_pred)) | c3d2e20d1e9ebf40c704c5f14fb0dd35758f2458 | 4,290 |
from typing import Dict
from typing import Any
def cdm_cluster_location_command(client: PolarisClient, args: Dict[str, Any]):
"""
Find the CDM GeoLocation of a CDM Cluster.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
cluster_id = validate_required_arg("clusterId", args.get('clusterId'))
raw_response = client.get_cdm_cluster_location(cluster_id)
if raw_response == "No Location Configured":
return CommandResults(readable_output=MESSAGES['NO_RESPONSE'])
hr_content = {"Location": raw_response}
hr = tableToMarkdown("CDM Cluster Location", hr_content, headers="Location", removeNull=True)
context = {
"ClusterId": cluster_id.lower(),
"Cluster": {
"Location": raw_response
}
}
return CommandResults(outputs_prefix=OUTPUT_PREFIX['CDM_CLUSTER'],
outputs_key_field="ClusterId",
readable_output=hr,
outputs=context,
raw_response=raw_response) | b0001969177dcceb144d03edece589a84af48dc1 | 4,291 |
def prepare_cases(cases, cutoff=25):
""" clean cases per day for Rt estimation. """
new_cases = cases.diff()
smoothed = new_cases.rolling(7,
win_type='gaussian',
min_periods=1,
center=True).mean(std=2).round()
idx_start = np.searchsorted(smoothed, cutoff)
smoothed = smoothed.iloc[idx_start:]
original = new_cases.loc[smoothed.index]
return original, smoothed | 71b41959178e6fb8480ba2f6549bfeb6f02aded4 | 4,292 |
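# Illustrative call (not from the source), assuming a pandas Series of cumulative case
# counts indexed by date:
#   original, smoothed = prepare_cases(cumulative_cases, cutoff=25)
#   # 'smoothed' drops the early low-count days and is what an Rt estimator would consume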
def utility(board):
"""
Returns 1 if X has won the game, -1 if O has won, 0 otherwise.
"""
if winner(board) == 'X':
return 1
elif winner(board) == 'O':
return -1
else:
return 0 | 77f4e503882a6c1a6445167d2b834f342adbc21e | 4,293 |
def shard(group, num_shards):
"""Breaks the group apart into num_shards shards.
Args:
group: a breakdown, perhaps returned from categorize_files.
num_shards: The number of shards into which to break down the group.
Returns:
A list of shards.
"""
shards = []
for i in range(num_shards):
shards.append(LanguageBreakdown())
pos = 0
for kind, files in group.kinds.items():
for filename in files:
shards[pos].kinds[kind].append(filename)
pos = (pos + 1) % num_shards
return shards | 635af47ac15c5b3aeeec8ab73ea8a6a33cd0d964 | 4,294 |
def is_bond_member(yaml, ifname):
"""Returns True if this interface is a member of a BondEthernet."""
if not "bondethernets" in yaml:
return False
for _bond, iface in yaml["bondethernets"].items():
if not "interfaces" in iface:
continue
if ifname in iface["interfaces"]:
return True
return False | 521186221f2d0135ebcf1edad8c002945a56da26 | 4,295 |
def get_id(group):
"""
Get the GO identifier from a list of GO term properties.
Finds the first match to the id pattern.
Args:
group (List[str])
Returns:
str
"""
return first_match(group, go_id).split(':',1)[1].strip() | de99e159c3de1f8984cce40f952ff14568c8a1a5 | 4,296 |
import random
import math
def generateLogNormalVariate(mu, sigma):
"""
RV generated using rejection method
"""
variateGenerated = False
while not variateGenerated:
u1 = random.uniform(0, 1)
u2 = random.uniform(0, 1)
x = -1*math.log(u1)
        if u2 > math.exp(-1*math.pow((x-1), 2)/2):
            continue
        else:
            # x is a half-normal variate here; give it a random sign so that
            # mu + sigma*x is normally distributed and the result is lognormal
            if random.uniform(0, 1) < 0.5:
                x = -x
            return math.exp(mu+(sigma*x)) | 048bc1c2123fbdd9a750aac86bd6922aaf42de27 | 4,297
import re
def replace_color_codes(text, replacement):
"""Replace ANSI color sequence from a given string.
Args:
text (str): Original string to replacement from.
replacement (str): String to replace color codes with.
Returns:
str: Mutated string after the replacement.
"""
return re.sub(COLOR_CODE_REGEX, replacement, text) | a0c3e1b1060ae475a16b936c237669edab8cfc91 | 4,298 |
def get_uti_for_extension(extension):
"""get UTI for a given file extension"""
if not extension:
return None
# accepts extension with or without leading 0
if extension[0] == ".":
extension = extension[1:]
if (OS_VER, OS_MAJOR) <= (10, 16):
# https://developer.apple.com/documentation/coreservices/1448939-uttypecreatepreferredidentifierf
with objc.autorelease_pool():
uti = CoreServices.UTTypeCreatePreferredIdentifierForTag(
CoreServices.kUTTagClassFilenameExtension, extension, None
)
if uti:
return uti
# on MacOS 10.12, HEIC files are not supported and UTTypeCopyPreferredTagWithClass will return None for HEIC
if extension.lower() == "heic":
return "public.heic"
return None
uti = _get_uti_from_ext_dict(extension)
if uti:
return uti
uti = _get_uti_from_mdls(extension)
if uti:
# cache the UTI
EXT_UTI_DICT[extension.lower()] = uti
UTI_EXT_DICT[uti] = extension.lower()
return uti
return None | 50e147d9fb267d7c4686dd9e53cd3404a9eaae6f | 4,299 |
async def anext(*args):
"""Retrieve the next item from the async generator by calling its __anext__() method.
If default is given, it is returned if the iterator is exhausted,
otherwise StopAsyncIteration is raised.
"""
if len(args) < 1:
raise TypeError(
f"anext expected at least 1 arguments, got {len(args)}")
aiterable, default, has_default = args[0], None, False
if len(args) > 2:
raise TypeError(f"anext expected at most 2 arguments, got {len(args)}")
if len(args) == 2:
default = args[1]
has_default = True
try:
return await aiterable.__anext__()
except (StopAsyncIteration, CancelledError) as exc:
if has_default:
return default
raise StopAsyncIteration() from exc | 60f71e2277501b1c274d691cef108937a2492147 | 4,301 |
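# Illustrative usage (not from the source): draining an async generator with a default.
#   import asyncio
#   async def _gen():
#       yield 1
#   async def _demo():
#       g = _gen()
#       assert await anext(g) == 1                  # next item
#       assert await anext(g, "done") == "done"     # exhausted -> default returned
#   asyncio.run(_demo())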
from typing import List
def get_service_gateway(client: VirtualNetworkClient = None,
compartment_id: str = None,
                        vcn_id: str = None) -> List["ServiceGateway"]:  # return annotation corrected: service gateways, not route tables
"""
Returns a complete, unfiltered list of Service Gateways of a vcn in the
compartment.
"""
service_gateway = []
service_gateway_raw = client.list_service_gateways(compartment_id=compartment_id,
vcn_id=vcn_id)
service_gateway.extend(service_gateway_raw.data)
while service_gateway_raw.has_next_page:
service_gateway_raw = client.list_service_gateways(
compartment_id=compartment_id,
vcn_id=vcn_id,
page=service_gateway_raw.next_page)
service_gateway.extend(service_gateway_raw.data)
return service_gateway | d1635dac2d6e8c64617eb066d114785674e9e8b3 | 4,303 |
def get_road_network_data(city='Mumbai'):
"""
"""
data = pd.read_csv("./RoadNetwork/"+city+"/"+city+"_Edgelist.csv")
size = data.shape[0]
X = np.array(data[['XCoord','YCoord']])
u, v = np.array(data['START_NODE'], dtype=np.int32), np.array(data['END_NODE'], dtype=np.int32)
w = np.array(data['LENGTH'], dtype=np.float64)
w = w/np.max(w) + 1e-6
G = sp.sparse.csr_matrix((w, (u,v)), shape = (size, size))
n, labels = sp.sparse.csgraph.connected_components(G)
    if n == 1:
        # keep the return signature consistent with the multi-component branch below
        return X, make_undirected(G)
# If there are more than one connected component, return the largest connected component
count_size_comp = np.bincount(labels)
z = np.argmax(count_size_comp)
indSelect = np.where(labels==z)
Gtmp = G[indSelect].transpose()[indSelect]
Gtmp = make_undirected(Gtmp)
return X[indSelect], Gtmp | 1d014e50b2d2883b5fda4aba9c2de5ca5e9dae2a | 4,304 |
from apps.jsonapp import JSONApp
def main_json(config, in_metadata, out_metadata):
"""
Alternative main function
-------------
This function launches the app using configuration written in
two json files: config.json and input_metadata.json.
"""
# 1. Instantiate and launch the App
logger.info("1. Instantiate and launch the App")
app = JSONApp()
result = app.launch(process_fastqc,
config,
in_metadata,
out_metadata)
# 2. The App has finished
logger.info("2. Execution finished; see " + out_metadata)
return result | 302ba667a775dd09a6779235a774a4a95f26af32 | 4,305 |
def _infection_active(state_old, state_new):
"""
Parameters
----------
state_old : dict or pd.Series
        Dictionary or pd.Series with the keys "s", "e", "i", and "r".
state_new : dict or pd.Series
Same type requirements as for the `state_old` argument in this function
apply.
Returns
-------
infection_active : bool
True if the event that occurred between `state_old` and `state_new` was
a transition from E to I. False otherwise.
"""
return state_new["s"] == state_old["s"] and \
state_new["e"] == state_old["e"] - 1 and \
state_new["i"] == state_old["i"] + 1 and \
state_new["r"] == state_old["r"] | a81376ee1853d34b1bc23b29b2a3e0f9d8741472 | 4,306 |