content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
import random
def delete_important_words(word_list, replace=''):
"""
Randomly delete an important word (one not in QUERY_SMALL_CHANGE_SETS) from the query, or replace it if `replace` is given
"""
# replace can be [MASK]
important_word_list = set(word_list) - set(QUERY_SMALL_CHANGE_SETS)
target = random.sample(list(important_word_list), 1)[0]
if replace:
new_word_list = [item if item!=target else item.replace(target, replace) for item in word_list]
else:
new_word_list = [item for item in word_list if item!=target]
return new_word_list | 336518cb1c52f896fc9878e1c11b3f0e72c4f36a | 3,650,300 |
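# Usage sketch for delete_important_words (assumption: QUERY_SMALL_CHANGE_SETS is a
# module-level set of "small change" words that is not shown above; the definition
# below is only illustrative).
QUERY_SMALL_CHANGE_SETS = {'the', 'of', 'a'}
print(delete_important_words(['price', 'of', 'gold']))                    # e.g. ['price', 'of']
print(delete_important_words(['price', 'of', 'gold'], replace='[MASK]'))  # e.g. ['[MASK]', 'of', 'gold']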
import numpy as np
def prot(vsini, st_rad):
"""
Function to convert stellar rotation velocity vsini in km/s to rotation period in days.
Parameters:
----------
vsini: Rotation velocity of star in km/s.
st_rad: Stellar radius in units of solar radii
Returns
------
Prot: Period of rotation of the star in days.
"""
vsini=np.array(vsini)
prot=(2*np.pi*st_rad*rsun)/(vsini*24*60*60)
return prot | db2ef4648c5142a996e4a700aee0c7df0f02a394 | 3,650,301 |
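# Usage sketch for prot (assumption: rsun is a module-level constant holding the
# solar radius in km, so that km / (km/s) gives seconds, then days).
rsun = 695_700.0                    # assumed value, km
print(prot(vsini=2.0, st_rad=1.0))  # ~25.3 days, roughly the solar rotation period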
def dialog_sleep():
"""Return the time to sleep as set by the --exopy-sleep option.
"""
return DIALOG_SLEEP | cc40ffa09c83bd095f685b3b1d237545b8d7dd34 | 3,650,302 |
def required_overtime (db, user, frm) :
""" If required_overtime flag is set for overtime_period of dynamic
user record at frm, we return the overtime_period belonging to
this dyn user record. Otherwise return None.
"""
dyn = get_user_dynamic (db, user, frm)
if dyn and dyn.overtime_period :
otp = db.overtime_period.getnode (dyn.overtime_period)
if otp.required_overtime :
return otp
return None | 052e1289a0d7110100b3a1ea0ad90fa7bd000cce | 3,650,303 |
def get_best_fit_member(*args):
"""
get_best_fit_member(sptr, offset) -> member_t
Get member that is most likely referenced by the specified offset.
Useful for offsets > sizeof(struct).
@param sptr (C++: const struc_t *)
@param offset (C++: asize_t)
"""
return _ida_struct.get_best_fit_member(*args) | 7d4032d5cedb789d495e658eda939c36591f3506 | 3,650,304 |
def convert_time(time):
"""Convert given time to srt format."""
stime = '%(hours)02d:%(minutes)02d:%(seconds)02d,%(milliseconds)03d' % \
{'hours': time / 3600,
'minutes': (time % 3600) / 60,
'seconds': time % 60,
'milliseconds': (time % 1) * 1000}
return stime | 948e6567c8bc17ccb5f98cf8c8eaf8fe6e8d0bec | 3,650,305 |
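# Usage sketch for convert_time; the input is assumed to be a duration in (possibly fractional) seconds.
print(convert_time(3661.5))  # 01:01:01,500
print(convert_time(59.25))   # 00:00:59,250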
def Returns1(target_bitrate, result):
"""Score function that returns a constant value."""
# pylint: disable=W0613
return 1.0 | 727e58e0d6d596cf4833ca3ca1cbcec6b9eedced | 3,650,306 |
def test_abstract_guessing():
"""Test abstract guessing property."""
class _CustomPsychometric(DiscriminationMethod):
def psychometric_function(self, d):
return 0.5
with pytest.raises(TypeError, match="abstract method"):
_CustomPsychometric() | 996f6fb4d6b819e15fb3a931c0dc2a1f211e3d58 | 3,650,307 |
import re
def remove_repeats(msg):
"""
This function removes repeated characters from text.
:param/return msg: String
"""
# twitter specific repeats
msg = re.sub(r"(.)\1{2,}", r"\1\1\1", msg) # characters repeated 3 or more times
# laughs
msg = re.sub(r"(ja|Ja)(ja|Ja)+(j)?", r"jaja", msg) # spanish
msg = re.sub(r"(rs|Rs)(Rs|rs)+(r)?", r"rsrs", msg) # portugese
msg = re.sub(r"(ha|Ha)(Ha|ha)+(h)?", r"haha", msg) # english
return msg | 590ab42f74deaa9f8dc1eb9c8b11d81622db2e6d | 3,650,308 |
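# Usage sketch for remove_repeats: collapses character floods and multilingual laughter.
print(remove_repeats("sooooo goodddd hahahahah"))  # sooo gooddd haha
print(remove_repeats("jajajaja muy bueno"))        # jaja muy bueno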
def _legend_main_get(project, row):
"""
Builds the legend of the chart's main series.
input
project: the project tag of the selected project
in the file XYplus_parameters.f_xml -in XYplus_main.py-
row: the active row returned by select_master(), from which
the chart title is extracted
return
a str with the legend of the chart's main point
"""
legend_master = project.find('graph/legend_master').text.strip()
columns_master = project.findall('graph/legend_master/column')
if len(columns_master) == 0:
return legend_master
subs = [row[int(col1.text)-1] for col1 in columns_master]
return legend_master.format(*subs) | 3938d723bd44a67313b86f956464fd186ef25386 | 3,650,309 |
import time
from astropy import units as u
from astropy.io import fits
import os
def contrast_jwst_num(coro_floor, norm, matrix_dir, rms=50*u.nm):
"""
Compute the contrast for a random segmented OTE misalignment on the JWST simulator.
:param coro_floor: float, coronagraph contrast floor
:param norm: float, normalization factor for PSFs: peak of unaberrated direct PSF
:param matrix_dir: str, directory of saved matrix
:param rms: astropy quantity (e.g. m or nm), WFE rms (OPD) to be put randomly over the entire segmented mirror
:return: 2x float, E2E and matrix contrast
"""
# Keep track of time
start_time = time.time()
# Parameters
nb_seg = CONFIG_PASTIS.getint('JWST', 'nb_subapertures')
iwa = CONFIG_PASTIS.getfloat('JWST', 'IWA')
owa = CONFIG_PASTIS.getfloat('JWST', 'OWA')
sampling = CONFIG_PASTIS.getfloat('JWST', 'sampling')
# Import numerical PASTIS matrix
filename = 'pastis_matrix'
matrix_pastis = fits.getdata(os.path.join(matrix_dir, filename + '.fits'))
# Create random aberration coefficients on segments, scaled to total rms
aber = util.create_random_rms_values(nb_seg, rms)
### E2E JWST sim
start_e2e = time.time()
jwst_sim = webbpsf_imaging.set_up_nircam()
jwst_sim[0].image_mask = CONFIG_PASTIS.get('JWST', 'focal_plane_mask')
log.info('Calculating E2E contrast...')
# Put aberration on OTE
jwst_sim[1].zero()
for nseg in range(nb_seg): # TODO: there is probably a single function that puts the aberration on the OTE at once
seg_num = webbpsf_imaging.WSS_SEGS[nseg].split('-')[0]
jwst_sim[1].move_seg_local(seg_num, piston=aber[nseg].value, trans_unit='nm')
image = jwst_sim[0].calc_psf(nlambda=1)
psf_jwst = image[0].data / norm
# Create DH
dh_mask = util.create_dark_hole(psf_jwst, iwa=iwa, owa=owa, samp=sampling)
# Get the mean contrast
contrast_jwst = util.dh_mean(psf_jwst, dh_mask)
end_e2e = time.time()
## MATRIX PASTIS
log.info('Generating contrast from matrix-PASTIS')
start_matrixpastis = time.time()
# Get mean contrast from matrix PASTIS
contrast_matrix = util.pastis_contrast(aber, matrix_pastis) + coro_floor # calculating contrast with PASTIS matrix model
end_matrixpastis = time.time()
## Outputs
log.info('\n--- CONTRASTS: ---')
log.info(f'Mean contrast from E2E: {contrast_jwst}')
log.info(f'Contrast from matrix PASTIS: {contrast_matrix}')
log.info('\n--- RUNTIMES: ---')
log.info(f'E2E: {end_e2e-start_e2e}sec = {(end_e2e-start_e2e)/60}min')
log.info(f'Matrix PASTIS: {end_matrixpastis-start_matrixpastis}sec = {(end_matrixpastis-start_matrixpastis)/60}min')
end_time = time.time()
runtime = end_time - start_time
log.info(f'Runtime for contrast_calculation_simple.py: {runtime} sec = {runtime/60} min')
return contrast_jwst, contrast_matrix | 750287224184cf6f49394b5bceee3307fe790810 | 3,650,310 |
from collections import defaultdict
from itertools import chain
def ordinal_mapper(fh, coords, idmap, fmt=None, n=1000000, th=0.8,
prefix=False):
"""Read an alignment file and match reads and genes in an ordinal system.
Parameters
----------
fh : file handle
Alignment file to parse.
coords : dict of list
Gene coordinates table.
idmap : dict of list
Gene identifiers.
fmt : str, optional
Alignment file format.
n : int, optional
Number of lines per chunk.
th : float
Minimum threshold of overlap length : alignment length for a match.
prefix : bool
Prefix gene IDs with nucleotide IDs.
See Also
--------
align.plain_mapper
Yields
------
tuple of str
Query queue.
dict of set of str
Subject(s) queue.
"""
# determine file format
fmt, head = (fmt, []) if fmt else infer_align_format(fh)
# assign parser for given format
parser = assign_parser(fmt, ext=True)
# cached list of query Ids for reverse look-up
# gene Ids are unique, but read Ids can have duplicates (i.e., one read is
# mapped to multiple loci on a genome), therefore an incremental integer
# here replaces the original read Id as its identifier
rids = []
rid_append = rids.append
# cached map of read to coordinates
locmap = defaultdict(list)
def flush():
"""Match reads in current chunk with genes from all nucleotides.
Returns
-------
tuple of str
Query queue.
dict of set of str
Subject(s) queue.
"""
# master read-to-gene(s) map
res = defaultdict(set)
# iterate over nucleotides
for nucl, locs in locmap.items():
# it's possible that no gene was annotated on the nucleotide
try:
glocs = coords[nucl]
except KeyError:
continue
# get reference to gene identifiers
gids = idmap[nucl]
# append prefix if needed
pfx = nucl + '_' if prefix else ''
# execute ordinal algorithm when reads are many
# 8 (5+ reads) is an empirically determined cutoff
if len(locs) > 8:
# merge and sort coordinates
# question is to add unsorted read coordinates into pre-sorted
# gene coordinates
# Python's Timsort algorithm is efficient for this task
queue = sorted(chain(glocs, locs))
# map reads to genes using the core algorithm
for read, gene in match_read_gene(queue):
# add read-gene pairs to the master map
res[rids[read]].add(pfx + gids[gene])
# execute naive algorithm when reads are few
else:
for read, gene in match_read_gene_quart(glocs, locs):
res[rids[read]].add(pfx + gids[gene])
# return matching read Ids and gene Ids
return res.keys(), res.values()
this = None # current query Id
target = n # target line number at end of current chunk
# parse alignment file
for i, row in enumerate(parser(chain(iter(head), fh))):
query, subject, _, length, beg, end = row[:6]
# skip if length is not available or zero
if not length:
continue
# when query Id changes and the chunk limit has been reached
if query != this and i >= target:
# flush: match currently cached reads with genes and yield
yield flush()
# re-initiate read Ids, length map and location map
rids = []
rid_append = rids.append
locmap = defaultdict(list)
# next target line number
target = i + n
# append read Id, alignment length and location
idx = len(rids)
rid_append(query)
# effective length = length * th
# -int(-x // 1) is equivalent to math.ceil(x) but faster
# this value must be >= 1
locmap[subject].extend((
(beg << 48) + (-int(-length * th // 1) << 31) + idx,
(end << 48) + idx))
this = query
# final flush
yield flush() | 955c411e608fdb3cf55d0c52350b38061f87cd3a | 3,650,311 |
def file_lines(filename):
"""
>>> file_lines('test/foo.txt')
['foo', 'bar']
"""
return text_file(filename).split() | b121ba549606adeac244b063ff679192951c2ff8 | 3,650,312 |
def repr_should_be_defined(obj):
"""Checks the obj.__repr__() method is properly defined"""
obj_repr = repr(obj)
assert isinstance(obj_repr, str)
assert obj_repr == obj.__repr__()
assert obj_repr.startswith("<")
assert obj_repr.endswith(">")
return obj_repr | 28537f4f48b402a2eba290d8ece9b765eeb9fdc3 | 3,650,313 |
import os
def read_tiff(tiff_path):
"""
Args:
tiff_path: a tiff file path
Returns:
tiff image object
"""
if not os.path.isfile(tiff_path) or not is_tiff_file(tiff_path):
raise InvalidTiffFileError(tiff_path)
return imread(tiff_path) | 63a5ae406c47ba72a46d7c4a29bde9371e6ce824 | 3,650,314 |
def changePassword():
"""
Change to a new password and email user.
URL Path:
URL Args (required):
- JSON structure of change password data
Returns:
200 OK if invocation OK.
500 if server not configured.
"""
@testcase
def changePasswordWorker():
try:
util.debugPrint("changePassword")
if not Config.isConfigured():
util.debugPrint("Please configure system")
abort(500)
urlPrefix = Config.getDefaultPath()
requestStr = request.data
accountData = json.loads(requestStr)
return jsonify(AccountsChangePassword.changePasswordEmailUser(
accountData, urlPrefix))
except:
print "Unexpected error:", sys.exc_info()[0]
print sys.exc_info()
traceback.print_exc()
util.logStackTrace("Unexpected error:" + str(sys.exc_info()[0]))
raise
return changePasswordWorker() | 0c354535b2ff6be180ddec939be533b2a87a5a94 | 3,650,315 |
from typing import Callable
from typing import Any
from typing import List
from typing import Tuple
import os
import multiprocessing.pool
from contextlib import closing
def parallel_for(
loop_callback: Callable[[Any], Any],
parameters: List[Tuple[Any, ...]],
nb_threads: int = os.cpu_count()+4
) -> List[Any]:
"""Execute a for loop body in parallel
.. note:: Race-Conditions
Executing code in parallel can lead to race-condition errors.
Arguments:
loop_callback(Callable): function callback running in the
loop body
parameters(List[Tuple]): element to execute in parallel
Returns:
(List[Any]): list of values
Examples:
.. example-code::
>>> x = lambda x: x ** 2
>>> parallel_for(x, [y for y in range(10)])
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
"""
with closing(multiprocessing.pool.ThreadPool(nb_threads)) as pool:
return pool.map(loop_callback, parameters) | e3ae62d02ee886386e939a7f4890df74f3ab4879 | 3,650,316 |
def indexName():
"""Index start page."""
return render_template('index.html') | 0340e708a82052a98e6e9e92bfde2eb04128d354 | 3,650,317 |
def translate_http_code():
"""Print given code
:return:
"""
return make_http_code_translation(app) | c9b501b57323aeb765be47af134dd2de1c1d084e | 3,650,318 |
DEBUG = False  # module-level debug switch
def dose_rate(sum_shielding, isotope, disable_buildup = False):
"""
Calculate the dose rate behind shielding barriers for a specified isotope.
The dose rate is calculated for 1 MBq.
Args:
sum_shielding: dict with shielding elements
isotope: isotope name (string)
"""
t = transmission_sum(sum_shielding, isotope, disable_buildup)
energies = ps.config.get_setting(ps.ISOTOPES)[isotope][ps.ENERGY_keV]
abundance = ps.config.get_setting(ps.ISOTOPES)[isotope][ps.ABUNDANCE]
if DEBUG:
ps.logger.debug(isotope)
ps.logger.debug('t: %s', t)
ps.logger.debug('energies: %s', energies)
ps.logger.debug('abundance: %s', abundance)
rate = H10(energy_keV=energies, abundance=t * np.array(abundance))
return rate | 37b90720564c1a15b5e4fb40cb67df556caaa742 | 3,650,319 |
import multiprocessing as mp
import warnings
def parmap(f, X, nprocs=1):
"""
parmap_fun() and parmap() are adapted from klaus se's post
on stackoverflow. https://stackoverflow.com/a/16071616/4638182
parmap allows map on lambda and class static functions.
Fall back to serial map when nprocs=1.
"""
if nprocs < 1:
raise ValueError("nprocs should be >= 1. nprocs: {}".format(nprocs))
nprocs = min(int(nprocs), mp.cpu_count())
# exception handling f
# simply ignore all exceptions. If exception occurs in parallel queue, the
# process with exception will get stuck and not be able to process
# following requests.
def ehf(x):
try:
res = f(x)
except Exception as e:
res = e
return res
# fall back on serial
if nprocs == 1:
return list(map(ehf, X))
q_in = mp.Queue(1)
q_out = mp.Queue()
proc = [mp.Process(target=_parmap_fun, args=(ehf, q_in, q_out))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
# maintain the order of X
ordered_res = [x for i, x in sorted(res)]
for i, x in enumerate(ordered_res):
if isinstance(x, Exception):
warnings.warn("{} encountered in parmap {}th arg {}".format(
x, i, X[i]))
return ordered_res | 66a498966979ca00c9a7eedfc1113a07b9076245 | 3,650,320 |
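# Usage sketch for parmap: unlike multiprocessing.Pool.map it accepts lambdas.
# Note that the queue worker _parmap_fun is defined elsewhere in the same module.
squares = parmap(lambda v: v ** 2, list(range(8)), nprocs=4)
print(squares)  # [0, 1, 4, 9, 16, 25, 36, 49]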
def is_char_token(c: str) -> bool:
"""Return true for single character tokens."""
return c in ["+", "-", "*", "/", "(", ")"] | 3d5691c8c1b9a592987cdba6dd4809cf2c410ee8 | 3,650,321 |
import numpy
def _float_arr_to_int_arr(float_arr):
"""Try to cast array to int64. Return original array if data is not representable."""
int_arr = float_arr.astype(numpy.int64)
if numpy.any(int_arr != float_arr):
# we either have a float that is too large or NaN
return float_arr
else:
return int_arr | 73643757b84ec28ed721608a2176b292d6e90837 | 3,650,322 |
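# Usage sketch for _float_arr_to_int_arr.
print(_float_arr_to_int_arr(numpy.array([1.0, 2.0, 3.0])))  # [1 2 3]   (cast to int64)
print(_float_arr_to_int_arr(numpy.array([1.5, 2.0])))       # [1.5 2. ] (left as float)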
def latest(scores: Scores) -> int:
"""The last added score."""
return scores[-1] | 393c1d9a4b1852318d622a58803fff3286db98af | 3,650,323 |
def get_dp_2m(wrfin, timeidx=0, method="cat", squeeze=True,
cache=None, meta=True, _key=None, units="degC"):
"""Return the 2m dewpoint temperature.
This functions extracts the necessary variables from the NetCDF file
object in order to perform the calculation.
Args:
wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \
iterable): WRF-ARW NetCDF
data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile`
or an iterable sequence of the aforementioned types.
timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The
desired time index. This value can be a positive integer,
negative integer, or
:data:`wrf.ALL_TIMES` (an alias for None) to return
all times in the file or sequence. The default is 0.
method (:obj:`str`, optional): The aggregation method to use for
sequences. Must be either 'cat' or 'join'.
'cat' combines the data along the Time dimension.
'join' creates a new dimension for the file index.
The default is 'cat'.
squeeze (:obj:`bool`, optional): Set to False to prevent dimensions
with a size of 1 from being automatically removed from the shape
of the output. Default is True.
cache (:obj:`dict`, optional): A dictionary of (varname, ndarray)
that can be used to supply pre-extracted NetCDF variables to the
computational routines. It is primarily used for internal
purposes, but can also be used to improve performance by
eliminating the need to repeatedly extract the same variables
used in multiple diagnostics calculations, particularly when using
large sequences of files.
Default is None.
meta (:obj:`bool`, optional): Set to False to disable metadata and
return :class:`numpy.ndarray` instead of
:class:`xarray.DataArray`. Default is True.
_key (:obj:`int`, optional): A caching key. This is used for internal
purposes only. Default is None.
units (:obj:`str`): The desired units. Refer to the :meth:`getvar`
product table for a list of available units for 'td2'. Default
is 'degC'.
Returns:
:class:`xarray.DataArray` or :class:`numpy.ndarray`: The
2m dewpoint temperature.
If xarray is enabled and the *meta* parameter is True, then the result
will be a :class:`xarray.DataArray` object. Otherwise, the result will
be a :class:`numpy.ndarray` object with no metadata.
"""
varnames=("PSFC", "Q2")
ncvars = extract_vars(wrfin, timeidx, varnames, method, squeeze, cache,
meta=False, _key=_key)
# Algorithm requires hPa
psfc = .01*(ncvars["PSFC"])
# Copy needed for the mmap nonsense of scipy.io.netcdf, which seems to
# break with every release
q2 = ncvars["Q2"].copy()
q2[q2 < 0] = 0
td = _td(psfc, q2)
return td | e16a5a3951312254eb852a5e03987aab32a91373 | 3,650,324 |
import numpy as np
def fit_uncertainty(points, lower_wave, upper_wave, log_center_wave, filter_size):
"""Performs fitting many times to get an estimate of the uncertainty
"""
mock_points = []
for i in range(1, 100):
# First, fit the points
coeff = np.polyfit(np.log10(points['rest_wavelength']),
np.random.normal(points['f_lambda'], points['err_f_lambda']), deg=2) # , w=(1/points['err_f_lambda'])
# Get the polynomial
fit_func = np.poly1d(coeff)
# x-range over which we fit
fit_wavelengths = np.arange(
np.log10(lower_wave), np.log10(upper_wave), 0.001)
# Values of the points we fit
fit_points = fit_func(fit_wavelengths)
# Indexes of the values that lie in the mock filter
fit_idx = np.logical_and(fit_wavelengths > (log_center_wave -
filter_size), fit_wavelengths < (log_center_wave + filter_size))
# Average the values in the mock filter to get the mock point
mock_sed_point = np.mean(fit_points[fit_idx])
mock_points.append(mock_sed_point)
# PERCENTILE ERROR HERE?
mock_sed_point, l_err, u_err = np.percentile(mock_points, [50, 15.7, 84.3])
return mock_sed_point, u_err - mock_sed_point, mock_sed_point - l_err | cca5193e55d7aeef710a08fb16df8c896bbeef90 | 3,650,325 |
def from_dateutil_rruleset(rruleset):
"""
Convert a `dateutil.rrule.rruleset` instance to a `Recurrence`
instance.
:Returns:
A `Recurrence` instance.
"""
rrules = [from_dateutil_rrule(rrule) for rrule in rruleset._rrule]
exrules = [from_dateutil_rrule(exrule) for exrule in rruleset._exrule]
rdates = rruleset._rdate
exdates = rruleset._exdate
dts = [r._dtstart for r in rruleset._rrule] + rruleset._rdate
if len(dts) > 0:
dts.sort()
dtstart = dts[0]
else:
dtstart = None
return Recurrence(dtstart, rrules, exrules, rdates, exdates) | cd5ab771eebbf6f68ce70a8d100ad071561541de | 3,650,326 |
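# Usage sketch for from_dateutil_rruleset (Recurrence and from_dateutil_rrule are
# defined elsewhere in the same module); the dates are arbitrary examples.
from datetime import datetime
from dateutil.rrule import WEEKLY, rrule, rruleset
rs = rruleset()
rs.rrule(rrule(WEEKLY, count=4, dtstart=datetime(2021, 1, 4)))
rs.exdate(datetime(2021, 1, 11))
recurrence = from_dateutil_rruleset(rs)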
import re
def error_038_italic_tag(text):
"""Fix the error and return (new_text, replacements_count) tuple."""
backup = text
(text, count) = re.subn(r"<(i|em)>([^\n<>]+)</\1>", "''\\2''", text, flags=re.I)
if re.search(r"</?(?:i|em)>", text, flags=re.I):
return (backup, 0)
else:
return (text, count) | b0c2b571ade01cd483a3ffdc6f5c2bbb873cd13c | 3,650,327 |
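# Usage sketch for error_038_italic_tag: matched <i>/<em> pairs become wiki '' quotes,
# while unmatched tags leave the text untouched and report zero replacements.
print(error_038_italic_tag("Some <i>italic</i> text"))  # ("Some ''italic'' text", 1)
print(error_038_italic_tag("Broken <i>italic text"))    # ('Broken <i>italic text', 0)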
def create_user():
""" Method that will create an user .
Returns:
user.id: The id of the created user
Raises:
If an error occurs it will be displayed in a error message.
"""
try:
new_user = User(name=login_session['username'], email=login_session[
'email'], picture=login_session['picture'])
db_session.add(new_user)
db_session.commit()
user = db_session.query(User).filter_by(email=login_session['email']).one()
return user.id
except Exception as e:
flash('An error has occurred: {}'.format(str(e)), 'error')
return None | e5d555fb955523c6bff811efe308030de257e05a | 3,650,328 |
import numpy
import pysam
def buildStartAndEndWigData(thisbam, LOG_EVERY_N=1000, logger=None):
"""parses a bam file for 3' and 5' ends and builds these into wig-track data
Returns a dictionary of various gathered statistics."""
def formatToWig(wigdata):
""" take in the read position data and output in wigTrack format"""
this_wigTracks = {}
for key in wigdata.keys():
track = wigTrack()
track.wigtype = "fixedStep"
track.chr = key
track.start = 1
track.step = 1
track.position = numpy.arange(len(wigdata[key]))+track.start
this_wigTracks[key] = track
this_wigTracks[key].data = wigdata[key]
this_wigData = wigData()
this_wigData.tracks = this_wigTracks
return(this_wigData)
if type(thisbam) is str:
thisbam = pysam.AlignmentFile(thisbam, "rb")
all_wigdata={
"fwd":{
"five_prime":{},
"three_prime":{}
},
"rev":{
"five_prime":{},
"three_prime":{}
}
}
chromSizes = dict(zip(thisbam.references, thisbam.lengths))
for key in chromSizes.keys():
for strand in all_wigdata.keys():
for end in all_wigdata[strand].keys():
all_wigdata[strand][end][key] = numpy.zeros(chromSizes[key])
counter=0
nlogs=0
for read in thisbam.fetch():
if read.is_reverse:
all_wigdata["rev"]["five_prime"][read.reference_name][read.reference_end-1]+=1
all_wigdata["rev"]["three_prime"][read.reference_name][read.reference_start]+=1
else:
all_wigdata["fwd"]["five_prime"][read.reference_name][read.reference_start]+=1
all_wigdata["fwd"]["three_prime"][read.reference_name][read.reference_end-1]+=1
counter+=1
if (counter % LOG_EVERY_N)==0:
msg = "processed {these} reads...".format(these=(nlogs*LOG_EVERY_N))
if logger is not None:
logger.info(msg)
else:
print(msg)
nlogs+=1
msg = "Processed {counted} reads...".format(counted=counter)
if logger is not None:
logger.info(msg)
else:
print(msg)
msg = "Formatting wig tracks..."
if logger is not None:
logger.info(msg)
else:
print(msg)
for strand in all_wigdata.keys():
for end in all_wigdata[strand].keys():
all_wigdata[strand][end] = formatToWig(all_wigdata[strand][end])
return(all_wigdata, chromSizes) | f97a2b2c54f1cf2f978a17ef2b74435153ec4369 | 3,650,329 |
import h5py
import numpy as np
import pandas as pd
from pandas import DataFrame
from pathlib import Path
from typing import List
from typing import Optional
def time_series_h5(timefile: Path, colnames: List[str]) -> Optional[DataFrame]:
"""Read temporal series HDF5 file.
If :data:`colnames` is too long, it will be truncated. If it is too short,
additional column names will be deduced from the content of the file.
Args:
timefile: path of the TimeSeries.h5 file.
colnames: names of the variables expected in :data:`timefile` (may be
modified).
Returns:
A :class:`pandas.DataFrame` containing the time series, organized by
variables in columns and the time steps in rows.
"""
if not timefile.is_file():
return None
with h5py.File(timefile, 'r') as h5f:
dset = h5f['tseries']
_, ncols = dset.shape
ncols -= 1 # first is istep
h5names = h5f['names'].asstr()[len(colnames) + 1:]
_tidy_names(colnames, ncols, h5names)
data = dset[()]
pdf = pd.DataFrame(data[:, 1:],
index=np.int_(data[:, 0]), columns=colnames)
# remove duplicated lines in case of restart
return pdf.loc[~pdf.index.duplicated(keep='last')] | e28194bcfead5b188ea947efe51fc2bac052bea9 | 3,650,330 |
def decode_jwt(token):
"""decodes a token and returns ID associated (subject) if valid"""
try:
payload = jwt.decode(token.encode(), current_app.config['SECRET_KEY'], algorithms=['HS256'])
return {"isError": False, "payload": payload["sub"]}
except jwt.ExpiredSignatureError as e:
current_app.logger.error("Token expired.")
raise ExpiredTokenError()
except jwt.InvalidTokenError as e:
current_app.logger.error("Invalid token.")
raise InvalidTokenError() | 15938fc40d2fb5b60c4ef5ccb3d6f3211fa5952f | 3,650,331 |
def format_point(point: Point) -> str:
"""Return a str representing a Point object.
Args:
point:
Point obj to represent.
Returns:
A string representing the Point, with ° for degrees, ' for minutes and " for seconds.
Latitude is written before longitude.
Example output: [30°21'12", 10°21'22"]
"""
lat = to_sexagesimal(point.latitude)
long = to_sexagesimal(point.longitude)
return f'[{lat.deg}°{lat.min}\'{lat.sec}\", {long.deg}°{long.min}\'{long.sec}\"]' | 435a13d7198e6da99306c58d35249b666a03571c | 3,650,332 |
def families_horizontal_correctors():
"""."""
return ['CH'] | a3f8de3e0d44ea72d2fb98733050b7a2d598c142 | 3,650,333 |
import requests
def variable_select_source_data_proxy(request):
"""
@summary: generic API for fetching the source data of a drop-down list
@param request:
@return:
"""
url = request.GET.get('url')
try:
response = requests.get(
url=url,
verify=False
)
except Exception as e:
logger.exception('variable select get data from url[url={url}] raise error: {error}'.format(url=url, error=e))
text = _('Exception while requesting data: {error}').format(error=e)
data = [{'text': text, 'value': ''}]
return JsonResponse(data, safe=False)
try:
data = response.json()
except Exception:
try:
content = response.content.decode(response.encoding)
logger.exception('variable select get data from url[url={url}] is not a valid JSON: {data}'.format(
url=url, data=content[:500])
)
except Exception:
logger.exception('variable select get data from url[url={url}] data is not a valid JSON'.format(url=url))
text = _('Returned data format error: not valid JSON')
data = [{'text': text, 'value': ''}]
return JsonResponse(data, safe=False) | c8d131d6c7d0e766e0a4dacd1b0086090ee02c4f | 3,650,334 |
async def select_guild_lfg_events(guild_id: int) -> list[asyncpg.Record]:
"""Gets the lfg messages for a specific guild ordered by the youngest creation date"""
select_sql = f"""
SELECT
id, message_id, creation_time, voice_channel_id
FROM
lfgmessages
WHERE
guild_id = $1
ORDER BY
creation_time ASC;"""
async with (await get_connection_pool()).acquire(timeout=timeout) as connection:
return await connection.fetch(select_sql, guild_id) | 3a1b98191b75b4ec0bbdb5942a7b5b2d8c8dca48 | 3,650,335 |
def ValueToString(descriptor, field_desc, value):
"""Renders a field value as a PHP literal.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: The type descriptor for the field value to be rendered.
value: The value of the field to be rendered.
Returns:
A PHP literal for the provided value.
"""
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if value:
return '[%s]' % ', '.join([NonRepeatedValueToString(descriptor,
field_desc, s)
for s in value])
return '[]'
return NonRepeatedValueToString(descriptor, field_desc, value) | e40815ab6e3b55e1a7cb026c33a9c9324da900b4 | 3,650,336 |
import tensorflow as tf
def get_checkpoints(run_dir, from_global_step=None, last_only=False):
"""Return all available checkpoints.
Args:
run_dir: Directory where the checkpoints are located.
from_global_step (int): Only return checkpoints after this global step.
The comparison is *strict*. If ``None``, returns all available
checkpoints.
Returns:
List of dicts (with keys ``global_step``, ``file``) with all the
checkpoints found.
Raises:
ValueError: If there are no checkpoints in ``run_dir``.
"""
# The latest checkpoint file should be the last item of
# `all_model_checkpoint_paths`, according to the CheckpointState protobuf
# definition.
# TODO: Must check if the checkpoints are complete somehow.
ckpt = tf.train.get_checkpoint_state(run_dir)
if not ckpt or not ckpt.all_model_checkpoint_paths:
raise ValueError('Could not find checkpoint in {}.'.format(run_dir))
# TODO: Any other way to get the global_step? (Same as in `checkpoints`.)
checkpoints = sorted([
{'global_step': int(path.split('-')[-1]), 'file': path}
for path in ckpt.all_model_checkpoint_paths
], key=lambda c: c['global_step'])
if last_only:
checkpoints = checkpoints[-1:]
tf.logging.info(
'Using last checkpoint in run_dir, global_step = {}'.format(
checkpoints[0]['global_step']
)
)
elif from_global_step is not None:
checkpoints = [
c for c in checkpoints
if c['global_step'] > from_global_step
]
tf.logging.info(
'Found %s checkpoints in run_dir with global_step > %s',
len(checkpoints), from_global_step,
)
else:
tf.logging.info(
'Found {} checkpoints in run_dir'.format(len(checkpoints))
)
return checkpoints | c40a89049b11bda2688cb195956db3e50bc44c06 | 3,650,337 |
import tempfile
import sys
def write_env_file(env_dict):
""" Write the env_file information to a temporary file
If there is any error, return None;
otherwise return the temp file
"""
try:
temp_file = tempfile.NamedTemporaryFile(mode='w')
for key in env_dict.keys():
val = env_dict[key]
if val is None or val=='':
continue
temp_file.write('%s=%s\n' % (key, env_dict[key]))
temp_file.flush()
return temp_file
except KeyboardInterrupt: # If Control-C etc, allow calling function to cleanup before halting the script
close_ignore_exception(temp_file)
return None
except Exception as e:
sys.stderr.write("Exception %s\n" % e)
close_ignore_exception(temp_file)
return None
return None | c53ee69cc12fafc7504f8adbbfbef40626f18427 | 3,650,338 |
def __load_txt_resource__(path):
"""
Loads a txt file template
:param path:
:return:
"""
txt_file = open(path, "r")
return txt_file | 9e3632098c297d1f6407559a86f0d8dc7b68ea75 | 3,650,339 |
def parse_cpu_spec(spec):
"""Parse a CPU set specification.
:param spec: cpu set string eg "1-4,^3,6"
Each element in the list is either a single
CPU number, a range of CPU numbers, or a
caret followed by a CPU number to be excluded
from a previous range.
:returns: a set of CPU indexes
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in spec.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available CPU ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single CPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
return cpuset_ids | fa323a2fc5c27a6645f0abfb3b4878f6ae6390ee | 3,650,340 |
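# Usage sketch for parse_cpu_spec: ranges, exclusions and single CPUs can be mixed.
print(parse_cpu_spec("1-4,^3,6"))  # {1, 2, 4, 6}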
import typing
def distance_fit_from_transits() -> typing.List[float]:
"""
This uses the observers position from full transits and then the runway positions from all
the transit lines fitted to a
"""
((x_mean, x_std), (y_mean, y_std)) = observer_position_mean_std_from_full_transits()
transits = transit_x_axis_distances(x_mean, y_mean)
times = [v.time for v in transits]
dists = [v.distance for v in transits]
popt, pcov = curve_fit(
video_utils.polynomial_3,
times,
dists,
)
return popt | 57b201b1328528191b4926a66325ca026855f09a | 3,650,341 |
import torch
def collate_fn_synthesize(batch):
"""
Create batch
Args : batch(tuple) : List of tuples / (x, c) x : list of (T,) c : list of (T, D)
Returns : Tuple of batch / Network inputs x (B, C, T), Network targets (B, T, 1)
"""
local_conditioning = len(batch[0]) >= 2
if local_conditioning:
new_batch = []
for idx in range(len(batch)):
x, c = batch[idx]
if upsample_conditional_features:
assert len(x) % len(c) == 0 and len(x) // len(c) == hop_length
new_batch.append((x, c))
batch = new_batch
else:
pass
input_lengths = [len(x[0]) for x in batch]
max_input_len = max(input_lengths)
# x_batch : [B, T, 1]
x_batch = np.array([_pad_2d(x[0].reshape(-1, 1), max_input_len) for x in batch], dtype=np.float32)
assert len(x_batch.shape) == 3
y_batch = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.float32)
assert len(y_batch.shape) == 2
if local_conditioning:
max_len = max([len(x[1]) for x in batch])
c_batch = np.array([_pad_2d(x[1], max_len) for x in batch], dtype=np.float32)
assert len(c_batch.shape) == 3
# (B x C x T')
c_batch = torch.tensor(c_batch).transpose(1, 2).contiguous()
else:
c_batch = None
# Convert to channel first i.e., (B, C, T) / C = 1
x_batch = torch.tensor(x_batch).transpose(1, 2).contiguous()
# Add extra axis i.e., (B, T, 1)
y_batch = torch.tensor(y_batch).unsqueeze(-1).contiguous()
input_lengths = torch.tensor(input_lengths)
return x_batch, y_batch, c_batch, input_lengths | b784a45eb753d5d84ae3bf18fd4e7a09e891753d | 3,650,342 |
def max_(context, mapping, args, **kwargs):
"""Return the max of an iterable"""
if len(args) != 1:
# i18n: "max" is a keyword
raise error.ParseError(_("max expects one argument"))
iterable = evalwrapped(context, mapping, args[0])
try:
return iterable.getmax(context, mapping)
except error.ParseError as err:
# i18n: "max" is a keyword
hint = _("max first argument should be an iterable")
raise error.ParseError(bytes(err), hint=hint) | 068f77031fb83dc9d88446863e39f38c14a7478d | 3,650,343 |
from rclpy.duration import Duration
from typing import Optional
from typing import Dict
def dict_to_duration(time_dict: Optional[Dict[str, int]]) -> Duration:
"""Convert a QoS duration profile from YAML into an rclpy Duration."""
if time_dict:
try:
return Duration(seconds=time_dict['sec'], nanoseconds=time_dict['nsec'])
except KeyError:
raise ValueError(
'Time overrides must include both seconds (sec) and nanoseconds (nsec).')
else:
return Duration() | 7b20ed1ecbe496f55426562e791e591d8c5104e5 | 3,650,344 |
def gen_ex_tracking_df(subj_dir):
"""Generate subject tracking error data frames from time series CSVs.
This method generates tracking error (Jaccard distance, CSA, T, AR) data
frames from raw time series CSV data for a single subject.
Args:
subj_dir (str): path to subject data directory, including final '/'
Returns:
pandas.DataFrame mean errors (Jaccard distance, CSA, T, AR)
pandas.DataFrame standard deviation errors (Jaccard distance, CSA, T, AR)
"""
df_iou = gen_jd_vals(subj_dir)
df_csa = gen_def_err_vals(subj_dir, 'CSA')
df_t = gen_def_err_vals(subj_dir, 'T')
df_tr = gen_def_err_vals(subj_dir, 'AR')
df_iou_mean = df_iou.mean().to_frame()
df_csa_mean = df_csa.mean().to_frame()
df_t_mean = df_t.mean().to_frame()
df_tr_mean = df_tr.mean().to_frame()
df_means = df_iou_mean.copy()
df_means.rename(columns={0: 'Jaccard Distance'}, inplace=True)
df_means['CSA'] = df_csa_mean[0]
df_means['T'] = df_t_mean[0]
df_means['AR'] = df_tr_mean[0]
df_iou_std = df_iou.std().to_frame()
df_csa_std = df_csa.std().to_frame()
df_t_std = df_t.std().to_frame()
df_tr_std = df_tr.std().to_frame()
df_stds = df_iou_std.copy()
df_stds.rename(columns={0: 'Jaccard Distance'}, inplace=True)
df_stds['CSA'] = df_csa_std[0]
df_stds['T'] = df_t_std[0]
df_stds['AR'] = df_tr_std[0]
return df_means, df_stds | 259f2533bf8a0d9a03c250fc937c4a99903c8994 | 3,650,345 |
import numpy as np
import sklearn.metrics
def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Compute the MSE (Mean Squared Error)."""
return sklearn.metrics.mean_squared_error(y_true, y_pred) | f9c669b04bc6a44bcd983c79dec5d630a6acbd09 | 3,650,346 |
import torch
def policy_improvement(env, V, gamma):
"""
Obtain an improved policy based on the values
@param env: OpenAI Gym environment
@param V: policy values
@param gamma: discount factor
@return: the policy
"""
n_state = env.observation_space.n
n_action = env.action_space.n
policy = torch.zeros(n_state)
for state in range(n_state):
v_actions = torch.zeros(n_action)
for action in range(n_action):
for trans_prob, new_state, reward, _ in env.env.P[state][action]:
v_actions[action] += trans_prob * (reward + gamma * V[new_state])
policy[state] = torch.argmax(v_actions)
return policy | 10587e5d4fb08158eff06a4305de6c02fc2d878c | 3,650,347 |
import numpy as np
def loudness_zwst_freq(spectrum, freqs, field_type="free"):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
Normatice reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
The code is based on BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
preceding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
spectrum : numpy.array
A RMS frequency spectrum, size (Nfreq, Ntime)
freqs : list
List of the corresponding frequencies, size (Nfreq,) or (Nfreq, Ntime)
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse")
Outputs
-------
N : float or numpy.array
Calculated loudness [sones], size (Ntime,).
N_specific : numpy.ndarray
Specific loudness [sones/bark], size (Nbark, Ntime).
bark_axis : numpy.array
Frequency axis in bark, size (Nbark,).
"""
if len(spectrum) != len(freqs):
raise ValueError('Input spectrum and frequency axis must have the same shape')
# Compute third octave band spectrum
spec_third, _ = noct_synthesis(spectrum, freqs, fmin=24, fmax=12600)
# Compute dB values
spec_third = amp2db(spec_third, ref=2e-5)
# Compute main loudness
Nm = _main_loudness(spec_third, field_type)
#
# Computation of specific loudness pattern and integration of overall
# loudness by attaching slopes towards higher frequencies
N, N_specific = _calc_slopes(Nm)
# Define Bark axis
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
return N, N_specific, bark_axis | 391e33784e355675b68b3e95ade084cbcb86d5b5 | 3,650,348 |
import sys
def func(back=2):
"""
Returns the function name
"""
return "{}".format(sys._getframe(back).f_code.co_name) | 97332c32195418e4bf6dd6427adabbc5c4360580 | 3,650,349 |
def normU(u):
"""
A function to scale Uranium map. We don't know what this function should be
"""
return u | e4bd83a26c502e9129d18091e807e17ab3294fd1 | 3,650,350 |
import numpy as np
from scipy.optimize import fsolve
def exact_riemann_solution(q_l, q_r, gamma=1.4, phase_plane_curves=False):
"""Return the exact solution to the Riemann problem with initial states
q_l, q_r. The solution is given in terms of a list of states, a list of
speeds (each of which may be a pair in case of a rarefaction fan), and a
function reval(xi) that gives the solution at a point xi=x/t.
The input and output vectors are the conserved quantities.
If phase_plane_curves==True, then the appropriate Hugoniot Locus and/or
integral curve is returned for the 1- and 3-waves.
"""
rho_l, u_l, p_l = conservative_to_primitive(*q_l)
rho_r, u_r, p_r = conservative_to_primitive(*q_r)
# Compute left and right state sound speeds
c_l = sound_speed(rho_l, p_l, gamma)
c_r = sound_speed(rho_r, p_r, gamma)
ws = np.zeros(5)
wave_types = ['', 'contact', '']
if rho_l == 0:
# 3-rarefaction connecting right state to vacuum
p = 0.
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_r = integral_curve_3(0., rho_r, u_r, p_r, gamma)
u = u_vacuum_r
ws[0] = 0.
ws[1] = 0.
ws[2] = 0.
ws[3] = u_vacuum_r
ws[4] = u_r + c_r
wave_types = ['contact', 'contact', 'raref']
elif rho_r == 0:
# 1-rarefaction connecting left state to vacuum
p = 0
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_l = integral_curve_1(0., rho_l, u_l, p_l, gamma)
u = u_vacuum_l
ws[0] = u_l - c_l
ws[1] = u_vacuum_l
ws[2] = 0.
ws[3] = 0.
ws[4] = 0.
wave_types = ['raref', 'contact', 'contact']
elif u_l - u_r + 2*(c_l+c_r)/(gamma-1.) < 0:
# Middle states are vacuum
p = 0.
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_l = integral_curve_1(0., rho_l, u_l, p_l, gamma)
u_vacuum_r = integral_curve_3(0., rho_r, u_r, p_r, gamma)
u = 0.5*(u_vacuum_l + u_vacuum_r)
ws[0] = u_l - c_l
ws[1] = u_vacuum_l
ws[2] = u
ws[3] = u_vacuum_r
ws[4] = u_r + c_r
wave_types = ['raref', 'contact', 'raref']
else:
# Check whether the 1-wave is a shock or rarefaction
def phi_l(p):
if p >= p_l: return hugoniot_locus_1(p, rho_l, u_l, p_l, gamma)
else: return integral_curve_1(p, rho_l, u_l, p_l, gamma)
# Check whether the 3-wave is a shock or rarefaction
def phi_r(p):
if p >= p_r: return hugoniot_locus_3(p, rho_r, u_r, p_r, gamma)
else: return integral_curve_3(p, rho_r, u_r, p_r, gamma)
phi = lambda p: phi_l(p)-phi_r(p)
exp = (1.-gamma)/(2.*gamma)
guess = ((c_l + c_r - (gamma-1.)*(u_r-u_l)/2.)/(c_l*p_l**exp+c_r*p_r**exp))**(-1./exp)
# Compute middle state p, u by finding curve intersection
p, info, ier, msg = fsolve(phi, guess, full_output=True, xtol=1.e-14)
# For strong rarefactions, sometimes fsolve needs help
if ier != 1:
p, info, ier, msg = fsolve(phi, guess, full_output=True, factor=0.1, xtol=1.e-10)
# This should not happen:
if ier != 1:
print('Warning: fsolve did not converge.')
print(msg)
u = phi_l(p)
ws[2] = u
# Find shock and rarefaction speeds
if p > p_l:
wave_types[0] = 'shock'
rho_l_star = rho_l*(1+beta(gamma)*p/p_l)/(p/p_l+beta(gamma))
ws[0] = (rho_l*u_l - rho_l_star*u)/(rho_l - rho_l_star)
ws[1] = ws[0]
else:
wave_types[0] = 'raref'
rho_l_star = (p/p_l)**(1./gamma) * rho_l
c_l_star = sound_speed(rho_l_star, p, gamma)
ws[0] = u_l - c_l
ws[1] = u - c_l_star
if p > p_r:
wave_types[2] = 'shock'
rho_r_star = rho_r*(1+beta(gamma)*p/p_r)/(p/p_r+beta(gamma))
ws[4] = (rho_r*u_r - rho_r_star*u)/(rho_r - rho_r_star)
ws[3] = ws[4]
else:
wave_types[2] = 'raref'
rho_r_star = (p/p_r)**(1./gamma) * rho_r
c_r_star = sound_speed(rho_r_star, p, gamma)
ws[3] = u + c_r_star
ws[4] = u_r + c_r
# Find solution inside rarefaction fans (in primitive variables)
def raref1(xi):
u1 = ((gamma-1.)*u_l + 2*(c_l + xi))/(gamma+1.)
rho1 = (rho_l**gamma*(u1-xi)**2/pospart(gamma*p_l))**(1./(gamma-1.))
p1 = p_l*(rho1/pospart(rho_l))**gamma
return rho1, u1, p1
def raref3(xi):
u3 = ((gamma-1.)*u_r - 2*(c_r - xi))/(gamma+1.)
rho3 = (rho_r**gamma*(xi-u3)**2/pospart(gamma*p_r))**(1./(gamma-1.))
p3 = p_r*(rho3/pospart(rho_r))**gamma
return rho3, u3, p3
q_l_star = np.squeeze(np.array(primitive_to_conservative(rho_l_star,u,p)))
q_r_star = np.squeeze(np.array(primitive_to_conservative(rho_r_star,u,p)))
states = np.column_stack([q_l,q_l_star,q_r_star,q_r])
speeds = [[], ws[2], []]
if wave_types[0] in ['shock','contact']:
speeds[0] = ws[0]
else:
speeds[0] = (ws[0],ws[1])
if wave_types[2] in ['shock','contact']:
speeds[2] = ws[3]
else:
speeds[2] = (ws[3],ws[4])
def reval(xi):
r"""Returns the Riemann solution in primitive variables for any
value of xi = x/t.
"""
rar1 = raref1(xi)
rar3 = raref3(xi)
rho_out = (xi<=ws[0] )*rho_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[0] \
+ (xi>ws[1])*(xi<=ws[2] )*rho_l_star \
+ (xi>ws[2]) *(xi<=ws[3])*rho_r_star \
+ (xi>ws[3])*(xi<=ws[4])*rar3[0] \
+ (xi>ws[4] )*rho_r
u_out = (xi<=ws[0] )*u_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[1] \
+ (xi>ws[1])*(xi<=ws[2] )*u \
+ (xi>ws[2] )*(xi<=ws[3])*u \
+ (xi>ws[3])*(xi<=ws[4])*rar3[1] \
+ (xi>ws[4] )*u_r
p_out = (xi<=ws[0] )*p_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[2] \
+ (xi>ws[1])*(xi<=ws[2] )*p \
+ (xi>ws[2] )*(xi<=ws[3])*p \
+ (xi>ws[3])*(xi<=ws[4])*rar3[2] \
+ (xi>ws[4] )*p_r
return primitive_to_conservative(rho_out,u_out,p_out)
if phase_plane_curves:
if wave_types[0] == 'raref':
phi1 = lambda p: integral_curve_1(p, rho_l, u_l, p_l, gamma)
elif wave_types[0] == 'shock':
phi1 = lambda p: hugoniot_locus_1(p, rho_l, u_l, p_l, gamma)
else:
phi1 = lambda p: p
if wave_types[2] == 'raref':
phi3 = lambda p: integral_curve_3(p, rho_r, u_r, p_r, gamma)
elif wave_types[2] == 'shock':
phi3 = lambda p: hugoniot_locus_3(p, rho_r, u_r, p_r, gamma)
else:
phi3 = lambda p: p
return states, speeds, reval, wave_types, (p, phi1, phi3)
else:
return states, speeds, reval, wave_types | a5baa391d88a56026cc02a1a1fa841325e712ea0 | 3,650,351 |
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import splev
def show_edge_scatter(N, s1, s2, t1, t2, d, dmax=None, fig_ax=None):
"""Draw the cell-edge contour and the displacement vectors.
The contour is drawn using a scatter plot to color-code the displacements."""
if fig_ax is None:
fig, ax = plt.subplots()
else:
fig, ax = fig_ax
plt.figure(fig.number)
# Evaluate splines at window locations and on fine-resolution grid
c1 = splineutils.splevper(t1, s1)
c2 = splineutils.splevper(t2, s2)
c1p = splev(np.linspace(0, 1, N + 1), s1)
c2p = splev(np.linspace(0, 1, N + 1), s2)
# Interpolate displacements
# d = 0.5 + 0.5 * d / np.max(np.abs(d))
if len(d) < N + 1:
d = np.interp(np.linspace(0, 1, N + 1), t1, d, period=1)
if dmax is None:
dmax = np.max(np.abs(d))
if dmax == 0:
dmax = 1
# Plot results
# matplotlib.use('PDF')
lw = 1
s = 1 # Scaling factor for the vectors
ax.plot(c1p[0], c1p[1], "b", zorder=50, lw=lw)
ax.plot(c2p[0], c2p[1], "r", zorder=100, lw=lw)
# plt.scatter(c1p[0], c1p[1], c=d, cmap='bwr', vmin=-dmax, vmax=dmax, zorder=50, s1=lw)
# # plt.colorbar(label='Displacement [pixels]')
for j in range(len(t2)):
ax.arrow(
c1[0][j],
c1[1][j],
s * (c2[0][j] - c1[0][j]),
s * (c2[1][j] - c1[1][j]),
color="y",
zorder=200,
lw=lw,
)
# plt.arrow(c1[0][j], c1[1][j], s1 * u[0][j], s1 * u[1][j], color='y', zorder=200, lw=lw) # Show normal to curve
ax.arrow(
c1[0][0],
c1[1][0],
s * (c2[0][0] - c1[0][0]),
s * (c2[1][0] - c1[1][0]),
color="c",
zorder=400,
lw=lw,
)
fig.tight_layout()
return fig, ax | d0575ec425828e895f24c3ff8cbf9f472ba62947 | 3,650,352 |
def get_A2_const(alpha1, alpha2, lam_c, A1):
"""Function to compute the constant A2.
Args:
alpha1 (float): The alpha1 parameter of the WHSCM.
alpha2 (float): The alpha2 parameter of the WHSCM.
lam_c (float): The switching point between the
two exponents of the double power-laws
in the WHSCM.
A1 (float): The A1 constant of the WHSCM.
Returns:
A2 (float): The A2 constant of the WHSCM.
"""
A2 = A1 * (lam_c**(alpha2 - alpha1))
return A2 | 16fe12e9ef9d72cfe7250cf840e222512409d377 | 3,650,353 |
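# Usage sketch for get_A2_const with arbitrary illustrative parameters:
# A2 = A1 * lam_c ** (alpha2 - alpha1) = 1.0 * 10.0 ** 1.0 = 10.0
print(get_A2_const(alpha1=2.0, alpha2=3.0, lam_c=10.0, A1=1.0))  # 10.0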
import argparse
def args_parser():
"""
Parese command line arguments
Args:
opt_args: Optional args for testing the function. By default sys.argv is used
Returns:
args: Dictionary of args.
Raises:
ValueError: Raises an exception if some arg values are invalid.
"""
# Construct the parser
parser = argparse.ArgumentParser(description='NConv')
# Mode selection
parser.add_argument('--neighbor', type=int, default=2, help='the neighbors will be considered')
parser.add_argument('--machine', type=str, default="local", help='choose the training machine, local or remote')
parser.add_argument('--num-channels', type=int, help='choose the number of channels in the model')
parser.add_argument('--output_type', type=str, default="normal",
help='choose the meaning of output tensor, rgb or normal')
parser.add_argument('--args', '-a', type=str, default='', choices=['defaults', 'json'],
help='How to read args? (json file or dataset defaults)')
parser.add_argument('--exp', '--e', help='Experiment name')
parser.add_argument('--workspace', '--ws', default='', type=str, help='Workspace name')
parser.add_argument('--resume', default=None, type=str, metavar='PATH',
help='Path to latest checkpoint (default: none)')
########### General Dataset arguments ##########
parser.add_argument('--dataset-path', type=str, default='', help='Dataset path.')
parser.add_argument('--batch_size', '-b', default=8, type=int, help='Mini-batch size (default: 8)')
parser.add_argument('--train-on', default='full', type=str, help='The number of images to train on from the data.')
parser.add_argument('--sharp_penalty', default=1.5, type=float, help='penalty of sharp normal prediction')
########### KITTI-Depth arguments ###########
parser.add_argument('--raw-kitti-path', type=str, default='', help='Dataset path')
parser.add_argument('--selval-ds', default='selval', type=str, choices=['selval, selval, test'],
help='Which set to evaluate on? ' + ' | '.join(['selval, selval, test']) + ' (default: selval)')
parser.add_argument('--norm-factor', default=256, type=float,
help='Normalization factor for the input data (default: 256)')
parser.add_argument('--train-disp', default=False, type=bool,
help='Train on disparity (1/depth) (default: False)')
########### Training arguments ###########
parser.add_argument('--epochs', default=20, type=int,
help='Total number of epochs to run (default: 30)')
parser.add_argument('--optimizer', '-o', default='adam', choices=optimizers_list,
help='Optimizer method: ' + ' | '.join(optimizers_list) + ' (default: sgd)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='Initial learning rate (default 0.001)')
parser.add_argument('--momentum', default=0.9, type=float, help='SGD momentum.')
parser.add_argument('--lr-scheduler', default='step', choices=lr_scheduler_list,
help='LR scheduler method: ' + ' | '.join(lr_scheduler_list) + ' (default: step)')
parser.add_argument('--lr-decay-step', default=5, type=int,
help='Learning rate decay step (default: 20)')
parser.add_argument('--lr-decay-factor', default=0.1, type=float,
help='Learning rate decay factor(default: 0.1)')
parser.add_argument('--weight-decay', '--wd', default=0, type=float,
help='Weight decay (default: 0)')
parser.add_argument('--loss', '-l', default='l1', choices=losses_list,
help='Loss function: ' + ' | '.join(losses_list) + ' (default: l1)')
parser.add_argument('--penalty', '-pena', default=1.2, help='penalty of output value which out of range [0-255]')
########### Logging ###########
parser.add_argument('--print-freq', default=10, type=int,
help='Printing evaluation criterion frequency (default: 10)')
parser.add_argument('--tb_log', default=False, type=bool,
help='Log to Tensorboard (default: False)')
parser.add_argument('--tb_freq', default=1000, type=int,
help='Logging Frequence to Tensorboard (default: 1000)')
parser.add_argument('--save-selval-imgs', default=False, type=bool,
help='A flag for saving validation images (default: False)')
parser.add_argument('--eval_uncert', default=False, type=bool,
help='Evaluate uncertainty or not')
parser.add_argument('--angle_loss', default=False, type=bool,
help='Calculate angle loss and plot')
# Parse the arguments
args = parser.parse_args()
args = initialize_args(args)
return args | 10cfb19844e380d683bb7d50b6752e8d1b2a62eb | 3,650,354 |
import tensorflow as tf
def _parse_seq_tf_example(example, uint8_features, shapes):
"""Parse tf.Example containing one or two episode steps."""
def to_feature(key, shape):
if key in uint8_features:
return tf.io.FixedLenSequenceFeature(
shape=[], dtype=tf.string, allow_missing=True)
else:
return tf.io.FixedLenSequenceFeature(
shape=shape, dtype=tf.float32, allow_missing=True)
feature_map = {}
for k, v in shapes.items():
feature_map[k] = to_feature(k, v)
parsed = tf.io.parse_single_example(example, features=feature_map)
observation = {}
restructured = {}
for k in parsed.keys():
if 'observation' not in k:
restructured[k] = parsed[k]
continue
if k in uint8_features:
observation[k.replace('observation/', '')] = tf.reshape(
tf.io.decode_raw(parsed[k], out_type=tf.uint8), (-1,) + shapes[k])
else:
observation[k.replace('observation/', '')] = parsed[k]
restructured['observation'] = observation
restructured['length'] = tf.shape(restructured['action'])[0]
return restructured | 5e0e4a6d3f26c28eb6e5dfe12e9295eb5b53979c | 3,650,355 |
def unique_list(a_list, unique_func=None, replace=False):
"""Unique a list like object.
- collection: list like object
- unique_func: the filter functions to return a hashable sign for unique
- replace: the following replace the above with the same sign
Return the unique subcollection of collection.
Example:
data = [(1, 2), (2, 1), (2, 3), (1, 2)]
unique_func = lambda x: tuple(sorted(x))
unique(data) -> [(1, 2), (2, 1), (2, 3)]
unique(data, unique_func) -> [(1, 2), (2, 3)]
unique(data, unique_func, replace=True) -> [(2, 1), (2, 3)]
"""
unique_func = unique_func or (lambda x: x)
result = {}
for item in a_list:
hashable_sign = unique_func(item)
if hashable_sign not in result or replace:
result[hashable_sign] = item
return list(result.values()) | 8d7957a8dffc18b82e8a45129ba3634c28dd0d52 | 3,650,356 |
import numpy as np
def calculate_attitude_angle(eccentricity_ratio):
"""Calculates the attitude angle based on the eccentricity ratio.
Parameters
----------
eccentricity_ratio: float
The ratio between the journal displacement, called just eccentricity, and
the radial clearance.
Returns
-------
float
Attitude angle
Examples
--------
>>> from ross.fluid_flow.fluid_flow import fluid_flow_example
>>> my_fluid_flow = fluid_flow_example()
>>> calculate_attitude_angle(my_fluid_flow.eccentricity_ratio) # doctest: +ELLIPSIS
1.5...
"""
return np.arctan(
(np.pi * (1 - eccentricity_ratio ** 2)**(1/2)) /
(4 * eccentricity_ratio)
) | 24dcb463a0ebdcab8582309bf9ff44fdbfc44686 | 3,650,357 |
import torch
import pickle
def enc_obj2bytes(obj, max_size=4094):
"""
Encode Python objects to PyTorch byte tensors
"""
assert max_size <= MAX_SIZE_LIMIT
byte_tensor = torch.zeros(max_size, dtype=torch.uint8)
obj_enc = pickle.dumps(obj)
obj_size = len(obj_enc)
if obj_size > max_size:
raise Exception(
'objects too large: object size {}, max size {}'.format(
obj_size, max_size
)
)
byte_tensor[0] = obj_size // 256
byte_tensor[1] = obj_size % 256
byte_tensor[2:2+obj_size] = torch.ByteTensor(list(obj_enc))
return byte_tensor | bca30d1a88db42f66bdd978386a1c4d24c0c790b | 3,650,358 |
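# Usage sketch for enc_obj2bytes (assumption: MAX_SIZE_LIMIT is a module-level
# constant not shown above; a matching decoder would read the 2-byte size header
# and unpickle the payload).
payload = {"question_id": 42, "answer": "left"}
tensor = enc_obj2bytes(payload)
print(tensor.shape, tensor.dtype)  # torch.Size([4094]) torch.uint8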
import numpy as np
def featCompression (feats, deltas, deltas2):
"""
Returns augmented feature vectors for all cases.
"""
feats_total = np.zeros (78)
for i in range (len (feats)):
row_total = np.array ([])
feat_mean = np.mean (np.array (feats[i]), axis = 0)
delt_mean = np.mean (np.array (deltas[i]), axis = 0)
delt2_mean = np.mean (np.array (deltas2[i]), axis = 0)
feat_std = np.std (np.array (feats[i]), axis = 0)
delt_std = np.std (np.array (deltas[i]), axis = 0)
delt2_std = np.std (np.array (deltas2[i]), axis = 0)
row_total = np.hstack ((feat_mean, feat_std, delt_mean, delt_std, \
delt2_mean, delt2_std))
feats_total = np.vstack ((feats_total, row_total))
return feats_total[1:, :] | 033b53fec9cf920daadb3ba5bef2fcce7cc11d21 | 3,650,359 |
def _array_indexing(array, key, key_dtype, axis):
"""Index an array or scipy.sparse consistently across NumPy version."""
if np_version < parse_version('1.12') or issparse(array):
# Remove the check for NumPy when using >= 1.12
# check if we have an boolean array-likes to make the proper indexing
if key_dtype == 'bool':
key = np.asarray(key)
if isinstance(key, tuple):
key = list(key)
return array[key] if axis == 0 else array[:, key] | b8c04647ad79ce8ffd973d2b13fe7231854822de | 3,650,360 |
def get_stock_ledger_entries(previous_sle, operator=None,
order="desc", limit=None, for_update=False, debug=False, check_serial_no=True):
"""get stock ledger entries filtered by specific posting datetime conditions"""
conditions = " and timestamp(posting_date, posting_time) {0} timestamp(%(posting_date)s, %(posting_time)s)".format(
operator)
if previous_sle.get("warehouse"):
conditions += " and warehouse = %(warehouse)s"
elif previous_sle.get("warehouse_condition"):
conditions += " and " + previous_sle.get("warehouse_condition")
if check_serial_no and previous_sle.get("serial_no"):
conditions += " and serial_no like '{}'".format(
frappe.db.escape('%{0}%'.format(previous_sle.get("serial_no"))))
if not previous_sle.get("posting_date"):
previous_sle["posting_date"] = "1900-01-01"
if not previous_sle.get("posting_time"):
previous_sle["posting_time"] = "00:00"
if operator in (">", "<=") and previous_sle.get("name"):
conditions += " and name!=%(name)s"
return frappe.db.sql("""select *, timestamp(posting_date, posting_time) as "timestamp" from `tabStock Ledger Entry`
where item_code = %%(item_code)s
and ifnull(is_cancelled, 'No')='No'
%(conditions)s
order by timestamp(posting_date, posting_time) %(order)s, creation %(order)s
%(limit)s %(for_update)s""" % {
"conditions": conditions,
"limit": limit or "",
"for_update": for_update and "for update" or "",
"order": order
}, previous_sle, as_dict=1, debug=debug) | 14a3e7a9a0f207260a21ba4166b59d44c8ab15b9 | 3,650,361 |
import torch
from torch.utils.data import DataLoader
def test_binary(test_data, model, criterion, batch_size, device, generate_batch=None):
"""Calculate performance of a Pytorch binary classification model
Parameters
----------
test_data : torch.utils.data.Dataset
Pytorch dataset
model: torch.nn.Module
Pytorch Model
criterion: function
Loss function
batch_size : int
Number of observations per batch
device : str
Name of the device used for the model
generate_batch : function
Function defining required pre-processing steps
Returns
-------
Float
Loss score
Float:
Accuracy Score
"""
# Set model to evaluation mode
model.eval()
test_loss = 0
test_acc = 0
# Create data loader
data = DataLoader(test_data, batch_size=batch_size, collate_fn=generate_batch)
# Iterate through data by batch of observations
for feature, target_class in data:
# Load data to specified device
feature, target_class = feature.to(device), target_class.to(device).to(torch.float32)
# Set no update to gradients
with torch.no_grad():
# Make predictions
output = model(feature)
# Calculate loss for given batch
loss = criterion(output, target_class.unsqueeze(1))
# Calculate global loss
test_loss += loss.item()
            # Calculate global accuracy (threshold at 0.5, assuming the model
            # outputs probabilities for the positive class)
            test_acc += ((output > 0.5).squeeze(1).float() == target_class).sum().item()
return test_loss / len(test_data), test_acc / len(test_data) | 9bc8fefffca3d484abbcac48836fde3ca7b5287a | 3,650,362 |
import numpy as np
def linear_imputer(y, missing_values=np.nan, copy=True):
"""
Replace missing values in y with values from a linear interpolation on their position in the array.
Parameters
----------
y: list or `numpy.array`
missing_values: number, string, np.nan or None, default=`np.nan`
The placeholder for the missing values. All occurrences of `missing_values` will be imputed.
copy : bool, default=True
If True, a copy of X will be created. If False, imputation will be done in-place whenever possible.
Returns
-------
`numpy.array` : array with `missing_values` imputed
"""
x = np.arange(len(y))
if missing_values is np.nan:
mask_missing = np.isnan(y)
else:
mask_missing = y == missing_values
imputed_values = np.interp(x[mask_missing], x[~mask_missing], y[~mask_missing])
if copy:
yy = np.copy(y)
yy[mask_missing] = imputed_values
return yy
else:
y[mask_missing] = imputed_values
return y | 2557e7647e8d7e0246bb15c57605b75bf3d4131b | 3,650,363 |
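# A minimal usage sketch for linear_imputer (added for illustration, not part
# of the original snippet): interior NaNs are replaced by linear interpolation
# over the array positions.
_example_y = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
_example_filled = linear_imputer(_example_y)   # -> array([1., 2., 3., 4., 5.])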
def gap2d_cx(cx):
"""Accumulates complexity of gap2d into cx = (h, w, flops, params, acts)."""
cx["h"] = 1
cx["w"] = 1
return cx | 28f6ba5f166f0b21674dfd507871743243fb4737 | 3,650,364 |
import requests
from unittest.mock import MagicMock
from requests.exceptions import HTTPError
# Gen3Metadata is assumed to come from the Gen3 SDK (gen3.metadata).
from gen3.metadata import Gen3Metadata
def test_is_not_healthy(requests_mock):
"""
Test is not healthy response
"""
metadata = Gen3Metadata("https://example.com")
def _mock_request(url, **kwargs):
assert url.endswith("/_status")
mocked_response = MagicMock(requests.Response)
mocked_response.status_code = 500
mocked_response.text = "Not Healthy"
mocked_response.json.return_value = {}
mocked_response.raise_for_status.side_effect = HTTPError("uh oh")
return mocked_response
requests_mock.side_effect = _mock_request
response = metadata.is_healthy()
assert not response | 9b6fdb7b822ef83e0441bdafa1c87ea07ad901ad | 3,650,365 |
import numpy as np
def kernelTrans(X, A, kTup):
    """
    Map the data into a higher-dimensional space via a kernel function.
    Parameters:
        X - data matrix
        A - feature vector of a single sample
        kTup - tuple containing the kernel information
    Returns:
        K - the computed kernel K
    """
    m,n = np.shape(X)
    K = np.mat(np.zeros((m,1)))
    if kTup[0] == 'lin': K = X * A.T                     #linear kernel: inner product only
    elif kTup[0] == 'rbf':                               #Gaussian (RBF) kernel, computed from the Gaussian kernel formula
        for j in range(m):
            deltaRow = X[j,:] - A
            K[j] = deltaRow*deltaRow.T
        K = np.exp(K/(-1*kTup[1]**2))                    #compute the Gaussian kernel K
    else: raise NameError('Kernel function not recognized')
    return K                                             #return the computed kernel K | da43ab6aeff623b32d3287ad07acdc1ef5ec4bc3 | 3,650,366
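# A minimal usage sketch for kernelTrans (added for illustration, not part of
# the original snippet): evaluate an RBF kernel of width 1.3 between one
# sample and the whole (randomly generated) data matrix.
_example_X = np.mat(np.random.randn(5, 2))
_example_K = kernelTrans(_example_X, _example_X[0, :], ('rbf', 1.3))   # column vector of shape (5, 1)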
from ctypes import c_int, byref
def getBusEquipmentData(bhnd,paraCode):
"""
Retrieves the handle of all equipment of a given type (paraCode)
that is attached to bus [].
Args :
bhnd : [bus handle]
nParaCode : code data (BR_nHandle,GE_nBusHnd...)
Returns:
[][] = [len(bhnd)] [len(all equipment)]
Raises:
OlrxAPIException
"""
# get data
res = []
vt = paraCode//100
val1 = setValType(vt,0)
for bhnd1 in bhnd:
r2 = []
while ( OLXAPI_OK == OlxAPI.GetBusEquipment( bhnd1, c_int(paraCode), byref(val1) ) ) :
if vt==VT_STRING:
r2.append((val1.value).decode("UTF-8"))
else:
r2.append(val1.value)
res.append(r2)
return res | 6e0e63846c7b934edbe9de078c7ad8561766e58d | 3,650,367 |
from typing import List
from typing import Counter
import click
from collections import defaultdict
import box
import prefect
def build_and_register(
client: "prefect.Client",
flows: "List[FlowLike]",
project_id: str,
labels: List[str] = None,
force: bool = False,
) -> Counter:
"""Build and register all flows.
Args:
- client (prefect.Client): the prefect client to use
- flows (List[FlowLike]): the flows to register
- project_id (str): the project id in which to register the flows
- labels (List[str], optional): Any extra labels to set on all flows
- force (bool, optional): If false (default), an idempotency key will
be used to avoid unnecessary register calls.
Returns:
- Counter: stats about the number of successful, failed, and skipped flows.
"""
# Finish preparing flows to ensure a stable hash later
prepare_flows(flows, labels)
# Group flows by storage instance.
storage_to_flows = defaultdict(list)
for flow in flows:
storage = flow.storage if isinstance(flow, prefect.Flow) else None
storage_to_flows[storage].append(flow)
# Register each flow, building storage as needed.
# Stats on success/fail/skip rates are kept for later display
stats = Counter(registered=0, errored=0, skipped=0)
for storage, flows in storage_to_flows.items():
# Build storage if needed
if storage is not None:
click.echo(f" Building `{type(storage).__name__}` storage...")
try:
storage.build()
except Exception as exc:
click.secho(" Error building storage:", fg="red")
log_exception(exc, indent=6)
red_error = click.style("Error", fg="red")
for flow in flows:
click.echo(f" Registering {flow.name!r}... {red_error}")
stats["errored"] += 1
continue
for flow in flows:
click.echo(f" Registering {flow.name!r}...", nl=False)
try:
if isinstance(flow, box.Box):
serialized_flow = flow
else:
serialized_flow = flow.serialize(build=False)
flow_id, flow_version, is_new = register_serialized_flow(
client=client,
serialized_flow=serialized_flow,
project_id=project_id,
force=force,
)
except Exception as exc:
click.secho(" Error", fg="red")
log_exception(exc, indent=4)
stats["errored"] += 1
else:
if is_new:
click.secho(" Done", fg="green")
click.echo(f" └── ID: {flow_id}")
click.echo(f" └── Version: {flow_version}")
stats["registered"] += 1
else:
click.secho(" Skipped", fg="yellow")
stats["skipped"] += 1
return stats | 7e4634578b44b7b5a1743fa0cfab21c6c551930b | 3,650,368 |
from typing import List
import os
def generate_path_to_bulk_roms(
roms: List[models.BulkSystemROMS]) -> List[models.BulkSystemROMS]:
"""Creates the absolute path to where each rom should be downloaded"""
for system in roms:
for section in system.Sections:
            if 'number' not in section.Section:
path: str = os.path.join(os.getcwd(), "ROMS", section.Section)
section.Path = path
else:
path: str = os.path.join(os.getcwd(), "ROMS", system.System,
'#')
section.Path = path
return roms | 68c170aeef9af3d357a026229c9c5766260f70de | 3,650,369 |
import sys
import argparse
def parse_args(args=sys.argv[1:]):
""" Get the parsed arguments specified on this script.
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument(
'path_to_xml',
action='store',
type=str,
        help='Full path to TEI file.')
parser.add_argument(
'omeka_id',
action='store',
type=str,
help='omeka id')
parser.add_argument(
'name',
action='store',
type=str,
help='name')
return parser.parse_args(args) | e934cfb5f0315fb0e834bef05fecaa67168485d4 | 3,650,370 |
import xml.etree.ElementTree as ET
def creer_element_xml(nom_elem,params):
    """
    Create an element of the relation that will provide one of its attributes.
    For example, to add the FANTOIR code to a relation, the XML code must be <tag k='ref:FR:FANTOIR' v='9300500058T' />.
    To do that, we need the element name (here tag) and a dictionary of parameters named params that maps each key to a value (here
    {'k':'ref:FR:FANTOIR', 'v':'9300500058T'})
    :param nom_elem:
    :type nom_elem: str
    :param params:
    :type params: dict
    :return: the desired XML element
    :rtype: xml.etree.ElementTree.Element
    """
    # Initialize the XML element
elem = ET.Element(nom_elem)
ajouter_atrributs_element_xml(elem, params)
return elem | a4cd29b82531c7bc01864ae79de87643afeb8276 | 3,650,371 |
def get_display():
"""Getter function for the display keys
Returns:
list: list of dictionary keys
"""
return data.keys() | dcc8957faf30db15282d2e67025cd6d5fd07e9dd | 3,650,372 |
def calculate_class_probabilities(summaries, row) -> dict:
    """
    Calculate the probability of a value using the Gaussian Probability Density Function from inputs:
    summaries: prepared summaries of dataset
    row: a row in the dataset for predicting its label (a row of X_test)
    This function uses the statistics calculated from training data to calculate probabilities for the testing dataset (new data). Probabilities are calculated separately for each class: first the probability that a new X vector from the testing dataset belongs to the first class, then the probability that it belongs to the second class, and so on for all the classes identified in the training dataset.
    The probability that a new X vector from the testing dataset belongs to a class is calculated as follows:
    P(class|data) = P(X|class) * P(class)
    Note we have simplified the Bayes theorem by removing the division, as we do not strictly need a number between 0 and 1 to predict the class: we simply take the class with the maximum result of the expression above.
    It returns a dictionary where each key is a class label and the values are the probabilities of that row belonging to each class in the dataset.
"""
# total number of training records calculated from the counts stored in the summary statistics
# note that the count column has the same value for all rows, and hence picking up item [0] will suffice
total_rows = sum([summaries[label]['count'][0] for label in summaries])
probabilities = dict()
for class_value, class_summaries in summaries.items():
probabilities[class_value] = summaries[class_value]['count'][0]/float(total_rows)
for i in range(len(class_summaries)-1):
mean, stdev, _ = class_summaries.iloc[i]
# probabilities are multiplied together as they accumulate.
probabilities[class_value] *= calculate_probability(row[i], mean, stdev)
# normalize probabilities so that they sum 1
max_prob = probabilities[max(probabilities, key=probabilities.get)]
min_prob = probabilities[min(probabilities, key=probabilities.get)]
for class_value, probability in probabilities.items():
        if (max_prob - min_prob) > 0:
            probabilities[class_value] = (probability - min_prob) / (max_prob - min_prob)
else:
probabilities[class_value] = float(0.0)
sum_prob = sum(probabilities.values())
for class_value, probability in probabilities.items():
if sum_prob > 0:
probabilities[class_value] = probability / sum_prob
return probabilities | ffdff84e27fe5e76d66176d5d1c862f16b1ee494 | 3,650,373 |
import logging
import os
import pandas as pd
from tqdm import tqdm
tqdm.pandas()  # register .progress_apply on pandas objects
def discard_missing_files(df: pd.DataFrame, path_colname: str) -> pd.DataFrame:
"""Discard rows where the indicated file does not exist or has
filesize 0.
Log a warning with the number of rows discarded, if not zero.
Args:
df
path_colname: Name of `df` column that specifies paths to check
"""
logging.info('Discarding missing files')
nrows_before = len(df)
file_exists = df.loc[:, path_colname].progress_apply(
lambda x: os.path.isfile(x) and os.path.getsize(x) > 0
)
nrows_after = file_exists.sum()
if nrows_after < nrows_before:
logging.warning(f'{nrows_before - nrows_after} records discarded '
f'because file does not exist or is empty.'
)
return df.loc[file_exists, :] | 7358192a8400a4cd46522b1b6e11d9ffe03d9db7 | 3,650,374 |
from typing import Sequence
from typing import Any
def find(sequence: Sequence, target_element: Any) -> int:
"""Find the index of the first occurrence of target_element in sequence.
Args:
sequence: A sequence which to search through
target_element: An element to search in the sequence
Returns:
The index of target_element's first occurrence, -1 if it was not found or the sequence is empty
"""
if not sequence:
return -1
try:
return sequence.index(target_element)
except ValueError:
return -1 | 20edfae45baafa218d8d7f37e0409e6f4868b75d | 3,650,375 |
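# Illustrative checks for find (added for clarity, not part of the original snippet).
assert find([3, 1, 4, 1, 5], 1) == 1   # index of the first occurrence
assert find([3, 1, 4], 9) == -1        # element not present
assert find([], 9) == -1               # empty sequence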
import numpy as np
from scipy.interpolate import interp1d
def read_time_data(fname, unit):
"""
Read time data (csv) from file and load into Numpy array
"""
data = np.loadtxt(fname, delimiter=',')
t = data[:,0]
x = data[:,1]*unit
f = interp1d(t, x, kind='linear', bounds_error=False, fill_value=x[0])
return f | 588c3bc472aa05eb0ead983405f22ecdf260687c | 3,650,376 |
import sys
import sap.adt
import sap.adt.aunit
# SAPCliError is assumed to be provided by the surrounding sapcli package.
def run(connection, args):
    """Runs ABAP Unit tests for the requested object and prints the results based on command line configuration.
Exceptions:
- SAPCliError:
- when the given type does not belong to the type white list
"""
types = {'program': sap.adt.Program, 'class': sap.adt.Class, 'package': sap.adt.Package}
try:
typ = types[args.type]
except KeyError:
raise SAPCliError(f'Unknown type: {args.type}')
aunit = sap.adt.AUnit(connection)
obj = typ(connection, args.name)
response = aunit.execute(obj)
run_results = sap.adt.aunit.parse_run_results(response.text)
if args.output == 'human':
return print_results_to_stream(run_results, sys.stdout)
if args.output == 'raw':
return print_raw(response.text, run_results)
if args.output == 'junit4':
return print_junit4(run_results, args, sys.stdout)
raise SAPCliError(f'Unsupported output type: {args.output}') | 579d94aa22f9b1bd62ee757bfb4b328679901a75 | 3,650,377 |
from pathlib import Path
from typing import List
from typing import Dict
import json
def read_nli_data(p: Path) -> List[Dict]:
"""Read dataset which has been converted to nli form"""
with open(p) as f:
data = json.load(f)
return data | 2218d8dc06e3b9adfe89cb780a9ef4e7cb111d14 | 3,650,378 |
import numpy as np
def stanley_control(state, cx, cy, cyaw, last_target_idx):
"""
Stanley steering control.
:param state: (State object)
:param cx: ([float])
:param cy: ([float])
:param cyaw: ([float])
:param last_target_idx: (int)
:return: (float, int)
"""
current_target_idx, error_front_axle = calc_target_index(state, cx, cy)
if last_target_idx >= current_target_idx:
current_target_idx = last_target_idx
# theta_e corrects the heading error
theta_e = normalize_angle(cyaw[current_target_idx] - state.yaw)
# theta_d corrects the cross track error
theta_d = np.arctan2(k * error_front_axle, state.v)
# Steering control
delta = theta_e + theta_d
return delta, current_target_idx | bef2d7d075a6ef637d1423d6c85cdde3ac4d9d70 | 3,650,379 |
def get_predictions(my_map, reviews, restaurants):
"""
Get the topic predictions for all restaurants.
Parameters:
my_map - the Map object representation of the current city
reviews - a dictionary of reviews with restaurant ids for keys
restaurants - a list of restaurants of the current city
Returns:
A tuple of a dictionary of restaurant ids to topic distributions and the lda model
"""
predictor = LDAPredictor()
lda = predictor.lda
restaurant_ids_to_topics = {}
for restaurant in restaurants:
business_id = restaurant["business_id"]
review = reviews[business_id]
prediction = predictor.predict_topics(review)
restaurant_ids_to_topics[business_id] = make_topic_array_from_tuple_list(prediction, NUM_TOPICS) #topic array of weights for each topic index
normalized_restaurant_ids_to_topics = normalize_predictions(restaurant_ids_to_topics, restaurants)
return normalized_restaurant_ids_to_topics, lda | ef900b9d3526a12f64951e9d7b6f4eb80db9f6f4 | 3,650,380 |
import jwt
def decode_token(token, secret_key):
    """
    Decode a websocket token.
:param token:
:param secret_key:
:return:
"""
info = jwt.decode(token, secret_key, algorithms=['HS256'])
return info | 5807ce3428435eb0c15dd464164627fb342e46d6 | 3,650,381 |
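# A minimal usage sketch for decode_token (added for illustration, not part of
# the original snippet), assuming PyJWT and an HS256-signed token.
_example_secret = "change-me"
_example_token = jwt.encode({"user_id": 42}, _example_secret, algorithm="HS256")
_example_info = decode_token(_example_token, _example_secret)   # -> {"user_id": 42}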
import requests
from requests.adapters import HTTPAdapter
def getPatternID(pattern_url):
    """assumes pattern_url is a string, representing the URL of a ravelry pattern
e.g.https://www.ravelry.com/patterns/library/velvet-cache-cou
returns an int, the pattern ID
"""
permalink = pattern_url[41:]
with requests.Session() as a_session:
auth_name = "read-046277a3027f680ebe3fa030e755eb34"
auth_pass = "O+mL0KzfjgQ1eLA7K8FO9s28QPvr6QuiL+pOvFHZ"
a_session.auth = (auth_name, auth_pass)
ravelry_adapter = HTTPAdapter(max_retries=3)
a_session.mount('https://ravelry.com', ravelry_adapter)
base_request = "https://api.ravelry.com/patterns/search.json?query="
pattern = a_session.get(base_request+permalink)
if pattern.status_code != 200:
raise RuntimeError("Ravelry not responding as expected.\
Please check your internet connection or try again later")
pattern_id = pattern.json()['patterns'][0]['id']
return pattern_id | 0624457bad8753a9d15f8339c381ec233a207098 | 3,650,382 |
import numpy as np
def make_multibonacci_modulo(history_length, limit):
"""Creates a function that generates the Multibonacci sequence modulo n."""
def sequence_fn(seq):
return np.sum(seq[-history_length:]) % limit
return sequence_fn | 358876a91fec23853bde843c7222cd837b45ada3 | 3,650,383 |
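# A minimal usage sketch for make_multibonacci_modulo (added for illustration,
# not part of the original snippet): a Fibonacci-like sequence modulo 10.
_example_fn = make_multibonacci_modulo(history_length=2, limit=10)
_example_seq = [1, 1]
for _ in range(8):
    _example_seq.append(_example_fn(_example_seq))
# _example_seq -> [1, 1, 2, 3, 5, 8, 3, 1, 4, 5]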
def _get_key(arguments):
"""
Determine the config key based on the arguments.
:param arguments: A dictionary of arguments already processed through
this file's docstring with docopt
:return: The datastore path for the config key.
"""
# Get the base path.
if arguments.get("felix"):
base = CONFIG_PATH
elif arguments.get("node"):
base = BGP_HOST_PATH % {"hostname": hostname}
else:
base = BGP_GLOBAL_PATH
# Determine the actual name of the field. Look this up from the config
# data, otherwise just use the name.
config_data = _get_config_data(arguments)
name = arguments["<NAME>"]
if name in config_data:
name, _ = config_data[name]
return base + name | b68dd68a013ed2289ae60ab49a347858ce447964 | 3,650,384 |
def prepare_data_from_stooq(df, to_prediction = False, return_days = 5):
"""
Prepares data for X, y format from pandas dataframe
    downloaded from stooq. y is created as the closing price return_days ahead
    minus the opening price; when not predicting, it is additionally divided by
    the opening price and the trailing return_days rows are dropped.
    Keyword arguments:
    df -- data frame containing data from stooq
    to_prediction -- if True, keep all rows and skip the scaling/trimming (used when building X for prediction)
    return_days -- number of days ahead used to calculate y.
"""
if 'Wolumen' in df.columns:
df = df.drop(['Data', 'Wolumen', 'LOP'], axis=1)
else:
df = df.drop('Data', axis = 1)
y = df['Zamkniecie'].shift(-return_days) - df['Otwarcie']
if not to_prediction:
df = df.iloc[:-return_days,:]
y = y[:-return_days]/df['Otwarcie']
return df.values, y | 4b5bc45529b70ed1e8517a1d91fb5a6c2ff0b504 | 3,650,385 |
def represents_int_above_0(s: str) -> bool:
"""Returns value evaluating if a string is an integer > 0.
Args:
s: A string to check if it wil be a float.
Returns:
True if it converts to float, False otherwise.
"""
try:
val = int(s)
if val > 0:
return True
else:
return False
except ValueError:
return False | e39c4afeff8f29b86ef2a80be0af475223654449 | 3,650,386 |
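# Illustrative checks (added for clarity, not part of the original snippet).
assert represents_int_above_0("3") is True
assert represents_int_above_0("0") is False
assert represents_int_above_0("abc") is False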
import torch.utils.model_zoo as model_zoo
# ResNet and BasicBlock are assumed to be defined in the surrounding module
# (torchvision-style building blocks).
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/resnet18-5c106cde.pth'))
return model | 89b71b447890e8986493abc90cca6e2ac2f0eca8 | 3,650,387 |
def sydney():
"""Import most recent Sydney dataset"""
d = {
'zip':'Sydney_geol_100k_shape',
'snap':-1,
}
return(d) | f79a5002ef548769096d3aeb1ad2c7d77ac5ce68 | 3,650,388 |
import json
async def apiAdminRoleNotExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
"""
Optional keywords:
------------------
* msg `str` : (Default: None) * [Overwrites default]
* name `str` *
* role_id `str` *
Default message (*gets altered by optional keywords):
----------------------------------------------------
No role has been found
"""
res:dict = dict(status=400, error="admin_role_not_exists")
name:str = kwargs.get("name", "")
if name:
res["name"] = name
role_id:str = kwargs.get("role_id", "")
if role_id:
res["role_id"] = role_id
	# build message
	default_msg:str = "No role has been found"
if name:
default_msg += f" with name '{name}'"
if role_id:
default_msg += f" (Role ID: {role_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.BASE.Logger.debug(f"(API/Admin) 400 Role not found: {WebRequest.path}", require="api:400")
return cls.response(
text=json.dumps(res),
content_type="application/json",
status=400
) | 535bf4740e6d95ef1d82d9fdc97b36577425fed7 | 3,650,389 |
def format_non_date(value):
"""Return non-date value as string."""
return_value = None
if value:
return_value = value
return return_value | 9a7a13d7d28a14f5e92920cfef7146f9259315ec | 3,650,390 |
def get_loss(loss_str):
"""Get loss type from config"""
def _get_one_loss(cur_loss_str):
if hasattr(keras_losses, cur_loss_str):
loss_cls = getattr(keras_losses, cur_loss_str)
elif hasattr(custom_losses, cur_loss_str):
loss_cls = getattr(custom_losses, cur_loss_str)
else:
raise ValueError('%s is not a valid loss' % cur_loss_str)
return loss_cls
if not isinstance(loss_str, list):
loss_cls = _get_one_loss(loss_str)
return loss_cls
else:
loss_cls_list = []
for cur_loss in loss_str:
loss_cls = _get_one_loss(cur_loss)
loss_cls_list.append(loss_cls)
return loss_cls_list | 4c5714b7e8ca0becf43922a9624d9a4dccc4ac28 | 3,650,391 |
from functools import cmp_to_key
def _hashable_policy(policy, policy_list):
"""
Takes a policy and returns a list, the contents of which are all hashable and sorted.
Example input policy:
{'Version': '2012-10-17',
'Statement': [{'Action': 's3:PutObjectAcl',
'Sid': 'AddCannedAcl2',
'Resource': 'arn:aws:s3:::test_policy/*',
'Effect': 'Allow',
'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
}]}
Returned value:
[('Statement', ((('Action', (u's3:PutObjectAcl',)),
('Effect', (u'Allow',)),
('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
('Version', (u'2012-10-17',)))]
"""
# Amazon will automatically convert bool and int to strings for us
if isinstance(policy, bool):
return tuple([str(policy).lower()])
elif isinstance(policy, int):
return tuple([str(policy)])
if isinstance(policy, list):
for each in policy:
tupleified = _hashable_policy(each, [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append(tupleified)
elif isinstance(policy, string_types) or isinstance(policy, binary_type):
policy = to_text(policy)
# convert root account ARNs to just account IDs
if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
policy = policy.split(':')[4]
return [policy]
elif isinstance(policy, dict):
sorted_keys = list(policy.keys())
sorted_keys.sort()
for key in sorted_keys:
element = policy[key]
# Special case defined in
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html
if key in ["NotPrincipal", "Principal"] and policy[key] == "*":
element = {"AWS": "*"}
tupleified = _hashable_policy(element, [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append((key, tupleified))
# ensure we aren't returning deeply nested structures of length 1
if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
policy_list = policy_list[0]
if isinstance(policy_list, list):
policy_list.sort(key=cmp_to_key(_py3cmp))
return policy_list | beb5b527f38d7d1a9cecf81918a3291c0c9960ad | 3,650,392 |
import nltk
from nltk.tokenize import word_tokenize
def LF_CD_NO_VERB(c):
"""
This label function is designed to fire if a given
sentence doesn't contain a verb. Helps cut out some of the titles
hidden in Pubtator abstracts
"""
if len([x for x in nltk.pos_tag(word_tokenize(c.get_parent().text)) if "VB" in x[1]]) == 0:
if "correlates with" in c.get_parent().text:
return 0
return -1
return 0 | aa36b8a4cd00194fd1d786a7d3619ea46da0e1ab | 3,650,393 |
from typing import Tuple
def has_file_allowed_extension(filename: PATH_TYPE, extensions: Tuple[str, ...]) -> bool:
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return str(filename).lower().endswith(extensions) | 2f0d5698ecdb10533b303a637fdc03747ef8060c | 3,650,394 |
def get_account_html(netid, timestamp=None):
"""
The Libraries object has a method for getting information
about a user's library account
"""
return _get_resource(netid, timestamp=timestamp, style='html') | 7c917f6778e42a1166d4a33819c2e51378933226 | 3,650,395 |
import functools
import math
def gcd_multiple(*args) -> int:
"""Return greatest common divisor of integers in args"""
return functools.reduce(math.gcd, args) | c686b9495cd45ff047f091e31a79bedcd61f8842 | 3,650,396 |
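# Illustrative check for gcd_multiple (added for clarity, not part of the original snippet).
assert gcd_multiple(12, 18, 24) == 6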
from typing import Counter
def chars_to_family(chars):
"""Takes a list of characters and constructs a family from them. So, A1B2
would be created from ['B', 'A', 'B'] for example."""
counter = Counter(chars)
return "".join(sorted([char + str(n) for char, n in counter.items()])) | e78de779599f332045a98edde2aa0a0edc5a653b | 3,650,397 |
import configparser
def get_config_properties(config_file="config.properties", sections_to_fetch = None):
"""
    Returns the properties in config_file as a flat dict of key/value pairs.
    :param config_file: filename (string).
    :param sections_to_fetch: names of sections to fetch properties from (if specified); all sections are returned by default (iterable).
:return: A flat (no sections) Python dictionary of properties.
"""
cf = configparser.ConfigParser()
try:
cf.read(config_file)
except Exception as e:
print("[ERROR] exception {} reading configurations from file {}".format(e, config_file))
properties = {}
for section in cf.sections():
# only include args section if requested
if (not sections_to_fetch or (section in sections_to_fetch)):
for item in cf.items(section):
properties[item[0]] = item[1]
return properties | 627d21327560595bb4c2905c98604926f03ca655 | 3,650,398 |
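# A minimal usage sketch for get_config_properties (added for illustration; the
# file name and section names below are hypothetical, not from the original snippet).
with open("example_config.properties", "w") as _f:
    _f.write("[database]\nhost = localhost\n[logging]\nlevel = INFO\n")
assert get_config_properties("example_config.properties") == {"host": "localhost", "level": "INFO"}
assert get_config_properties("example_config.properties", ["database"]) == {"host": "localhost"}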
import base64
import secrets
import time
import xml.etree.ElementTree as ET
# `request` is assumed to be the Flask request object; `dynamo`, `TABLE`,
# `CLEARKEY_SYSTEM_ID` and `psshgen` are module-level helpers of the
# surrounding application.
from flask import request
def process_speke():
"""Processes an incoming request from MediaLive, which is using SPEKE
A key is created and stored in DynamoDB."""
input_request = request.get_data()
# Parse request
tree = ET.fromstring(input_request)
content_id = tree.get("id")
kid = tree[0][0].get("kid")
iv = tree[0][0].get("explicitIV") or ""
keyPeriod = tree[2][0].get("id")
index = tree[2][0].get("index")
# Create key
key = base64.b64encode(secrets.token_bytes(16)).decode("ascii")
# Expire key tomorrow
expiry = round(time.time()) + 24 * 60 * 60
# Create the pssh
systems = []
for drmsystem in tree[1]:
if drmsystem.get("systemId") == CLEARKEY_SYSTEM_ID:
pssh = psshgen.genClearkeyPssh([kid])
systems.append(
f"""<!-- ClearKey -->
<cpix:DRMSystem kid="{kid}" systemId="{CLEARKEY_SYSTEM_ID}">
<cpix:PSSH>{pssh}</cpix:PSSH>
</cpix:DRMSystem>"""
)
# Save key info in dynamo
dynamo.put_item(
TableName=TABLE,
Item={
"content_id": {"S": content_id},
"kid": {"S": kid},
"iv": {"S": iv},
"keyPeriod": {"S": keyPeriod},
"index": {"S": index},
"key": {"S": key},
"expiry": {"N": str(expiry)},
},
)
if iv:
iv = f'explicitIV="{iv}"'
# Craft response
response = f"""<cpix:CPIX xmlns:cpix="urn:dashif:org:cpix" xmlns:pskc="urn:ietf:params:xml:ns:keyprov:pskc" xmlns:speke="urn:aws:amazon:com:speke" id="{content_id}">
<cpix:ContentKeyList>
<cpix:ContentKey {iv} kid="{kid}">
<cpix:Data>
<pskc:Secret>
<pskc:PlainValue>{key}</pskc:PlainValue>
</pskc:Secret>
</cpix:Data>
</cpix:ContentKey>
</cpix:ContentKeyList>
<cpix:DRMSystemList>
{''.join(systems)}
</cpix:DRMSystemList>
<cpix:ContentKeyPeriodList>
<cpix:ContentKeyPeriod id="{keyPeriod}" index="{index}" />
</cpix:ContentKeyPeriodList>
<cpix:ContentKeyUsageRuleList>
<cpix:ContentKeyUsageRule kid="{kid}">
<cpix:KeyPeriodFilter periodId="{keyPeriod}" />
</cpix:ContentKeyUsageRule>
</cpix:ContentKeyUsageRuleList>
</cpix:CPIX>"""
return response | 3caa1c0390ea699feab2f138942b6773933fbada | 3,650,399 |