content stringlengths 35–762k | sha1 stringlengths 40–40 | id int64 0–3.66M |
---|---|---|
async def post_user(ctx: Context, user: MemberOrUser) -> t.Optional[dict]:
"""
Create a new user in the database.
Used when an infraction needs to be applied on a user absent in the guild.
"""
log.trace(f"Attempting to add user {user.id} to the database.")
payload = {
'discriminator': int(user.discriminator),
'id': user.id,
'in_guild': False,
'name': user.name,
'roles': []
}
try:
response = await ctx.bot.api_client.post('bot/users', json=payload)
log.info(f"User {user.id} added to the DB.")
return response
except ResponseCodeError as e:
log.error(f"Failed to add user {user.id} to the DB. {e}")
await ctx.send(f":x: The attempt to add the user to the DB failed: status {e.status}") | 25a6a710d7d94cc9837d9d67408a09fd6ff48596 | 4,066 |
from typing import List
def delete(ids: List = Body(...)):
"""
Deletes from an embeddings index. Returns list of ids deleted.
Args:
ids: list of ids to delete
Returns:
ids deleted
"""
try:
return application.get().delete(ids)
except ReadOnlyError as e:
raise HTTPException(status_code=403, detail=e.args[0]) from e | 9075db7c7dd174b850d1d4acbe1cdb4001162b5d | 4,067 |
def main():
""" Simple Event Viewer """
events = None
try:
events = remote('127.0.0.1', EventOutServerPort, ssl=False, timeout=5)
while True:
event_data = ''
while True:
tmp = len(event_data)
event_data += events.recv(numb=8192, timeout=1).decode('latin-1')
if tmp == len(event_data):
break
if len(event_data):
# fix the JSON mess
event_data = fix_json(event_data)
if not len(event_data):
log.warning('[Simple Event Viewer]: callback data invalid!\n')
return False
for event in event_data:
log.info('[Event From]: {}\n{}'.format(color(event.get('host'), GREEN), event))
except (PwnlibException, EOFError, KeyboardInterrupt):
log.warning("[Simple Event Viewer]")
if events:
events.close()
return False | d96500a3114785dbb408681e96d7ffb7a5c59d04 | 4,068 |
def fsi_acm_up_profiler_descending(vp1, vp2, vp3):
"""
Description:
Calculates the VEL3D Series A and L upwards velocity data product VELPTMN-VLU-DSC_L1
for the Falmouth Scientific (FSI) Acoustic Current Meter (ACM) mounted on a McLane
profiler.
Because of the orientation of the ACM stinger fingers (see Notes) upward
current velocity can be calculated in several different ways. This function
calculates the vertical velocity to be used when the profiler is descending,
avoiding the use of data from vp4 which will be contaminated by the sheet-flow
wake of the stinger's central post.
Usage:
w_fsi_dsc = fsi_acm_up_profiler_descending(vp1, vp2, vp3)
where
w_fsi_dsc = velocity up; VELPTMN-VLU-DSC_L1 [m/s]
vp1 = raw beam velocity from the port stinger finger; VELPTMN-VP1_L0 [cm/s]
vp2 = raw beam velocity from the lower stinger finger; VELPTMN-VP2_L0 [cm/s]
vp3 = raw beam velocity from the starboard stinger finger; VELPTMN-VP3_L0 [cm/s]
Implemented by:
2015-02-13: Russell Desiderio. Initial code.
Notes:
The VEL3D series A and L instruments are FSI current meters modified for use on a
McLane profiler. The FSI ACM has 4 raw beam velocities. The correspondences between
the MMP manual designations and the IDD designations are:
(Xplus, Yplus, Xminus, Yminus) (MMP manual, page G-22)
(va , vb , vc , vd ) (IDD, VEL3D series A)
(vp1 , vp2 , vp3 , vp4 ) (IDD, VEL3D series L)
(left , down , right , up ) (spatial orientation)
This is also the ordering of these parameters in telemetered and recovered data.
The MMP manual Rev E, page 8-30, incorrectly calculates the upward velocities wU and wD.
For more information see the Notes to worker function fsi_acm_horz_vel.
References:
OOI (2015). Data Product Specification for Mean Point Water Velocity
Data from FSI Acoustic Current Meters. Document Control Number
1341-00792. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00792_Data_Product_SPEC_VELPTMN_ACM_OOI.pdf)
OOI (2015). 1341-00792_VELPTMN Artifact: McLane Moored Profiler User Manual.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >>
>> REFERENCE >> Data Product Specification Artifacts >> 1341-00792_VELPTMN >>
MMP-User Manual-Rev-E-WEB.pdf)
"""
# find the x-velocity in the instrument coordinate system
x = -(vp1 + vp3) / np.sqrt(2.0)
# the z-velocity in the instrument coordinate system is also the w velocity in the
# earth coordinate system because the effects of pitch and roll are negligible.
w = -x + np.sqrt(2.0) * vp2
# change units from cm/s to m/s
return w / 100.0 | e176ff1b23bf4b5624cdc8a698d03c8d2ee1947a | 4,069 |
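A quick numerical sanity check of the formula above, using made-up beam velocities in cm/s (illustrative values only, not real instrument data):
import numpy as np

vp1 = np.array([3.0, -1.5])   # port finger, cm/s
vp2 = np.array([2.0, 0.5])    # lower finger, cm/s
vp3 = np.array([1.0, 2.5])    # starboard finger, cm/s
x = -(vp1 + vp3) / np.sqrt(2.0)   # x-velocity in instrument coordinates
w = -x + np.sqrt(2.0) * vp2       # vertical velocity, cm/s
print(w / 100.0)                  # ~[0.0566 0.0141] m/s, same as fsi_acm_up_profiler_descending(vp1, vp2, vp3)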
from typing import Tuple
def colour_name(colour: Tuple[int, int, int]) -> str:
"""Return the colour name associated with this colour value, or the empty
string if this colour value isn't in our colour list.
>>> colour_name((1, 128, 181))
'Pacific Point'
>>> colour_name(PACIFIC_POINT)
'Pacific Point'
"""
colour_names = {
PACIFIC_POINT: 'Pacific Point',
REAL_RED: 'Real Red',
OLD_OLIVE: 'Old Olive',
DAFFODIL_DELIGHT: 'Daffodil Delight'
}
if colour in colour_names:
return colour_names[colour]
else:
return '' | e596bf802b8f168e6c8d9bd9b8e4113b61e7fd58 | 4,070 |
def score_fn(subj_score, comp_score):
"""
Generates the TextStim with the updated score values
Parameters
----------
subj_score : INT
        The subject's score at the moment
comp_score : INT
        The computer's score at the moment
Returns
-------
score_stim : psychopy.visual.text.TextStim
The visual stimulus ready to be drawn.
e.g.
5 - 4
Spacebar to continue
"""
score = stimuli.score_text.format(subj_score, comp_score)
#To edit the score_text go to the stimuli.py module
score_stim = visual.TextStim(win, text = score, pos = (0, -.6))
return score_stim | 2d52b4c8d47543c6c1c98e5aa7feb8c3341ff7a4 | 4,071 |
def parse_write_beam(line):
"""
Write_beam (type -2)
If btype = −2, output particle phase-space coordinate information at given location V3(m)
into filename fort.Bmpstp with particle sample frequency Bnseg. Here, the maximum number
of phase- space files which can be output is 100. Here, 40 and 50 should be avoided
since these are used for initial and final phase space output.
"""
x = line.split()
v = v_from_line(line)
d={}
d['filename']='fort.'+x[2]
d['sample_frequency'] = int(x[1])
d['s'] = float(v[3])
if int(x[2]) in [40, 50]:
print('warning, overwriting file fort.'+x[2])
return d | 7ce86ae39a51ea8d4636e37bea26edd3caae19e8 | 4,072 |
def get_tone(pinyin):
"""Renvoie le ton du pinyin saisi par l'utilisateur.
Args:
pinyin {str}:
l'entrée pinyin par l'utilisateur
Returns:
number/None :
Si pas None, la partie du ton du pinyin (chiffre)
"""
# Prenez le dernier chaine du pinyin
tone = pinyin[-1]
# Déterminer s'il s'agit d'un type numérique
if tone.isdigit():
return tone
else:
return None | fc0b02902053b3f2470acf952812573f5125c4cf | 4,073 |
def authIfV2(sydent, request, requireTermsAgreed=True):
"""For v2 APIs check that the request has a valid access token associated with it
:returns Account|None: The account object if there is correct auth, or None for v1 APIs
:raises MatrixRestError: If the request is v2 but could not be authed or the user has not accepted terms
"""
if request.path.startswith('/_matrix/identity/v2'):
token = tokenFromRequest(request)
if token is None:
raise MatrixRestError(401, "M_UNAUTHORIZED", "Unauthorized")
accountStore = AccountStore(sydent)
account = accountStore.getAccountByToken(token)
if account is None:
raise MatrixRestError(401, "M_UNAUTHORIZED", "Unauthorized")
if requireTermsAgreed:
terms = get_terms(sydent)
if (
terms.getMasterVersion() is not None and
account.consentVersion != terms.getMasterVersion()
):
raise MatrixRestError(403, "M_TERMS_NOT_SIGNED", "Terms not signed")
return account
return None | 6c3f60df233cc030dfc3ec2658bd2a70c5a20aed | 4,074 |
def gen_rho(K):
"""The Ideal Soliton Distribution, we precompute
an array for speed
"""
return [1.0/K] + [1.0/(d*(d-1)) for d in range(2, K+1)] | 40382af047d0f2efba0eb6db17c28b92e47d3c92 | 4,075 |
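As a sketch of why gen_rho is a probability distribution: the 1/(d*(d-1)) terms telescope, so the returned list sums to 1 for any K (illustrative check only):
rho = gen_rho(1000)
# 1/K + sum_{d=2..K} (1/(d-1) - 1/d) = 1/K + (1 - 1/K) = 1
print(abs(sum(rho) - 1.0) < 1e-9)  # True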
import numpy as np
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
"""
Check that all items of arrays differ in at most N Units in the Last Place.
Parameters
----------
a, b : array_like
Input arrays to be compared.
maxulp : int, optional
The maximum number of units in the last place that elements of `a` and
`b` can differ. Default is 1.
dtype : dtype, optional
Data-type to convert `a` and `b` to if given. Default is None.
Returns
-------
ret : ndarray
Array containing number of representable floating point numbers between
items in `a` and `b`.
Raises
------
AssertionError
If one or more elements differ by more than `maxulp`.
Notes
-----
For computing the ULP difference, this API does not differentiate between
various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
is zero).
See Also
--------
assert_array_almost_equal_nulp : Compare two arrays relatively to their
spacing.
Examples
--------
>>> a = np.linspace(0., 1., 100)
>>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
"""
__tracebackhide__ = True # Hide traceback for py.test
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError("Arrays are not almost equal up to %g "
"ULP (max difference is %g ULP)" %
(maxulp, np.max(ret)))
return ret | 8ca9698e5b213f753002535061b17aeb59f12e83 | 4,076 |
def Ambient_Switching(crop_PPFDmin, Trans, consumption):
"""
Inputs: consumption (returned from Light_Sel)
"""
#How much energy can you save if you switch off when ambient lighting is enough for plant needs?
    #Assume that when ambient is higher than max recommended PPFD, the greenhouse is cloaked, allowing it to still rely on outside light.
#Assume that peak solar always happens in the afternoons
#Inputs are Detroit 2010 data for solar insolation, consumption in J, and transmissivity.
count = 0
for i in Detroit['PPFD (Micromoles/m^2/s)']:
if (i*Trans) > (crop_PPFDmin):
count = count + 1
energy_savings = count *consumption
#print("If lights are strageically shut off during highly sunny hours, then", energy_savings, "J will be saved")
return energy_savings | 5a8ab9d6eb0c6b3ddd7bb3f7efb1599b952aa345 | 4,077 |
from typing import Callable
def get_numerical_gradient(position: np.ndarray, function: Callable[[np.ndarray], float],
delta_magnitude: float = 1e-6) -> np.ndarray:
""" Returns the numerical derivative of an input function at the specified position."""
dimension = position.shape[0]
vec_low = np.zeros(dimension)
vec_high = np.zeros(dimension)
for ii in range(dimension):
delta_vec = np.zeros(dimension)
delta_vec[ii] = delta_magnitude/2.0
vec_low[ii] = function(position-delta_vec)
vec_high[ii] = function(position+delta_vec)
return (vec_high-vec_low)/delta_magnitude | a439acd3934006e2b8f9188e3204e12ef3885ae5 | 4,078 |
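A small check of the central-difference scheme against a known gradient; the quadratic test function below is made up for illustration (assumes numpy imported as np):
import numpy as np

def quadratic(x: np.ndarray) -> float:
    # f(x) = x0^2 + 3*x1, so the exact gradient is (2*x0, 3)
    return x[0] ** 2 + 3.0 * x[1]

print(get_numerical_gradient(np.array([1.5, -2.0]), quadratic))  # approximately [3. 3.]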
def stable_point(r):
"""
    repeat the process n times to
    make sure we have reached the fixed points
"""
n = 1500
x = np.zeros(n)
x[0] = np.random.uniform(0, 0.5)
for i in range(n - 1):
x[i + 1] = f(x[i], r)
print(x[-200:])
return x[-200:] | 9d9c32abfb0fea74abb32ec8cebd8c76738669b1 | 4,080 |
def vary_on_headers(*headers):
"""
A view decorator that adds the specified headers to the Vary header of the
response. Usage:
@vary_on_headers('Cookie', 'Accept-language')
def index(request):
...
Note that the header names are not case-sensitive.
"""
def decorator(func):
@wraps(func)
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, headers)
return response
return inner_func
return decorator | 9892ac00aa31b0e294f79b2d1539d6d79f3eaed7 | 4,081 |
def non_daemonic_process_pool_map(func, jobs, n_workers, timeout_per_job=None):
"""
    function for running, in parallel, a function that may not be run
    in a regular pool (because it forks processes, for example)
:param func: a function that accepts one input argument
:param jobs: a list of input arguments to func
:param n_workers: number of parallel workers
:param timeout_per_job: timeout for processing a single job
:return: list of results in the order of the "jobs" list
"""
END_TOKEN = 'END'
q_in = Queue()
q_out = Queue()
def queue_worker(q_in, q_out):
arg_in = q_in.get()
while arg_in != END_TOKEN:
try:
result = func(arg_in)
except Exception as e:
logger.exception(e)
logger.error(f'Queue worker failed on input: {arg_in}, with {str(e)}')
result = None
q_out.put((arg_in, result))
arg_in = q_in.get()
q_out.put(END_TOKEN)
# put jobs
[q_in.put(c) for c in jobs + n_workers * [END_TOKEN]]
# start workers
workers = [Process(target=queue_worker, args=(q_in, q_out))
for _ in range(n_workers)]
[w.start() for w in workers]
# wait for results
n_finished = 0
outputs = []
while n_finished < n_workers:
output = q_out.get(timeout=timeout_per_job)
logger.info(f'queue out, got: {output}')
if output == END_TOKEN:
n_finished += 1
logger.info(f'{n_finished}/{n_workers} queue workers done')
else:
outputs.append(output)
# wait for workers to join
logger.info('Joining queue workers')
[w.join() for w in workers]
logger.info('Joined all queue workers')
# sort in original order
results = [output[1] for output in
sorted(outputs, key=lambda output: jobs.index(output[0]))]
return results | 41fbdaae1e584839692eae4d5034ffd6828eb5c7 | 4,083 |
import random
import textwrap
from datetime import datetime
def result_list_handler(*args: list, **kwargs) -> str:
"""
    Handles the main search results for each query. It checks whether there are any results for this query.
    1. If there are results, it sorts and decorates them.
    2. Otherwise, it shows a message saying there were no results for this query.
:param args: 1. *[0] -> query
2. *[1] -> a list of search results objects
:param kwargs:
:return: Final decorated search results
"""
query = args[0]
search_res = args[1]
print(UD.bidirectional(u'\u0688'))
x = len([None for ch in query if UD.bidirectional(ch) in ('R', 'AL')]) / float(len(query))
# print('{t} => {c}'.format(t=query.encode('utf-8'), c='RTL' if x > 0.5 else 'LTR'))
# print(UD.bidirectional("dds".decode('utf-8')))
# direction = 'RTL' if x > 0.5 else 'LTR'
dir_str = "‏" if x > 0.5 else '‎'
fruit = random.choice(fruit_list)
print(search_res)
if int(search_res["hits"]["total"]["value"]) > 0:
text = f"<b>{_search_emoji} نتایج جستجو برای: {textwrap.shorten(query, width=100, placeholder='...')}</b>\n"
text += f"{_checkmark_emoji} نتایج بهتر پایین لیست هستند.\n\n\n"
_headphone_emoji = emoji.EMOJI_ALIAS_UNICODE[':headphone:']
for index, hit in reversed(list(enumerate(search_res['hits']['hits']))):
duration = timedelta(seconds=int(hit['_source']['duration']))
d = datetime(1, 1, 1) + duration
_performer = hit['_source']['performer']
_title = hit['_source']['title']
_file_name = hit['_source']['file_name']
if not (len(_title) < 2 or len(_performer) < 2):
name = f"{_performer} - {_title}"
elif not len(_performer) < 2:
name = f"{_performer} - {_file_name}"
else:
name = _file_name
# name = f"{_file_name if (_performer == 'None' and _title == 'None') else (_performer if _title == 'None' else _title)}".replace(
# ".mp3", "")
text += f"<b>{str(index + 1)}. {dir_str} {_headphone_emoji} {fruit if index == 0 else ''}</b>" \
f"<b>{textwrap.shorten(name, width=35, placeholder='...')}</b>\n" \
f"{dir_str} {_floppy_emoji} | {round(int(hit['_source']['file_size']) / 1000_000, 1)} {'مگابایت' if x > 0.5 else 'MB'} " \
f"{_clock_emoji} | {str(d.hour) + ':' if d.hour > 0 else ''}{d.minute}:{d.second}\n{dir_str}" \
f" دانلود: " \
f" /dl_{hit['_id']} \n" \
f" {34 * '-' if not index == 0 else ''}{dir_str} \n\n"
else:
text = f"{_traffic_light} هیچ نتیجه ای برای این عبارت پیدا نشد:" \
f"\n<pre>{textwrap.shorten(query, width=200, placeholder='...')}</pre>"
return text | c0fb0db46e3c47d24b06c290ba9d0129eb436edf | 4,084 |
def flip_mask(mask, x_flip, y_flip):
"""
Args:
        mask: binary mask
            [height, width]
"""
mask = mask.copy()
if y_flip:
mask = np.flip(mask, axis=0)
if x_flip:
mask = np.flip(mask, axis=1)
return mask | d3d783fb3e5913448f4e9d06f1f96d89559a686c | 4,085 |
def sbn2journal(sbn_record, permalink_template="http://id.sbn.it/bid/%s"):
"""
Creates a `dbmodels.Journal` instance out of a dictionary with metadata.
    :param sbn_record: the dictionary returned by `resolution.supporting_functions.enrich_metadata()`
:return: an instance of `dbmodels.Journal`
"""
bid = normalize_bid(sbn_record["codiceIdentificativo"])
metadata = {
'short_title' : sbn_record["titolo"].split(":")[0].split("/")[0].strip()
, 'full_title' : sbn_record["titolo"]
, 'bid' : bid
, 'sbn_link' : permalink_template % bid
, 'identifiers' : []
, "provenance" : "lbcatalogue"
}
if "numeri" in sbn_record:
identifiers = sbn_record["numeri"]
for identifier in identifiers:
tmp = [{
"identifier_type" : key
,"value": identifier[key]
} for key in identifier.keys()][0]
metadata["identifiers"].append(SBN_Identifier(**tmp))
return Journal(**metadata) | 059b00aeb81dd1bdbc987f31c045b6eb5aedc3b3 | 4,086 |
def median(data):
"""Calculates the median value from |data|."""
data = sorted(data)
n = len(data)
if n % 2 == 1:
        return data[n // 2]
else:
        n2 = n // 2
return (data[n2 - 1] + data[n2]) / 2.0 | ad2b3f7eb3f5446c81c6c400bc16c7833e75c05c | 4,087 |
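With the integer-division fix above, a quick usage check on odd- and even-length inputs:
print(median([5, 1, 3]))     # 3
print(median([4, 1, 3, 2]))  # 2.5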
from typing import Optional
def split_text_by_length(text: str,
                         length: Optional[int] = None,  # option 1: length + delta
delta: Optional[int] = 30,
                         max_length: Optional[int] = None,  # option 2: explicit upper/lower length bounds
min_length: Optional[int] = None,
ignore_=False):
"""
    Split the text into chunks according to the given length.
    :param text: the text to split
    :param delta: allowed deviation from `length`
    :param length: target chunk length
    :param max_length: maximum allowed chunk length.
    :param min_length: minimum allowed chunk length. Anything shorter is discarded.
    :return : a generator, yielding one chunk at a time
    :param ignore_: whether to discard the final chunk if it is too short. By default it is kept
"""
if length:
max_length = length + delta
min_length = length - delta
if not max_length or not min_length:
logger.error(f"split_text_by_length 缺少必要参数!!!")
return None
while len(text) > max_length:
s = text[:max_length]
        index = search_split_pos(s)  # best option
        if index < min_length:
            index = search_split_pos(s, keys=",")  # fallback: split at a comma
            if index == -1:
                index = (max_length + min_length) // 2  # last resort: split straight down the middle
yield text[:index]
text = text[index:]
else:
if len(text) < min_length and ignore_:
            return  # stop iterating
yield text | 60bf713a2cbe3eff85237d9637a303668a9f436b | 4,089 |
def _tfidf_fit_transform(vectors: np.ndarray):
""" Train TF-IDF (Term Frequency — Inverse Document Frequency)
Transformer & Extract TF-IDF features on training data
"""
transformer = TfidfTransformer()
features = transformer.fit_transform(vectors).toarray()
return features, transformer | c38aa629d11258291f306052ac0e4c9c2a474ebd | 4,090 |
from typing import List
def _is_missing_sites(spectra: List[XAS]):
"""
    Determines if the collection of spectra is missing any indices for the given element
"""
structure = spectra[0].structure
element = spectra[0].absorbing_element
    # Find missing symmetrically inequivalent sites
symm_sites = SymmSites(structure)
absorption_indicies = {spectrum.absorbing_index for spectrum in spectra}
missing_site_spectra_indicies = set(structure.indices_from_symbol(element)) - absorption_indicies
for site_index in absorption_indicies:
missing_site_spectra_indicies -= set(symm_sites.get_equivalent_site_indices(site_index))
return len(missing_site_spectra_indicies) != 0 | 0ae7ad0622e8ec398306e05def214b0ad40fd90f | 4,091 |
def get_objects(params, meta):
"""
Retrieve a list of objects based on their upas.
params:
guids - list of string - KBase IDs (upas) to fetch
post_processing - object of post-query filters (see PostProcessing def at top of this module)
output:
objects - list of ObjectData - see the ObjectData type description in the module docstring above.
search_time - int - time it took to perform the search on ES
access_group_narrative_info - dict of {access_group_id: narrative_info} -
Information about the workspaces in which the objects in the
results reside. This data only applies to workspace objects.
"""
# KBase convention is to wrap params in an array
if isinstance(params, list) and len(params) == 1:
params = params[0]
post_processing = params.get('post_processing', {})
search_results = _search_objects({'query': {'terms': {'_id': params['guids']}}}, meta)
objects = _get_object_data_from_search_results(search_results, post_processing)
(narrative_infos, ws_infos) = _fetch_narrative_info(search_results, meta)
return [{
'search_time': search_results['search_time'],
'objects': objects,
'access_group_narrative_info': narrative_infos,
'access_groups_info': ws_infos
}] | 17d38a1a5e09847700537076c0bfefdd55947682 | 4,092 |
def parseMidi(midifile):
"""Take a MIDI file and return the list Of Chords and Interval Vectors.
The file is first parsed, midi or xml. Then with chordify and
PC-Set we compute a list of PC-chords and Interval Vectors.
"""
mfile = ms.converter.parse(midifile)
mChords = mfile.chordify()
chordList = []
chordVectors = []
for c in mChords.recurse().getElementsByClass('Chord'):
chordList.append(c.orderedPitchClasses)
chordVectors.append(c.intervalVector)
# print('The number of chords found is : ', len(chordList))
return chordList, chordVectors | 8c803c297eee5cc29a78d6c8b864a85e8bfd3d52 | 4,095 |
from difflib import SequenceMatcher as SM

def get_similarity(s1, s2):
"""
Return similarity of both strings as a float between 0 and 1
"""
return SM(None, s1, s2).ratio() | 3964670a69a135fbc6837e9c68a2e7ac713d67dc | 4,096 |
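Usage sketch (SM is difflib.SequenceMatcher):
print(get_similarity("kitten", "sitting"))  # ~0.615
print(get_similarity("abc", "abc"))         # 1.0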
from typing import Union
from typing import Sequence
from typing import TypeVar

T = TypeVar("T")  # assumption: T is the generic type variable used by IterableValue below
def concrete_values_from_iterable(
value: Value, ctx: CanAssignContext
) -> Union[None, Value, Sequence[Value]]:
"""Return the exact values that can be extracted from an iterable.
Three possible return types:
- ``None`` if the argument is not iterable
- A sequence of :class:`Value` if we know the exact types in the iterable
- A single :class:`Value` if we just know that the iterable contains this
value, but not the precise number of them.
Examples:
- ``int`` -> ``None``
- ``tuple[int, str]`` -> ``(int, str)``
- ``tuple[int, ...]`` -> ``int``
"""
if isinstance(value, MultiValuedValue):
subvals = [concrete_values_from_iterable(val, ctx) for val in value.vals]
if any(subval is None for subval in subvals):
return None
value_subvals = [subval for subval in subvals if isinstance(subval, Value)]
seq_subvals = [
subval
for subval in subvals
if subval is not None and not isinstance(subval, Value)
]
if not value_subvals and len(set(map(len, seq_subvals))) == 1:
return [unite_values(*vals) for vals in zip(*seq_subvals)]
return unite_values(*value_subvals, *chain.from_iterable(seq_subvals))
elif isinstance(value, AnnotatedValue):
return concrete_values_from_iterable(value.value, ctx)
value = replace_known_sequence_value(value)
if isinstance(value, SequenceIncompleteValue) and value.typ is tuple:
return value.members
tv_map = IterableValue.can_assign(value, ctx)
if not isinstance(tv_map, CanAssignError):
return tv_map.get(T, UNRESOLVED_VALUE)
return None | 3acdda92df4e27d4eecf39630570b90049580d6d | 4,097 |
def quat_conjugate(quat_a):
"""Create quatConjugate-node to conjugate a quaternion.
Args:
quat_a (NcNode or NcAttrs or str or list or tuple): Quaternion to
conjugate.
Returns:
NcNode: Instance with quatConjugate-node and output-attribute(s)
Example:
::
Op.quat_conjugate(create_node("decomposeMatrix").outputQuat)
"""
created_node = _create_operation_node("quat_conjugate", quat_a)
return created_node | 2bff8b1e472ad2975ba96084843004ce86205f9f | 4,099 |
def askfont():
"""
Opens a :class:`FontChooser` toplevel to allow the user to select a font
:return: font tuple (family_name, size, \*options), :class:`~font.Font` object
"""
chooser = FontChooser()
chooser.wait_window()
return chooser.font | 8bce830a24d92be38c23ba09b6754f2e6cc6d161 | 4,100 |
def load_data(train_file, test_file):
"""
The method reads train and test data from their dataset files.
Then, it splits train data into features and labels.
Parameters
----------
train_file: directory of the file in which train data set is located
test_file: directory of the file in which test data set is located
"""
x_tra = pd.read_csv(train_file[0]).drop(columns=["ID"])
y_tra = pd.read_csv(train_file[1]).drop(columns=["ID"])
x_tst = pd.read_csv(test_file).drop(columns=["ID"])
return x_tra, y_tra, x_tst | d830f4bcd3efe467a23cab0dfa4a3cdb4694559e | 4,101 |
def guide(batch_X, batch_y=None, num_obs_total=None):
"""Defines the probabilistic guide for z (variational approximation to posterior): q(z) ~ p(z|x)
"""
# we are interested in the posterior of w and intercept
# since this is a fairly simple model, we just initialize them according
    # to our prior belief and let the optimization handle the rest
assert(jnp.ndim(batch_X) == 2)
d = jnp.shape(batch_X)[1]
z_w_loc = param("w_loc", jnp.zeros((d,)))
z_w_std = jnp.exp(param("w_std_log", jnp.zeros((d,))))
z_w = sample('w', dist.Normal(z_w_loc, z_w_std))
z_intercept_loc = param("intercept_loc", 0.)
z_interpet_std = jnp.exp(param("intercept_std_log", 0.))
z_intercept = sample('intercept', dist.Normal(z_intercept_loc, z_interpet_std))
return (z_w, z_intercept) | 889f3224424496a4f001d81b046e1279ba0efe77 | 4,103 |
def get_reddit_tables():
"""Returns 12 reddit tables corresponding to 2016"""
reddit_2016_tables = []
temp = '`fh-bigquery.reddit_posts.2016_{}`'
for i in range(1, 10):
reddit_2016_tables.append(temp.format('0' + str(i)))
for i in range(10, 13):
reddit_2016_tables.append(temp.format(str(i)))
return reddit_2016_tables | e590ab35becbe46aa220257f6629e54f720b3a13 | 4,105 |
def first_empty():
"""Return the lowest numbered workspace that is empty."""
workspaces = sorted(get_workspace_numbers(get_workspaces().keys()))
for i in range(len(workspaces)):
if workspaces[i] != i + 1:
return str(i + 1)
return str(len(workspaces) + 1) | f9c9f868570bbcc15a28097930d304b308ddf452 | 4,106 |
def get_local_tzone():
    """Get the current time zone on the local host"""
    # Use the DST offset when DST is in effect; the sign check on altzone mirrors the original logic.
    offset = altzone if localtime().tm_isdst else timezone
    sign = '+' if altzone < 0 else '-'
    hours = str(int(float(offset) / 60 // 60)).rjust(2, '0')
    minutes = str(int(float(offset) / 60 % 60)).ljust(2, '0')
    return sign + hours + minutes | dec1d9f9c5ecf937779de55a33397436841913bc | 4,108 |
def subscribers_tables_merge(tablename1: Tablename, tablename2: Tablename, csv_path=csvpath, verbose=True):
"""
    Merges the tables produced by the loader into one. Accepts either a pandas.DataFrame or a group name; in the
    latter case the group must be in the group list and the corresponding file must be in <csv_path>
"""
if isinstance(tablename1, pd.DataFrame):
table1 = tablename1
else:
table1 = pd.read_csv(csv_path + tablename1 + '.csv', sep=";", header=0, dtype=str)
if isinstance(tablename2, pd.DataFrame):
table2 = tablename2
else:
table2 = pd.read_csv(csv_path + tablename2 + '.csv', sep=";", header=0, dtype=str)
concatenated = table1.append(table2, ignore_index=True)
    # Identify users subscribed to more than one group
    # The condition gs_x != gs_x checks whether the value is NaN
outer_joined = pd.merge(table1[{'id', 'group_subscribed'}],
table2[{'id', 'group_subscribed'}],
on='id', how='outer')
outer_joined['groups'] = outer_joined['group_subscribed_x'] + ',' + outer_joined['group_subscribed_y']
outer_joined.loc[ outer_joined.group_subscribed_x != outer_joined.group_subscribed_x,
'groups'] = outer_joined.group_subscribed_y
outer_joined.loc[ outer_joined.group_subscribed_y != outer_joined.group_subscribed_y,
'groups'] = outer_joined.group_subscribed_x
    # Merge everything together and clean up
left_joined = pd.merge(concatenated, outer_joined[{'id', 'groups'}], on='id', how='left')
left_joined['group_subscribed'] = left_joined['groups']
L = left_joined.drop_duplicates('id')
if verbose:
print("{0} и {1} обработаны".format(str(tablename1), str(tablename2)))
return L[L.columns[0:6]] | 56c5c80b57aa4103f1836f8b9a5ca7bbb67e25bc | 4,109 |
def get_current_offset(session):
"""
For backfilling only, this function works with the init container to look up
    its job_id so it can line that up with its consumer group and offset so that
we can backfill up to a given point and then kill the worker afterwards.
"""
if settings.JOB_ID is None:
return settings.CONSUMER_GROUP, None
output = {}
while True:
logger.info(f"Getting kafka job with job_id = {settings.JOB_ID}")
sql = f"select * from kafka_jobs WHERE job_id='{settings.JOB_ID}';"
result = session.execute(sql).fetchall()
session.commit()
if len(result) == 0:
logger.info(f"Did not find job_id={settings.JOB_ID} - sleeping")
sleep(2)
continue
for r in result:
# Keyed on tuple of topic, partition to look up the stop_offset
output[(r[2], r[3])] = r[4]
return r[1], output | 97d0b0485005a709a047582667f56a79f636388d | 4,110 |
def get_params(p1, p2, L):
"""
Return the curve parameters 'a', 'p', 'q' as well as the integration
constant 'c', given the input parameters.
"""
hv = p2 - p1
m = p1 + p2
def f_bind(a): return f(a, *hv, L)
def fprime_bind(a): return fprime(a, hv[0])
# Newton-Raphson algorithm to find a value for 'a'
a0 = nr_first_guess(f_bind, 0.1, 0.01, 1.8)
a = optimize.newton(f_bind, a0, fprime_bind)
# Use our formulas to compute the rest
p = 0.5 * (m[0] - a * np.log((L+hv[1])/(L-hv[1])))
q = 0.5 * (m[1] - L / np.tanh(0.5 * hv[0]/a))
c = -a * np.sinh((p1[0]-p)/a)
return a, p, q, c | eae7d942b4272b3addc6c3f3912abc564e93f339 | 4,111 |
def _unpack(f):
"""to unpack arguments"""
def decorated(input):
if not isinstance(input, tuple):
input = (input,)
return f(*input)
return decorated | 245d425b45d9d7ef90239b791d6d65bcbd0433d5 | 4,112 |
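For example, the decorator lets a single callback accept either a bare argument or a tuple of arguments (hypothetical usage):
@_unpack
def add(a, b=0):
    return a + b

print(add((2, 3)))  # 5: the tuple is unpacked into a and b
print(add(7))       # 7: a non-tuple input is wrapped and passed as a single argument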
from .ops.classes import WriteRichOp
from typing import Iterable
from functools import reduce
def chain_rich(iterable: Iterable['WriteRichOp']) -> 'WriteRichOp':
"""Take an `iterable` of `WriteRich` segments and combine them to produce a single WriteRich operation."""
return reduce(WriteRichOp.then, iterable) | fa75ab929fb01b9c68e58938aa04aebddc26f245 | 4,113 |
def sum_plot_chi2_curve(bin_num, sum_bin, r_mpc, ax=None, cov_type='bt', label=None,
xlabel=True, ylabel=True, show_bin=True, ref_sig=None):
"""Plot the chi2 curve."""
    new_fig = ax is None  # remember whether the figure is created here (used by the return at the end)
    if new_fig:
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(
left=0.165, bottom=0.13, right=0.995, top=0.99, wspace=None, hspace=None)
ax = fig.add_subplot(111)
ax.axhline(1.0, linewidth=3.0, alpha=.4, c='k')
# Reduced chi2 curves
rchi2 = sum_bin['chi2_' + cov_type] / (len(sum_bin['dsigma']) - 1)
# Best-fit scatter and its uncertainty
ax.axvline(sum_bin['sig_med_' + cov_type], linewidth=2.0, alpha=0.4,
linestyle='--', color='k')
ax.fill_between(
[sum_bin['sig_low_' + cov_type], sum_bin['sig_upp_' + cov_type]],
[0, 0], [np.max(rchi2) * 1.2, np.max(rchi2) * 1.2],
color=color_bins[bin_num], alpha=0.2)
if ref_sig is not None:
ax.axvline(ref_sig, linewidth=3.0, alpha=0.5, linestyle='-.', color='k')
# Reduced chi2 curves
sims = sum_bin['simulation']
markers = cycle(['o', 's', 'h', '8', '+'])
for sim in np.unique(sims):
mask = sims == sim
ax.scatter(
sum_bin['sigma'][mask], rchi2[mask], marker=next(markers),
s=60, alpha=0.8, facecolor=color_bins[bin_num], edgecolor='grey',
linewidth=1.0, label=label)
ax.scatter(sum_bin['sigma'][sum_bin['idx_med_' + cov_type]],
rchi2[sum_bin['idx_med_' + cov_type]], marker='o',
s=100, alpha=1.0, facecolor=color_bins[bin_num], edgecolor='k',
linewidth=1.0, label=r'__no_label__')
ax.set_xlim(0.00, np.max(sum_bin['sigma']) * 1.09)
ax.set_ylim(0.01, np.max(rchi2) * 1.19)
sig_best = sum_bin['sig_med_' + cov_type]
sig_upp = sum_bin['sig_upp_' + cov_type]
sig_low = sum_bin['sig_low_' + cov_type]
if sig_best <= 0.65:
_ = ax.text(
sig_best + 0.05, np.max(rchi2) * 0.95,
r'$\sigma={:4.2f}^{{+{:4.2f}}}_{{-{:4.2f}}}$'.format(
sig_best, sig_upp - sig_best, sig_best - sig_low), fontsize=25)
else:
_ = ax.text(
sig_best - 0.45, np.max(rchi2) * 0.95,
r'$\sigma={:4.2f}^{{+{:4.2f}}}_{{-{:4.2f}}}$'.format(
sig_best, sig_upp - sig_best, sig_best - sig_low), fontsize=25)
if show_bin:
_ = ax.text(0.07, 0.87, r'$\rm Bin\ {:1d}$'.format(bin_num + 1), fontsize=35,
transform=ax.transAxes)
if xlabel:
_ = ax.set_xlabel(r'$\sigma_{\mathcal{M} | \mathcal{O}}$', fontsize=30)
else:
_ = ax.set_xticklabels([])
if ylabel:
_ = ax.set_ylabel(r'$\rm Reduced\ \chi^2$', fontsize=30)
else:
_ = ax.set_yticklabels([])
    if new_fig:
return fig
return ax | 6f0b7adf2daa98ecac9ff722eab9f6b748ef188b | 4,114 |
import dfim
import dfim.util
def compute_importance(model, sequences, tasks,
score_type='gradient_input',
find_scores_layer_idx=0,
target_layer_idx=-2,
reference_gc=0.46,
reference_shuffle_type=None,
num_refs_per_seq=10):
"""
reference_shuffle_type in ['random', 'dinuc']
reference_gc = 0 will return numpy array of 0s
reference_gc < 1 will assign each G and C reference_gc/2
"""
### Compute Importance scores
print('Calculating Importance Scores')
importance_method = {
"deeplift": deeplift.blobs.NonlinearMxtsMode.DeepLIFT_GenomicsDefault,
"rescale_all_layers": deeplift.blobs.NonlinearMxtsMode.Rescale,
"revealcancel_all_layers": deeplift.blobs.NonlinearMxtsMode.RevealCancel,
"gradient_input": deeplift.blobs.NonlinearMxtsMode.Gradient,
"guided_backprop": deeplift.blobs.NonlinearMxtsMode.GuidedBackprop,
"deconv": deeplift.blobs.NonlinearMxtsMode.DeconvNet
}
importance_model = kc.convert_sequential_model(model,
nonlinear_mxts_mode=importance_method[score_type])
importance_func = importance_model.get_target_contribs_func(
find_scores_layer_idx=find_scores_layer_idx,
target_layer_idx=target_layer_idx)
(reference, new_importance_func) = get_reference(sequences, importance_func,
gc_fraction=reference_gc,
shuffle=reference_shuffle_type,
seed=1)
importance_score_dict = {}
for task in tasks:
if reference is None:
reload(dfim.util)
seq_fastas = dfim.util.convert_one_hot_to_fasta(sequences)
scores = np.array(new_importance_func(task_idx=task, # was 0
input_data_sequences=seq_fastas,
num_refs_per_seq=num_refs_per_seq,
batch_size=10,
progress_update=1000))
else:
scores = np.array(new_importance_func(task_idx=task,
input_data_list=[sequences],
batch_size=10,
progress_update=1000,
input_references_list=[reference]))
importance_score_dict[task] = scores * sequences
return importance_score_dict | a7ebe928f4e3b50d5c8735d438d28c034d5dfeb9 | 4,115 |
from typing import Iterable
def test_check_non_existing() -> None:
"""Test a check on a non-existing column."""
class Schema(pa.SchemaModel):
a: Series[int]
@pa.check("nope")
@classmethod
def int_column_lt_100(cls, series: pd.Series) -> Iterable[bool]:
return series < 100
err_msg = (
"Check int_column_lt_100 is assigned to a non-existing field 'nope'"
)
with pytest.raises(pa.errors.SchemaInitError, match=err_msg):
Schema.to_schema() | 473b0e1c4b4c785970bdc648e4290426524882c7 | 4,116 |
import requests
def fetch_url(url):
"""Fetches the specified URL.
:param url: The URL to fetch
:type url: string
:returns: The response object
"""
return requests.get(url) | 26198dbc4f7af306e7a09c86b59a7da1a4802241 | 4,117 |
def _nw_score_(s1, s2, insert=lambda c: -2,
delete=lambda c: -2,
substitute=lambda c1, c2: 2 if c1 == c2 else -1):
"""Compute Needleman Wunsch score for aligning two strings.
This algorithm basically performs the same operations as Needleman Wunsch
alignment, but is made more memory efficient by storing only two columns of
the optimal alignment matrix. As a consequence, no reconstruction is
possible.
Args:
s1 (iterable): iterable to which we should align
s2 (iterable): iterable to be aligned
insert (lambda): function returning penalty for insertion (default -2)
delete (lambda): function returning penalty for deletion (default -2)
substitute (lambda): function returning penalty for substitution
(default -1)
Returns:
: last column of optimal matching matrix
"""
# lengths of two strings are further used for ranges, therefore 1 is added
# to every length
m = len(s1) + 1
n = len(s2) + 1
# score will be a two dimensional matrix
    score = [[0 for i in range(n)], [0 for i in range(n)]]
# character of first and second string, respectively
c1 = c2 = ''
# iterator over the second string
    s2_it = range(1, n)  # a range object can be re-iterated in the outer loop below
# indices of current and previous column in the error matrix (will be
# swapped along the way)
crnt = 0
prev = 1
prev_j = 0
# base case when the first string is shorter than second
for j in s2_it:
prev_j = j - 1
score[crnt][j] = score[crnt][prev_j] + insert(s2[prev_j])
# iterate over the first string
    for i in range(1, m):
# swap current and previous columns
prev, crnt = crnt, prev
# get current character of the first string
c1 = s1[i - 1]
# calculate the base case when len = 0
score[crnt][0] = score[prev][0] + delete(c1)
for j in s2_it:
prev_j = j - 1
c2 = s2[prev_j]
# current cell will be the maximum over insertions, deletions, and
# substitutions applied to adjacent cells
# substitution (covers cases when both chars are equal)
score[crnt][j] = max(score[prev][prev_j] + substitute(c1, c2),
# deletion
score[prev][j] + delete(c1),
# insertion
score[crnt][prev_j] + insert(c2))
# return last computed column of scores
return score[crnt] | 009c9eb4afec828adde53bddfd2a8b4d2a952c24 | 4,118 |
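With the range fix above, a minimal usage sketch of the scoring helper (default penalties: +2 match, -1 substitution, -2 indel):
last_column = _nw_score_("GATTACA", "GCATGCU")
print(last_column[-1])   # optimal global alignment score of the two strings
print(len(last_column))  # len("GCATGCU") + 1 entries, one per prefix of the second string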
import pickle
import torch
import re
import warnings
from typing import Counter
from typing import OrderedDict
def load_gisaid_data(
*,
device="cpu",
min_region_size=50,
include={},
exclude={},
end_day=None,
columns_filename="results/usher.columns.pkl",
features_filename="results/usher.features.pt",
feature_type="aa",
) -> dict:
"""
Loads the two files columns_filename and features_filename,
converts the input to PyTorch tensors and truncates the data according to
``include`` and ``exclude``.
:param str device: torch device to use
:param dict include: filters of data to include
:param dict exclude: filters of data to exclude
:param end_day: last day to include
:param str columns_filename:
:param str features_filename:
:param str feature_type: Either "aa" for amino acid features or "nuc" for
nucleotide features.
:returns: A dataset dict
:rtype: dict
"""
logger.info("Loading data")
include = include.copy()
exclude = exclude.copy()
if end_day:
logger.info(f"Load gisaid data end_day: {end_day}")
# Load column data.
with open(columns_filename, "rb") as f:
columns = pickle.load(f)
# Clean up location ids (temporary; this should be done in preprocess_gisaid.py).
columns["location"] = list(map(pyrocov.geo.gisaid_normalize, columns["location"]))
logger.info(f"Training on {len(columns['day'])} rows with columns:")
logger.info(", ".join(columns.keys()))
# Aggregate regions smaller than min_region_size to country level.
fine_regions = get_fine_regions(columns, min_region_size)
# Filter features into numbers of mutations and possibly genes.
usher_features = torch.load(features_filename)
mutations = usher_features[f"{feature_type}_mutations"]
features = usher_features[f"{feature_type}_features"].to(
device=device, dtype=torch.get_default_dtype()
)
keep = [m.count(",") == 0 for m in mutations] # restrict to single mutations
if include.get("gene"):
re_gene = re.compile(include.pop("gene"))
keep = [k and bool(re_gene.search(m)) for k, m in zip(keep, mutations)]
if exclude.get("gene"):
re_gene = re.compile(exclude.pop("gene"))
keep = [k and not re_gene.search(m) for k, m in zip(keep, mutations)]
if include.get("region"):
gene, region = include.pop("region")
lb, ub = sarscov2.GENE_STRUCTURE[gene][region]
for i, m in enumerate(mutations):
g, m = m.split(":")
if g != gene:
keep[i] = False
continue
match = re.search("[0-9]+", m)
assert match is not None
pos = int(match.group())
if not (lb < pos <= ub):
keep[i] = False
mutations = [m for k, m in zip(keep, mutations) if k]
if mutations:
features = features[:, keep]
else:
warnings.warn("No mutations selected; using empty features")
mutations = ["S:D614G"] # bogus
features = features[:, :1] * 0
logger.info("Loaded {} feature matrix".format(" x ".join(map(str, features.shape))))
# Construct the list of clades.
clade_id_inv = usher_features["clades"]
clade_id = {k: i for i, k in enumerate(clade_id_inv)}
clades = columns["clade"]
# Generate sparse_data.
sparse_data: dict = Counter()
countries = set()
states = set()
state_to_country_dict = {}
location_id: dict = OrderedDict()
skipped_clades = set()
num_obs = 0
for day, location, clade in zip(columns["day"], columns["location"], clades):
if clade not in clade_id:
if clade not in skipped_clades:
skipped_clades.add(clade)
if not clade.startswith("fine"):
logger.warning(f"WARNING skipping unsampled clade {clade}")
continue
# Filter by include/exclude
row = {
"location": location,
"day": day,
"clade": clade,
}
if not all(re.search(v, row[k]) for k, v in include.items()):
continue
if any(re.search(v, row[k]) for k, v in exclude.items()):
continue
# Filter by day
if end_day is not None:
if day > end_day:
continue
# preprocess parts
parts = location.split("/")
if len(parts) < 2:
continue
parts = tuple(p.strip() for p in parts[:3])
if len(parts) == 3 and parts not in fine_regions:
parts = parts[:2]
location = " / ".join(parts)
# Populate countries on the left and states on the right.
if len(parts) == 2: # country only
countries.add(location)
p = location_id.setdefault(location, len(countries) - 1)
else: # state and country
country = " / ".join(parts[:2])
countries.add(country)
c = location_id.setdefault(country, len(countries) - 1)
states.add(location)
p = location_id.setdefault(location, -len(states))
state_to_country_dict[p] = c
# Save sparse data.
num_obs += 1
t = day // TIMESTEP
c = clade_id[clade]
sparse_data[t, p, c] += 1
logger.warning(f"WARNING skipped {len(skipped_clades)} unsampled clades")
state_to_country = torch.full((len(states),), 999999, dtype=torch.long)
for s, c in state_to_country_dict.items():
state_to_country[s] = c
logger.info(f"Found {len(states)} states in {len(countries)} countries")
location_id_inv = [None] * len(location_id)
for k, i in location_id.items():
location_id_inv[i] = k
assert all(location_id_inv)
# Generate weekly_clades tensor from sparse_data.
if end_day is not None:
T = 1 + end_day // TIMESTEP
else:
T = 1 + max(columns["day"]) // TIMESTEP
P = len(location_id)
C = len(clade_id)
weekly_clades = torch.zeros(T, P, C)
for tps, n in sparse_data.items():
weekly_clades[tps] = n
logger.info(f"Dataset size [T x P x C] {T} x {P} x {C}")
logger.info(
f"Keeping {num_obs}/{len(clades)} rows "
f"(dropped {len(clades) - int(num_obs)})"
)
# Construct sparse representation.
pc_index = weekly_clades.ne(0).any(0).reshape(-1).nonzero(as_tuple=True)[0]
sparse_counts = dense_to_sparse(weekly_clades)
# Construct time scales centered around observations.
time = torch.arange(float(T)) * TIMESTEP / GENERATION_TIME
time -= time.mean()
# Construct lineage <-> clade mappings.
lineage_to_clade = usher_features["lineage_to_clade"]
clade_to_lineage = usher_features["clade_to_lineage"]
lineage_id_inv = sorted(lineage_to_clade)
lineage_id = {k: i for i, k in enumerate(lineage_id_inv)}
clade_id_to_lineage_id = torch.zeros(len(clade_to_lineage), dtype=torch.long)
for c, l in clade_to_lineage.items():
clade_id_to_lineage_id[clade_id[c]] = lineage_id[l]
lineage_id_to_clade_id = torch.zeros(len(lineage_to_clade), dtype=torch.long)
for l, c in lineage_to_clade.items():
lineage_id_to_clade_id[lineage_id[l]] = clade_id[c]
dataset = {
"clade_id": clade_id,
"clade_id_inv": clade_id_inv,
"clade_id_to_lineage_id": clade_id_to_lineage_id,
"clade_to_lineage": usher_features["clade_to_lineage"],
"features": features,
"lineage_id": lineage_id,
"lineage_id_inv": lineage_id_inv,
"lineage_id_to_clade_id": lineage_id_to_clade_id,
"lineage_to_clade": usher_features["lineage_to_clade"],
"location_id": location_id,
"location_id_inv": location_id_inv,
"mutations": mutations,
"pc_index": pc_index,
"sparse_counts": sparse_counts,
"state_to_country": state_to_country,
"time": time,
"weekly_clades": weekly_clades,
}
return dataset | eaa9c5b3735f291706ea783272b3372ad9e7937c | 4,119 |
def get_symbol_size(sym):
"""Get the size of a symbol"""
return sym["st_size"] | b2d39afe39542e7a4e1b4fed60acfc83e6a58677 | 4,120 |
def to_unnamed_recursive(sexpr, scheme):
"""Convert all named column references to unnamed column references."""
def convert(n):
if isinstance(n, NamedAttributeRef):
n = toUnnamed(n, scheme)
n.apply(convert)
return n
return convert(sexpr) | ffb58acb1cfbef654c5c936880961b8cc982ec01 | 4,122 |
def login_process():
"""Process login."""
email_address = request.form.get("email")
password = request.form.get("password")
user = User.query.filter_by(email_address=email_address).first()
if not user:
flash("Please try again!")
return redirect('/')
if user.password != password:
flash("Incorrect password")
return redirect('/')
session["user_id"] = user.user_id
flash("Logged in")
return redirect('/dashboard') | afee068b653e5f759329658e3614b0ce7ee2d405 | 4,123 |
def get_doc_translations(doctype, name):
"""
Returns a dict custom tailored for the document.
- Translations with the following contexts are handled:
- doctype:name:docfield
- doctype:name
- doctype:docfield (Select fields only)
- 'Select' docfields will have a values dict which will have
translations for each option
document(doctype, name) {
[lang_code_1]: {
title: lang_1_title,
status: {
value: lang_1_status,
values: {
option_1: lang_1_option_1,
...
}
}
},
[lang_code_2]: {
title: lang_2_title,
}
}
"""
context = f"{doctype}:"
translations = frappe.db.sql("""
SELECT
t.language,
t.source_text,
t.context,
t.translated_text
FROM `tabTranslation` t
WHERE
t.context LIKE %(context)s
""", {
"context": f"{context}%"
}, as_dict=1)
tr_dict = frappe._dict()
if not len(translations):
return tr_dict
doc = frappe.get_cached_doc(doctype, name)
value_fieldname_dict = None
def get_value_fieldname_dict():
nonlocal value_fieldname_dict
if value_fieldname_dict is not None:
return value_fieldname_dict
d = frappe._dict()
for fieldname in frappe.get_meta(doctype).get_valid_columns():
v = doc.get(fieldname)
if not v:
continue
if v not in d:
d[v] = []
d[v].append(fieldname)
value_fieldname_dict = d
return value_fieldname_dict
for t in translations:
if t.language not in tr_dict:
tr_dict[t.language] = frappe._dict()
ctx = t.context.split(":")
if len(ctx) == 3 and ctx[1] == name:
# Docfield translation
# doctype:name:docfield
fieldname = t.context.split(":")[2]
if t.source_text == "*" or doc.get(fieldname) == t.source_text:
tr_dict[t.language][fieldname] = t.translated_text
elif len(ctx) == 2 and ctx[1] != name:
# Select DocField
select_df = ctx[1]
if select_df not in [x.fieldname for x in frappe.get_meta(doctype).get_select_fields()]:
continue
select_tr = tr_dict[t.language].setdefault(
select_df, frappe._dict(value=None, values=frappe._dict()))
select_tr.get("values")[t.source_text] = t.translated_text
if doc.get(select_df) == t.source_text:
select_tr.value = t.translated_text
elif len(ctx) == 2:
# Document Translation
# doctype:name
d = get_value_fieldname_dict()
if t.source_text in d:
for fieldname in d[t.source_text]:
if tr_dict[t.language].get(fieldname, None):
continue
tr_dict[t.language][fieldname] = t.translated_text
return tr_dict | e7fd896de3162452a77ab989670e61b01e8e35a2 | 4,124 |
def app(request):
"""
Default view for Person Authority App
"""
return direct_to_template(request,
'person_authority/app.html',
{'app':APP}) | 9e75c9cf381c69b19bfdf08c74b2e0dc2106822b | 4,126 |
def is_xbar(top, name):
"""Check if the given name is crossbar
"""
xbars = list(filter(lambda node: node["name"] == name, top["xbar"]))
if len(xbars) == 0:
return False, None
if len(xbars) > 1:
log.error("Matching crossbar {} is more than one.".format(name))
raise SystemExit()
return True, xbars[0] | 435b84a30f3f749f07d0cc6dfdd5e7f0c5343c4f | 4,127 |
def index():
""" Root URL response """
return "Reminder: return some useful information in json format about the service here", status.HTTP_200_OK | d8128c8ba8976238c1d68376eaa64a77d09ce525 | 4,128 |
def backproject(depth, K):
"""Backproject a depth map to a cloud map
    depth: depth map, shape (H, W)
----
organized cloud map: (H,W,3)
"""
H, W = depth.shape
X, Y = np.meshgrid(np.asarray(range(W)) - K[0, 2], np.asarray(range(H)) - K[1, 2])
return np.stack((X * depth / K[0, 0], Y * depth / K[1, 1], depth), axis=2) | 5433fd408932f48c238cad7e5e8d7b14ee7b00de | 4,129 |
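Illustrative usage with a synthetic depth map and a made-up pinhole intrinsic matrix K (the fx, fy, cx, cy values are assumptions for the sketch):
import numpy as np

K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])
depth = np.ones((480, 640), dtype=np.float32)  # a flat surface 1 m from the camera
cloud = backproject(depth, K)
print(cloud.shape)  # (480, 640, 3): one XYZ point per pixel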
from pathlib import Path
def get_parent_dir(os_path: str) -> str:
"""
    Get the parent of the path's parent directory (Path(os_path).parents[1]).
"""
return str(Path(os_path).parents[1]) | 3a6e518119e39bfbdb9381bc570ac772b88b1334 | 4,130 |
def parse_work_url(work_url):
"""Extract work id from work url
Args:
work_url (str): work url
Returns:
str: bdrc work id
"""
work_id = ""
if work_url:
work_url_parts = work_url.split("/")
work_id = work_url_parts[-1]
return work_id | 1e7f5e222a2f6c7d01cbcb7df556adf6dd33f7cf | 4,132 |
def room():
"""Create a Room instance for all tests to share."""
return Room({"x": 4, "y": 4, "z": 4}, savable=False) | f031faa1bf654ff32868b678f79c2af040926b44 | 4,133 |
import re
def searchLiteralLocation(a_string, patterns):
"""assumes a_string is a string, being searched in
assumes patterns is a list of strings, to be search for in a_string
returns a list of re span object, representing the found literal if it exists,
else returns an empty list"""
results = []
for pattern in patterns:
regex = pattern
match = re.search(regex, a_string)
if match:
results.append((match, match.span()))
return results | 0f751bae801eaee594216688551919ed61784187 | 4,134 |
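A short usage sketch; only patterns that actually occur in the string produce an entry:
hits = searchLiteralLocation("the quick brown fox", ["quick", "cat"])
print([(m.group(), span) for m, span in hits])  # [('quick', (4, 9))]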
def UIOSelector_Highlight(inUIOSelector):
"""
Highlight (draw outline) the element (in app) by the UIO selector.
:param inUIOSelector: UIOSelector - List of items, which contains condition attributes
:return:
"""
# Check the bitness
lSafeOtherProcess = UIOSelector_SafeOtherGet_Process(inUIOSelector)
if lSafeOtherProcess is None:
UIO_Highlight(UIOSelector_Get_UIO(inUIOSelector))
else:
# Run function from other process with help of PIPE
lPIPEResuestDict = {"ModuleName": "UIDesktop", "ActivityName": "UIOSelector_Highlight",
"ArgumentList": [inUIOSelector],
"ArgumentDict": {}}
        # Send the request to the child process that is responsible for working with Windows windows
ProcessCommunicator.ProcessChildSendObject(lSafeOtherProcess, lPIPEResuestDict)
# Get answer from child process
lPIPEResponseDict = ProcessCommunicator.ProcessChildReadWaitObject(lSafeOtherProcess)
if lPIPEResponseDict["ErrorFlag"]:
raise Exception(
f"Exception was occured in child process (message): {lPIPEResponseDict['ErrorMessage']}, (traceback): {lPIPEResponseDict['ErrorTraceback']}")
else:
return lPIPEResponseDict["Result"]
return True | 9ab5930396aa9813f09d858c4bb94adc2170f312 | 4,135 |
import torch
def completeMessage_BERT(mod, tok, ind, max_length=50):
"""
Sentence Completion of the secret text from BERT
"""
tokens_tensor = torch.tensor([ind])
outInd = mod.generate(tokens_tensor, max_length=50)
outText=tok.decode(outInd[0].tolist())
newText=outText[len(tok.decode(ind)):]
newText=newText.split(sep=".", maxsplit=1)[0]
newText="".join((newText, "."))
outInd=ind+tok.encode(newText)
return outInd | c2a47bbe90a9e5d222af0bbe5959c82d2ebd2cd3 | 4,136 |
def load_real_tcs():
""" Load real timecourses after djICA preprocessing """
try:
return sio.loadmat(REAL_TC_DIR)['Shat'][0]
except KeyError:
try:
return sio.loadmat(REAL_TC_DIR)['Shat_'][0]
except KeyError:
print("Incorrect key")
pass | 68b148e6fc6088d8ef9f90daf25e07609010d9fe | 4,138 |
def create_fsaverage_forward(epoch, **kwargs):
"""
A forward model is an estimation of the potential or field distribution for a known source
and for a known model of the head. Returns EEG forward operator with a downloaded template
MRI (fsaverage).
Parameters:
epoch: mne.epochs.Epochs
MNE epoch object containing portions of raw EEG data built around specified timestamp(s).
kwargs: arguments
        Specify any of the following arguments for the mne.make_forward_solution() function. These include mindist=5.0, n_jobs=1.
Returns:
mne.forward.forward.Forward:
Forward operator built from the user_input epoch and the fsaverage brain.
"""
defaultKwargs = { 'n_jobs': 1, 'mindist': 5.0 }
kwargs = { **defaultKwargs, **kwargs }
# Download fsaverage brain files (to use as 3D MRI brain for model)
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)
subject = 'fsaverage'
trans = 'fsaverage' # MNE has a built-in fsaverage transformation
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
# Make forward
fwd = mne.make_forward_solution(epoch.info,
trans=trans,
src=src,
bem=bem,
eeg=True,
**kwargs)
return fwd | 34d72211babf23e41927ebe7df13c58bf6876e4d | 4,139 |
def midi_to_hz(notes):
"""Hello Part 6! You should add documentation to this function.
"""
return 440.0 * (2.0 ** ((np.asanyarray(notes) - 69.0) / 12.0)) | 7215126d25ff969a8aa187c7f49216ec7743a9e9 | 4,141 |
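The formula maps MIDI note 69 to A4 = 440 Hz, with each semitone scaling the frequency by 2**(1/12). A quick check (assumes numpy is imported as np, as the function requires):
import numpy as np

print(midi_to_hz(69))            # 440.0
print(midi_to_hz(81))            # 880.0, one octave higher
print(midi_to_hz([60, 64, 67]))  # C major triad, roughly [261.63 329.63 392.  ]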
def bond_stereo_parities(chi, one_indexed=False):
""" Parse the bond stereo parities from the stereochemistry layers.
:param chi: ChI string
:type chi: str
:param one_indexed: Return indices in one-indexing?
:type one_indexed: bool
:returns: A dictionary mapping bond keys onto parities
:rtype: dict[frozenset[int]: bool]
"""
ste_lyr_dct = stereo_layers(chi)
bnd_ste_dct = _bond_stereo_parities(ste_lyr_dct, one_indexed=one_indexed)
return bnd_ste_dct | 02729db6888899a91e69a25dae81c06777b89182 | 4,142 |
def filter_camera_angle(places):
"""Filter camera angles for KiTTI Datasets"""
bool_in = np.logical_and((places[:, 1] < places[:, 0] - 0.27), (-places[:, 1] < places[:, 0] - 0.27))
# bool_in = np.logical_and((places[:, 1] < places[:, 0]), (-places[:, 1] < places[:, 0]))
return places[bool_in] | 417fccfbb240c5defc36b4ce465fe14333922b94 | 4,143 |
def neural_log_literal_function(identifier):
"""
A decorator for NeuralLog literal functions.
:param identifier: the identifier of the function
:type identifier: str
:return: the decorated function
:rtype: function
"""
return lambda x: registry(x, identifier, literal_functions) | 84651d58b7da677ee213d1ff4667dc3be702f243 | 4,144 |
def get_factors(n: int) -> list:
"""Returns the factors of a given integer.
"""
return [i for i in range(1, n+1) if n % i == 0] | c15a0e30e58597daf439facd3900c214831687f2 | 4,145 |
def fetch_tables():
""" Used by the frontend, returns a JSON list of all the tables including metadata. """
return jsonify([
{
"tab": "animeTables",
"name": "Anime",
"tables": [
{
"id": "englishAnimeSites",
"title": "English Streaming Sites",
"type": "anime"
},
{
"id": "foreignAnimeSites",
"title": "Foreign Streaming Sites",
"type": "anime"
},
{
"id": "downloadSites",
"title": "Download Only Sites",
"type": "animeDownload"
}
]
},
{
"tab": "mangaTables",
"name": "Manga",
"tables": [
{
"id": "englishMangaAggregators",
"title": "Aggregators",
"type": "manga"
},
{
"id": "foreignMangaAggregators",
"title": "Non-English Aggregators",
"type": "manga"
},
{
"id": "englishMangaScans",
"title": "Scans",
"type": "manga"
},
{
"id": "foreignMangaScans",
"title": "Non-English Scans",
"type": "manga"
}
]
},
{
"tab": "lightNovelTables",
"name": "Novels",
"tables": [
{
"id": "lightNovels",
"title": "Light Novels",
"type": "novel"
},
{
"id": "visualNovels",
"title": "Visual Novels",
"type": "novel"
}
]
},
{
"tab": "applicationsTables",
"name": "Applications",
"tables": [
{
"id": "iosApplications",
"title": "iOS",
"type": "application"
},
{
"id": "androidApplications",
"title": "Android",
"type": "application"
},
{
"id": "windowsApplications",
"title": "Windows",
"type": "application"
},
{
"id": "macOSApplications",
"title": "macOS",
"type": "application"
},
{
"id": "browserExtensions",
"title": "Browser Extensions",
"type": "application"
}
]
},
{
"tab": "hentaiTables",
"name": "Hentai",
"tables": [
{
"id": "hentaiAnimeSites",
"title": "Hentai Anime Streaming Sites",
"type": "anime"
},
{
"id": "hentaiDoujinshiSites",
"title": "Hentai Manga/Image Boards/LN sites",
"type": "novel"
},
{
"id": "hentaiDownloadSites",
"title": "Hentai Download",
"type": "animeDownload"
},
{
"id": "hentaiApplications",
"title": "Hentai Applications",
"type": "application"
}
]
},
{
"tab": "vpnTables",
"name": "VPN",
"tables": [
{
"id": "vpnServices",
"title": "VPNs",
"type": "vpn"
}
]
}
]) | 5c07e7bc9f3366bc72e21dd5468edf57b6c448b3 | 4,146 |
def base_positive_warps():
"""
Get warp functions associated with domain (0,inf), scale 1.0
Warp function is defined as f(x) = log(exp(x)-1)
Returns
-------
Callable[torch.Tensor,torch.Tensor],
Callable[torch.Tensor,torch.Tensor],
Callable[torch.Tensor,torch.Tensor]
Function from (0,inf) to R, from R to (0,inf),
and log of derivative of function from (0,inf) to R
"""
warpf = utils.invsoftplus
iwarpf = utils.softplus
logdwarpf = lambda x: x - utils.invsoftplus(x)
return warpf, iwarpf, logdwarpf | 389db769f55f7542a45e6acbbccf5760dc7b8c26 | 4,147 |
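A sketch of the warp pair with standalone stand-ins for the utils helpers referenced above (assumption: utils.softplus / utils.invsoftplus match these definitions):
import torch

def softplus(x):
    return torch.nn.functional.softplus(x)   # maps R -> (0, inf)

def invsoftplus(x):
    return x + torch.log(-torch.expm1(-x))   # maps (0, inf) -> R, equals log(exp(x) - 1)

x = torch.tensor([0.1, 1.0, 5.0])
print(torch.allclose(softplus(invsoftplus(x)), x))                # True: the warps are inverses
print(torch.allclose(invsoftplus(x), torch.log(torch.expm1(x))))  # True: same as log(exp(x) - 1)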
import re
import json
from datetime import datetime
def dev_work_create():
"""
Create work order.
:return:
"""
db_ins = current_user.dbs
audits = User.query.filter(User.role == 'audit')
form = WorkForm()
if form.validate_on_submit():
sql_content = form.sql_content.data
db_ins = form.db_ins.data
shard = form.shard.data
if form.backup.data:
is_backup = True
else:
is_backup = False
sql_content = sql_content.rstrip().replace("\n", " ")
# Only Create and Alter can be used with table shard
shard_create = re.search('\s*create\s+', sql_content, flags=re.IGNORECASE)
shard_alter = re.search('\s*alter\s+', sql_content, flags=re.IGNORECASE)
shard_judge = shard_create or shard_alter
if shard != '0' and not shard_judge:
flash('Only Create and Alter sql can be used when using table shard!')
return redirect(url_for('.dev_work_create'))
# split joint sql with shard numbers
if shard != '0' and shard_judge:
split_sql = sqlparse.split(sql_content)
format_table = re.sub(" +", " ", split_sql[1])
sql_content = ''
for count in range(int(shard)):
format_table_list = format_table.split(' ')
shard_name = '`' + str(format_table_list[2].strip('`')) + '_' + str(count) + '`'
format_table_list[2] = shard_name
sql_content += ' '.join(format_table_list)
sql_content = split_sql[0] + sql_content
if sql_content[-1] == ';':
work = Work()
work.name = form.name.data
work.db_name = form.db_ins.data
work.shard = form.shard.data
work.backup = is_backup
work.dev_name = current_user.name
work.audit_name = form.audit.data
work.sql_content = sql_content
result = sql_auto_review(sql_content, db_ins)
if result or len(result) != 0:
json_result = json.dumps(result)
work.status = 1
for row in result:
if row[2] == 2:
work.status = 2
break
elif re.match(r"\w*comments\w*", row[4]):
work.status = 2
break
work.auto_review = json_result
work.create_time = datetime.now()
db.session.add(work)
db.session.commit()
if current_app.config['MAIL_ON_OFF'] == 'ON':
auditor = User.query.filter(User.name == work.audit_name).first()
mail_content = "<p>Proposer:" + work.dev_name + "</p>" + "<p>Sql Content:" + work.sql_content + \
"</p>" + "<p>A new work sheet.</p>"
send_mail.delay('【inception_mysql】New work sheet', mail_content, auditor.email)
return redirect(url_for('.dev_work'))
else:
                flash('Inception returned nothing. Something may be wrong with the SQL statement.')
return redirect(url_for('.dev_work_create'))
else:
            flash('SQL statement does not end with ";". Please check!')
return redirect(url_for('.dev_work_create'))
return render_template('dev/work_create.html', form=form, db_ins=db_ins, audits=audits) | b11f840bbc6428696afabe7f2fe00b5d0b6ad7d1 | 4,148 |
def blur(x, mean=0.0, stddev=1.0):
"""
Resize to smaller size (AREA) and then resize to original size (BILINEAR)
"""
size = tf.shape(x)[:2]
downsample_factor = 1 + tf.math.abs(tf.random.normal([], mean=mean, stddev=stddev))
small_size = tf.to_int32(tf.to_float(size)/downsample_factor)
x = tf.image.resize_images(x, small_size, method=tf.image.ResizeMethod.AREA)
x = tf.image.resize_images(x, size, method=tf.image.ResizeMethod.BILINEAR)
return x | b0101a4b820beb84c627bef048bbafeb1d11cdea | 4,149 |
def improve(update, close, guess=1, max_updates=100):
"""Iteratively improve guess with update until close(guess) is true or
max_updates have been applied."""
k = 0
while not close(guess) and k < max_updates:
guess = update(guess)
k = k + 1
return guess | 3475c07a3e9a674661d90e116bfb91fa12344d63 | 4,150 |
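# Illustrative use of improve() (not from the source): square roots via Newton updates.
def newton_sqrt(a, tolerance=1e-10):
    update = lambda guess: (guess + a / guess) / 2         # Newton step for guess**2 == a
    close = lambda guess: abs(guess * guess - a) < tolerance
    return improve(update, close)

print(newton_sqrt(256))   # ~16.0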
def images_to_sequence(tensor):
"""Convert a batch of images into a batch of sequences.
Args:
tensor: a (num_images, height, width, depth) tensor
Returns:
(width, num_images*height, depth) sequence tensor
"""
num_image_batches, height, width, depth = _shape(tensor)
transposed = tf.transpose(tensor, [2, 0, 1, 3])
return tf.reshape(transposed, [width, num_image_batches * height, depth]) | cc89ce931239b5335d5788bc6e9007e5186648bf | 4,151 |
from typing import Any
import logging
def transform_regions(regions: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""
Transform aggregated region data for map
regions -- aggregated data from region pipeline
"""
records = []
for record in regions:
if "latitude" in record["_id"].keys():
if record["admin3"]:
id = record["admin3"]
search_term = "admin3"
elif record["admin2"]:
id = record["admin2"]
search_term = "admin2"
elif record["admin1"]:
id = record["admin1"]
search_term = "admin1"
else:
id = country_name(record["country"])
if id is None:
continue
search_term = "country"
new_record = {
"_id": id,
"casecount": record["casecount"],
"country": country_name(record["country"]),
"country_code": record["country"],
"admin1": record["admin1"],
"admin2": record["admin2"],
"admin3": record["admin3"],
"lat": record["_id"]["latitude"],
"long": record["_id"]["longitude"],
"search": search_term,
}
logging.info(new_record)
records.append(new_record)
return records | 599e58e3bd66159114d0dbf27b339c47134c29c3 | 4,152 |
def _file_to_import_exists(storage_client: storage.client.Client,
bucket_name: str, filename: str) -> bool:
"""Helper function that returns whether the given GCS file exists or not."""
storage_bucket = storage_client.get_bucket(bucket_name)
return storage.Blob(
bucket=storage_bucket, name=filename).exists(storage_client) | cb051aba0d5e787e85dbc0283aa439e3c17e819c | 4,153 |
from typing import Optional
from typing import List
from typing import Tuple
def get_relative_poses(
num_frames: int,
frames: np.ndarray,
selected_track_id: Optional[int],
agents: List[np.ndarray],
agent_from_world: np.ndarray,
current_agent_yaw: float,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Internal function that creates the targets and availability masks for deep prediction-type models.
The futures/history offset (in meters) are computed. When no info is available (e.g. agent not in frame)
a 0 is set in the availability array (1 otherwise).
Note: return dtype is float32, even if the provided args are float64. Still, the transformation
between space is performed in float64 to ensure precision
Args:
num_frames (int): number of offset we want in the future/history
frames (np.ndarray): available frames. This may be less than num_frames
selected_track_id (Optional[int]): agent track_id or AV (None)
agents (List[np.ndarray]): list of agents arrays (same len of frames)
agent_from_world (np.ndarray): local from world matrix
current_agent_yaw (float): angle of the agent at timestep 0
Returns:
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: position offsets, angle offsets, extent, availabilities
"""
# How much the coordinates differ from the current state in meters.
positions_m = np.zeros((num_frames, 2), dtype=agent_from_world.dtype)
yaws_rad = np.zeros((num_frames, 1), dtype=np.float32)
extents_m = np.zeros((num_frames, 2), dtype=np.float32)
availabilities = np.zeros((num_frames,), dtype=np.float32)
for i, (frame, frame_agents) in enumerate(zip(frames, agents)):
if selected_track_id is None:
agent_centroid_m = frame["ego_translation"][:2]
agent_yaw_rad = rotation33_as_yaw(frame["ego_rotation"])
agent_extent = (EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH)
else:
# it's not guaranteed the target will be in every frame
try:
agent = filter_agents_by_track_id(frame_agents, selected_track_id)[0]
agent_centroid_m = agent["centroid"]
agent_yaw_rad = agent["yaw"]
agent_extent = agent["extent"][:2]
except IndexError:
availabilities[i] = 0.0 # keep track of invalid futures/history
continue
positions_m[i] = agent_centroid_m
yaws_rad[i] = agent_yaw_rad
extents_m[i] = agent_extent
availabilities[i] = 1.0
# batch transform to speed up
positions_m = transform_points(positions_m, agent_from_world) * availabilities[:, np.newaxis]
yaws_rad = angular_distance(yaws_rad, current_agent_yaw) * availabilities[:, np.newaxis]
return positions_m.astype(np.float32), yaws_rad, extents_m, availabilities | e1dad983e5070310ce239615c98f85d8b09b9c45 | 4,155 |
import numpy
def read_mat_cplx_bin(fname):
"""
Reads a .bin file containing floating-point values (complex) saved by Koala
Parameters
----------
fname : string
Path to the file
Returns
-------
buffer : ndarray
An array containing the complex floating-point values read from the file
See Also
--------
write_mat_cplx_bin
Example
-------
>>> buf = read_mat_cplx_bin('test/file_cplx.bin')
>>> buf
array([[ 0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j, ...,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j],
[ 0.00000000e+00 +0.00000000e+00j,
4.97599517e-09 +9.14632536e-10j,
5.99623329e-09 -1.52811275e-08j, ...,
1.17636354e-07 -1.01500063e-07j,
6.33714581e-10 +5.61812996e-09j,
0.00000000e+00 +0.00000000e+00j],
...,
[ 0.00000000e+00 +0.00000000e+00j,
-1.26479121e-08 -2.92324431e-09j,
-4.59448168e-09 +9.28236474e-08j, ...,
-4.15031316e-08 +1.48466597e-07j,
4.41099779e-08 -1.27046489e-08j,
0.00000000e+00 +0.00000000e+00j],
[ -0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j, ...,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j,
0.00000000e+00 +0.00000000e+00j]], dtype=complex64)
"""
kcplx_header_dtype = numpy.dtype([
("width", "i4"),
("height", "i4")
])
f = open(fname, 'rb')
kcplx_header = numpy.fromfile(f, dtype=kcplx_header_dtype, count=1)
shape = (kcplx_header['height'], kcplx_header['width'])
#print kcplx_header
tmp = numpy.fromfile(f, dtype='float32')
f.close()
real_tmp = (tmp[0:kcplx_header['height']*kcplx_header['width']]).reshape(shape)
imag_tmp = (tmp[kcplx_header['height']*kcplx_header['width']:]).reshape(shape)
#print tmp
#print 'array = {}'.format(len(tmp))
return real_tmp + 1j*imag_tmp | f2761f4cd7031dc16cb2f9903fd431bc7b4212d8 | 4,156 |
def DeleteDataBundle(**kwargs):
"""
Deletes a Data Bundle by ID.
:param kwargs:
:return:
"""
data_bundle_id = kwargs['data_bundle_id']
del data_bundles[data_bundle_id]
return(kwargs, 200) | 88ded979e45beebe885eeb1890ce66ae367b1fd6 | 4,157 |
def determineactions(repo, deficiencies, sourcereqs, destreqs):
"""Determine upgrade actions that will be performed.
Given a list of improvements as returned by ``finddeficiencies`` and
``findoptimizations``, determine the list of upgrade actions that
will be performed.
The role of this function is to filter improvements if needed, apply
recommended optimizations from the improvements list that make sense,
etc.
Returns a list of action names.
"""
newactions = []
knownreqs = supporteddestrequirements(repo)
for d in deficiencies:
name = d.name
# If the action is a requirement that doesn't show up in the
# destination requirements, prune the action.
if name in knownreqs and name not in destreqs:
continue
newactions.append(d)
# FUTURE consider adding some optimizations here for certain transitions.
# e.g. adding generaldelta could schedule parent redeltas.
return newactions | 0ec771565560607e839ce87a65426e01d0276f36 | 4,158 |
def filter_ccfs(ccfs, sc_thresh, min_ccf):
"""
Remove noisy ccfs from irrelevant experiments
:param ccfs: 2d array
:param sc_thresh: int
number of sign changes expected
:param min_ccf: float
cutoff value for a ccf to be above the noise threshold
:return:
"""
if sc_thresh is None:
sc_thresh = np.inf
asign = np.sign(ccfs)
signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)
signchange[:, 0] = 0
# (np.sum(signchange, axis=1) <= sc_thresh) &
### Do not cross correlate with a lag greater than 1/2 of the dataset when the timeseries is short
### throw out these cross correlations in filtered time-series
max_lag = ccfs.shape[1]
# if max_lag < 10:
# max_lag = int(np.ceil(ccfs.shape[1]/2.0))
filtered_ccf = ccfs[(np.sum(signchange, axis=1) <= sc_thresh) & (np.max(np.abs(ccfs), axis=1) > min_ccf),
:max_lag + 1]
return filtered_ccf | 06941eaea7bc5dc25f261669532c66ac37cbb9ab | 4,159 |
def market_data(symbol, expirationDate, strike, optionType, info=None):
"""Gets option market data from information. Takes time to load pages."""
assert all(isinstance(i, str) for i in [symbol, expirationDate, strike, optionType])
    return robin_stocks.options.get_option_market_data(symbol, expirationDate, strike, optionType, info=info) | 153d15af1030be22fa6c97b8d68fdf2049ebc416 | 4,160
def get_documents_embeddings (y, embedder, column):
"""
Given a Dataframe containing study_id and a text column, return a numpy array of embeddings
The idea of this function is to prevent to embed two times the same text (for computation efficiency)
Parameters:
-----------
y: Dataframe containing study_id, and a text column
embedder: Object of embedding creator containing a transform function
column: column containing the text to Embed
Output:
-------
Numpy array of size (n, embedding_size)
"""
# Getting reports DF
reports_df = y[["study_id", column]].fillna("").drop_duplicates("study_id").reset_index(drop=True)
reports_list = reports_df[column].astype(str).values
# Getting BERT embeddings
reports_embeddings = embedder.fit_transform(reports_list)
output = pd.merge(
y[["study_id"]],
reports_df[["study_id"]].join(
pd.DataFrame(reports_embeddings)
),
left_on="study_id",
right_on="study_id",
how="left"
).iloc[:,1:].values
return output | 9a748ef8b276d68a61d78c6fa567a40aae4fc222 | 4,161 |
def index(request):
"""view fonction de la page d'accueil
Render templates de la page d'accueil
"""
return render(request, "t_myapp/index.html") | b3cf3be5d3c2a286d5705281e35042ad19d0a050 | 4,162 |
def cidr_mask_to_subnet_mask(mask_num):
"""
    Convert a CIDR prefix length to a dotted-decimal subnet mask.
    :param mask_num: prefix length in bits, e.g. 16
    :return: dotted-decimal IPv4 address
"""
return convert_to_ipv4(cidr_mask_to_ip_int(mask_num), stype='int') | 83556c856f68e82824fa1f3a34b4d629361081af | 4,163 |
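# cidr_mask_to_ip_int() and convert_to_ipv4() are project helpers defined elsewhere;
# the expected result can be sanity-checked with the standard library (illustration only):
import ipaddress
print(ipaddress.ip_network("0.0.0.0/16").netmask)   # 255.255.0.0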
def correlate(A,B,
rows=None,columns=None, mode_row='zero', mode_column='zero'):
"""Correlate A and B.
Input:
------
A,B : array
Input data.
columns : int
Do correlation at columns 0..columns, defaults to the number of columns in A.
rows : int
Do correlation at columns 0..rows, defaults to the number of rows in A.
mode_row, mode_column : string
How values outside boundaries are handled ('zero' or 'mirror').
Output:
-------
Y : array
Rows-by-columns array of correlation values.
"""
A,B = atype([A,B],[np.double,np.double])
assert A.ndim == 2 and B.ndim == 2, "Input arrays must be two dimensional"
A_r,A_c = A.shape
B_r,B_c = B.shape
columns = columns or A_c
rows = rows or A_r
assert rows <= A_r and columns <= A_c, \
"columns and rows cannot be larger than dimensions of A"
modes = {'zero': 0,
'mirror': 1}
output = np.empty((rows,columns),dtype=np.double)
_lib.correlate(A_r, A_c, A,
B_r, B_c, B,
rows, columns,
modes[mode_row], modes[mode_column],
output)
return output | 88bfec52c318aaf119a6fac5cff731855f0a0d81 | 4,164 |
def getChrLenList(chrLenDict, c):
""" Given a chromosome length dictionary keyed on chromosome names and
a chromosome name (c) this returns a list of all the runtimes for a given
chromosome across all Step names.
"""
l = []
if c not in chrLenDict:
return l
for n in chrLenDict[c]:
l.append(chrLenDict[c][n])
return l | aedf613484262ac5bd31baf384ade2eb35f3e1eb | 4,165 |
from typing import Optional
def query_sessions(user_id: Optional[int]) -> TList[Session]:
"""
Return all user's sessions
:param user_id: current user ID (None if user auth is disabled)
:return: list of session objects
"""
adb = get_data_file_db(user_id)
return [Session(db_session) for db_session in adb.query(DbSession)] | c7449c7805f1ba0c425140603952215b67e3ce0e | 4,167 |
import torch
import math
def positionalencoding3d(d_model, dx, dy, dz):
"""
:param d_model: dimension of the model
    :param dx: extent of the first spatial dimension
    :param dy: extent of the second spatial dimension
    :param dz: extent of the third spatial dimension
    :return: d_model*dx*dy*dz position matrix
"""
# if d_model % 6 != 0:
# raise ValueError("Cannot use sin/cos positional encoding with "
# "odd dimension (got dim={:d})".format(d_model))
pe = torch.zeros(d_model, dx, dy, dz)
# Each dimension use half of d_model
interval = int(d_model // 6) * 2
div_term = torch.exp(torch.arange(0., interval, 2) * -(math.log(10000.0) / interval))
pos_x = torch.arange(0., dx).unsqueeze(1) * div_term
pos_y = torch.arange(0., dy).unsqueeze(1) * div_term
pos_z = torch.arange(0., dz).unsqueeze(1) * div_term
pe[0:interval:2, :, :, :] = torch.sin(pos_x).T.unsqueeze(2).unsqueeze(3).repeat(1, 1, dy, dz)
pe[1:interval:2, :, :, :] = torch.cos(pos_x).T.unsqueeze(2).unsqueeze(3).repeat(1, 1, dy, dz)
pe[interval:int(interval * 2):2, :, :] = torch.sin(pos_y).T.unsqueeze(1).unsqueeze(3).repeat(1, dx, 1, dz)
pe[interval + 1:int(interval * 2):2, :, :] = torch.cos(pos_y).T.unsqueeze(1).unsqueeze(3).repeat(1, dx, 1, dz)
pe[int(interval * 2):int(interval * 3):2, :, :] = torch.sin(pos_z).T.unsqueeze(1).unsqueeze(2).repeat(1, dx, dy, 1)
pe[int(interval * 2) + 1:int(interval * 3):2, :, :] = torch.cos(pos_z).T.unsqueeze(1).unsqueeze(2).repeat(1, dx, dy, 1)
return pe | 178dc3b86e3be0c9e799f5f0c658808f541f1eca | 4,168 |
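# Shape check (illustrative). Channels are assigned to the three axes in blocks of
# `interval`, so only d_model values that are multiples of 6 fill every channel; with
# d_model=16 the last 4 channels are never written and stay zero.
pe = positionalencoding3d(d_model=16, dx=4, dy=6, dz=8)
print(pe.shape)                    # torch.Size([16, 4, 6, 8])
print(pe[12:].abs().sum().item())  # 0.0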
def make_headers(context: TraceContext) -> Headers:
"""Creates dict with zipkin headers from supplied trace context.
"""
headers = {
TRACE_ID_HEADER: context.trace_id,
SPAN_ID_HEADER: context.span_id,
FLAGS_HEADER: '0',
SAMPLED_ID_HEADER: '1' if context.sampled else '0',
}
if context.parent_id is not None:
headers[PARENT_ID_HEADER] = context.parent_id
return headers | 474e3a57af1bda99585f7d140fbd0bb1d9bd18b2 | 4,169 |
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc) | 342d08644c56c2cce5e02f0d3d0ddd9df0b2f173 | 4,170 |
def scalar_sub(x: Number, y: Number) -> Number:
"""Implement `scalar_sub`."""
_assert_scalar(x, y)
return x - y | 74c9d44eaaabb1bfeea012b4ec1503e37d7c9f8b | 4,171 |
def predict_attack(h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11,h12,h13):
"""
    Parameters:
      - name: h1
        in: query
        type: number
        required: true
      - name: h5
        in: query
        type: number
        required: true
      - name: h4
        in: query
        type: number
        required: true
      - name: h8
        in: query
        type: number
        required: true
      - name: h9
        in: query
        type: number
        required: true
      - name: h10
        in: query
        type: number
        required: true
      - name: h11
        in: query
        type: number
        required: true
      - name: h12
        in: query
        type: number
        required: true
    DESCRIPTION: output variables
"""
if h2=='male':
h2=0
else:
h2=1
if h3=='angina':
h3=0
elif h3=='atypical anigna':
h3=1
elif h3=='non-anignal pain':
h3=2
else:
h3=3
if h6=='greater than 120':
h6=1
else:
h6=0
if h7=='normal':
h7=0
elif h7=='ST-t normal':
h7=1
else:
h7=2
if h13=='yes':
h13=1
else:
h13=0
res=classifier.predict([[h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11,h12,h13]])
return res | 907f6b52c3b1c24a409b8b7ebc157412bd67777d | 4,172 |
def _check_varrlist_integrity(vlist):
"""Return true if shapes and datatypes are the same"""
shape = vlist[0].data.shape
datatype = vlist[0].data.dtype
for v in vlist:
if v.data.shape != shape:
raise(Exception("Data shapes don't match"))
if v.data.dtype != datatype:
raise(Exception("Data types don't match"))
return True | 1b6fedd1222757c0bc92490be85d8030ee877842 | 4,173 |
from functools import wraps
def subclassfactory(fact_method):
    """fact_method takes the same args as __init__ and returns the subclass appropriate to those args.
    That subclass may in turn override the same factory method and choose among its subclasses.
    If this factory method isn't overridden in the subclass, an object of that class is initialized.
    fact_method is made into a classmethod and must take at least a cls argument.
    """
@wraps(fact_method)
@classmethod
def wrapper(cls, *args, **kwargs):
subclass = fact_method(cls, *args, **kwargs)
submeth = getattr(subclass, fact_method.__name__)
curmeth = getattr(cls, fact_method.__name__)
if (submeth.__func__ == curmeth.__func__):
return subclass(*args, **kwargs)
else:
return submeth(*args, **kwargs)
return wrapper | eb0b8227276ed7499d21d9998ec08fb830d89642 | 4,174 |
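# Hypothetical usage sketch (class names below are illustrative, not from the source):
# the base class's factory picks a subclass from its arguments; subclasses that do not
# override the factory are instantiated directly by the wrapper.
class Shape:
    def __init__(self, n_sides):
        self.n_sides = n_sides

    @subclassfactory
    def from_sides(cls, n_sides):
        # choose a subclass based on the argument; fall back to the base class
        return {3: Triangle, 4: Quadrilateral}.get(n_sides, Shape)

class Triangle(Shape):
    pass

class Quadrilateral(Shape):
    pass

print(type(Shape.from_sides(3)).__name__)   # Triangle
print(type(Shape.from_sides(7)).__name__)   # Shape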
def simulate_var1(x_tnow, b, mu, sigma2, m_, *, j_=1000, nu=10**9,
init_value=True):
"""For details, see here.
Parameters
----------
x_tnow : array, shape(n_, )
b : array, shape(n_,n_)
mu : array, shape(n_, )
sigma2 : array, shape(n_,n_)
m_ : int
nu: int
j_ : int
init_value : boolean
Returns
-------
x_tnow_thor : array, shape(j_, m_+1, n_)
"""
n_ = np.shape(sigma2)[0]
# Step 1: Monte Carlo scenarios of projected paths of the risk drivers
x_tnow_thor = np.zeros((j_, m_, n_))
for m in range(0, m_):
epsi = simulate_t(mu, sigma2, nu, j_).reshape((j_, -1))
if m > 0:
x_prec = x_tnow_thor[:, m-1, :]
else:
x_prec = np.tile(x_tnow, (j_, 1))
x_tnow_thor[:, m, :] = x_prec @ b.T + epsi
# Step 2: Include the initial value as starting node, if selected
if init_value:
x_tnow = np.tile(x_tnow, (j_, 1))
x_tnow = np.expand_dims(x_tnow, axis=1)
x_tnow_thor = np.concatenate((x_tnow, x_tnow_thor), axis=1)
return x_tnow_thor | 66bf82052e933e14d16e82738d36a4c96b51ca43 | 4,175 |
from typing import Optional
def is_drom(insee_city: Optional[str] = None, insee_region: Optional[str] = None) -> bool:
"""
    Does the given INSEE code (city or region) correspond to a DROM (French overseas department/region)?
    Args:
        insee_city: INSEE code of the city
        insee_region: INSEE code of the region
    Returns:
        True iff the INSEE code is a DROM (False when neither code is provided)
"""
if insee_city is not None:
return insee_city[:2] in {'97', '98'}
    elif insee_region is not None:  # Region codes do not follow the département code scheme
        return insee_region in {'01', '02', '03', '04', '06'}
    return False  # neither code was provided | 7a33516eb31c5ff7800eb6dc663d76d5e2c445cb | 4,176
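# Quick checks (codes below are only illustrative):
print(is_drom(insee_city="97110"))   # True  -- city code starting with 97
print(is_drom(insee_city="75056"))   # False -- metropolitan France
print(is_drom(insee_region="01"))    # True  -- region code in the overseas set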
import math
def pack_rows(rows, bitdepth):
"""Yield packed rows that are a byte array.
Each byte is packed with the values from several pixels.
"""
assert bitdepth < 8
assert 8 % bitdepth == 0
# samples per byte
spb = int(8 / bitdepth)
def make_byte(block):
"""Take a block of (2, 4, or 8) values,
and pack them into a single byte.
"""
res = 0
for v in block:
res = (res << bitdepth) + v
return res
for row in rows:
a = bytearray(row)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
n = float(len(a))
extra = math.ceil(n / spb) * spb - n
a.extend([0] * int(extra))
# Pack into bytes.
# Each block is the samples for one byte.
blocks = group(a, spb)
yield bytearray(make_byte(block) for block in blocks) | e0b8a4701adf1757a558475e2ea5830a3d53ab2a | 4,177 |
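# pack_rows() depends on an external group() chunking helper; the stand-in below is an
# assumption, kept minimal so the bit-packing can be exercised end to end at bitdepth 2.
def group(seq, n):
    # consecutive n-tuples of seq
    return zip(*[iter(seq)] * n)

packed = list(pack_rows([[3, 0, 1, 2, 3]], 2))
# 3,0,1,2 -> 0b11000110; the lone trailing 3 is zero-padded into 0b11000000
assert packed[0] == bytearray([0b11000110, 0b11000000])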
def reset_user_pwd(username: str) -> int:
"""
    :param username: username
    :return: result code: 1 = success, 0 = failure
"""
return update_user_info(username=username, args={
'password': '12345678'
}) | a9703bb82913b47e9b59ba36cd9257323cbfeec2 | 4,178 |
def location_engineering(df: pd.DataFrame) -> pd.DataFrame:
"""Call the `location_dict()` function to get the location dictionary and the
`location_dataframe()` one to add the location dictionary info to the DataFrame.
Parameters
----------
df :
The dataframe to work with.
Returns
-------
The DataFrame with location info added.
"""
# Call `location_dict` function to get a dictionary with location info
location_dictionary = location_dict(df)
# Call `location_dataframe` function to add the `location_dict` to a df
df = location_dataframe(df, location_dictionary)
return df | cca3e1724da08ffcb895aa9fc323ebaf380760e4 | 4,179 |
import re
def extract_energyxtb(logfile=None):
"""
Extracts xtb energies from xtb logfile using regex matching.
Args:
logfile (str): Specifies logfile to pull energy from
Returns:
energy (list[float]): List of floats containing the energy in each step
"""
re_energy = re.compile("energy: (-\\d+\\.\\d+)")
energy = []
with logfile.open() as f:
for line in f:
if "energy" in line:
energy.append(float(re_energy.search(line).groups()[0]))
return energy | 075f9d48d3bcc9f6bd12aa791cc4d0444299dd74 | 4,180 |
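# Illustrative round trip: write a minimal fake log and pull the energies back out.
# Only the "energy: <float>" pattern matters to the regex; the rest of a real xtb
# optimization log is omitted here, and the file name is just an example.
from pathlib import Path
log = Path("xtbopt_example.log")
log.write_text("energy: -10.123456 gnorm: 0.100000\nenergy: -10.234567 gnorm: 0.010000\n")
print(extract_energyxtb(log))   # [-10.123456, -10.234567]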
def make_transaction_frame(transactions):
"""
Formats a transaction DataFrame.
Parameters
----------
transactions : pd.DataFrame
Contains improperly formatted transactional data.
Returns
-------
df : pd.DataFrame
        Daily transaction volume and dollar amount.
- See full explanation in tears.create_full_tear_sheet.
"""
transaction_list = []
for dt in transactions.index:
txns = transactions.loc[dt]
if len(txns) == 0:
continue
for txn in txns:
txn = map_transaction(txn)
transaction_list.append(txn)
df = pd.DataFrame(sorted(transaction_list, key=lambda x: x['dt']))
df['txn_dollars'] = -df['amount'] * df['price']
df.index = list(map(pd.Timestamp, df.dt.values))
return df | ab8feafb1a441fddf574ebd12a7720a7c4d7398b | 4,182 |