content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def extract_character_pairs(letter_case, reverse_letter_case):
    """
    Extract character pairs. Check that the two Unicode values are also mapping values of each other.

    :param letter_case: Case mappings dictionary which contains the conversions.
    :param reverse_letter_case: Comparable case mapping table which contains the reverse direction of the conversion.
    :return: A table with character pairs.
    """
    character_pairs = []
    for letter_id in sorted(letter_case.keys()):
        if is_bidirectional_conversion(letter_id, letter_case, reverse_letter_case):
            mapped_value = letter_case[letter_id]
            character_pairs.extend([letter_id, ord(mapped_value)])
            # Remove character pairs from case mapping tables
            del letter_case[letter_id]
            del reverse_letter_case[ord(mapped_value)]
    return character_pairs | 29e5415afc4e4a3bff5cd74c1fa14f78cf715384 | 3,653,364 |
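A minimal usage sketch, assuming a hypothetical stand-in for `is_bidirectional_conversion` (the real helper is not shown above); note the result is a flat list of code points, not a list of tuples:

```python
# Hypothetical stand-in: accept a pair when the reverse table maps the
# converted character back to the original code point.
def is_bidirectional_conversion(letter_id, letter_case, reverse_letter_case):
    mapped = letter_case[letter_id]
    return ord(reverse_letter_case.get(ord(mapped), '\0')) == letter_id

lower_to_upper = {0x61: 'A'}   # 'a' -> 'A'
upper_to_lower = {0x41: 'a'}   # 'A' -> 'a'

print(extract_character_pairs(lower_to_upper, upper_to_lower))  # [97, 65]
print(lower_to_upper)  # {} -- extracted pairs are removed from both tables
```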
def after_timestep(simulation, is_steady, force_steady=False):
    """
    Move u -> up, up -> upp and prepare for the next time step
    """
    # Stopping criteria for steady state simulations
    vel_diff = None
    if is_steady:
        vel_diff = 0
        for d in range(simulation.ndim):
            u_new = simulation.data['u%d' % d]
            up = simulation.data['up%d' % d]
            diff = abs(u_new.vector().get_local() - up.vector().get_local()).max()
            vel_diff = max(vel_diff, diff)
    shift_fields(simulation, ['u%d', 'up%d', 'upp%d'])
    shift_fields(simulation, ['u_conv%d', 'up_conv%d', 'upp_conv%d'])
    if force_steady:
        simulation.data['time_coeffs'].assign(dolfin.Constant([0.0, 0.0, 0.0]))
    else:
        # Change time coefficient to second order
        simulation.data['time_coeffs'].assign(dolfin.Constant([3 / 2, -2, 1 / 2]))
    # Extrapolate the convecting velocity to the next step
    update_convection(simulation, force_steady=force_steady)
    return vel_diff | 7aa3436ba8bcc4ec395ba6f030b83e6fc3cb4bf3 | 3,653,365 |
def get_summary_indices(df, on='NOSC'):
    """ Get the summary stats for the indices: median, mean, std, weighted mean and weighted std """
    samples = get_list_samples(df)
    samples.append(on)
    t = df[samples]
    t = t.melt(id_vars=[on], var_name='SampleID', value_name='NormIntensity')
    t = t[t['NormIntensity'] > 0].reset_index(drop=True)
    t_agg = t.groupby(['SampleID']).agg({on: ['median', 'mean', 'std']})
    t_agg.columns = t_agg.columns.map('_'.join)
    t_agg = t_agg.reset_index()
    t_agg[[on + '_w_mean', on + '_w_std']] = ''
    for sample in t['SampleID'].unique():
        temp = t[t['SampleID'] == sample]
        wdf = DescrStatsW(temp[on], weights=temp['NormIntensity'])
        t_agg.loc[t_agg['SampleID'] == sample, on + '_w_mean'] = wdf.mean
        t_agg.loc[t_agg['SampleID'] == sample, on + '_w_std'] = wdf.std
    return t_agg | 1c430a9ad377e3d550e292b381af072d4adc78f0 | 3,653,366 |
def view_evidence(evidence_id: int):
    """View a single Evidence model."""
    evidence = manager.get_evidence_by_id_or_404(evidence_id)
    return render_template(
        'evidence/evidence.html',
        evidence=evidence,
        manager=manager,
    ) | 8a51a3c6279a1501c26fb2de09c4450660546bf3 | 3,653,367 |
def rigidBlades(blds, hub=None, r_O=[0,0,0]):
    """ return a rigid body for the three blades
    All bodies should be in a similar frame
    """
    blades = blds[0].toRigidBody()
    for B in blds[1:]:
        B_rigid = B.toRigidBody()
        blades = blades.combine(B_rigid, r_O=r_O)
    blades.name = 'blades'
    return blades | 89b48ba43f748fa4b2db7ee768eabe9e79e9a453 | 3,653,369 |
def mea_slow(posterior_matrix, shortest_ref_per_event, return_all=False):
"""Computes the maximum expected accuracy alignment along a reference with given events and probabilities.
Computes a very slow but thorough search through the matrix
:param posterior_matrix: matrix of posterior probabilities with reference along x axis and events along y
:param shortest_ref_per_event: shortest ref position per event
:param return_all: return all forward edges
"""
ref_len = len(posterior_matrix[0])
events_len = len(posterior_matrix)
initialize = True
forward_edges = list()
new_edges = list()
# step through all events
for event_index in range(events_len):
max_prob = 0
if initialize:
ref_index = 0
while ref_index < ref_len:
# intitialize forward edges with first event alignments
# if type(posterior_matrix[ref_index][event_index]) is not int:
posterior = posterior_matrix[event_index][ref_index]
event_data = [ref_index, event_index, posterior, posterior, None]
if 0 < posterior >= max_prob:
# print("True", posterior, max_prob)
new_edges.append(event_data)
max_prob = posterior
ref_index += 1
# print("INITIALIZE", new_edges, max_prob)
if len(new_edges) != 0:
forward_edges = new_edges
new_edges = list()
initialize = False
else:
# print(forward_edges)
ref_index = 0
top_edge = []
while ref_index < ref_len:
posterior = posterior_matrix[event_index][ref_index]
if posterior >= max_prob:
# no possible connecting edges and is needed for other other events create a new one
if ref_index < shortest_ref_per_event[event_index]:
top_edge.append([ref_index, event_index, posterior, posterior, None])
max_prob = posterior
ref_index += 1
# add top edge if needed
if top_edge:
new_edges.append(top_edge[-1])
ref_index = 0
while ref_index < ref_len:
inxs = []
probs = []
posterior = posterior_matrix[event_index][ref_index]
for j, forward_edge in enumerate(forward_edges):
if forward_edge[0] < ref_index:
# track which probabilities with prev edge
inxs.append(j)
probs.append(posterior + forward_edge[3])
# if needed, keep edges aligned to ref positions previous than the current ref position
elif forward_edge[0] == ref_index:
# stay at reference position
# add probability of event if we want to promote sideways movement
inxs.append(j)
probs.append(forward_edge[3])
# add new edge
inxs = inxs[::-1]
probs = probs[::-1]
if len(probs) != 0:
if max(probs) > max_prob:
connecting_edge = forward_edges[inxs[int(np.argmax(probs))]]
new_edges.append([ref_index, event_index, posterior, max(probs), connecting_edge])
max_prob = max(probs)
else:
if forward_edges[0][0] > ref_index and posterior > max_prob:
new_edges.append([ref_index, event_index, posterior, posterior, None])
max_prob = posterior
ref_index += 1
# print("END_NEW_EDGES", new_edges)
forward_edges = new_edges
new_edges = list()
# grab and return the highest probability edge
if return_all:
return forward_edges
else:
highest_prob = 0
best_forward_edge = 0
for x in forward_edges:
if x[3] > highest_prob:
highest_prob = x[3]
best_forward_edge = x
return best_forward_edge | 4b7165a0145d2e1ad2d0550910e03de5a775733c | 3,653,370 |
import trace
def predict(cart_tree, feature_dict, data_set):
    """Predict the quality."""
    feature_dict = {}
    for index, feature in enumerate(feature_set):
        feature_dict[feature] = index
    results = []
    for element in data_set:
        # Append a tuple.
        results.append((trace(cart_tree, feature_dict, element), element[-1]))
    return results | c7f50557202c4320194ecc5264059c1701e0de73 | 3,653,371 |
def test_incorporate_getitem_through_switch(tag):
    """ test_incorporate_getitem_through_switch """
    fns = FnDict()
    scalar_gt = Primitive('scalar_gt')

    @fns
    def before(x, y):
        def f1(x, y):
            return x, y

        def f2(x, y):
            return y, x

        return tuple_getitem(
            switch(scalar_gt(x, 0), f1, f2)(x, y),
            0)

    @fns
    def after(x, y):
        def f1(x, y):
            return x

        def f2(x, y):
            return y

        return switch(scalar_gt(x, 0), f1, f2)(x, y)

    return fns[tag] | df128faf55c48ba698340d06b3c232ebc0140511 | 3,653,372 |
def response_json(status, message, response):
    """
    Helper method that converts the given data into JSON format.

    :param status: status of the API call, either true or false
    :param message: user-friendly message
    :param response: data returned by the API
    :return: json response
    """
    data = {
        "status": status,
        "message": message,
        "response": response,
    }
    return data | 9c7e30e81c5412998bc8523b0e45a353c82b5a41 | 3,653,373 |
from . import conf
def settings(request):
    """
    Template context processor that exposes the theme configuration as ``bs3_conf``.
    """
    conf_dict = dict(vars(conf))
    # conf_dict.update(ThemeSite.objects.get_theme_conf(request=request, fail=False))
    data = request.session.get('cms_bs3_theme_conf', {})
    conf_dict.update(data)
    return {'bs3_conf': conf_dict} | 1230171ce1263083aabbd0fb79928c9236af31a9 | 3,653,374 |
def NDVI(R, NIR):
    """ Compute the NDVI
        INPUT : R (np.array) -> the Red band images as a numpy array of float
                NIR (np.array) -> the Near Infrared images as a numpy array of float
        OUTPUT : NDVI (np.array) -> the NDVI
    """
    NDVI = (NIR - R) / (NIR + R + 1e-12)
    return NDVI | aa1789c80720c09aa464b3ae67da7de821e2ba97 | 3,653,375 |
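A short usage sketch with synthetic reflectance values; the `1e-12` term in the denominator only guards against division by zero where both bands are zero:

```python
import numpy as np

red = np.array([0.10, 0.20, 0.05])
nir = np.array([0.50, 0.25, 0.60])

ndvi = NDVI(red, nir)
print(ndvi)  # approx. [0.667, 0.111, 0.846]; dense vegetation pushes NDVI toward 1
```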
from typing import Union
from typing import Optional
from datetime import datetime
def get_nearby_stations_by_number(
latitude: float,
longitude: float,
num_stations_nearby: int,
parameter: Union[Parameter, str],
time_resolution: Union[TimeResolution, str],
period_type: Union[PeriodType, str],
minimal_available_date: Optional[Union[datetime, str]] = None,
maximal_available_date: Optional[Union[datetime, str]] = None,
) -> pd.DataFrame:
"""
Provides a list of weather station ids for the requested data
:param latitude: Latitude of location to search for nearest
weather station
:param longitude: Longitude of location to search for nearest
weather station
:param minimal_available_date: Start date of timespan where measurements
should be available
:param maximal_available_date: End date of timespan where measurements
should be available
:param parameter: Observation measure
:param time_resolution: Frequency/granularity of measurement interval
:param period_type: Recent or historical files
:param num_stations_nearby: Number of stations that should be nearby
:return: DataFrames with valid stations in radius per
requested location
"""
if num_stations_nearby <= 0:
raise ValueError("'num_stations_nearby' has to be at least 1.")
parameter = parse_enumeration_from_template(parameter, Parameter)
time_resolution = parse_enumeration_from_template(time_resolution, TimeResolution)
period_type = parse_enumeration_from_template(period_type, PeriodType)
if not check_parameters(parameter, time_resolution, period_type):
raise InvalidParameterCombination(
f"The combination of {parameter.value}, {time_resolution.value}, "
f"{period_type.value} is invalid."
)
minimal_available_date = (
minimal_available_date
if not minimal_available_date or isinstance(minimal_available_date, datetime)
else parse_datetime(minimal_available_date)
)
maximal_available_date = (
maximal_available_date
if not minimal_available_date or isinstance(maximal_available_date, datetime)
else parse_datetime(maximal_available_date)
)
if minimal_available_date and maximal_available_date:
if minimal_available_date > maximal_available_date:
raise ValueError(
"'minimal_available_date' has to be before " "'maximal_available_date'"
)
coords = Coordinates(np.array(latitude), np.array(longitude))
metadata = metadata_for_climate_observations(
parameter, time_resolution, period_type
)
# Filter only for stations that have a file
metadata = metadata[metadata[DWDMetaColumns.HAS_FILE.value].values]
if minimal_available_date:
metadata = metadata[
metadata[DWDMetaColumns.FROM_DATE.value] <= minimal_available_date
]
if maximal_available_date:
metadata = metadata[
metadata[DWDMetaColumns.TO_DATE.value] >= maximal_available_date
]
metadata = metadata.reset_index(drop=True)
distances, indices_nearest_neighbours = _derive_nearest_neighbours(
metadata.LAT.values, metadata.LON.values, coords, num_stations_nearby
)
distances = pd.Series(distances)
indices_nearest_neighbours = pd.Series(indices_nearest_neighbours)
# If num_stations_nearby is higher then the actual amount of stations
# further indices and distances are added which have to be filtered out
distances = distances[: min(metadata.shape[0], num_stations_nearby)]
indices_nearest_neighbours = indices_nearest_neighbours[
: min(metadata.shape[0], num_stations_nearby)
]
distances_km = np.array(distances * KM_EARTH_RADIUS)
metadata_location = metadata.iloc[indices_nearest_neighbours, :].reset_index(
drop=True
)
metadata_location[DWDMetaColumns.DISTANCE_TO_LOCATION.value] = distances_km
if metadata_location.empty:
logger.warning(
f"No weather stations were found for coordinate "
f"{latitude}°N and {longitude}°E "
)
return metadata_location | e53896ea4644bcce6351671ec950fe8165a2cb12 | 3,653,376 |
import scipy
def get_state(tau, i=None, h=None, delta=None, state_0=None, a_matrix=None):
    """
    Compute the magnetization state.
    r(τ) = e^(Aτ)r(0) eq (11) at [1]
    """
    if a_matrix is not None:
        # get state from a known A matrix
        # A matrix can be shared and it takes time to build
        return np.matmul(scipy.linalg.expm(tau*a_matrix), state_0)
    return np.matmul(scipy.linalg.expm(
        tau*generate_A(i, h, delta, state_0.size-1)), state_0) | a4ae277d41b64c9caf49758d62767030db0b244b | 3,653,377 |
import aacgmv2._aacgmv2 as c_aacgmv2
import logging
def convert_latlon_arr(in_lat, in_lon, height, dtime, code="G2A"):
"""Converts between geomagnetic coordinates and AACGM coordinates.
Parameters
------------
in_lat : (np.ndarray or list or float)
Input latitude in degrees N (code specifies type of latitude)
in_lon : (np.ndarray or list or float)
Input longitude in degrees E (code specifies type of longitude)
height : (np.ndarray or list or float)
Altitude above the surface of the earth in km
dtime : (datetime)
Single datetime object for magnetic field
code : (int or str)
Bit code or string denoting which type(s) of conversion to perform
G2A - geographic (geodetic) to AACGM-v2
A2G - AACGM-v2 to geographic (geodetic)
TRACE - use field-line tracing, not coefficients
ALLOWTRACE - use trace only above 2000 km
BADIDEA - use coefficients above 2000 km
GEOCENTRIC - assume inputs are geocentric w/ RE=6371.2
(default = "G2A")
Returns
-------
out_lat : (np.ndarray)
Output latitudes in degrees N
out_lon : (np.ndarray)
Output longitudes in degrees E
out_r : (np.ndarray)
Geocentric radial distance (R_Earth) or altitude above the surface of
the Earth (km)
Notes
-------
At least one of in_lat, in_lon, and height must be a list or array.
"""
# If a list was entered instead of a numpy array, recast it here
if isinstance(in_lat, list):
in_lat = np.array(in_lat)
if isinstance(in_lon, list):
in_lon = np.array(in_lon)
if isinstance(height, list):
height = np.array(height)
# If one or two of these elements is a float or int, create an array
test_array = np.array([hasattr(in_lat, "shape"), hasattr(in_lon, "shape"),
hasattr(height, "shape")])
if not test_array.all():
if test_array.any():
arr_shape = in_lat.shape if test_array.argmax() == 0 else \
(in_lon.shape if test_array.argmax() == 1 else
height.shape)
if not test_array[0]:
in_lat = np.ones(shape=arr_shape, dtype=float) * in_lat
if not test_array[1]:
in_lon = np.ones(shape=arr_shape, dtype=float) * in_lon
if not test_array[2]:
height = np.ones(shape=arr_shape, dtype=float) * height
else:
logging.info("for a single location, consider using convert_latlon")
in_lat = np.array([in_lat])
in_lon = np.array([in_lon])
height = np.array([height])
# Ensure that lat, lon, and height are the same length or if the lengths
# differ that the different ones contain only a single value
if not (in_lat.shape == in_lon.shape and in_lat.shape == height.shape):
ulen = np.unique([in_lat.shape, in_lon.shape, height.shape])
if ulen.min() != (1,):
logging.error("mismatched input arrays")
return None, None, None
# Test time
if isinstance(dtime, dt.date):
dtime = dt.datetime.combine(dtime, dt.time(0))
assert isinstance(dtime, dt.datetime), \
logging.error('time must be specified as datetime object')
# Test height
if np.min(height) < 0:
logging.warn('conversion not intended for altitudes < 0 km')
# Initialise output
lat_out = np.empty(shape=in_lat.shape, dtype=float) * np.nan
lon_out = np.empty(shape=in_lon.shape, dtype=float) * np.nan
r_out = np.empty(shape=height.shape, dtype=float) * np.nan
# Test code
try:
code = code.upper()
if(np.nanmax(height) > 2000 and code.find("TRACE") < 0 and
code.find("ALLOWTRACE") < 0 and code.find("BADIDEA") < 0):
estr = 'coefficients are not valid for altitudes above 2000 km. You'
estr += ' must either use field-line tracing (trace=True '
estr += 'or allowtrace=True) or indicate you know this '
estr += 'is a bad idea'
logging.error(estr)
return lat_out, lon_out, r_out
# make flag
bit_code = convert_str_to_bit(code)
except AttributeError:
bit_code = code
assert isinstance(bit_code, int), \
logging.error("unknown code {:}".format(bit_code))
# Test latitude range
if np.abs(in_lat).max() > 90.0:
assert np.abs(in_lat).max() <= 90.1, \
logging.error('unrealistic latitude')
in_lat = np.clip(in_lat, -90.0, 90.0)
# Constrain longitudes between -180 and 180
in_lon = ((in_lon + 180.0) % 360.0) - 180.0
# Set current date and time
try:
c_aacgmv2.set_datetime(dtime.year, dtime.month, dtime.day, dtime.hour,
dtime.minute, dtime.second)
except:
raise RuntimeError("unable to set time for {:}".format(dtime))
# Vectorise the AACGM code
convert_vectorised = np.vectorize(c_aacgmv2.convert)
# convert
try:
lat_out, lon_out, r_out = convert_vectorised(in_lat, in_lon, height,
bit_code)
except:
pass
return lat_out, lon_out, r_out | d9efc4d58925ef9cd63e7c800258b99c91e14f7a | 3,653,379 |
import re
def getPredictedAnchor(title: str) -> str:
    """Return predicted anchor for given title, usually first letter."""
    title = title.lower()
    if title.startswith('npj '):
        return 'npj series'
    title = re.sub(r'^(the|a|an|der|die|das|den|dem|le|la|les|el|il)\s+', '',
                   title)
    return title[0].upper() | 972eaa495078bc3929967a052f031c50d439fbdc | 3,653,381 |
from typing import Optional
from typing import Mapping
def get_contact_flow(contact_flow_id: Optional[str] = None,
instance_id: Optional[str] = None,
name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContactFlowResult:
"""
Provides details about a specific Amazon Connect Contact Flow.
## Example Usage
By name
```python
import pulumi
import pulumi_aws as aws
test = aws.connect.get_contact_flow(instance_id="aaaaaaaa-bbbb-cccc-dddd-111111111111",
name="Test")
```
By contact_flow_id
```python
import pulumi
import pulumi_aws as aws
test = aws.connect.get_contact_flow(contact_flow_id="cccccccc-bbbb-cccc-dddd-111111111111",
instance_id="aaaaaaaa-bbbb-cccc-dddd-111111111111")
```
:param str contact_flow_id: Returns information on a specific Contact Flow by contact flow id
:param str instance_id: Reference to the hosting Amazon Connect Instance
:param str name: Returns information on a specific Contact Flow by name
:param Mapping[str, str] tags: A the map of tags to assign to the Contact Flow.
:param str type: Specifies the type of Contact Flow.
"""
__args__ = dict()
__args__['contactFlowId'] = contact_flow_id
__args__['instanceId'] = instance_id
__args__['name'] = name
__args__['tags'] = tags
__args__['type'] = type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:connect/getContactFlow:getContactFlow', __args__, opts=opts, typ=GetContactFlowResult).value
return AwaitableGetContactFlowResult(
arn=__ret__.arn,
contact_flow_id=__ret__.contact_flow_id,
content=__ret__.content,
description=__ret__.description,
id=__ret__.id,
instance_id=__ret__.instance_id,
name=__ret__.name,
tags=__ret__.tags,
type=__ret__.type) | ed57d1b17c19f66c38e67613e49653b11c13f699 | 3,653,382 |
def jensen_alpha_beta(risk_returns, benchmark_returns, Rebalancement_frequency):
    """
    Compute the Beta and alpha of the investment under the CAPM

    Parameters
    ----------
    risk_returns : np.ndarray
    benchmark_returns : np.ndarray
    Rebalancement_frequency : np.float64

    Returns
    ----------
    np.float64, Beta, np.float64, Alpha
    """
    benchmark_returns = sm.add_constant(benchmark_returns)
    model = sm.OLS(risk_returns, benchmark_returns).fit()
    alpha, beta = model.params[0] * Rebalancement_frequency, model.params[1]
    return beta, alpha | ac9d1cf638e2ce67219ed16dbbffc652ff47c541 | 3,653,383 |
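A minimal usage sketch, assuming `sm` is `statsmodels.api`: the OLS intercept (scaled by the rebalancing frequency) is Jensen's alpha and the slope is the CAPM beta:

```python
import numpy as np
import statsmodels.api as sm  # the function above refers to this module as `sm`

rng = np.random.default_rng(0)
benchmark = rng.normal(0.0005, 0.01, size=252)                   # daily benchmark returns
asset = 0.0002 + 1.2 * benchmark + rng.normal(0, 0.005, size=252)

beta, alpha = jensen_alpha_beta(asset, benchmark, 252)           # 252 trading days per year
print(round(beta, 2), round(alpha, 3))                           # beta ~1.2, alpha ~0.05 annualized
```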
def cycles_run() -> int:
    """Number of cycles run so far"""
    return lib.m68k_cycles_run() | 145dc9a154a0ec4c2e46fecdeb7106134307cf10 | 3,653,384 |
def loop_and_return_fabric(lines):
    """
    loops lines like:
    #1196 @ 349,741: 17x17
    """
    fabric = {}
    for line in lines:
        [x, y, x_length, y_length] = parse_line(line)
        i_x, i_y = 0, 0
        while i_y < y_length:
            i_x = 0
            while i_x < x_length:
                this_coords = (x + i_x, y - i_y)
                if fabric.get(this_coords, None) != None:
                    fabric[this_coords] += 1
                else:
                    fabric[this_coords] = 1
                i_x += 1
            i_y += 1
    return fabric | d5fd18c5b90c0e6576767a77c954b3546cbaef1a | 3,653,385 |
def get_sample(id):
    """Returns sample possessing id."""
    for sample in samples_global:
        if sample.id == id:
            return sample
    raise Exception(f'sample "{id}" could not be found') | 524305fe77ef5cc03ba51af3eb61301b697b9c1f | 3,653,386 |
def transcriptIterator(transcriptsBedStream, transcriptDetailsBedStream):
""" Iterates over the transcripts detailed in the two streams, producing
Transcript objects. Streams are any iterator that returns bedlines or empty
strings.
"""
transcriptsAnnotations = {}
for tokens in tokenizeBedStream(transcriptDetailsBedStream):
assert (len(tokens) == 4 or len(tokens) == 9) # 9 if it has color data.
tA = TranscriptAnnotation(
ChromosomeInterval(tokens[0], tokens[1], tokens[2], None),
tokens[3].split('/')[-1], tokens[3].split('/')[:-1])
# normalizeAnnotation(tA) # removed this to improve xml
key = (tA.name, tA.chromosomeInterval.chromosome)
if key not in transcriptsAnnotations:
transcriptsAnnotations[key] = []
transcriptsAnnotations[key].append(tA)
for tokens in tokenizeBedStream(transcriptsBedStream):
assert len(tokens) == 12
# Transcript
name = tokens[3]
# Get the chromosome interval
assert tokens[5] in ['+', '-']
cI = ChromosomeInterval(tokens[0], tokens[1], tokens[2], tokens[5] == '+')
# Get the exons
def getExons(exonNumber, blockSizes, blockStarts):
assert exonNumber == len(blockSizes)
assert exonNumber == len(blockStarts)
return [ChromosomeInterval(
cI.chromosome, cI.start + int(blockStarts[i]),
cI.start + int(blockStarts[i]) + int(blockSizes[i]), cI.strand)
for i in range(exonNumber)]
exons = getExons(int(tokens[9]),
tokens[10].split(','), tokens[11].split(','))
# Get the name annotations
annotations = []
key = (name, cI.chromosome)
if key in transcriptsAnnotations:
annotations = transcriptsAnnotations[key]
filteredAnnotations = []
for tA in annotations:
if cI.contains(tA.chromosomeInterval):
tA.chromosomeInterval.strand = cI.strand
filteredAnnotations.append(tA)
yield Transcript(
cI, name, exons, filteredAnnotations,
int(tokens[4]), int(tokens[6]),
int(tokens[7]), tokens[8]) | 2be2bbca915667be89220d92c42b8a8dce905cc4 | 3,653,387 |
import re
def convert_check_filter(tok):
    """Convert an input string into a filter function.

    The filter function accepts a qualified python identifier string
    and returns a bool.

    The input can be a regexp or a simple string. A simple string must
    match a component of the qualified name exactly. A regexp is
    matched against the entire qualified name.

    Matches are case-insensitive.

    Examples::

        convert_check_filter('foo')('a.foo.b') == True
        convert_check_filter('foo')('a.foobar') == False
        convert_check_filter('foo.*')('a.foobar') == False
        convert_check_filter('foo.*')('foobar') == True
    """
    tok = tok.lower()
    if '+' in tok or '*' in tok:
        return re.compile(tok, re.I).match
    else:
        toklist = tok.split('.')

        def func(name):
            chunks = name.lower().split('.')
            if len(toklist) > len(chunks):
                return False
            for i in range(len(chunks)):
                if chunks[i:i + len(toklist)] == toklist:
                    return True
            return False

        return func | 9d1aaa9a5007371e4f33ce3b4fbc86edd15875c6 | 3,653,388 |
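A quick usage sketch; note that the regexp branch returns the bound `match` method, so the filter yields a truthy `re.Match` object or `None` rather than a strict bool:

```python
# Simple-string filters match whole dotted components, case-insensitively.
check = convert_check_filter('Foo')
print(bool(check('a.foo.b')), bool(check('a.foobar')))       # True False

# Regexp filters are matched against the entire qualified name.
check_re = convert_check_filter('foo.*')
print(bool(check_re('foobar')), bool(check_re('a.foobar')))  # True False
```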
def region_stats(x, r_start, r_end):
    """
    Generate basic stats on each region. Return a dict for easy insertion into a DataFrame.
    """
    stats = Munch()
    stats["start"] = r_start
    stats["end"] = r_end
    stats["l"] = r_end - r_start
    stats["min"] = np.min(x[r_start:r_end])
    stats["max"] = np.max(x[r_start:r_end])
    stats["rng"] = stats["max"] - stats["min"]
    stats["mean"] = np.mean(x[r_start:r_end])
    stats["std"] = np.std(x[r_start:r_end])
    stats["var"] = np.var(x[r_start:r_end])
    stats["med"] = np.median(x[r_start:r_end])
    stats["mad"] = scistat.median_abs_deviation(x[r_start:r_end])
    return stats | cb52f6320952be13f9715cb2259b32996bdbb0da | 3,653,389 |
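A usage sketch, assuming `Munch` comes from the `munch` package and `scistat` is an alias for `scipy.stats` (neither import is shown above):

```python
import numpy as np
from munch import Munch        # assumed source of Munch
import scipy.stats as scistat  # assumed alias used above

x = np.array([1.0, 2.0, 4.0, 8.0, 16.0, 32.0])
stats = region_stats(x, 1, 4)  # region covers x[1:4] == [2, 4, 8]
print(stats["l"], stats["min"], stats["max"], stats["med"])  # 3 2.0 8.0 4.0
```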
def _sql_type(ptype):
    """Convert python type to SQL type"""
    if "Union" in ptype.__class__.__name__:
        assert len(ptype.__args__) == 2, "Cannot create sql column with more than one type."
        assert type(None) in ptype.__args__, "Cannot create sql column with more than one type."
        return f"{ptype.__args__[0].__name__} NULL"
    elif ptype in SQLTypes.__dict__.values() and hasattr(ptype, "__name__"):
        return f"{ptype.__name__} NOT NULL"
    else:
        raise ValueError(f"Cannot parse type {ptype}.") | 331734ce050ca261d2d78876ebd78540a088597b | 3,653,392 |
def rescale_data(data: np.ndarray,
option: str,
args: t.Optional[t.Dict[str, t.Any]] = None) -> np.ndarray:
"""Rescale numeric fitted data accordingly to user select option.
Args:
data (:obj:`np.ndarray`): data to rescale.
option (:obj:`str`): rescaling strategy. Must be one in ``VALID_RESCA-
LE`` attribute.
args (:obj:`dict`, optional): additional arguments for the scaler. All
scaler used are from ``sklearn`` package, so you should consult
their documentation for a complete list of available arguments to
user customization. The used scalers for each available ``option``
are:
- ``min-max``: ``sklearn.preprocessing.MinMaxScaler``
- ``standard``: ``sklearn.preprocessing.StandardScale``
- ``robust``: ``sklearn.preprocessing.RobustScaler``
Returns:
np.ndarray: scaled ``data`` based in ``option`` correspondent strategy.
Raises:
ValueError: if ``option`` is not in ``VALID_RESCALE``.
Any exception caused by arguments from ``args`` into the
scaler model is also raised by this function.
"""
if option not in VALID_RESCALE:
raise ValueError("Unknown data rescaling option '{0}'. Please choose "
"one value among {1}".format(option, VALID_RESCALE))
if not args:
args = {}
scaler_model = _RESCALE_SCALERS.get(option, "min-max")(**args)
return scaler_model.fit_transform(data.astype(float)) | 5f885233c262fb2d766417e64f783f807212355e | 3,653,393 |
def extract_labels(text, spacy_model):
    """Extract entities using libratom.

    Returns: core.Label list
    """
    try:
        document = spacy_model(text)
    except ValueError:
        logger.exception(f"spaCy error")
        raise
    labels = set()
    for entity in document.ents:
        label, _ = Label.objects.get_or_create(type=Label.IMPORTER, name=entity.label_)
        labels.add(label)
    return list(labels) | 782fdcb4bdd817b55a38c5efe03db676f0e00eed | 3,653,394 |
from typing import Callable
import click
def variant_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for a DC/OS variant.
"""
function = click.option(
'--variant',
type=click.Choice(['auto', 'oss', 'enterprise']),
default='auto',
help=(
'Choose the DC/OS variant. '
'If the variant does not match the variant of the given '
'installer, an error will occur. '
'Using "auto" finds the variant from the installer. '
'Finding the variant from the installer takes some time and so '
'using another option is a performance optimization.'
),
)(command) # type: Callable[..., None]
return function | 4c89dc15b46c9d147445ef458b721c7ce835cbe7 | 3,653,395 |
def GetSegByName(name):
    """
    @return Address of the first byte in the Segment
            with the provided name, or BADADDR
    """
    for Segment in ida.Segments():
        if ida.SegName(Segment) == name:
            return Segment
    return ida.BADADDR | 4b0353da187735095805b5a80bb0e23a2ce6491b | 3,653,396 |
def sample_points_from_plateaus(all_plateaus, mode, stack_size=10, n_samples=1):
"""
Samples points from each plateau in each video
:param all_plateaus: dictionary containing all plateaus, keys are plateaus's ids, values are the plateau objects
:param mode: either `flow` or `rgb`
:param stack_size: optical flow stack size
:param n_samples: number of samples you want to draw from each plateau
:return: sampled_points, dictionary whose keys are video ids and whose values are dictionary containing the sampled
points as values as the plateaus ids as keys
"""
sampled_points = {}
h_stack_c = np.ceil(stack_size / 2)
for g_id, g in all_plateaus.items():
if mode == 'flow':
x_range = np.arange(h_stack_c+1, g.n - h_stack_c, dtype=np.int32)
else:
x_range = None # will take the whole x later for sampling
if g.video not in sampled_points:
sampled_points[g.video] = {}
sampled_points[g.video][g_id] = g.sample_points(n_samples, x_range=x_range)
return sampled_points | 1dd12721acc9b126d244902016e939792b220d1e | 3,653,397 |
def mobile_user_meeting_list(request):
    """
    Return the meeting list for a user.

    :param request:
    :return:
    """
    dbs = request.dbsession
    user_id = request.POST.get('user_id', '')
    start_date = request.POST.get('start_date', '')
    end_date = request.POST.get('end_date', '')
    error_msg = ''
    if not user_id:
        error_msg = 'The user ID must not be empty!'
    elif not start_date:
        error_msg = 'The start date must not be empty!'
    elif not end_date:
        error_msg = 'The end date must not be empty!'
    else:
        meetings = mob_find_user_meetings(dbs, user_id, start_date, end_date)
    if error_msg:
        json_str = {
            'status': False,
            'meeting': '',
            'error_msg': error_msg
        }
    else:
        json_str = {
            'status': True,
            'meeting': meetings,
            'error_msg': error_msg
        }
    resp = Response()
    resp.headers['Access-Control-Allow-Origin'] = '*'
    resp.json = json_str
    return resp | 55e9a61a755ef957f4b6bf504b3efe721b13cfd7 | 3,653,398 |
import ctypes
def get_current_thread_cpu_time():
"""
<Purpose>
Gets the total CPU time for the currently executing thread.
<Exceptions>
An AssertionError will be raised if the underlying system call fails.
<Returns>
A floating amount of time in seconds.
"""
# Get the current thread handle
current_thread = _mach_thread_self()
# Allocate a structure
thread_info = thread_basic_info()
# Structure size
struct_size = ctypes.c_uint(THREAD_BASIC_INFO_SIZE)
# Make the system call
result = _thread_info(current_thread, THREAD_BASIC_INFO,ctypes.byref(thread_info), ctypes.byref(struct_size))
# Sum up the CPU usage
cpu_time = thread_info.user_time.seconds + thread_info.user_time.microseconds / 1000000.0
cpu_time += thread_info.system_time.seconds + thread_info.system_time.microseconds / 1000000.0
# Safety check, result should be 0
# Do the safety check after we free the memory to avoid leaks
assert(result == 0)
# Return the structure
return cpu_time | 6d83314e8ceee0336b6c0ed7f71fa49e89b24ca8 | 3,653,399 |
def get_data_upload_id(jwt: str) -> str:
    """Function to get a temporary upload ID from
    DAFNI data upload API

    Args:
        jwt (str): Users JWT

    Returns:
        str: Temporary Upload ID
    """
    url = f"{DATA_UPLOAD_API_URL}/nid/upload/"
    data = {"cancelToken": {"promise": {}}}
    return dafni_post_request(url, jwt, data, allow_redirect=True) | dcce05a8efda1c90e6a78a19757f57deffd0c247 | 3,653,400 |
def StationMagnitudeContribution_TypeInfo():
    """StationMagnitudeContribution_TypeInfo() -> RTTI"""
    return _DataModel.StationMagnitudeContribution_TypeInfo() | d9af45a3bbe993de37c351b5791ba8b87aeeedc9 | 3,653,401 |
def _get_operations(rescale=0.003921, normalize_weight=0.48):
    """Get operations."""
    operation_0 = {
        'tensor_op_module': 'minddata.transforms.c_transforms',
        'tensor_op_name': 'RandomCrop',
        'weight': [32, 32, 4, 4, 4, 4],
        'padding_mode': "constant",
        'pad_if_needed': False,
        'fill_value': 0
    }
    operation_1 = {
        'tensor_op_module': 'minddata.transforms.c_transforms',
        'tensor_op_name': 'Rescale',
        'rescale': rescale,
        'shift': 0,
        'num_classes': 10
    }
    operation_2 = {
        'tensor_op_module': 'minddata.transforms.c_transforms',
        'tensor_op_name': 'Normalize',
        'weights': [normalize_weight]
    }
    return [operation_0, operation_1, operation_2] | a3bab4147f1a2020fb87853fc30bede277f0f4bd | 3,653,402 |
def itm_command(
ticker: str = None,
):
"""Options ITM"""
# Check for argument
if ticker is None:
raise Exception("Stock ticker is required")
dates = yfinance_model.option_expirations(ticker)
if not dates:
raise Exception("Stock ticker is invalid")
current_price = yfinance_model.get_price(ticker)
df_date, df_cotm, df_citm, df_potm, df_pitm = [], [], [], [], []
for date in dates:
df_date.append(date)
options = yfinance_model.get_option_chain(ticker, date)
call_oi = options.calls.set_index("strike")["openInterest"].fillna(0)
put_oi = options.puts.set_index("strike")["openInterest"].fillna(0)
df_cotm.append(int(call_oi[call_oi.index >= current_price].sum()))
df_citm.append(int(call_oi[call_oi.index <= current_price].sum()))
df_pitm.append(int(put_oi[put_oi.index >= current_price].sum()))
df_potm.append(int(put_oi[put_oi.index <= current_price].sum()))
# Calculate the total per column
df_date.append("<b>Total</b>")
total = [df_citm, df_cotm, df_pitm, df_potm]
for x in total:
x.append(sum(x))
# Create the DataFrame
df = pd.DataFrame(
{
"Expiry": df_date,
"Calls ITM": df_citm,
"Calls OTM": df_cotm,
"Puts ITM": df_pitm,
"Puts OTM": df_potm,
}
)
formats = {
"Calls ITM": "{:,}",
"Calls OTM": "{:,}",
"Puts ITM": "{:,}",
"Puts OTM": "{:,}",
}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df.set_index("Expiry", inplace=True)
fig = imps.plot_df(
df,
fig_size=(600, (35 * len(df.index))),
col_width=[3, 2.5],
tbl_header=imps.PLT_TBL_HEADER,
tbl_cells=imps.PLT_TBL_CELLS,
font=imps.PLT_TBL_FONT,
row_fill_color=imps.PLT_TBL_ROW_COLORS,
paper_bgcolor="rgba(0, 0, 0, 0)",
)
fig.update_traces(
cells=dict(
align=["center", "right"],
font=dict(
color=["white"]
+ [imps.PLT_TBL_INCREASING] * 2
+ [imps.PLT_TBL_DECREASING] * 2
),
),
)
imagefile = imps.save_image("opt-itm.png", fig)
return {
"title": f"{ticker.upper()} Options: In The Money",
"imagefile": imagefile,
} | b2230ee5f8c520523f7ce844372a4f26d14fe53d | 3,653,403 |
def create_nx_suite(seed=0, rng=None):
"""
returns a dict of graphs generated by networkx for testing,
designed to be used in a pytest fixture
"""
if rng is None:
rng = np.random.RandomState(seed)
out_graphs = {}
for N in [1, 2, 4, 8, 16, 32, 64, 128]:
for dtype in [np.bool, np.int32, np.float32, np.complex64]:
basename = f"{N}_{str(dtype)[8:-2]}"
name = f"ladder_{basename}"
out_graphs[name] = [gen_ladder(N, dtype)]
SAMPLE_N = 5
# smp = [(4,.1),(4,.5),(4,.7),(7,.1),(7,.5),(16,.1),(16,.5),(32,.1),(100,.1)]
# for N, prob_edge in smp:
for N in [4,7,16,32,100]:
for prob_edge in [.1,.5,.7]:
dtype = np.bool
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
out_graphs[name].append(t)
dtype = np.int32
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
edge_weights = rng.randint(1, rng.randint(2, max(N//2, 3)),
size=rng.randint(1, N//2))
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
for e1, e2 in t.edges():
t[e1, e2] = rng.choice(edge_weights)
out_graphs[name].append(t)
dtype = np.float64
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
edge_weights = rng.rand(rng.randint(1, N//2)) + 0.5
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
for e1, e2 in t.edges():
t[e1, e2] = rng.choice(edge_weights)
out_graphs[name].append(t)
return out_graphs | eb64688850d48b755dc526ed3d64876d04ba3914 | 3,653,404 |
def _nearest_neighbor_features_per_object_in_chunks(
reference_embeddings_flat, query_embeddings_flat, reference_labels_flat,
ref_obj_ids, k_nearest_neighbors, n_chunks):
"""Calculates the nearest neighbor features per object in chunks to save mem.
Uses chunking to bound the memory use.
Args:
reference_embeddings_flat: Tensor of shape [n, embedding_dim],
the embedding vectors for the reference frame.
query_embeddings_flat: Tensor of shape [m, embedding_dim], the embedding
vectors for the query frames.
reference_labels_flat: Tensor of shape [n], the class labels of the
reference frame.
ref_obj_ids: int tensor of unique object ids in the reference labels.
k_nearest_neighbors: Integer, the number of nearest neighbors to use.
n_chunks: Integer, the number of chunks to use to save memory
(set to 1 for no chunking).
Returns:
nn_features: A float32 tensor of nearest neighbor features of shape
[m, n_objects, feature_dim].
"""
chunk_size = tf.cast(tf.ceil(tf.cast(tf.shape(query_embeddings_flat)[0],
tf.float32) / n_chunks), tf.int32)
wrong_label_mask = tf.not_equal(reference_labels_flat,
ref_obj_ids[:, tf.newaxis])
all_features = []
for n in range(n_chunks):
if n_chunks == 1:
query_embeddings_flat_chunk = query_embeddings_flat
else:
chunk_start = n * chunk_size
chunk_end = (n + 1) * chunk_size
query_embeddings_flat_chunk = query_embeddings_flat[chunk_start:chunk_end]
# Use control dependencies to make sure that the chunks are not processed
# in parallel which would prevent any peak memory savings.
with tf.control_dependencies(all_features):
features = _nn_features_per_object_for_chunk(
reference_embeddings_flat, query_embeddings_flat_chunk,
wrong_label_mask, k_nearest_neighbors
)
all_features.append(features)
if n_chunks == 1:
nn_features = all_features[0]
else:
nn_features = tf.concat(all_features, axis=0)
return nn_features | e9b7af295ddfab56f70748e42fb7b06f6192a3ac | 3,653,405 |
import heapq
def heap_pop(heap):
    """
    Wrapper around heapq's heappop method to support updating priorities of
    items in the queue.

    Main difference here is that we toss out any queue entries that have been
    updated since insertion.
    """
    while len(heap) > 0:
        pri_board_tup = heapq.heappop(heap)
        board = pri_board_tup[1]
        if not board == None:
            del ENTRY_FINDER[board]
            return pri_board_tup
    raise KeyError('Pop from empty queue :(') | c640fd178a399332fc10ccbb55085cb08d118865 | 3,653,406 |
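`heap_pop` relies on a module-level `ENTRY_FINDER` dict and on stale entries having their payload replaced by `None`. A simplified sketch of the matching push/update side, modelled on the standard heapq lazy-deletion recipe; the names here are assumptions, not part of the original module:

```python
import heapq

ENTRY_FINDER = {}  # board -> [priority, board] entry currently sitting in the heap

def heap_push(heap, board, priority):
    """Add a board or update its priority; the old entry is lazily invalidated."""
    if board in ENTRY_FINDER:
        stale = ENTRY_FINDER.pop(board)
        stale[1] = None              # heap_pop() skips entries whose payload is None
    entry = [priority, board]
    ENTRY_FINDER[board] = entry
    heapq.heappush(heap, entry)
```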
def _variant_po_to_dict(tokens) -> CentralDogma:
    """Convert a PyParsing data dictionary to a central dogma abundance (i.e., Protein, RNA, miRNA, Gene).

    :type tokens: ParseResult
    """
    dsl = FUNC_TO_DSL.get(tokens[FUNCTION])
    if dsl is None:
        raise ValueError('invalid tokens: {}'.format(tokens))

    concept = tokens[CONCEPT]
    return dsl(
        namespace=concept[NAMESPACE],
        name=concept[NAME],
        identifier=concept.get(IDENTIFIER),
        xrefs=tokens.get(XREFS),
        variants=[
            _variant_to_dsl_helper(variant_tokens)
            for variant_tokens in tokens[VARIANTS]
        ],
    ) | 28e989087a91accf793eaaada2e65a71ee145c32 | 3,653,407 |
def project(name, param):
"""a tilemill project description, including a basic countries-of-the-world layer."""
return {
"bounds": [-180, -85.05112877980659, 180, 85.05112877980659],
"center": [0, 0, 2],
"format": "png",
"interactivity": False,
"minzoom": 0,
"maxzoom": 22,
"srs": "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 "
"+y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over",
"Stylesheet": ["style.mss"],
"Layer": [
{
"id": "countries",
"name": "countries",
"srs": "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 "
"+y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over",
"geometry": "polygon",
"Datasource": {
"file": "http://mapbox-geodata.s3.amazonaws.com/natural-earth-1.4.0/"
"cultural/10m-admin-0-countries.zip",
"type": "shape"
}
},
],
"scale": 1,
"metatile": 2,
"name": name,
"description": param['properties']['name'],
} | 9609c523cccc99168bbc0e7dbf10fe8624d399c2 | 3,653,408 |
def get_deepest():
    """Return tokens with largest liquidities.

    Returns:
        str: HTML-formatted message.
    """
    url = config.URLS['deepest']
    api_params = {'limit': 5,
                  'orderBy': 'usdLiquidity',
                  'direction': 'desc',
                  'key': POOLS_KEY
                  }
    response = api_call(url, params=api_params)
    formatted_response = ft.format_deepest(response['results'])
    return formatted_response | c944f20f65dd68716b4f436b02ec5e373c04848f | 3,653,409 |
def _grompp_str(op_name, gro_name, checkpoint_file=None):
    """Helper function, returns grompp command string for operation."""
    mdp_file = signac.get_project().fn('mdp_files/{op}.mdp'.format(op=op_name))
    cmd = '{gmx} grompp -f {mdp_file} -c {gro_file} {checkpoint} -o {op}.tpr -p'.format(
        gmx=gmx_exec, mdp_file=mdp_file, op=op_name, gro_file=gro_name,
        checkpoint='' if checkpoint_file is None else ('-t ' + checkpoint_file))
    return workspace_command(cmd) | 9201bd49fd09ce9faa268d8ea4d33482cea5d7ad | 3,653,410 |
def get_role_with_name(role_name: str) -> Role:
    """Get role with given name."""
    role = Role.query.filter(Role.name == role_name).one()
    return role | e20858ef1bcbb54d2c1d09ba9d3a54bf15dfa658 | 3,653,412 |
def namespace_store_factory(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session, pvc_factory_session
):
"""
Create a NamespaceStore factory.
Calling this fixture lets the user create namespace stores.
Args:
request (object): Pytest built-in fixture
cld_mgr (CloudManager): Cloud Manager object containing all
connections to clouds
mcg_obj (MCG): MCG object containing data and utils
related to MCG
cloud_uls_factory: Factory for creation of underlying storage
Returns:
func: Factory method - allows the user to create namespace stores
"""
created_nss = []
cmdMap = {
"cli": cli_create_namespacestore,
"oc": oc_create_namespacestore,
}
def _create_nss(method, nss_dict):
"""
Tracks creation and cleanup of all the namespace stores that were created in the current scope
Args:
method (str): String for selecting method of namespace store creation (CLI/OC)
nss_dict (dict): Dictionary containing storage provider as key and a list of tuples
as value.
Namespace store dictionary examples - 'CloudName': [(amount, region), (amount, region)]
i.e. - 'aws': [(3, us-west-1),(2, eu-west-2)]
Returns:
list: A list of the NamespaceStore objects created by the factory in the current scope
"""
current_call_created_nss = []
for platform, nss_lst in nss_dict.items():
for nss_tup in nss_lst:
if platform.lower() == "nsfs":
uls_name = nss_tup[0] or create_unique_resource_name(
constants.PVC.lower(), platform
)
pvc_factory_session(
custom_data=template_pvc(uls_name, size=nss_tup[1])
)
else:
# Create the actual target bucket on the request service
uls_dict = cloud_uls_factory_session({platform: [(1, nss_tup[1])]})
uls_name = list(uls_dict[platform])[0]
nss_name = create_unique_resource_name(constants.MCG_NSS, platform)
# Create the actual namespace resource
cmdMap[method.lower()](
nss_name, platform, mcg_obj_session, uls_name, cld_mgr, nss_tup
)
nss_obj = NamespaceStore(
name=nss_name,
method=method.lower(),
mcg_obj=mcg_obj_session,
uls_name=uls_name,
)
created_nss.append(nss_obj)
current_call_created_nss.append(nss_obj)
nss_obj.verify_health()
return current_call_created_nss
def nss_cleanup():
for nss in created_nss:
nss.delete()
request.addfinalizer(nss_cleanup)
return _create_nss | cc2d090d8dc0f12d89331ada54e4054e117e544d | 3,653,413 |
def get_user(request, username):
    """
    Gets a user's information.

    return:
        {
            status: HTTP status,
            name: string,
            gender: string,
            marital_status: string,
            first_name: string
        }
    """
    data = get_user_info(username)
    if data:
        return Response({'data': data}, status=200)
    else:
        return Response(status=404) | 9820b441718629780ff72ab00776fc2d4c95a63f | 3,653,414 |
def find_changes(d_before, d_after):
    """
    Returns a dictionary of changes in the format:
        {
            <system id>: {
                <changed key>: <Change type>,
                ...
            },
            ...
        }
    The changes should describe the differences between d_before and d_after.
    """
    changes = dict()
    for k in d_after:
        if k not in d_before:
            changes[k] = Change.Addition
        elif type(d_before[k]) is dict and type(d_after[k]) is dict:
            nested = find_changes(d_before[k], d_after[k])
            if len(nested) > 0:
                changes[k] = nested
        elif d_before[k] != d_after[k]:
            changes[k] = Change.Edit
    # Apply removals
    for k in d_before:
        if k not in d_after:
            changes[k] = Change.Removal
    return changes | 02e5eea5ac1264c593d542a8f745a8d3571d5fac | 3,653,415 |
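A usage sketch, assuming `Change` is an enum with `Addition`, `Edit`, and `Removal` members (the original type is not shown):

```python
from enum import Enum

class Change(Enum):          # assumed shape of the Change type used above
    Addition = 'addition'
    Edit = 'edit'
    Removal = 'removal'

before = {'svc1': {'port': 80, 'tls': False}, 'svc2': {'port': 443}}
after  = {'svc1': {'port': 8080, 'tls': False}, 'svc3': {'port': 9090}}

print(find_changes(before, after))
# roughly: {'svc1': {'port': Change.Edit}, 'svc3': Change.Addition, 'svc2': Change.Removal}
```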
def check_vector_inbetween(v1, v2, point):
    """ Checks if point lies inbetween two vectors v1, v2. Returns boolean. """
    if (np.dot(np.cross(v1, point), np.cross(v1, v2))) >= 0 and (np.dot(np.cross(v2, point), np.cross(v2, v1))) >= 0:
        return True
    else:
        return False | 6eeaa4a9e37ea345c3399c103dadbc45c306887c | 3,653,416 |
def accuracy(y_preds, y_test):
    """
    Function to calculate the accuracy of algorithm
    :param y_preds: predictions for test data
    :param y_test: actual labels for test data
    :return: accuracy in percentage
    """
    return np.sum(np.where(y_preds == y_test, 1, 0)) * 100 / len(y_test) | f41522663ae9a35e976f4d848f14e42ef0993fd9 | 3,653,417 |
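A quick numeric check of the percentage output:

```python
import numpy as np

y_pred = np.array([1, 0, 1, 1, 0])
y_true = np.array([1, 0, 0, 1, 0])

print(accuracy(y_pred, y_true))  # 80.0 -- 4 of 5 predictions match
```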
def get_all(factory='official', **kwargs):
    """Construct and return a list of Class `Event`.

    Calls the hook.

    Args:
        factory: Manager used to fetch `Event` objects. Currently only the
            official Kyoto University website is supported; accepts either a
            class inheriting from EventFactoryMixin or 'official'.
        date (:obj:`datetime`, optional): datetime of the desired events.
            Choose either this or `month`/`year`; if both are given, this
            takes precedence.
        year (int, optional): Year for which to fetch events.
            If both are given, `date` takes precedence.
        month (int, optional): Month for which to fetch events.
            If both are given, `date` takes precedence.

    Returns:
        generator of Events
    """
    return kueventparser(factory=factory, method='get_all', **kwargs) | bc1fabe37fc8065ff5394259607caf32c6345b41 | 3,653,418 |
def nudupl(f):
"""Square(f) following Cohen, Alg. 5.4.8.
"""
L = int(((abs(f.discriminant))/4)**(1/4))
a, b, c = f[0], f[1], f[2]
# Step 1 Euclidean step
d1, u, v = extended_euclid_xgcd(b, a)
A = a//d1
B = b//d1
C = (-c*u) % A
C1 = A-C
if C1 < C:
C = -C1
# Step 2 Partial reduction
d, v, v2, v3, z = parteucl(A, C, L)
# Step 3 Special case
if z==0:
g = (B*v3+c)//d
a2 = d**2
c2 = v3**2
b2 = b + (d+v3)**2 - a2 - c2
c2 = c2 + g*d1
else:
# Step 4 Final computations
e = (c*v + B*d)//A
g = (e*v2 - B)//v
b2 = e*v2 + v*g
if d1>1:
b2 = d1*b2
v = d1*v
v2 = d1*v2
a2 = d**2
c2 = v3**2
b2 = b2 + (d+v3)**2 - a2 - c2
a2 = a2 + e*v
c2 = c2 + g*v2
f2 = type(f)((a2, b2, c2))
return f2 | 7f5a16c0fc2611a5f9dc0ebde00e3587c188f944 | 3,653,420 |
def remove_schema(name):
    """Removes a configuration schema from the database"""
    schema = controller.ConfigurationSchema()
    schema.remove(name)
    return 0 | e45b415ea6eb57402790b04bbf1fa63749242a77 | 3,653,421 |
def get_unassigned_independent_hyperparameters(outputs):
"""Going backward from the outputs provided, gets all the independent
hyperparameters that are not set yet.
Setting an hyperparameter may lead to the creation of additional hyperparameters,
which will be most likely not set. Such behavior happens when dealing with,
for example, hyperparameters associated with substitutition
modules such as :func:`deep_architect.modules.siso_optional`,
:func:`deep_architect.modules.siso_or`, and :func:`deep_architect.modules.siso_repeat`.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs to start the traversal at.
Returns:
OrderedSet[deep_architect.core.Hyperparameter]:
Ordered set of hyperparameters that are currently present in the
graph and not have been assigned a value yet.
"""
assert not is_specified(outputs)
unassigned_indep_hs = OrderedSet()
for h in get_all_hyperparameters(outputs):
if not isinstance(
h, DependentHyperparameter) and not h.has_value_assigned():
unassigned_indep_hs.add(h)
return unassigned_indep_hs | 6f28f3fcbaf4a875c3a42cdb4fc9ad715c99a093 | 3,653,422 |
def get_theta_benchmark_matrix(theta_type, theta_value, benchmarks, morpher=None):
    """Calculates vector A such that dsigma(theta) = A * dsigma_benchmarks"""
    if theta_type == "benchmark":
        n_benchmarks = len(benchmarks)
        index = list(benchmarks).index(theta_value)
        theta_matrix = np.zeros(n_benchmarks)
        theta_matrix[index] = 1.0
    elif theta_type == "morphing":
        theta_matrix = morpher.calculate_morphing_weights(theta_value)
    else:
        raise ValueError("Unknown theta {}".format(theta_type))
    return theta_matrix | aa84006b7a69faa8803ecea20ffd24e35f178185 | 3,653,423 |
def reorder_points(point_list):
"""
Reorder points of quadrangle.
(top-left, top-right, bottom right, bottom left).
:param point_list: List of point. Point is (x, y).
:return: Reorder points.
"""
# Find the first point which x is minimum.
ordered_point_list = sorted(point_list, key=lambda x: (x[0], x[1]))
first_point = ordered_point_list[0]
# Find the third point. The slope is middle.
slope_list = [[cal_slope(first_point, p), p] for p in ordered_point_list[1:]]
ordered_slope_point_list = sorted(slope_list, key=lambda x: x[0])
first_third_slope, third_point = ordered_slope_point_list[1]
# Find the second point which is above the line between the first point and the third point.
# All that's left is the fourth point.
if above_line(ordered_slope_point_list[0][1], third_point, first_third_slope):
second_point = ordered_slope_point_list[0][1]
fourth_point = ordered_slope_point_list[2][1]
reverse_flag = False
else:
second_point = ordered_slope_point_list[2][1]
fourth_point = ordered_slope_point_list[0][1]
reverse_flag = True
# Find the top left point.
second_fourth_slope = cal_slope(second_point, fourth_point)
if first_third_slope < second_fourth_slope:
if reverse_flag:
reorder_point_list = [fourth_point, first_point, second_point, third_point]
else:
reorder_point_list = [second_point, third_point, fourth_point, first_point]
else:
reorder_point_list = [first_point, second_point, third_point, fourth_point]
return reorder_point_list | 8ef3466616ecf003750cce7e1125d913d258cf15 | 3,653,425 |
import torch
def ppg_acoustics_collate(batch):
"""Zero-pad the PPG and acoustic sequences in a mini-batch.
Also creates the stop token mini-batch.
Args:
batch: An array with B elements, each is a tuple (PPG, acoustic).
Consider this is the return value of [val for val in dataset], where
dataset is an instance of PPGSpeechLoader.
Returns:
ppg_padded: A (batch_size, feature_dim_1, num_frames_1) tensor.
input_lengths: A batch_size array, each containing the actual length
of the input sequence.
acoustic_padded: A (batch_size, feature_dim_2, num_frames_2) tensor.
gate_padded: A (batch_size, num_frames_2) tensor. If "1" means reaching
stop token. Currently assign "1" at the last frame and the padding.
output_lengths: A batch_size array, each containing the actual length
of the output sequence.
"""
# Right zero-pad all PPG sequences to max input length.
# x is (PPG, acoustic), x[0] is PPG, which is an (L(varied), D) tensor.
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([x[0].shape[0] for x in batch]), dim=0,
descending=True)
max_input_len = input_lengths[0]
ppg_dim = batch[0][0].shape[1]
ppg_padded = torch.FloatTensor(len(batch), max_input_len, ppg_dim)
ppg_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
curr_ppg = batch[ids_sorted_decreasing[i]][0]
ppg_padded[i, :curr_ppg.shape[0], :] = curr_ppg
# Right zero-pad acoustic features.
feat_dim = batch[0][1].shape[1]
max_target_len = max([x[1].shape[0] for x in batch])
# Create acoustic padded and gate padded
acoustic_padded = torch.FloatTensor(len(batch), max_target_len, feat_dim)
acoustic_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
curr_acoustic = batch[ids_sorted_decreasing[i]][1]
acoustic_padded[i, :curr_acoustic.shape[0], :] = curr_acoustic
gate_padded[i, curr_acoustic.shape[0] - 1:] = 1
output_lengths[i] = curr_acoustic.shape[0]
ppg_padded = ppg_padded.transpose(1, 2)
acoustic_padded = acoustic_padded.transpose(1, 2)
return ppg_padded, input_lengths, acoustic_padded, gate_padded,\
output_lengths | 1357a8a9fa901a9be4f79ea13fd5ae7c3810bbeb | 3,653,426 |
def problem004():
    """
    Find the largest palindrome made from the product of two 3-digit numbers.
    """
    return largest_palindrome_from_product_of_two_n_digit_numbers(3) | 516c98d75ac2b4e286e58ea940d49c1d2bcd2dc7 | 3,653,427 |
import torch
def residual_l1_max(reconstruction: Tensor, original: Tensor) -> Tensor:
    """Construct l1 difference between original and reconstruction.

    Note: Only positive values in the residual are considered, i.e. values below zero are clamped.
    That means only pixels that are brighter in the input than in the reconstruction (likely lesions) are kept."""
    residual = original - reconstruction
    return torch.where(residual > 0.0, residual, torch.zeros_like(residual)) | 8649b1947845c0e3f9e57c0ec2e68d7bed94be5d | 3,653,428 |
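A short usage sketch (`Tensor` above refers to `torch.Tensor`); the clamping is equivalent to `torch.clamp(original - reconstruction, min=0)`:

```python
import torch

original = torch.tensor([0.9, 0.2, 0.5])
reconstruction = torch.tensor([0.4, 0.6, 0.5])

print(residual_l1_max(reconstruction, original))  # tensor([0.5000, 0.0000, 0.0000])
```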
def build_url(path):
    """
    Construct an absolute url by appending a path to a domain.
    """
    return 'http://%s%s' % (DOMAIN, path) | fa9df465607082993571ca71c576d7b250f6cc76 | 3,653,429 |
def get_registration_url(request, event_id):
    """
    Compute the absolute URL to create a booking on a given event
    @param request: An HttpRequest used to discover the FQDN and path
    @param event_id: the ID of the event to register to
    """
    registration_url_rel = reverse(booking_create, kwargs={"event_id": event_id})
    return request.build_absolute_uri(registration_url_rel) | d8b344c6574a120d365a718934f5dc0e78173a6f | 3,653,430 |
def create_no_args_decorator(decorator_function,
function_for_metadata=None,
):
"""
Utility method to create a decorator that has no arguments at all and is implemented by `decorator_function`, in
implementation-first mode or usage-first mode.
The created decorator is a function with var-args. When called it checks the length
(0=called with parenthesis, 1=called without, 2=error).
Note: we prefer to use this var-arg signature rather than a "(_=None)" signature, because it is more readable for
the decorator's help.
:param decorator_function:
:param function_for_metadata: an alternate function to use for the documentation and module metadata of the
generated function
:return:
"""
if function_for_metadata is None:
function_for_metadata = decorator_function
@with_signature(None,
func_name=function_for_metadata.__name__,
doc=function_for_metadata.__doc__,
module_name=function_for_metadata.__module__)
def new_decorator(*_):
"""
Code for your decorator, generated by decopatch to handle the case when it is called without parenthesis
"""
if len(_) == 0:
# called with no args BUT parenthesis: @foo_decorator().
return with_parenthesis_usage(decorator_function, *_)
elif len(_) == 1:
first_arg_value = _[0]
if can_arg_be_a_decorator_target(first_arg_value):
# called with no arg NOR parenthesis: @foo_decorator
return no_parenthesis_usage(decorator_function, first_arg_value)
# more than 1 argument or non-decorable argument: not possible
raise TypeError("Decorator function '%s' does not accept any argument."
"" % decorator_function.__name__)
return new_decorator | 7067974d7bd15c238968f78aa0057086458940bf | 3,653,431 |
import torch
def compute_batch_jacobian(input, output, retain_graph=False):
    """
    Compute the Jacobian matrix of a batch of outputs with respect to
    some input (normally, the activations of a hidden layer).
    Returned Jacobian has dimensions Batch x SizeOutput x SizeInput
    Args:
        input (list or torch.Tensor): Tensor or sequence of tensors
            with the parameters to which the Jacobian should be
            computed. Important: the requires_grad attribute of input needs to
            be True while computing output in the forward pass.
        output (torch.Tensor): Tensor with the values of which the Jacobian is
            computed
    Returns (torch.Tensor): 3D tensor containing the Jacobian of output with
        respect to input: batch_size x output_size x input_size.
    """
    batch_jacobian = torch.Tensor(output.shape[0], output.shape[1], input.shape[1])
    assert output.shape[0] == input.shape[0], \
        "Batch size needs to be the same for both input and output"
    for batch_idx in range(output.shape[0]):
        for i, output_elem in enumerate(output[batch_idx]):
            if i < output.shape[1]: rg = True
            else: rg = retain_graph
            gradients = torch.autograd.grad(output_elem, input, retain_graph=rg)[0][batch_idx].detach()
            batch_jacobian[batch_idx, i, :] = gradients
    return batch_jacobian | c18f596a3500f2f82e2b4716e6f9892a01fb31c7 | 3,653,433 |
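A usage sketch for a small linear map; for `y = x @ W.T` every per-sample Jacobian equals `W`, which makes the result easy to verify:

```python
import torch

torch.manual_seed(0)
W = torch.randn(3, 4)                      # output_size x input_size
x = torch.randn(2, 4, requires_grad=True)  # batch of 2 inputs
y = x @ W.t()                              # shape (2, 3)

J = compute_batch_jacobian(x, y)           # shape (2, 3, 4)
print(torch.allclose(J[0], W), torch.allclose(J[1], W))  # True True
```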
def is_associative(value):
    """Checks if `value` is an associative object meaning that it can be
    accessed via an index or key

    Args:
        value (mixed): Value to check.

    Returns:
        bool: Whether `value` is associative.

    Example:

        >>> is_associative([])
        True
        >>> is_associative({})
        True
        >>> is_associative(1)
        False
        >>> is_associative(True)
        False

    .. versionadded:: 2.0.0
    """
    return hasattr(value, '__getitem__') | 5d2a9e0e69ad793a98657dc13b26f79900f29294 | 3,653,434 |
def join_audio(audio1, audio2):
    """
    >>> join_audio(([1], [4]), ([2, 3], [5, 6]))
    ([1, 2, 3], [4, 5, 6])
    """
    (left1, right1) = audio1
    (left2, right2) = audio2
    left = left1 + left2
    right = right1 + right2
    audio = (left, right)
    return audio | 23348b746469d362fd66371d61142b4227814ff3 | 3,653,435 |
def csi_from_sr_and_pod(success_ratio_array, pod_array):
    """Computes CSI (critical success index) from success ratio and POD.

    POD = probability of detection

    :param success_ratio_array: np array (any shape) of success ratios.
    :param pod_array: np array (same shape) of POD values.
    :return: csi_array: np array (same shape) of CSI values.
    """
    return (success_ratio_array ** -1 + pod_array ** -1 - 1.) ** -1 | 84952fe6f7c8bd780c64c53183342ab0d8f3f90f | 3,653,436 |
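The formula implemented above is CSI = 1 / (1/SR + 1/POD - 1). A quick numeric check:

```python
import numpy as np

sr = np.array([0.8, 0.5])
pod = np.array([0.6, 0.5])

print(csi_from_sr_and_pod(sr, pod))  # [0.52173913 0.33333333]
```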
def compute_secondary_observables(data):
"""Computes secondary observables and extends matrix of observables.
Argument
--------
data -- structured array
must contains following fields: length, width, fluo, area, time
Returns
-------
out -- structured array
new fields are added (check `out.dtype.names`)
"""
ell, w, fluo, area, time = map(np.array,
zip(*data[['length',
'width',
'fluo',
'area',
'time']])
)
if len(time) > 1:
delta_t = time[1]-time[0]
age = (time - time[0] + delta_t/2.)/(time[-1] - time[0] + delta_t)
else:
age = np.nan
volume = spherocylinder_volume(ell, w)
concentration = fluo/volume
density = fluo/area
ALratio = area/ell
out = append_fields(data,
['volume',
'concentration',
'density',
'ALratio',
'age'],
[volume,
concentration,
density,
ALratio,
age],
usemask=False, fill_value=np.nan)
return out | 7141d16a579e4b629e25fee3a33c9a844a08e48f | 3,653,438 |
def get_account_number(arn):
"""
Extract the account number from an arn.
:param arn: IAM SSL arn
:return: account number associated with ARN
"""
return arn.split(":")[4] | 3d0fe552691ae98cf0dc70bc2055297f01a5d800 | 3,653,439 |
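# Example for get_account_number above (hypothetical ARN, for illustration only):
_example_arn = "arn:aws:iam::123456789012:server-certificate/my-cert"
_account = get_account_number(_example_arn)   # "123456789012"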
def get_hashtags(tweet):
"""return hashtags from a given tweet
Args:
tweet (object): an object representing a tweet
Returns:
list: list of hastags in a tweet
"""
entities = tweet.get('entities', {})
hashtags = entities.get('hashtags', [])
return [get_text(tag) for tag in hashtags if get_text(
tag) not in ['rdc', 'drc', 'rdcongo', 'drcongo']] | ef222d64294c62d27e86a4c8520bb197701ed1af | 3,653,440 |
def get_appliance_ospf_neighbors_state(
self,
ne_id: str,
) -> dict:
"""Get appliance OSPF neighbors state
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - ospf
- GET
- /ospf/state/neighbors/{neId}
:param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
:type ne_id: str
:return: Returns dictionary of OSPF neighbors state
:rtype: dict
"""
        return self._get("/ospf/state/neighbors/{}".format(ne_id)) | 25a985ccf8b00ee3f27ea43d2a8371eef2443963 | 3,653,442
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj) | 64ffab112b0f0397541f8661a861a958c8ccf26e | 3,653,444 |
def first_index_k_zeros_left(qstr, k, P):
"""
For a binary string qstr, return the first index of q with k (mod P) zeros to the left.
Return: index in [0, qstr.length]
"""
num_zeros_left = 0
for j in range(qstr.length+1):
if (num_zeros_left - k) % P == 0:
return j
if j == qstr.length:
raise Exception("No valid position found")
if qstr[j] == 0:
num_zeros_left += 1 | 62e505290fb32b43860deae3477dec718028e7af | 3,653,445 |
def transform_points(points, transf_matrix):
"""
Transform (3,N) or (4,N) points using transformation matrix.
"""
if points.shape[0] not in [3, 4]:
raise Exception("Points input should be (3,N) or (4,N) shape, received {}".format(points.shape))
return transf_matrix.dot(np.vstack((points[:3, :], np.ones(points.shape[1]))))[:3, :] | f478dfdfe41c694ada251deca33820336001d61e | 3,653,446 |
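# Usage sketch for transform_points above (illustrative only): applying a pure
# translation in homogeneous coordinates shifts every point by the same offset.
import numpy as np
_pts = np.array([[0.0, 1.0],
                 [0.0, 2.0],
                 [0.0, 3.0]])          # (3, N) points
_tf = np.eye(4)
_tf[:3, 3] = [10.0, 20.0, 30.0]        # translation by (10, 20, 30)
_moved = transform_points(_pts, _tf)   # column 0 becomes (10, 20, 30)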
def get_lat_long(zip):
"""
This function takes a zip code and looks up the latitude and longitude using
the uszipcode package. Documentation: https://pypi.python.org/pypi/uszipcode
"""
search = ZipcodeSearchEngine()
zip_data = search.by_zipcode(zip)
lat = zip_data['Latitude']
long = zip_data['Longitude']
return lat, long | 4fa8dc583bba9a6068db58ab86c2cab5f310edc4 | 3,653,447 |
def propose_perturbation_requests(current_input, task_idx, perturbations):
"""Wraps requests for perturbations of one task in a EvaluationRequest PB.
Generates one request for each perturbation, given by adding the perturbation
to current_input.
Args:
current_input: the current policy weights
task_idx: The index of the task to evaluate.
perturbations: A list of perturbations.
Returns:
A list of requests, one for each perturbation.
"""
requests = []
for p_idx, p in enumerate(perturbations):
perturbed_input = current_input + p
requests.append(
first_order_pb2.TaskEvaluationRequest(
request_task_idx=task_idx,
input_idx=p_idx,
eval_order=TASK_VALUE_EVAL_ORDER,
current_input=perturbed_input.tolist()))
return requests | 279da36eb633005c8f8ee79e66b71b3bdf8783f3 | 3,653,448 |
import google.auth.transport.requests
import grpc
def id_token_call_credentials(credentials):
"""Constructs `grpc.CallCredentials` using
`google.auth.Credentials.id_token`.
Args:
credentials (google.auth.credentials.Credentials): The credentials to use.
Returns:
grpc.CallCredentials: The call credentials.
"""
request = google.auth.transport.requests.Request()
return grpc.metadata_call_credentials(
IdTokenAuthMetadataPlugin(credentials, request)
) | 433bb494d9f8de529a891f529a42f89af0b5ef77 | 3,653,449 |
import time
def test_analyze(request,hash,db_name):
"""
Get features of a sequence, using the sequence's sha-1 hash as the
identifier.
"""
db = blat.models.Feature_Database.objects.get(name=db_name)
sequence = blat.models.Sequence.objects.get(db=db,hash=hash)
ts = int(time.mktime(sequence.modified.timetuple()))
return render_to_response(
'test/analyze.html', { "hash" : hash, "mtime" : ts },
context_instance=RequestContext(request)
) | 173ebb356167558cb64a35265caa39e828a43bae | 3,653,450 |
from typing import List
import boto3
def _collect_scaling_groups(owner: str) -> List:
"""Collect autoscaling groups that contain key `ES_role` and belong to the specified owner"""
client = boto3.client("autoscaling")
print("Collecting scaling groups")
resp = client.describe_auto_scaling_groups()
assert "NextToken" not in resp, "did not program to handle pagination"
groups = resp['AutoScalingGroups']
result = []
for group in groups:
if _get_tag_val(group['Tags'], 'Owner') == owner and \
any([tag['Key'] == ES_ROLE_KEY for tag in group['Tags']]):
result.append(group)
return result | f1f75e6158450aaef834a910f8c36bb8812b1ede | 3,653,451 |
import jax
import jax.numpy as jnp
def cross_entropy_loss(logits, labels, label_smoothing=0., dtype=jnp.float32):
"""Compute cross entropy for logits and labels w/ label smoothing
Args:
logits: [batch, length, num_classes] float array.
labels: categorical labels [batch, length] int array.
label_smoothing: label smoothing constant, used to determine the on and off values.
dtype: dtype to perform loss calcs in, including log_softmax
"""
num_classes = logits.shape[-1]
labels = jax.nn.one_hot(labels, num_classes, dtype=dtype)
if label_smoothing > 0:
labels = labels * (1 - label_smoothing) + label_smoothing / num_classes
logp = jax.nn.log_softmax(logits.astype(dtype))
return -jnp.mean(jnp.sum(logp * labels, axis=-1)) | 7b6ce3145bc85433e54cef0ac85570eeb0fe7230 | 3,653,452 |
import torch
def set_optimizer(name, model, learning_rate):
"""
Specify which optimizer to use during training.
Initialize a torch.optim optimizer for the given model based on the specified name and learning rate.
Parameters
----------
name : string or None, default = 'adam'
The name of the torch.optim optimizer to be used. The following
strings are accepted as arguments: 'adagrad', 'adam', 'adamax', 'adamw', 'rmsprop', or 'sgd'
model : utils.models.EncoderDecoder
The model which is to be optimized
learning_rate : float or None
The learning rate to be used by the optimizer. If set to None, the default value as defined in
torch.optim is used
Returns
-------
torch.optim optimizer class
A torch.optim optimizer that implements one of the following algorithms:
Adagrad, Adam, Adamax, AdamW, RMSprop, or SGD (stochastic gradient descent)
SGD is set to use a momentum of 0.5.
"""
if name == "adam":
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
if name == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.5)
if name == "adamw":
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
if name == "adagrad":
optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate)
if name == "adamax":
optimizer = torch.optim.Adamax(model.parameters(), lr=learning_rate)
if name == "rmsprop":
optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
return optimizer | 5c1a5e836176b90506ca6344c01ce6828b43d917 | 3,653,453 |
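# Minimal usage sketch for set_optimizer above (illustrative; a plain nn.Linear
# stands in for the utils.models.EncoderDecoder mentioned in the docstring).
import torch
_model = torch.nn.Linear(8, 2)
_opt = set_optimizer("sgd", _model, learning_rate=0.01)   # SGD with momentum 0.5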
import httplib
import urllib
def get_http_url(server_path, get_path):
"""
    Variant that uses httplib directly; no better than urllib2
server_path = "example.com"
get_path = "/some_path"
"""
    # urllib is a higher-level library that uses httplib under the hood for http;
    # we use httplib directly for better error detection
direct_http = 1
if direct_http:
conn = httplib.HTTPConnection(server_path)
try:
conn.request("GET", get_path)
except:
raise RuntimeError("Cant connect to: " + server_path)
response = conn.getresponse()
if response.reason != 'OK':
raise RuntimeError("Error getting data from: " + get_path)
#print response.status, response.reason, response.msg
return response
else:
f = urllib.urlopen("http://" + server_path + get_path)
#print f.info()
return f | d759609b1c48af28e678fa75bd9ff102f7eaafae | 3,653,454 |
def not_found_view(request):
"""Not Found view.
"""
model = request.context
return render_main_template(model, request, contenttile='not_found') | 0fe250d09f8fc007ffb07f848e59e779da9aefb0 | 3,653,455 |
import torch
def top_filtering(
logits, top_k=0, top_p=0.0, threshold=-float("Inf"), filter_value=-float("Inf")
):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
Taken from `interact.py`
"""
assert (
logits.dim() == 1
) # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1
)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits | 7b230fc959e0078f1cfc5b2f2f991c79e0f4fd86 | 3,653,456 |
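# Illustrative usage of top_filtering above (not from the original snippet):
# keep only the 3 most likely tokens of a toy distribution, then sample from it.
import torch
import torch.nn.functional as F
_logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])
_filtered = top_filtering(_logits.clone(), top_k=3)       # last two entries become -inf
_probs = F.softmax(_filtered, dim=-1)
_next_token = torch.multinomial(_probs, 1)                # index in {0, 1, 2}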
def get_physical_type(obj):
"""
Return the physical type that corresponds to a unit (or another
physical type representation).
Parameters
----------
obj : quantity-like or `~astropy.units.PhysicalType`-like
An object that (implicitly or explicitly) has a corresponding
physical type. This object may be a unit, a
`~astropy.units.Quantity`, an object that can be converted to a
`~astropy.units.Quantity` (such as a number or array), a string
that contains a name of a physical type, or a
`~astropy.units.PhysicalType` instance.
Returns
-------
`~astropy.units.PhysicalType`
A representation of the physical type(s) of the unit.
Examples
--------
The physical type may be retrieved from a unit or a
`~astropy.units.Quantity`.
>>> import astropy.units as u
>>> u.get_physical_type(u.meter ** -2)
PhysicalType('column density')
>>> u.get_physical_type(0.62 * u.barn * u.Mpc)
PhysicalType('volume')
The physical type may also be retrieved by providing a `str` that
contains the name of a physical type.
>>> u.get_physical_type("energy")
PhysicalType({'energy', 'torque', 'work'})
Numbers and arrays of numbers correspond to a dimensionless physical
type.
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
"""
if isinstance(obj, PhysicalType):
return obj
if isinstance(obj, str):
return _physical_type_from_str(obj)
try:
unit = obj if isinstance(obj, core.UnitBase) else quantity.Quantity(obj, copy=False).unit
except TypeError as exc:
raise TypeError(f"{obj} does not correspond to a physical type.") from exc
unit = _replace_temperatures_with_kelvin(unit)
physical_type_id = unit._get_physical_type_id()
unit_has_known_physical_type = physical_type_id in _physical_unit_mapping
if unit_has_known_physical_type:
return _physical_unit_mapping[physical_type_id]
else:
return PhysicalType(unit, "unknown") | 03d28bdb9a507939e52bc0021dae3c539b4954a5 | 3,653,457 |
def reverse(list):
"""Returns a new list or string with the elements or characters in reverse
order"""
if isinstance(list, str):
return "".join(reversed(list))
return _list(reversed(list)) | ba74d9e4e54782114f534fb4c888c681ab708b67 | 3,653,458 |
from typing import Dict
def PubMedDiabetes(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/linqs",
version: str = "latest",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the PubMedDiabetes graph.
The graph is automatically retrieved from the LINQS repository. The Pubmed Diabetes dataset consists of 19717 scientific publications from
PubMed database pertaining to diabetes classified into one of three classes.
The citation network consists of 44338 links. Each publication in the dataset
is described by a TF/IDF weighted word vector from a dictionary which consists
of 500 unique words.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "latest"
The version of the graph to retrieve.
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of PubMedDiabetes graph.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{namata2012query,
title={Query-driven active surveying for collective classification},
author={Namata, Galileo and London, Ben and Getoor, Lise and Huang, Bert and EDU, UMD},
booktitle={10th International Workshop on Mining and Learning with Graphs},
volume={8},
year={2012}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PubMedDiabetes",
repository="linqs",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs,
callbacks=[
parse_linqs_pubmed_incidence_matrix
],
callbacks_arguments=[
{
"cites_path": "Pubmed-Diabetes/Pubmed-Diabetes/data/Pubmed-Diabetes.DIRECTED.cites.tab",
"content_path": "Pubmed-Diabetes/Pubmed-Diabetes/data/Pubmed-Diabetes.NODE.paper.tab",
"node_path": "nodes.tsv",
"edge_path": "edges.tsv"
}
]
)() | ddac0cfb8a525c42fe5a8d6c1a70677ab57451e0 | 3,653,459 |
def find_neighbor_indices(atoms, probe, k):
"""
Returns list of indices of atoms within probe distance to atom k.
"""
neighbor_indices = []
atom_k = atoms[k]
radius = atom_k.radius + probe + probe
indices = list(range(k))
indices = indices + list(range(k+1, len(atoms)))
for i in indices:
atom_i = atoms[i]
dist = pos_distance(atom_k.pos, atom_i.pos)
if dist < radius + atom_i.radius:
neighbor_indices.append(i)
return neighbor_indices | 05c3218357d660d6b66c3d614bfcb0d78431d32e | 3,653,460 |
def genDir(EAs):
"""
Generate the projection direction given the euler angles. Since the image
is in the x-y plane, the projection direction is given by R(EA)*z where
z = (0,0,1)
"""
dir_vec = np.array([rotmat3D_EA(*EA)[:, 2] for EA in EAs])
return dir_vec | 0753fad9638ca8b0ac4e899ad103dc08266a208b | 3,653,461 |
def plainica(x, reducedim=0.99, backend=None, random_state=None):
""" Source decomposition with ICA.
Apply ICA to the data x, with optional PCA dimensionality reduction.
Parameters
----------
x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples)
data set
reducedim : {int, float, 'no_pca'}, optional
A number of less than 1 in interpreted as the fraction of variance that should remain in the data. All
components that describe in total less than `1-reducedim` of the variance are removed by the PCA step.
An integer numer of 1 or greater is interpreted as the number of components to keep after applying the PCA.
If set to 'no_pca' the PCA step is skipped.
backend : dict-like, optional
Specify backend to use. When set to None the backend configured in config.backend is used.
Returns
-------
result : ResultICA
Source decomposition
"""
x = atleast_3d(x)
t, m, l = np.shape(x)
if backend is None:
backend = scotbackend
# pre-transform the data with PCA
    if reducedim in ('no_pca', 'no pca'):  # skip the PCA step
c = np.eye(m)
d = np.eye(m)
xpca = x
else:
c, d, xpca = backend['pca'](x, reducedim)
# run on residuals ICA to estimate volume conduction
mx, ux = backend['ica'](cat_trials(xpca), random_state=random_state)
    # correct (un)mixing matrix estimates
mx = mx.dot(d)
ux = c.dot(ux)
class Result:
unmixing = ux
mixing = mx
return Result | 7ffe9ebc78220898c84459fed61fc0f32fe05e69 | 3,653,462 |
def all_equal(values: list):
"""Check that all values in given list are equal"""
return all(values[0] == v for v in values) | 8ed08f63959367f3327554adc11b1286291963d8 | 3,653,464 |
def _tester(func, *args):
"""
Tests function ``func`` on arguments and returns first positive.
>>> _tester(lambda x: x%3 == 0, 1, 2, 3, 4, 5, 6)
3
>>> _tester(lambda x: x%3 == 0, 1, 2)
None
:param func: function(arg)->boolean
:param args: other arguments
:return: something or none
"""
for arg in args:
if arg is not None and func(arg):
return arg
return None | 035c8bf68b4ff7e4fbdb7ed1b2601f04110287d8 | 3,653,465 |
from datetime import datetime
def new_revision(partno):
"""
Presents the form to add a new revision, and creates it upon POST submit
"""
_load_if_released(partno) # ensures the component exists and is released
form = RevisionForm(request.form)
if request.method == 'POST' and form.validate_on_submit():
now = datetime.now()
result = current_app.mongo.db.components.update_one(
filter={'_id': partno},
update={
'$set': {
'released': False # a new revision is not already released
},
'$push': {
'revisions': {
'date': now,
'comment': form.comment.data
},
'history': {
'date': now,
'user': current_user.id,
'message': 'new revision created'
}
}
}
)
if result.modified_count == 1:
flash('new revision created', 'success')
else:
# should not happen.
flash('no data modified, please contact the administrator', 'error')
return redirect(url_for('components.details', partno=partno))
extract_errors(form)
return render_template('components/revision_form.html', form=form, partno=partno) | 722a3860e9daeb4bd5d9339f7dcaf5245c51b5de | 3,653,466 |
def fresnel_parameter(rays, diffraction_points):
""" returns the fresnel diffraction parameter (always as a positive)
Parameters
----------
rays : [n] list of shapely LineString (3d)
diffraction_points: [n] list of Points (3d)
diffraction point which the ray is rounding
Returns
-------
fresnel diffraction parameters: [n,] float array
"""
wavelength = 0.1903 # GPS L1 signal frequency of 1575.42 MHz
distances = np.array([r.project(d)
for r, d in zip(rays, diffraction_points)])
nearest_points = (r.interpolate(d) for r, d in zip(rays, distances))
diffraction_distances = np.array(
[d.z-p.z for p, d in zip(nearest_points, diffraction_points)])
v = np.where(distances == 0, -np.inf, diffraction_distances *
(2 / (wavelength * distances))**0.5)
return v | cd398797161f1e9e66805cd09162359ed6e89330 | 3,653,467 |
def validate(net, val_data, ctx, eval_metric):
"""Test on validation dataset."""
eval_metric.reset()
# set nms threshold and topk constraint
net.set_nms(nms_thresh=0.45, nms_topk=400)
net.hybridize()
for batch in val_data:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for x, y in zip(data, label):
# get prediction results
ids, scores, bboxes = net(x)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
# split ground truths
gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
# update metric
eval_metric.update(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults)
return eval_metric.get() | 79dcd9b0d1920952b5badd4aa9f3f234776f6e06 | 3,653,468 |
from re import S
def add_unique_geom_id(point_gdf: gpd.GeoDataFrame, log: Logger=None) -> gpd.GeoDataFrame:
"""Adds an unique identifier (string) to GeoDataFrame of points based on point locations (x/y).
"""
point_gdf[S.xy_id] = [f'{str(round(geom.x, 1))}_{str(round(geom.y, 1))}' for geom in point_gdf[S.geometry]]
unique_count = point_gdf[S.xy_id].nunique()
unique_share = round(100 * unique_count/len(point_gdf.index), 2)
    if log:
        log.info(f'found {unique_count} unique sampling points ({unique_share} %)')
return point_gdf | 0663e24b217c2911083d68146a5d8ff25c4fd8bd | 3,653,469 |
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return _TOPOLOGY.get_data_parallel_rank() | a1da062793f6798e2e56809b3076c811f786a82b | 3,653,470 |
import math
def entropy(data):
"""
Compute the Shannon entropy, a measure of uncertainty.
"""
if len(data) == 0:
return None
n = sum(data)
_op = lambda f: f * math.log(f)
return - sum(_op(float(i) / n) for i in data) | ebfd9a84885a95ec6e4e7b2d88a0fb69fbbfaea1 | 3,653,471 |
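# Worked example for entropy above (illustrative): for counts [1, 1, 1, 1] the
# frequencies are uniform, so the Shannon entropy is ln(4) ≈ 1.386 (natural log).
_uniform_entropy = entropy([1, 1, 1, 1])   # ≈ 1.3863
_skewed_entropy = entropy([9, 1])          # ≈ 0.3251, less uncertain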
import torch
def alexnet(pretrained=False):
"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = AlexNet()
if pretrained:
model_path = './model/alexnet.pth.tar'
pretrained_model = torch.load(model_path)
model.load_state_dict(pretrained_model['state_dict'])
return model | a42df7c926472b88501001eefd691959e6acb3ac | 3,653,473 |
import torch
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
Generates one random target in (num_classes - 1) possibilities for each label that is different from the original
label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1) | c0740c5ddc7c1f866b4c3cb2986f45a672d22e49 | 3,653,474 |
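# Usage sketch for generate_random_targets above (illustrative): each sampled
# target differs from its original label.
import torch
_labels = torch.tensor([0, 3, 3, 7])
_targets = generate_random_targets(_labels, num_classes=10)
assert not torch.any(_targets == _labels)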
def recall_k(sent_im_dist, im_labels, ks=(1, 5, 10)):
"""
Compute recall at given ks.
"""
im_labels = tf.cast(im_labels, tf.bool)
def retrieval_recall(dist, labels, k):
# Use negative distance to find the index of
# the smallest k elements in each row.
pred = tf.nn.top_k(-dist, k=k)[1]
# Create a boolean mask for each column (k value) in pred,
# s.t. mask[i][j] is 1 iff pred[i][k] = j.
pred_k_mask = lambda topk_idx: tf.one_hot(topk_idx, tf.shape(labels)[1],
on_value=True, off_value=False,
dtype=tf.bool)
# Create a boolean mask for the predicted indices
# by taking logical or of boolean masks for each column,
# s.t. mask[i][j] is 1 iff j is in pred[i].
pred_mask = tf.reduce_any(tf.map_fn(
pred_k_mask, tf.transpose(pred), dtype=tf.bool), axis=0)
# pred_mask = tf.map_fn(create_pred_mask, pred)
# Entry (i, j) is matched iff pred_mask[i][j] and labels[i][j] are 1.
matched = tf.cast(tf.logical_and(pred_mask, labels), dtype=tf.float32)
return tf.reduce_mean(tf.reduce_max(matched, axis=1))
img_sent_recall = [retrieval_recall(tf.transpose(sent_im_dist),
tf.transpose(im_labels), k) for k in ks]
sent_img_recall = [retrieval_recall(sent_im_dist, im_labels, k) for k in ks]
return img_sent_recall + sent_img_recall | 188f2cb4c3581f9c565253fbb17797a408ce3d74 | 3,653,475 |
def get_suggestion(project_slug, lang_slug, version_slug, pagename, user):
"""
| # | project | version | language | What to show |
| 1 | 0 | 0 | 0 | Error message |
| 2 | 0 | 0 | 1 | Error message (Can't happen) |
| 3 | 0 | 1 | 0 | Error message (Can't happen) |
| 4 | 0 | 1 | 1 | Error message (Can't happen) |
| 5 | 1 | 0 | 0 | A link to top-level page of default version |
| 6 | 1 | 0 | 1 | Available versions on the translation project |
| 7 | 1 | 1 | 0 | Available translations of requested version |
| 8 | 1 | 1 | 1 | A link to top-level page of requested version |
"""
suggestion = {}
if project_slug:
try:
proj = Project.objects.get(slug=project_slug)
if not lang_slug:
lang_slug = proj.language
try:
ver = Version.objects.get(
project__slug=project_slug, slug=version_slug)
except Version.DoesNotExist:
ver = None
if ver: # if requested version is available on main project
if lang_slug != proj.language:
try:
translations = proj.translations.filter(
language=lang_slug)
if translations:
ver = Version.objects.get(
project__slug=translations[0].slug, slug=version_slug)
else:
ver = None
except Version.DoesNotExist:
ver = None
# if requested version is available on translation project too
if ver:
# Case #8: Show a link to top-level page of the version
suggestion['type'] = 'top'
suggestion['message'] = "What are you looking for?"
suggestion['href'] = proj.get_docs_url(ver.slug, lang_slug)
# requested version is available but not in requested language
else:
# Case #7: Show available translations of the version
suggestion['type'] = 'list'
suggestion['message'] = (
"Requested page seems not to be translated in "
"requested language. But it's available in these "
"languages.")
suggestion['list'] = []
suggestion['list'].append({
'label': proj.language,
'project': proj,
'version_slug': version_slug,
'pagename': pagename
})
for t in proj.translations.all():
try:
Version.objects.get(
project__slug=t.slug, slug=version_slug)
suggestion['list'].append({
'label': t.language,
'project': t,
'version_slug': version_slug,
'pagename': pagename
})
except Version.DoesNotExist:
pass
else: # requested version does not exist on main project
if lang_slug == proj.language:
trans = proj
else:
translations = proj.translations.filter(language=lang_slug)
trans = translations[0] if translations else None
if trans: # requested language is available
# Case #6: Show available versions of the translation
suggestion['type'] = 'list'
suggestion['message'] = (
"Requested version seems not to have been built yet. "
"But these versions are available.")
suggestion['list'] = []
for v in Version.objects.public(user, trans, True):
suggestion['list'].append({
'label': v.slug,
'project': trans,
'version_slug': v.slug,
'pagename': pagename
})
# requested project exists but requested version and language
# are not available.
else:
# Case #5: Show a link to top-level page of default version
# of main project
suggestion['type'] = 'top'
suggestion['message'] = 'What are you looking for??'
suggestion['href'] = proj.get_docs_url()
except Project.DoesNotExist:
        # Case #1-4: Show error message
suggestion['type'] = 'none'
suggestion[
'message'] = "We're sorry, we don't know what you're looking for"
else:
suggestion['type'] = 'none'
suggestion[
'message'] = "We're sorry, we don't know what you're looking for"
return suggestion | 66ddf3e44f006fcd1339b0483c3219c429643353 | 3,653,476 |
import numpy as np
from scipy.ndimage import zoom
from skimage.transform import resize
def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, cval=0, order_z=0):
"""
separate_z=True will resample with order 0 along z
:param data:
:param new_shape:
:param is_seg:
:param axis:
:param order:
:param do_separate_z:
:param cval:
:param order_z: only applies if do_separate_z is True
:return:
"""
assert len(data.shape) == 4, "data must be (c, x, y, z)"
assert not is_seg, "do not use this patch for resampling segmentations"
print("running patched resample_data_or_seg function")
dtype_data = data.dtype
shape = np.array(data[0].shape)
new_shape = np.array(new_shape)
if np.all(shape == new_shape):
print("no resampling necessary")
return data
data = data.astype(float)
resize_fn = resize
kwargs = {'mode': 'edge', 'anti_aliasing': False}
if do_separate_z:
print("separate z, order in z is", order_z, "order inplane is", order)
assert len(axis) == 1, "only one anisotropic axis supported"
axis = axis[0]
if axis == 0:
new_shape_2d = new_shape[1:]
elif axis == 1:
new_shape_2d = new_shape[[0, 2]]
else:
new_shape_2d = new_shape[:-1]
reshaped_final_data = np.empty(shape=(data.shape[0], new_shape[0], new_shape[1], new_shape[2]), dtype=dtype_data)
do_z = shape[axis] != new_shape[axis]
if do_z:
if axis == 0:
buffer = np.empty(shape=(shape[axis], new_shape_2d[0], new_shape_2d[1]), dtype=float)
elif axis == 1:
buffer = np.empty(shape=(new_shape_2d[0], shape[axis], new_shape_2d[1]), dtype=float)
else:
buffer = np.empty(shape=(new_shape_2d[0], new_shape_2d[1], shape[axis]), dtype=float)
else:
buffer = None
for c in range(data.shape[0]):
if do_z:
reshaped_data = buffer
else:
reshaped_data = reshaped_final_data[c]
for slice_id in range(shape[axis]):
if axis == 0:
reshaped_data[slice_id, :, :] = resize_fn(data[c, slice_id], new_shape_2d, order, cval=cval, **kwargs)
elif axis == 1:
reshaped_data[:, slice_id, :] = resize_fn(data[c, :, slice_id], new_shape_2d, order, cval=cval, **kwargs)
else:
reshaped_data[:, :, slice_id] = resize_fn(data[c, :, :, slice_id], new_shape_2d, order, cval=cval, **kwargs)
if do_z:
# The following few lines are blatantly copied and modified from sklearn's resize()
rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
orig_rows, orig_cols, orig_dim = reshaped_data.shape
row_scale = float(orig_rows) / rows
col_scale = float(orig_cols) / cols
dim_scale = float(orig_dim) / dim
reshaped_final_data[c] = zoom(reshaped_data, (1 / row_scale, 1 / col_scale, 1 / dim_scale), order=order_z, cval=cval, mode='nearest')
else:
print("no separate z, order", order)
reshaped_final_data = np.empty(shape=(data.shape[0], new_shape[0], new_shape[1], new_shape[2]), dtype=dtype_data)
for c in range(data.shape[0]):
reshaped_final_data[c] = resize_fn(data[c], new_shape, order, cval=cval, **kwargs)
return reshaped_final_data | ab7aa7ab1db40ec605d7069ccf3b1bc8751c3855 | 3,653,477 |