content stringlengths 35-762k | sha1 stringlengths 40-40 | id int64 0-3.66M |
---|---|---|
import requests
def build_response(status=requests.codes.ok, etag='etag', modified='modified', max_age=None):
"""Make a requests.Response object suitable for testing.
Args:
        status: HTTP status code (defaults to 200 OK)
        max_age: value for the max-age Cache-Control directive; a positive value
            yields a fresh cache, while the default None leaves the cache stale
etag: etag cache-control header
modified: last-modified cache-control header
Returns:
A Response instance populated according to the arguments.
"""
headers = {'last-modified': modified, 'etag': etag, 'Cache-Control':
'max-age={}'.format(max_age)}
test_response = requests.Response()
test_response.status_code = status
test_response.headers = headers
return test_response | f9a97da74b7802511180f30dc45e9df5d5e87f51 | 3,651,849 |
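# Editor's illustrative usage sketch (not part of the original row): exercising the
# default, stale-cache response produced by build_response above.
_resp = build_response()
assert _resp.status_code == requests.codes.ok
assert _resp.headers['etag'] == 'etag'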
from typing import List, TypeVar
T = TypeVar('T')
def split_list(big_list: List[T], delimiter: T) -> List[List[T]]:
"""Like string.split(foo), except for lists."""
cur_list: List[T] = []
parts: List[List[T]] = []
for item in big_list:
if item == delimiter:
if cur_list:
parts.append(cur_list)
cur_list = []
else:
cur_list.append(item)
if cur_list:
parts.append(cur_list)
return parts | c56dd88a7376f002ae6b91c3b227c8a16991ca31 | 3,651,850 |
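# Editor's illustrative usage sketch (values made up): empty runs between delimiters
# are dropped, mirroring how str.split treats consecutive separators for tokens.
assert split_list([1, 2, 0, 3, 0, 0, 4], delimiter=0) == [[1, 2], [3], [4]]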
import numpy as np
def generate_partitions(data):
"""
Generates a random nested partition for an array of integers
    :param data: list of integers to partition
    :return: a nested list representing a random binary partition of data
"""
if len(data) == 1:
return data
else:
        mask1 = np.random.choice(len(data), len(data) // 2, replace=False)
par1 = [data[i] for i in range(len(data)) if i in mask1]
par2 = [data[i] for i in range(len(data)) if i not in mask1]
return [generate_partitions(par1), generate_partitions(par2)] | 164749c135de1cf690bb209a18270a5550cdefc8 | 3,651,851 |
import numpy as np
def randomRectangularCluster(nRow, nCol, minL, maxL, mask=None):
"""
Create a random rectangular cluster neutral landscape model with
values ranging 0-1.
Parameters
----------
nRow : int
The number of rows in the array.
nCol : int
The number of columns in the array.
minL: int
The minimum possible length of width and height for each random
rectangular cluster.
maxL: int
The maximum possible length of width and height for each random
rectangular cluster.
mask : array, optional
2D array used as a binary mask to limit the elements with values.
Returns
-------
out : array
2D array.
"""
if mask is None:
mask = np.ones((nRow, nCol))
# Create an empty array of correct dimensions
array = np.zeros((nRow, nCol)) - 1
# Keep applying random clusters until all elements have a value
while np.min(array) == -1:
width = np.random.choice(range(minL, maxL))
height = np.random.choice(range(minL, maxL))
row = np.random.choice(range(-maxL, nRow))
col = np.random.choice(range(-maxL, nCol))
array[row:row + width, col:col + height] = np.random.random()
# Apply mask and rescale 0-1
maskedArray = maskArray(array, mask)
rescaledArray = linearRescale01(maskedArray)
    return rescaledArray | b53db06114ac3a465c1e0444bed59aa7403bba83 | 3,651,853 |
def add_arc():
"""
    :return: tuple of two GArc objects (left hand, right hand)
"""
l_hand = GArc(200, 200, 60, 150, x=480, y=270)
l_hand.filled = True
l_hand.fill_color = "#8eded9"
r_hand = GArc(200, 200, -30, 120, x=650, y=300)
r_hand.filled = True
r_hand.fill_color = "#8eded9"
return l_hand, r_hand | 667004d534d58ab11e9b41ca42572dc445ffcf7d | 3,651,855 |
def get_data_item_or_add(results_dic, name, n_hid, epochs, horizon, timesteps):
""" Return or create a new DataItem in `results_dic` with the corresponding
metadata.
"""
if name not in results_dic:
results_dic[name] = []
found = False
for item in results_dic[name]:
if item.is_metadata(n_hid, epochs, horizon, timesteps):
found = True
return item
if not found:
results_dic[name].append(
DataItem(n_hid, epochs, horizon, timesteps))
return results_dic[name][-1] | e6c713cd89b7a9816f52be11a4730f1cef60355c | 3,651,856 |
def midcurve_atm_fwd_rate(asset: Asset, expiration_tenor: str, forward_tenor: str, termination_tenor: str,
benchmark_type: str = None,
floating_rate_tenor: str = None,
clearing_house: str = None, location: PricingLocation = None, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day atm forward rate for swaption vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param forward_tenor: relative date representation of swap's start date after option expiry e.g. 2y
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param benchmark_type: benchmark type e.g. LIBOR
:param floating_rate_tenor: floating index rate
:param clearing_house: Example - "LCH", "EUREX", "JSCC", "CME"
:param location: Example - "TKO", "LDN", "NYC"
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
    :return: midcurve ATM forward rate curve
"""
df = _get_swaption_measure(asset, benchmark_type=benchmark_type, floating_rate_tenor=floating_rate_tenor,
effective_date=forward_tenor, expiration_tenor=expiration_tenor,
termination_tenor=termination_tenor, clearing_house=clearing_house, source=source,
real_time=real_time, start=DataContext.current.start_date,
end=DataContext.current.end_date,
query_type=QueryType.MIDCURVE_ATM_FWD_RATE, location=location)
return _extract_series_from_df(df, QueryType.MIDCURVE_ATM_FWD_RATE) | e77ba9ef705c2eefe0f46431862697b6a840d6fd | 3,651,857 |
def extrode_multiple_urls(urls):
""" Return the last (right) url value """
if urls:
return urls.split(',')[-1]
return urls | 34ec560183e73100a62bf40b34108bb39f2b04b4 | 3,651,858 |
def build_header(cp: Config) -> str:
"""Build the email header for a SMTP email message"""
header = '\n'.join([
'From: {}'.format(cp.sender),
        'To: {}'.format(', '.join(cp.receiver)),
'Subject: {}\n\n'.format(cp.subject)
])
return header | a0c9fdc820d4a454c0384c46775d3e1359710fad | 3,651,859 |
import numpy as np
def apex_distance(r0, rc, Rc, uvec):
"""
Implements equation (E4) of TYH18
"""
R0 = rc + Rc * uvec - r0
return np.hypot(*R0) | f88d59727fce25306ae6ef0856941efdbb80a712 | 3,651,860 |
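# Editor's illustrative numeric check with made-up 2-D inputs (np.hypot(*R0) implies
# two components): here R0 = rc + Rc*uvec - r0 = [1, 2], so the result is sqrt(5).
assert np.isclose(
    apex_distance(np.array([0.0, 0.0]), np.array([1.0, 0.0]), 2.0, np.array([0.0, 1.0])),
    np.sqrt(5.0),
)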
def pixel2phase(data):
"""
converts each channel of images in the data to phase component of its 2-dimensional discrete Fourier transform.
:param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
:return: numpy array with same shape as data
"""
channels = data.shape[-1]
return fourier(data)[:, :, :, channels:] | 1b6b2c513cc20fe9642dd375dd17ee2205692912 | 3,651,861 |
def take_last_while(predicate, list):
"""Returns a new list containing the last n elements of a given list, passing
each value to the supplied predicate function, and terminating when the
predicate function returns false. Excludes the element that caused the
predicate function to fail. The predicate function is passed one argument:
(value)"""
    for i, e in enumerate(reversed(list)):
        if not predicate(e):
            # slice from the right; when the last element itself fails, i == 0 and
            # len(list) - i == len(list), which correctly yields an empty list
            return list[len(list) - i:]
return list | 19468c9130e9ab563eebd97c30c0e2c74211e44b | 3,651,862 |
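# Editor's illustrative usage sketch (values made up): elements are taken from the end
# until the predicate first fails, and the failing element itself is excluded.
assert take_last_while(lambda x: x > 2, [1, 5, 2, 3, 4]) == [3, 4]
assert take_last_while(lambda x: x > 10, [1, 2, 3]) == []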
import re
from bs4 import BeautifulSoup
def get_notes() -> str:
"""Scrape notes and disclaimers from dashboard."""
# As of 6/5/20, the only disclaimer is "Data update weekdays at 4:30pm"
with get_firefox() as driver:
notes = []
match = re.compile('disclaimers?', re.IGNORECASE)
driver.implicitly_wait(30)
driver.get(dashboard_url)
soup = BeautifulSoup(driver.page_source, 'html5lib')
has_notes = False
text = soup.get_text().splitlines()
for text_item in text:
if match.search(text_item):
notes.append(text_item.strip())
has_notes = True
if not has_notes:
raise FormatError(
"This dashboard url has changed. None of the <div> elements contains'Disclaimers' " + dashboard_url)
return '\n\n'.join(notes) | 7ec0efab1c5ed17c1878ece751bffe82d77f0105 | 3,651,863 |
def signup_logout(request):
"""
    Thin wrapper around the built-in Django logout view.
"""
return logout_view(request, template_name='logged_out.html') | 14c403720c396aa8bbb37752ce304bb2804dd46b | 3,651,864 |
import numpy as np
def stress_rotation(stress, angle):
    """
    Rotates a stress vector by a given angle.
    This rotates the stress from the local to the global axis system.
    Use a negative angle to rotate from the global to the local system.
The stress vector must be in Voigt notation and engineering stress is used.
Parameters
----------
stress : vector
        The stress vector that must be rotated.
angle : float
The rotation angle in degrees.
Returns
-------
stress_rot : vector
        A rotated version of the stress vector.
"""
angle = angle * np.pi/180 # convert to radians
m = np.cos(-angle)
n = np.sin(-angle)
T1_inv = np.matrix([[m**2, n**2, 2*m*n],
[n**2, m**2, -2*m*n],
[-m*n, m*n, m**2-n**2]])
stress_rot = T1_inv * stress
return stress_rot | 96ae75ae61fdbee0cf120e6d705cadc265452e7d | 3,651,865 |
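# Editor's illustrative sanity check (values made up): rotating a pure sigma_x state
# by 90 degrees should move the stress entirely into the y direction.
_sigma = np.matrix([[1.0], [0.0], [0.0]])
_rotated = stress_rotation(_sigma, 90.0)
assert np.allclose(_rotated, [[0.0], [1.0], [0.0]], atol=1e-12)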
def blue_noise(x=None, hue=None, data=None, dodge=False, orient='v', plot_width=None,
color='black', palette='tab10', size=3, centralized=False,
filename='', scaling=10):
""" Renders a *Blue Noise Plot* from the given data.
Args:
x (str in data): Variables that specify positions on the data-encoding axes.
hue (str in data): Optional. Grouping variable that will produce points with different
colors.
data (pandas.DataFrame): Input data structure. Long-form collection of vectors that can be
assigned to named variables.
        dodge (boolean): Optional. Whether to dodge the categorical classes of the plot.
Defaults to False.
orient ("v" | "h"): Optional. Orientation of the plot (vertical or horizontal).
Defaults to 'v'.
color (str): Color to use for markers, in case there is only one class (hue not given).
Defaults to 'black'.
palette (str): Method for choosing the colors to use when mapping the hue semantic.
String values are passed to color_palette(). List or dict values imply
categorical mapping, while a colormap object implies numeric mapping.
Defaults to 'tab10'.
size (float): The marker size in points**2.
        centralized (boolean): Optional. Whether the plot should be centralized or not.
Defaults to False.
plot_width (float): Optional. Width of the plot. This is a ratio, assuming the encoding axis
                            is between 0 and 1. So, 0.2 for plot_width would give you a plot which is
5 times as wide in the encoding axis as in the non-encoding axis.
filename (str): Optional. Filename of the plot to render.
scaling (int): Optional. Scaling for the size of plot.
                       Defaults to 10 for a 740 pixel plot (long side).
Returns:
List[List[[float, float]]] 2D-Array, relaxed points. Here the first dimension of the array
                  encodes the classes in the data. So for a single-class blue noise plot,
len(blue_noise_plot) would be 1.
Each of these arrays contains arrays with points within this class.
"""
return __plot(x=x, hue=hue, data=data, dodge=dodge, orient=orient, plot_width=plot_width,
color=color, palette=palette, size=size, centralized=centralized,
filename=filename, scaling=scaling, method='blue_noise') | 8743dacba9ddac1b1e73e676962d850876c5b2f3 | 3,651,866 |
def get_repository(auth_user: check_auth, repository_id: hug.types.text):
"""
GET: /repository/{repository_id}
Returns the CLA repository requested by UUID.
"""
return cla.controllers.repository.get_repository(repository_id) | ad9bb45a4d4526b790abb7f89d72a3deafb2d10f | 3,651,867 |
def _and(mat,other,obj,m):
"""
Can only be used with '&' operator not with 'and'
Multi-column boolean matrices' values are compared with 'and' operator, meaning that 1 false value
causes whole row to be reduced to a false value
"""
if mat.BOOL_MAT:
if isinstance(other,obj):
if mat.dim!=other.dim:
raise ValueError("Dimensions of the matrices don't match")
if not other.BOOL_MAT:
raise TypeError("Can't compare bool matrix to non-bool matrix")
d0,d1 = mat.dim
o = other.matrix
true,false = mat.DEFAULT_BOOL[True],mat.DEFAULT_BOOL[False]
data = []
#Reduce multiple columns into one
#Remove rows with false boolean values
for i in range(d0):
mrow,orow = m[i],o[i]
if (false in mrow) or (false in orow):
data.append([false])
continue
data.append([true])
return obj(dim=[d0,1],
data=data,
features=mat.features[:1],
index=mat.index[:],
implicit=True,BOOL_MAT=True,DEFAULT_BOOL={True:true,False:false})
else:
d0,d1 = mat.dim
true,false = mat.DEFAULT_BOOL[True],mat.DEFAULT_BOOL[False]
data = []
if isinstance(other,obj):
if mat.dim!=other.dim:
raise ValueError("Dimensions of the matrices don't match")
if other.BOOL_MAT:
raise TypeError("Can't compare non-bool matrix to bool matrix")
o = other.matrix
for i in range(d0):
mrow,orow = m[i],o[i]
data.append([true if (bool(mrow[j]) and bool(orow[j])) else false for j in range(d1)])
elif isinstance(other,list):
            if d1 != len(other):
raise ValueError("Length of the list doesn't match matrix's column amount")
for i in range(d0):
mrow = m[i]
data.append([true if (bool(mrow[j]) and bool(other[j])) else false for j in range(d1)])
else:
for i in range(d0):
mrow = m[i]
data.append([true if (bool(mrow[j]) and bool(other)) else false for j in range(d1)])
return obj(dim=[d0,d1],
data=data,
features=mat.features[:],
index=mat.index[:],
implicit=True,BOOL_MAT=True,DEFAULT_BOOL={True:true,False:false}) | ac4e4d5f205c6aeb068d9fb427839e4e8f85f0ea | 3,651,869 |
def _parcel_profile_helper(pressure, temperature, dewpt):
"""Help calculate parcel profiles.
Returns the temperature and pressure, above, below, and including the LCL. The
other calculation functions decide what to do with the pieces.
"""
# Find the LCL
press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpt)
press_lcl = press_lcl.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
# the logic for removing it later.
press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
temp_lower = dry_lapse(press_lower, temperature)
# If the pressure profile doesn't make it to the lcl, we can stop here
if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):
return (press_lower[:-1], press_lcl, np.array([]) * press_lower.units,
temp_lower[:-1], temp_lcl, np.array([]) * temp_lower.units)
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
# Return profile pieces
return (press_lower[:-1], press_lcl, press_upper[1:],
temp_lower[:-1], temp_lcl, temp_upper[1:]) | 2e9abd03dbf4617e53ea19ac7a415025567195e8 | 3,651,871 |
def best_param_search(low=1, margin=1, func=None):
"""
Perform a binary search to determine the best parameter value.
In this specific context, the best
parameter is (the highest) value of the parameter (e.g. batch size)
that can be used to run a func(tion)
(e.g., training) successfully. Beyond a certain value,
the function fails to run for reasons such as out-of-memory.
    :param low: a starting low value to start searching from (defaults to 1).
    :param margin: denotes the margin allowed when choosing the
        configuration parameter (and the optimal parameter).
    :param func: the function that is required to be run with the
        configuration parameter.
"""
assert low > 0
assert margin > 0
assert func is not None
# Determine if the function succeeds to run at the starting (low) value.
# If not, keep lowering the value of low until the run succeeds.
try:
print(f"Trying with a parameter value of {low}.")
func(low)
success = True
except Exception:
success = False
print("Run failed! The starting value of the parameter is itself too high!\n")
while not success and low > 0:
try:
low = low // 2
print(f"Trying with a parameter value of {low}.")
func(low)
success = True
except Exception:
print("Run failed! Lowering the parameter value.\n")
if not success:
print("The function failed to run even at the lowest parameter value !")
return
# Set coarse limits on low (function succeeds to run) and
# high (function does not succeed running).
while success:
high = 2 * low
try:
print(f"Trying with a parameter value of {high}.")
func(high)
low = high
except Exception:
success = False
print("Run failed!\n")
print(
f"Low and high parameter values set to {low} and {high} respectively."
)
# Binary search to find the optimal value of low (within the margin).
current_margin = high - low
while current_margin > margin:
mid = (low + high) // 2
try:
print(f"Trying with a parameter value of {mid}.")
func(mid)
low = mid
except Exception:
high = mid
print("Run failed!\n")
print(f"Low and high parameter values set to {low} and {high} respectively.")
current_margin = high - low
print(f"Setting the parameter value to {low}\n")
return low | 6392d8c019ebb50a49c46e724e62fd63671a00df | 3,651,873 |
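# Editor's illustrative usage sketch: a stand-in "training" function that fails above a
# made-up limit of 93; the search should settle on that limit within `margin`.
def _toy_run(batch_size):
    if batch_size > 93:
        raise RuntimeError("out of memory")

assert best_param_search(low=1, margin=1, func=_toy_run) == 93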
import numpy as np
import pandas as pd
def get_supervised_timeseries_data_set(data, input_steps):
"""This function transforms a univariate timeseries into a supervised learning problem where the input consists
of sequences of length input_steps and the output is the prediction of the next step
"""
series = pd.Series(data)
data_set = pd.DataFrame({'t' : series, 't+1' : series.shift(-1)})
if input_steps > 1:
x_values = np.concatenate([data[i:i+input_steps]
.reshape(1, input_steps) for i in range(len(series) - input_steps)])
timesteps_df = pd.DataFrame(x_values[:,:-1], index=np.arange(input_steps - 1, input_steps - 1 + len(x_values)),
columns = ['t-' + str(input_steps - i) for i in range(1, input_steps)])
data_set = pd.concat([timesteps_df, data_set], axis=1, join='inner')
data_set = data_set.dropna()
X = data_set.drop('t+1', axis=1)
y = data_set.loc[:,'t+1']
return (X, y) | 0fce866ea266c15e83e57795f86fcfe4fee4a54e | 3,651,875 |
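# Editor's illustrative usage sketch with a toy series (values made up): two input
# steps yield feature columns ['t-1', 't'] and a target column holding the next value.
_X, _y = get_supervised_timeseries_data_set(np.array([1, 2, 3, 4, 5]), input_steps=2)
assert list(_X.columns) == ['t-1', 't']
assert _y.tolist() == [3.0, 4.0, 5.0]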
import numpy as np
def load_template_spectra_from_folder(parent_folder,
spectrum_identifier,
normalization=None):
"""
Load template spectrum data into a dictionary. This allows templates from
different folders to be loaded into different dictionaries.
Parameters:
-----------
parent_folder : string
Name of folder or path
spectrum_identifier : string
Radioactive source identifier. Ex: '235U'
    normalization : string or None
Default = None
Accepts: 'normalheight', 'normalarea', None
How the dataset should be normalized.
Returns:
--------
temp_dict : Dictionary containing all template spectra from a folder.
"""
temp_dict = {}
def normalize_spectrum(ID):
"""
Normalizes the spectrum data.
Parameters:
-----------
ID : string
The ID key for the radioactive source in your spectrum.
Returns:
--------
temp_dict : Dictionary
Contains all normalized datasets.
"""
temp_spectrum = an.read_spectrum(
parent_folder + ID + spectrum_identifier)
if np.max(temp_spectrum) == 0:
print(ID + ' Contains no values')
if normalization is None:
return temp_spectrum
elif normalization == 'normalheight':
return temp_spectrum / np.max(temp_spectrum)
elif normalization == 'normalarea':
return temp_spectrum / np.sum(temp_spectrum)
for i in range(len(an.isotopes) - 3):
temp_dict[an.isotopes[i]] = normalize_spectrum(
an.isotopes_sources_GADRAS_ID[i])
return temp_dict | 466d0eb74de197ddb18e289f072d9451bc7ea2d8 | 3,651,877 |
import json
def remove_screenshot_from_object(request):
"""
Removes the screenshot from being associated with a top-level object.
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
analyst = request.user.username
obj = request.POST.get('obj', None)
oid = request.POST.get('oid', None)
sid = request.POST.get('sid', None)
result = delete_screenshot_from_object(obj, oid, sid, analyst)
return HttpResponse(json.dumps(result),
mimetype="application/json") | 36836120c79dc8d825c91370074da09fd2255c6d | 3,651,878 |
import csv
import json
def read_csv_as_dicts(
filename,
newline="",
delimiter=",",
quotechar='"',
encoding="utf-8",
remove_prefix=True,
prefix="dv.",
json_cols=CSV_JSON_COLS,
false_values=["FALSE"],
true_values=["TRUE"],
):
"""Read in CSV file into a list of :class:`dict`.
This offers an easy import functionality of your data from CSV files.
See more at
`csv <https://docs.python.org/3/library/csv.html>`_.
CSV file structure:
1) The header row contains the column names.
2) A row contains one dataset
3) A column contains one specific attribute.
Recommendation: Name the column name the way you want the attribute to be
named later in your Dataverse object. See the
`pyDataverse templates <https://github.com/GDCC/pyDataverse_templates>`_
for this. The created :class:`dict` can later be used for the `set()`
function to create Dataverse objects.
Parameters
----------
filename : str
Filename with full path.
newline : str
Newline character.
delimiter : str
        Cell delimiter of CSV file. Defaults to ','.
quotechar : str
Quote-character of CSV file. Defaults to '"'.
encoding : str
Character encoding of file. Defaults to 'utf-8'.
Returns
-------
list
List with one :class:`dict` each row. The keys of a :class:`dict` are
named after the columen names.
"""
assert isinstance(filename, str)
assert isinstance(newline, str)
assert isinstance(delimiter, str)
assert isinstance(quotechar, str)
assert isinstance(encoding, str)
with open(filename, "r", newline=newline, encoding=encoding) as csvfile:
reader = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)
data = []
for row in reader:
data.append(dict(row))
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
                if val in false_values:
                    ds_tmp[key] = False
                elif val in true_values:
                    ds_tmp[key] = True
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
if remove_prefix:
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
if key.startswith(prefix):
ds_tmp[key[len(prefix) :]] = val
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
if len(json_cols) > 0:
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
if key in json_cols:
ds_tmp[key] = json.loads(val)
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
return data | bf15b684120445adf7e6ba8ae18befd64ad6a99f | 3,651,879 |
def get_interface_breakout_param(dut,**kwargs):
"""
Author: Naveen Nag
email : [email protected]
:param dut:
:param interface:
:param fields:
:return: interface breakout speed
Usage:
port.get_interface_breakout_param(dut1, 'Ethernet4')
:return - ['4x10G', 'Completed']
"""
param_breakout = []
if 'interface' not in kwargs :
st.error("Mandatory argument \'interface\' is missing")
return False
if 'Eth' in kwargs['interface']:
st.log('Physical interface name is provided, mapping it to a port group')
res1 = get_interface_breakout_mode(dut, kwargs['interface'], 'port')
if res1:
kwargs['interface'] = 'port ' + res1[0]['port']
else:
st.error('Invalid interface, cannot get the status')
return False
output = st.show(dut, "show interface breakout {}".format(kwargs['interface']), type='klish')
if len(output) == 0:
st.error("Provided interface is not a breakout port")
return False
else:
param_breakout.append(str(output[0]['breakout_mode'].strip('G')))
param_breakout.append(output[0]['status'])
return param_breakout | 43286c8dbc29fef096d34c567f3f7c4ff2a06691 | 3,651,882 |
def home():
"""Home view"""
if flask.session.get('userid'):
leaderboard_players = rankedlist(
member=db.web.session.query(
models.Member).get(
flask.session['userid']))
member = db.web.session.query(
models.Member).get(
flask.session.get('userid'))
else:
leaderboard_players = rankedlist(
member=db.web.session.query(
models.Member).filter_by(
rank=3).first())
member = None
news = db.web.session.query(models.NewsArticle).order_by(
desc("date"))
return render_template('content_home.html', news=news, member=member,
leaderboard_players=leaderboard_players) | 89211f6b79eae71b757201a2d8b234000a3e42bf | 3,651,884 |
def is_number(string):
""" Tests if a string is valid float. """
try:
float(string)
return True
except ValueError:
return False | 1c46820de59b932ec565af55c565d175eef58c3c | 3,651,885 |
def inputs(filename, batch_size, n_read_threads = 3, num_epochs = None, image_width = 200, image_height=290):
"""
reads the paired images for comparison
input: name of the file to load from, parameters of the loading process
output: the two images and the label (a logit classifier for 2 class - yes or no)
"""
    with tf.device('/cpu:0'): # load on the CPU, otherwise an excessive amount of memory gets allocated
x1, x2, y_ = pc.input_pipeline([filename], batch_size, n_read_threads, num_epochs=num_epochs, imgwidth = image_width, imgheight = image_height)
return x1, x2, y_ | 1b22a3f5b28513a2f65312981205b2df40acd2b3 | 3,651,887 |
def abs_p_diff(predict_table, categA='sandwich', categB='sushi'):
"""Calculates the absolute distance between two category predictions
:param predict_table: as returned by `predict_table`
:param categA: the first of two categories to compare
    :param categB: the second of two categories to compare
:returns: series with the absolute difference between the predictions
:rtype: pandas Series
"""
return abs(predict_table['p_%s' % categA] - predict_table['p_%s' % categB]) | 235bfc7df29ac4a2b67baff9dfa3ee62204a9aed | 3,651,889 |
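# Editor's illustrative usage sketch (toy probabilities, chosen dyadic so the float
# comparison is exact); column names follow the p_<category> convention used above.
import pandas as pd
_pred = pd.DataFrame({'p_sandwich': [0.75, 0.25], 'p_sushi': [0.25, 0.5]})
assert abs_p_diff(_pred).tolist() == [0.5, 0.25]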
def _is_target_feature(column_names, column_mapping):
"""Assert that a feature only contains target columns if it contains any."""
column_names_set = set(column_names)
column_types = set(column['type']
for column_name, column in column_mapping.iteritems()
if column_name in column_names_set)
if 'target' in column_types:
assert len(column_types) == 1, (
'Features with target columns can only contain target columns.'
'Found column_types: %s for columns %s' % (column_types,
column_names))
return True
else:
return False | 098af45938c616dd0ff2483a27131f15ba50797b | 3,651,890 |
def _default_mono_text_dataset_hparams():
"""Returns hyperparameters of a mono text dataset with default values.
See :meth:`texar.MonoTextData.default_hparams` for details.
"""
return {
"files": [],
"compression_type": None,
"vocab_file": "",
"embedding_init": Embedding.default_hparams(),
"delimiter": " ",
"max_seq_length": None,
"length_filter_mode": "truncate",
"pad_to_max_seq_length": False,
"bos_token": SpecialTokens.BOS,
"eos_token": SpecialTokens.EOS,
"other_transformations": [],
"variable_utterance": False,
"utterance_delimiter": "|||",
"max_utterance_cnt": 5,
"data_name": None,
"@no_typecheck": ["files"]
} | bfd015cc93bd974b6486cf07cf72f1dfb7443b61 | 3,651,892 |
def validate_engine_mode(engine_mode):
"""
Validate database EngineMode for DBCluster
Property: DBCluster.EngineMode
"""
VALID_DB_ENGINE_MODES = (
"provisioned",
"serverless",
"parallelquery",
"global",
"multimaster",
)
if engine_mode not in VALID_DB_ENGINE_MODES:
raise ValueError(
"DBCluster EngineMode must be one of: %s" % ", ".join(VALID_DB_ENGINE_MODES)
)
return engine_mode | 69f7952a998b6ca593106c92710909104e21f55f | 3,651,893 |
import subprocess as sp
from datetime import datetime
def run_command(cmd, log_method=log.info):
"""Subprocess wrapper for capturing output of processes to logs
"""
if isinstance(cmd, str):
cmd = cmd.split(" ")
start = datetime.utcnow()
log_method("Starting run_command for: {}".format(" ".join([str(x) for x in cmd])))
p = sp.Popen(cmd, bufsize=0, stdout=sp.PIPE, stderr=sp.STDOUT)
ret_val = None
while True:
line = p.stdout.readline()
ret_val = p.poll()
        if not line and ret_val is not None:
break
log_method(line.decode())
log_method("Completed run_command in {} for: {}".format((datetime.utcnow() - start).total_seconds(), " ".join(cmd)))
return ret_val | 8366c9306810d927daf82b473db86dc67b0d84c6 | 3,651,895 |
def jar(state: State, fail: Fail):
"""
Store a function by a name
"""
(identifier, (code, rest)) = state.infinite_stack()
if identifier.tag != "atom":
fail(f"{identifier} is not an atom")
if code.tag not in ["code", "native"]:
fail(f"{code} is not code")
if code.tag == "code":
code = code.with_name(identifier.value)
return state.with_stack(rest).set_name(identifier.value, code) | 5f60a30ff7bed1a453bfe6ff354dc34a6fabee4f | 3,651,896 |
import requests
import json
from requests.auth import HTTPBasicAuth
def get_daily_activity(p_sap_date: str) -> dict:
""" Returns activities on the given date """
fiori_url = config.CONSTANTS["ECZ_DAHA_DAILY_URL"] + "?date=" + p_sap_date
resp = requests.get(
fiori_url,
auth=HTTPBasicAuth(
config.CONSTANTS["ECZ_DAHA_USER"],
config.CONSTANTS["ECZ_DAHA_PASS"]))
resp_as_dict = json.loads(resp.text)
return resp_as_dict | 68da0af50b0fc828d6eae3d1685911f039bd9732 | 3,651,898 |
import random
from typing import Optional
def load_example_abc(title: Optional[str] = None) -> str:
"""Load a random example ABC if `title` not provided.
Case ignored in the title.
"""
if title is None:
k = random.choice(list(examples))
else:
k = title.lower()
abc = examples.get(k)
if abc is None:
example_list = "\n".join(f" {t!r}" for t in examples)
raise ValueError("invalid tune title. Valid options are:\n" f"{example_list}")
return abc | d1deba6a03814da68c5d47a7018a6768059fef62 | 3,651,899 |
from typing import Dict
def get_last_confirmed() -> Dict:
"""
    This function gets the last day saved on MongoDB and
    shows the confirmed cases and the accumulated total for that day.
- The country is the only needed path parameter.
"""
date = db.find({}, {"date": 1, "_id": 0}).sort("date", -1).limit(1)
date = list(date)
pipeline = [
{"$match": {"date": {"$eq": date[0]["date"]}}},
{
"$project": {
"cases": 1,
"cases_accumulated": 1,
"date": {"$dateToString": {"format": "%Y-%m-%d", "date": "$date"}},
}
},
{
"$group": {
"_id": "$date",
"cases_accumulated": {"$sum": "$cases_accumulated"},
"cases": {"$sum": "$cases"},
}
},
{"$project": {"date": "$_id", "cases": 1, "cases_accumulated": 1, "_id": 0}},
]
result = db.aggregate(pipeline)
return loads(json_util.dumps(list(result)[0])) | af2cf9ea3da1d361bf1347c389c2c9a6f095629e | 3,651,901 |
def _newNode( cls, named ):
"""Construct new instance of cls, set proper color, and add to objects"""
if not scene.visible:
scene.visible = 1
if not [k for k in ('color','red','green','blue') if k in named]:
named['color'] = scene.foreground
if 'display' in named:
target = named['display']
del named['display'] # XXX fix when have backref added
else:
target = scene
if not target.visible:
target.visible = 1
node = cls(**named)
objs = target.objects
objs.append( node )
target.objects = objs
return node | 7b81b4ec5c1a540f7159dd532c87cc2ebd3c7150 | 3,651,902 |
def MplJs():
"""
Serves the generated matplotlib javascript file. The content
is dynamically generated based on which toolbar functions the
    user has defined. `FigureManagerWebAgg.get_javascript()` generates the
    content.
"""
js_content = FigureManagerWebAgg.get_javascript()
resp = make_response(js_content, 200)
resp.headers['Content-Type'] = 'application/javascript'
return resp | d7b34b86d75f1375f6788a40245e7d04cb3ce6d9 | 3,651,903 |
def VVSS2021_fig4_plot(data, model, sizes=fig_sizes, cmaps=colormaps):
"""
Create and save a plot of the results from the linear regression reaction time model
:param data: the data frame
:param model: the fitted reaction time model
:param cmaps: a dictionary of colormaps
:param sizes: a dictionary of sizes
    :return: Nothing
"""
fig, axs = plt.subplots(1, 1, figsize=(sizes['width'], sizes['height']))
sampleIDs = [1, 2, 3, 4, 5, 6]
t_cm_discrete = cmaps['t_cm'](np.linspace(0, 1, len(sampleIDs)))
for col, c in zip(sampleIDs, t_cm_discrete):
tw = 'sampleProbHit_0{}'.format(col)
pred_rt = model.coefs.loc['(Intercept)', 'Estimate'] + model.coefs.loc[tw, 'Estimate'] * data.loc[:, tw]
axs.plot(data[tw], pred_rt, label='sample {}'.format(col), color=c, linewidth=5)
axs.legend(loc=(1, 0))
axs.set_ylabel('response time [s]')
axs.set_xlabel('normalized p[H]')
fig.savefig(path_figs + "Fig4_lmRTs.pdf", bbox_inches='tight')
return None | 47e63763073c454f1c44cf6e4c590b8b7a985f43 | 3,651,904 |
def stat(noten):
""" Berechne Mittelwert, Median, min, max, oberes und unteres Quantil """
minimum = round(min(noten), 2)
maximum = round(max(noten), 2)
_median = median(noten)
_mittelwert = mittelwert(noten)
[unteres_quartil, oberes_quartil] = quartile(noten)
return [minimum, unteres_quartil, _median, _mittelwert, oberes_quartil, maximum] | 89d00c9b91b142366a4ca927298931a2f22bc715 | 3,651,905 |
def translation_activate_block(function=None, language=None):
"""
Activate language only for one method or function
"""
def _translation_activate_block(function):
def _decorator(*args, **kwargs):
tmp_language = translation.get_language()
try:
translation.activate(language or settings.LANGUAGE_CODE)
return function(*args, **kwargs)
finally:
translation.activate(tmp_language)
return wraps(function)(_decorator)
if function:
return _translation_activate_block(function)
else:
return _translation_activate_block | 8615b02e4e3aa0560be0734f8e6564755f5e5e9b | 3,651,906 |
def _loaded_schema_collections(schema_file_relative_dir) -> SchemaCollectionManager:
"""A loaded ``SchemaCollectionManager`` object, but this should never be modified. This object manages ``Schema``
objects corresponding to ``tests/{datasets,formats,licenses}.yaml``. Note that these are not necessarily the same as
the ones used in other schema fixtures, so please do not assume that it is equal to other schema fixtures. One
purpose of this fixture is to reduce repeated call in the test to the same function when ``loaded_schemata`` is
used. The other purpose is to provide other session-scoped fixtures access to the loaded schemata, because
session-scoped fixtures can't load function-scoped fixtures.
"""
return SchemaCollectionManager(datasets=DatasetSchemaCollection(schema_file_relative_dir / 'datasets.yaml'),
formats=FormatSchemaCollection(schema_file_relative_dir / 'formats.yaml'),
licenses=LicenseSchemaCollection(schema_file_relative_dir / 'licenses.yaml')) | ba5d03c8ad1c622391247ef505ccad21476c17d2 | 3,651,907 |
import uuid
def dag(name=None, child_tasks=None, edges=None, target=None):
"""
Create a DAG task
Args:
name (str): Name for the task
child_tasks (list [Task]): Child tasks within this dag
edges (list [tuple (Ref, Ref)]): List of tuples of ref(Task).
Each element denotes an edge from
first task to the second.
target (Ref): Target entity reference
Returns:
(Task): DAG task
"""
dag_edges = []
for edge in edges or []:
if len(edge) != 2:
raise ValueError("DAG edges require a tuple of two task references")
for task_ref in edge:
if not getattr(task_ref, "__kind__") == "app_ref":
raise ValueError("{} is not a valid task reference".format(task_ref))
from_ref = edge[0]
to_ref = edge[1]
dag_edges.append({"from_task_reference": from_ref, "to_task_reference": to_ref})
# This follows UI naming convention for runbooks
name = name or str(uuid.uuid4())[:8] + "_dag"
kwargs = {
"name": name,
"child_tasks_local_reference_list": [
task.get_ref() for task in child_tasks or []
],
"attrs": {"edges": dag_edges},
"type": "DAG",
}
if target:
kwargs["target_any_local_reference"] = target
return _task_create(**kwargs) | ce12e46141ab030297303b4d55585475eb74f2cf | 3,651,908 |
async def async_validate_pdf_signature(
embedded_sig: EmbeddedPdfSignature,
signer_validation_context: ValidationContext = None,
ts_validation_context: ValidationContext = None,
ac_validation_context: ValidationContext = None,
diff_policy: DiffPolicy = None,
key_usage_settings: KeyUsageConstraints = None,
skip_diff: bool = False) -> PdfSignatureStatus:
"""
.. versionadded:: 0.9.0
.. versionchanged: 0.11.0
Added ``ac_validation_context`` param.
Validate a PDF signature.
:param embedded_sig:
Embedded signature to evaluate.
:param signer_validation_context:
Validation context to use to validate the signature's chain of trust.
:param ts_validation_context:
Validation context to use to validate the timestamp's chain of trust
(defaults to ``signer_validation_context``).
:param ac_validation_context:
Validation context to use to validate attribute certificates.
If not supplied, no AC validation will be performed.
.. note::
:rfc:`5755` requires attribute authority trust roots to be specified
explicitly; hence why there's no default.
:param diff_policy:
Policy to evaluate potential incremental updates that were appended
to the signed revision of the document.
Defaults to
:const:`~pyhanko.sign.diff_analysis.DEFAULT_DIFF_POLICY`.
:param key_usage_settings:
A :class:`.KeyUsageConstraints` object specifying which key usages
must or must not be present in the signer's certificate.
:param skip_diff:
If ``True``, skip the difference analysis step entirely.
:return:
The status of the PDF signature in question.
"""
sig_object = embedded_sig.sig_object
if embedded_sig.sig_object_type != '/Sig':
raise SignatureValidationError("Signature object type must be /Sig")
# check whether the subfilter type is one we support
subfilter_str = sig_object.get('/SubFilter', None)
_validate_subfilter(
subfilter_str,
(SigSeedSubFilter.ADOBE_PKCS7_DETACHED, SigSeedSubFilter.PADES),
"%s is not a recognized SubFilter type in signatures."
)
if ts_validation_context is None:
ts_validation_context = signer_validation_context
embedded_sig.compute_integrity_info(
diff_policy=diff_policy, skip_diff=skip_diff
)
status_kwargs = embedded_sig.summarise_integrity_info()
ts_status_kwargs = await collect_timing_info(
embedded_sig.signer_info, ts_validation_context,
raw_digest=embedded_sig.external_digest
)
status_kwargs.update(ts_status_kwargs)
if 'signer_reported_dt' not in status_kwargs:
# maybe the PDF signature dictionary declares /M
signer_reported_dt = embedded_sig.self_reported_timestamp
if signer_reported_dt is not None:
status_kwargs['signer_reported_dt'] = signer_reported_dt
status_kwargs = await cms_basic_validation(
embedded_sig.signed_data, status_cls=PdfSignatureStatus,
raw_digest=embedded_sig.external_digest,
validation_context=signer_validation_context,
status_kwargs=status_kwargs, key_usage_settings=key_usage_settings
)
tst_validity = status_kwargs.get('timestamp_validity', None)
timestamp_found = (
tst_validity is not None
and tst_validity.valid and tst_validity.trusted
)
sv_update = report_seed_value_validation(
embedded_sig, status_kwargs['validation_path'], timestamp_found
)
status_kwargs.update(sv_update)
if ac_validation_context is not None:
ac_validation_context.certificate_registry.register_multiple(
embedded_sig.other_embedded_certs
)
status_kwargs.update(
await collect_signer_attr_status(
sd_attr_certificates=embedded_sig.embedded_attr_certs,
signer_cert=embedded_sig.signer_cert,
validation_context=ac_validation_context,
sd_signed_attrs=embedded_sig.signer_info['signed_attrs']
)
)
return PdfSignatureStatus(**status_kwargs) | fb4a8ae244d80c672ddc35c94d75953ab2d7d119 | 3,651,909 |
import inspect
def get_one_to_many_foreign_key_column_name(model, name):
"""
Returns the constituent column names for the foreign key on the remote
table of the one-to-many relationship specified by name.
Args:
model (class or object): The given model class or model instance.
name (string): The name of the attribute on `model` which is a
one-to-many relationship.
Return:
list: One-to-many foreign key column names as a list of strings.
"""
if not inspect.isclass(model):
return get_one_to_many_foreign_key_column_name(model.__class__, name)
attr = getattr(model, name, None)
if not attr:
# Unknown attribute.
return []
remote_columns = getattr(attr.property, 'remote_side', None)
if not remote_columns:
# This is not a one-to-many relationship.
return []
remote_tables = set(c.table.name for c in remote_columns)
if len(remote_tables) > 1:
# This is a many-to-many relationship with a cross reference table.
return []
foreign_key_column_names = []
for remote_column in remote_columns:
if getattr(remote_column, 'foreign_keys', False):
foreign_key_column_names.append(remote_column.name)
else:
remote_model = get_model_by_table(model, remote_column.table)
if remote_model:
# Quasi foreign keys don't actually have foreign_keys set,
# but they need to be treated as though they did.
foreign_keys = getattr(remote_model, 'quasi_foreign_keys', [])
if remote_column.name in foreign_keys:
foreign_key_column_names.append(remote_column.name)
return foreign_key_column_names | f829de2cbb29034f033f3c124837ac888f7526eb | 3,651,910 |
from typing import List
def formula(formula: str, formula_param: str, cols: List[str]) -> Aggregation:
""" Create a user defined formula aggregation.
Args:
formula (str): the user defined formula to apply to each group
formula_param (str): the parameter name within the formula
cols (List[str]): the columns to aggregate on, can be renaming expressions, i.e. "new_col = col"
Returns:
an aggregation
"""
return Aggregation(j_aggregation=_JAggregation.AggFormula(formula, formula_param, *cols)) | 86247179aa3252bf24500c53b2cf7c20eb9afe62 | 3,651,913 |
def num_false_positives(df):
"""Total number of false positives (false-alarms)."""
return df.noraw.Type.isin(['FP']).sum() | 6aa339b86d15072c6a6910a43e70281575da5d36 | 3,651,914 |
import ctypes
def repmot(instr, marker, value, repcase, lenout=None):
"""
Replace a marker with the text representation of an ordinal number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmot_c.html
:param instr: Input string.
:type instr: str
:param marker: Marker to be replaced.
:type marker: str
:param value: Replacement value.
:type value: int
:param repcase: Case of replacement text.
:type repcase: str
:param lenout: Optional available space in output string.
:type lenout: int
:return: Output string.
:rtype: str
"""
if lenout is None:
lenout = ctypes.c_int(len(instr) + len(marker) + 15)
instr = stypes.stringToCharP(instr)
marker = stypes.stringToCharP(marker)
value = ctypes.c_int(value)
repcase = ctypes.c_char(repcase.encode(encoding='UTF-8'))
out = stypes.stringToCharP(lenout)
libspice.repmot_c(instr, marker, value, repcase, lenout, out)
return stypes.toPythonString(out) | 1be758f8c594805ae08c4b2e62014809e62039ad | 3,651,915 |
import numpy as np
def emulatte_RESOLVE(
thicks, resistivity, freqs, nfreq, spans, height,
vca_index=None, add_noise=False, noise_ave=None, noise_std=None
):
"""
return : ndarray
[
Re(HCP1), Re(HCP2), Re(HCP3), (Re(VCX)), Re(HCP4), Re(HCP5),
Im(HCP1), Im(HCP2), Im(HCP3), (Im(VCX)), Im(HCP4), Im(HCP5),
]
"""
    # forward computation
tc = [0, 0, -height]
hankel_filter = 'werthmuller201'
moment = 1
displacement_current = False
res = np.append(2e14, resistivity)
model = fwd.model(thicks)
model.set_properties(res=res)
fields = []
primary_fields = []
    # compute HCP and VCA responses
for i in range(nfreq):
f = np.array([freqs[i]])
rc = [-spans[i], 0, -height]
        # with VCA
if (nfreq == 6) and (i == vca_index):
hmdx = fwd.transmitter("HMDx", f, moment=moment)
model.locate(hmdx, tc, rc)
resp = model.emulate(hankel_filter=hankel_filter)
resp = resp['h_x'][0]
primary_field = moment / (2 * np.pi * spans[i] ** 3)
        # without VCA
else:
vmd = fwd.transmitter("VMD", f, moment=moment)
model.locate(vmd, tc, rc)
resp = model.emulate(hankel_filter=hankel_filter)
resp = resp['h_z'][0]
primary_field = - moment / (4 * np.pi * spans[i] ** 3)
fields.append(resp)
primary_fields.append(primary_field)
fields = np.array(fields)
primary_fields = np.array(primary_fields)
    # convert the primary and secondary magnetic fields to ppm
inph_total_field = np.real(fields)
quad_secondary_field = np.imag(fields)
inph_secondary_field = inph_total_field - primary_fields
real_ppm = abs(inph_secondary_field / primary_fields) * 1e6
imag_ppm = abs(quad_secondary_field / primary_fields) * 1e6
    # noise level for each Bookpurnong frequency, Christensen (2009)
    # add noise
add = np.random.choice([True, False], p=[0.7, 0.3])
if (add_noise & add):
noise = [nlv for nlv in zip(noise_ave, noise_std)]
for index, nlv in enumerate(noise):
inphnoise = np.random.normal(nlv[0], nlv[1])
quadnoise = np.random.normal(nlv[0], nlv[1])
real_ppm[index] = real_ppm[index] + inphnoise
imag_ppm[index] = imag_ppm[index] + quadnoise
resp = np.hstack([real_ppm, imag_ppm])
return resp | 0cdd44d9d3d53c1813ed25f230e29adec36fca5e | 3,651,917 |
import csv
from pathlib import Path
def metadata_dict_chex_mimic(metadata_location):
"""Reads whole csv to find image_name, creates dict with nonempty bboxes
Output:
Bboxes dictionary with key the img_name and values the bboxes themselves."""
bboxes = {}
with open(metadata_location) as f_obj:
reader = csv.reader(f_obj, delimiter=',')
next(reader) # skip header
for line in reader:
_, img_name, x, y, w, h = [int(entry) if entry.isnumeric() else entry for entry in line]
if h != 0 and w != 0: # only append nonempty bboxes
img_name = str(Path(img_name)) # compatibility between different OS
bboxes.setdefault(img_name, []) # these two lines allow safe placing of multiple values for key
bboxes[img_name].append([x, y, w, h])
return bboxes | 716358d1eb5a77c177a41076bb630108d7ffc934 | 3,651,918 |
def create_feature_df(cnv_dict, feature_type, labels, csv=False):
"""Creates a pandas Dataframe containing cnvs as rows and features as columns"""
# get features for each CNV
cnv_features = []
if csv:
for chrom in cnv_dict:
for cnv in cnv_dict[chrom]:
if cnv.tads:
cnv_features.append(
np.append([cnv.chr, cnv.start, cnv.end], cnv.annotate(feature_type)))
feature_df = pd.DataFrame(data=cnv_features, columns=[
'CHR', 'START', 'END'] + labels)
else:
for chrom in cnv_dict:
for cnv in cnv_dict[chrom]:
if cnv.tads:
cnv_features.append(cnv.annotate(feature_type))
feature_df = pd.DataFrame(data=cnv_features, columns=labels)
return feature_df | 76af71f73ee09a7cbfbafed3ad447b20a98e0da5 | 3,651,919 |
import xml
def simulationcell_from_axes(axes, bconds='p p p', rckc=15.):
""" construct the <simulationcell> xml element from axes
Args:
axes (np.array): lattice vectors
bconds (str, optional): boundary conditions in x,y,z directions.
p for periodic, n for non-periodic, default to 'p p p'
    rckc: long-range cutoff parameter rc*kc, default to 15
Return:
etree.Element: representing <simulationcell>
"""
    def pad_line(line):  # pad so the content can be selected with a double click
return ' ' + line + ' '
# write primitive lattice vectors
lat_node = etree.Element('parameter', attrib={
'name': 'lattice',
'units': 'bohr'
})
lat_node.text = xml.arr2text(axes)
# write boundary conditions
bconds_node = etree.Element('parameter', {'name': 'bconds'})
bconds_node.text = pad_line(bconds)
# write long-range cutoff parameter
lr_node = etree.Element('parameter', {'name': 'LR_dim_cutoff'})
lr_node.text = pad_line(str(rckc))
# build <simulationcell>
sc_node = etree.Element('simulationcell')
sc_node.append(lat_node)
sc_node.append(bconds_node)
sc_node.append(lr_node)
return sc_node | c3cdc77f9cce7ef09418459832c60b6570d7e11c | 3,651,920 |
def diff_seq(seq1, seq0):
"""Returns the difference of two sequences: seq1 - seq0.
Args:
seq1: The left operand.
seq0: The right operand.
Returns:
The difference of the two sequences.
"""
return (seq1 - seq0) % MAX_SEQ | cd1632357d6ff61fcd2a32ba71a6a6be2454521d | 3,651,921 |
def method_menu():
"""Method menu items
1. Add a new method
2. Duplicate selected method
3. Remove selected method
------------------------------
4. Clear methods
"""
message_method = "You are about to delete all methods. Do you want to continue?"
method_items = [
menu_item(icon_text("fas fa-plus-circle", "Add a new method"), id="add_method"),
menu_item(
icon_text("fas fa-clone", "Duplicate selection"), id="duplicate_method"
),
menu_item(
icon_text("fas fa-minus-circle", "Remove selection"), id="remove_method"
),
menu_item(divider=True),
menu_item("Clear all methods", id="clear-methods"),
menu_item(divider=True),
menu_item("Measurement", header=True),
menu_item(
dcc.Upload(
icon_text("fas fa-paperclip", "Add to selection"),
id="add-measurement-for-method",
)
),
menu_item(
icon_text("fas fa-times-circle", "Remove from selection"),
id="remove-measurement-from-method",
),
dcc.ConfirmDialog(id="confirm-clear-methods", message=message_method),
]
# Callbacks for the add, duplicate, and remove methods
_ = [
app.clientside_callback(
f"""function() {{
document.getElementById("{t}-method-button").click();
throw window.dash_clientside.PreventUpdate;
}}""",
Output(f"{t}-method-button", "n_clicks"),
Input(f"{t}_method", "n_clicks"),
prevent_initial_call=True,
)
for t in TARGET
]
# Callbacks for the clear all methods
app.clientside_callback(
"""function(n) {
if (n == null) throw window.dash_clientside.PreventUpdate;
return true;
}""",
Output("confirm-clear-methods", "displayed"),
Input("clear-methods", "n_clicks"),
prevent_initial_call=True,
)
return create_submenu(label="Method", children=method_items, right=False) | d22a820a249728650b49f75ccfcdc254f3a84e76 | 3,651,922 |
def shapeanalysis_OuterWire(*args):
"""
    * Returns the outer wire on the face <Face>. This is a replacement for the method BRepTools::OuterWire, to be used as long as that method works badly. Returns the first wire oriented as outer according to FClass2d_Classifier. If none, last wire is returned.
:param face:
:type face: TopoDS_Face &
:rtype: TopoDS_Wire
"""
return _ShapeAnalysis.shapeanalysis_OuterWire(*args) | 7fa38d16cdfe40f802dea7d93666870e82d5cf26 | 3,651,923 |
def is_numeric(val: str) -> bool:
"""Check whether an unparsed string is a numeric value"""
if val in MISSING_VALUES:
return True
try:
float(val)
except Exception:
return False
else:
return True | 72d6095c32f3bd89c0ae8bda22dc4b9a6461468b | 3,651,924 |
def expand_options(sent, as_strings=True):
"""
['1', '(', '2', '|', '3, ')'] -> [['1', '2'], ['1', '3']]
For example:
Will it (rain|pour) (today|tomorrow|)?
---->
Will it rain today?
Will it rain tomorrow?
Will it rain?
Will it pour today?
Will it pour tomorrow?
Will it pour?
Args:
        sent (list<str>): List of tokens in the sentence
Returns:
list<list<str>>: Multiple possible sentences from original
"""
return expand_parentheses(sent, as_strings) | 2b07ac0cfee7339b11016f68792500bf855df019 | 3,651,926 |
def gcd_recursive_by_divrem(m, n):
"""
Computes the greatest common divisor of two numbers by recursively getting remainder from
division.
:param int m: First number.
:param int n: Second number.
:returns: GCD as a number.
"""
if n == 0:
return m
return gcd_recursive_by_divrem(n, m % n) | bd25d9cea4813e523ea6bb9bd85c24bf43dd2744 | 3,651,927 |
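# Editor's illustrative usage sketch: the recursion reduces (48, 36) -> (36, 12) -> (12, 0) -> 12.
assert gcd_recursive_by_divrem(48, 36) == 12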
def repeat(atoms, coord):
"""
Repeat atoms (:class:`AtomArray` or :class:`AtomArrayStack`)
multiple times in the same model with different coordinates.
Parameters
----------
atoms : AtomArray, shape=(n,) or AtomArrayStack, shape=(m,n)
The atoms to be repeated.
coord : ndarray, dtype=float, shape=(k,n,3) or shape=(k,m,n,3)
The coordinates to be used fr the repeated atoms.
The length of first dimension determines the number of repeats.
If `atoms` is an :class:`AtomArray` 3 dimensions, otherwise
4 dimensions are required.
Returns
-------
repeated: AtomArray, shape=(n*k,) or AtomArrayStack, shape=(m,n*k)
The repeated atoms.
Whether an :class:`AtomArray` or an :class:`AtomArrayStack` is
returned depends on the input `atoms`.
Examples
--------
>>> atoms = array([
... Atom([1,2,3], res_id=1, atom_name="N"),
... Atom([4,5,6], res_id=1, atom_name="CA"),
... Atom([7,8,9], res_id=1, atom_name="C")
... ])
>>> print(atoms)
1 N 1.000 2.000 3.000
1 CA 4.000 5.000 6.000
1 C 7.000 8.000 9.000
>>> repeat_coord = np.array([
... [[0,0,0], [1,1,1], [2,2,2]],
... [[3,3,3], [4,4,4], [5,5,5]]
... ])
>>> print(repeat(atoms, repeat_coord))
1 N 0.000 0.000 0.000
1 CA 1.000 1.000 1.000
1 C 2.000 2.000 2.000
1 N 3.000 3.000 3.000
1 CA 4.000 4.000 4.000
1 C 5.000 5.000 5.000
"""
if isinstance(atoms, AtomArray) and coord.ndim != 3:
raise ValueError(
f"Expected 3 dimensions for the coordinate array, got {coord.ndim}"
)
elif isinstance(atoms, AtomArrayStack) and coord.ndim != 4:
raise ValueError(
f"Expected 4 dimensions for the coordinate array, got {coord.ndim}"
)
repetitions = len(coord)
orig_length = atoms.array_length()
new_length = orig_length * repetitions
if isinstance(atoms, AtomArray):
if coord.ndim != 3:
raise ValueError(
f"Expected 3 dimensions for the coordinate array, "
f"but got {coord.ndim}"
)
repeated = AtomArray(new_length)
repeated.coord = coord.reshape((new_length, 3))
elif isinstance(atoms, AtomArrayStack):
if coord.ndim != 4:
raise ValueError(
f"Expected 4 dimensions for the coordinate array, "
f"but got {coord.ndim}"
)
repeated = AtomArrayStack(atoms.stack_depth(), new_length)
repeated.coord = coord.reshape((atoms.stack_depth(), new_length, 3))
else:
raise TypeError(
f"Expected 'AtomArray' or 'AtomArrayStack', "
f"but got {type(atoms).__name__}"
)
for category in atoms.get_annotation_categories():
annot = np.tile(atoms.get_annotation(category), repetitions)
repeated.set_annotation(category, annot)
if atoms.bonds is not None:
bonds = atoms.bonds
for _ in range(repetitions-1):
bonds += atoms.bonds
repeated.bonds = bonds
if atoms.box is not None:
repeated.box = atoms.box.copy()
return repeated | b4f86bad25061807370d0f1eacdb0637eb8a19cc | 3,651,928 |
def get_mzi_delta_length(m, neff=2.4, wavelength=1.55):
""" m*wavelength = neff * delta_length """
return m * wavelength / neff | 5bcd4b9b217c79a06b48856f7801060787f12e52 | 3,651,929 |
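# Editor's worked example: for interference order m = 9 at the default wavelength (1.55)
# and neff (2.4), delta_length = 9 * 1.55 / 2.4 = 5.8125, in the same unit as the wavelength.
assert abs(get_mzi_delta_length(9) - 5.8125) < 1e-12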
import numpy as np
from numpy import radians, tan
def yagzag2radec(yag, zag, q):
"""
Given ACA Y-ang, Z-ang and pointing quaternion determine RA, Dec. The
input ``yag`` and ``zag`` values can be 1-d arrays in which case the output
``ra`` and ``dec`` will be corresponding arrays of the same length.
:param yag: ACA Y angle (degrees)
:param zag: ACA Z angle (degrees)
:param q: Quaternion
:rtype: list ra, dec (degrees)
"""
try:
one = np.ones(len(yag))
except TypeError:
one = 1.0
d_aca = np.array([one, tan(radians(yag)), tan(radians(zag))])
d_aca *= 1.0 / np.sum(d_aca**2)
eci = np.dot(q.transform, d_aca)
return eci2radec(eci) | e7266f5c0dd0763238c3f12fafebea19c080022d | 3,651,930 |
import cv2
import numpy as np
from tensorflow.keras.preprocessing.image import load_img
from typing import Union
def load_image(
path: str,
color_mode="rgb",
target_size: Union[None, ImageSize] = None,
normalize=False,
) -> np.ndarray:
"""Load an RGB image from the given path, optionally resizing it.
:param path: Path to the image
:param color_mode: "rgb", "bgr" or "grayscale"
:param target_size: Target size of the image (width, height).
:param normalize: Normalize values to [0.0, [1.0]
"""
pil_color_mode = color_mode
if pil_color_mode == "bgr":
pil_color_mode = "rgb"
pil = load_img(path, color_mode=pil_color_mode, target_size=target_size)
image = np.array(pil)
if color_mode == "bgr":
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if normalize:
image = image / 255.0
return image | 9b80d39561e3ccdae778675e011ab5c52c04db4f | 3,651,931 |
def deit_base_patch16_384():
"""
DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
cfg = ViTConfig(
name="deit_base_patch16_384",
url="",
input_size=(384, 384),
patch_size=16,
embed_dim=768,
nb_blocks=12,
nb_heads=12,
crop_pct=1.0,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return ViT, cfg | 720968c8a95de30c836631fd5505c8029eaf46c0 | 3,651,932 |
def update_output(
event_id,
event_dividers,
light_dividers,
filename,
geometry,
do_plot_tracks,
do_plot_opids,
figure,
):
"""Update 3D event display end event id"""
fig = go.Figure(figure)
if event_dividers is None:
return no_update, no_update, no_update, no_update, no_update
try:
fig.data = []
fig.add_traces(
draw_event(
filename,
GEOMETRIES[geometry],
event_dividers,
light_dividers,
event_id,
do_plot_tracks,
do_plot_opids,
)
)
except IndexError as err:
print("IndexError", err)
return fig, {"display": "none"}, True, no_update, no_update
except KeyError as err:
print("KeyError", err)
return fig, {"display": "none"}, True, "Select a geometry first", no_update
url_filename = filename.replace(DOCKER_MOUNTED_FOLDER, "")
return (
fig,
{"height": "85vh"},
False,
no_update,
f"https://larnddisplay.lbl.gov/{url_filename}?geom={geometry}#{event_id}",
) | 370f7a165ddaf870d4806564d8b86f4d5c4e90b5 | 3,651,933 |
import requests
def get_containerports(marathon_url, app_id):
"""
Get containerports if we have portmapping.
marathon_url : [string] the URL of the marathon service
app_id : [string] ID of the running marathon app
Method : GET
Return : list of ports
"""
api_endpoint = '/v2/apps/'
headers = {'Content-Type': 'application/json'}
url = marathon_url + api_endpoint + app_id
print(url)
r = requests.get(url, headers=headers)
print(r.status_code)
containerports = []
for portmapping in r.json()['app']['container']['docker']['portMappings']:
containerports.append(portmapping['containerPort'])
return containerports | eb22656e58b2b7156015b84c63755cb4f4348502 | 3,651,934 |
import numpy as np
def backward_algorithm(O, HMM_model):
"""HMM Backward Algorithm.
Args:
O: (o1, o2, ..., oT), observations
HMM_model: (pi, A, B), (init state prob, transition prob, emitting prob)
Return:
prob: the probability of HMM_model generating O.
"""
pi, A, B = HMM_model
T = len(O)
N = len(pi)
prob = 0.0
# Begin Assignment
    # backward probability matrix: column t holds the beta values at time T - t
betas = np.zeros((N, T))
for i in range(N):
betas[i][0] = 1
for t in range(1, T):
for i in range(N):
for j in range(N):
betas[i][t] += A[i][j]*B[j][O[T-t]]*betas[j][t-1]
for i in range(N):
prob += pi[i]*B[i][O[0]]*betas[i][-1]
# End Assignment
return prob | da958ddd8d8943546030ba4306b7f632061d96bc | 3,651,935 |
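# Editor's illustrative usage sketch: the classic three-state "box and ball" HMM
# (Li Hang, "Statistical Learning Methods", Example 10.2); the observation sequence
# (red, white, red) has probability ~0.130218, matching the forward algorithm.
_pi = np.array([0.2, 0.4, 0.4])
_A = np.array([[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]])
_B = np.array([[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]])
assert abs(backward_algorithm((0, 1, 0), (_pi, _A, _B)) - 0.130218) < 1e-6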
def scan_codes(code_type, image):
"""Get *code_type* codes from a PIL Image
*code_type* can be any of zbar supported code type [#zbar_symbologies]_:
- **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`)
- **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`),
DataBar (`databar`) and DataBar Expanded (`databar-exp`)
- **2D**: QR Code (`qrcode`)
- **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417`
.. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html
Args:
code_type (str): Code type to search
image (PIL.Image.Image): Image to scan
returns:
A list of *code_type* code values or None
"""
assert Image.isImageType(image)
converted_image = image.convert('L') # Convert image to gray scale (8 bits per pixel).
raw = converted_image.tobytes() # Get image data.
width, height = converted_image.size # Get image size.
return zbar_code_scanner('{0}.enable'.format(code_type).encode(), raw, width, height) | 02c70551138ffc5dc386e753d7532c28466de97e | 3,651,936 |
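# Example (hedged): decode any QR codes in a local image file. The filename is a placeholder,
# and the zbar library must be available for zbar_code_scanner (defined elsewhere) to work.
from PIL import Image
qr_values = scan_codes('qrcode', Image.open('ticket.png'))
print(qr_values)  # list of decoded values, or None if nothing was found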
def get_summoner_masteries(summoner_ids):
"""
https://developer.riotgames.com/api/methods#!/1017/3450
Args:
summoner_ids (int | list<int>): the summoner ID(s) to get mastery pages for
Returns:
dict<str, MasteryPages>: the requested summoners' mastery pages
"""
# Can only have 40 summoners max if it's a list
if isinstance(summoner_ids, list) and len(summoner_ids) > 40:
raise ValueError("Can only get masteries for up to 40 summoners at once.")
id_string = ",".join(str(x) for x in summoner_ids) if isinstance(summoner_ids, list) else str(summoner_ids)
# Get JSON response
request = "{version}/summoner/{ids}/masteries".format(version=cassiopeia.dto.requests.api_versions["summoner"], ids=id_string)
response = cassiopeia.dto.requests.get(request)
# Convert response to Dto type
for id_, masteries in response.items():
response[id_] = cassiopeia.type.dto.summoner.MasteryPages(masteries)
return response | 55b9395cd452e444f049d05af0718ca847f346d0 | 3,651,937 |
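# Example call (hedged): assumes cassiopeia.dto.requests has already been configured with a
# valid API key and region; the summoner ids are placeholders.
pages_by_summoner = get_summoner_masteries([12345678, 87654321])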
def make_pod_spec(
name,
image_spec,
image_pull_policy,
image_pull_secret,
port,
cmd,
node_selector,
run_as_uid,
fs_gid,
env,
working_dir,
volumes,
volume_mounts,
labels,
cpu_limit,
cpu_guarantee,
mem_limit,
mem_guarantee,
lifecycle_hooks,
init_containers,
):
"""
Make a k8s pod specification for running a user notebook.
Parameters:
- name:
Name of pod. Must be unique within the namespace the object is
going to be created in. Must be a valid DNS label.
- image_spec:
        Image specification - usually an image name and tag in the form
of image_name:tag. Same thing you would use with docker commandline
arguments
- image_pull_policy:
Image pull policy - one of 'Always', 'IfNotPresent' or 'Never'. Decides
when kubernetes will check for a newer version of image and pull it when
running a pod.
- image_pull_secret:
Image pull secret - Default is None -- set to your secret name to pull
from private docker registry.
- port:
Port the notebook server is going to be listening on
- cmd:
The command used to execute the singleuser server.
- node_selector:
Dictionary Selector to match nodes where to launch the Pods
- run_as_uid:
The UID used to run single-user pods. The default is to run as the user
specified in the Dockerfile, if this is set to None.
- fs_gid
The gid that will own any fresh volumes mounted into this pod, if using
volume types that support this (such as GCE). This should be a group that
the uid the process is running as should be a member of, so that it can
read / write to the volumes mounted.
- env:
Dictionary of environment variables.
- volumes:
List of dictionaries containing the volumes of various types this pod
will be using. See k8s documentation about volumes on how to specify
these
- volume_mounts:
List of dictionaries mapping paths in the container and the volume(
specified in volumes) that should be mounted on them. See the k8s
        documentation for more details
- working_dir:
String specifying the working directory for the notebook container
- labels:
Labels to add to the spawned pod.
- cpu_limit:
Float specifying the max number of CPU cores the user's pod is
allowed to use.
      - cpu_guarantee:
Float specifying the max number of CPU cores the user's pod is
guaranteed to have access to, by the scheduler.
- mem_limit:
String specifying the max amount of RAM the user's pod is allowed
to use. String instead of float/int since common suffixes are allowed
- mem_guarantee:
String specifying the max amount of RAM the user's pod is guaranteed
        to have access to. String instead of float/int since common suffixes
are allowed
- lifecycle_hooks:
Dictionary of lifecycle hooks
- init_containers:
List of initialization containers belonging to the pod.
"""
api_client = ApiClient()
pod = V1Pod()
pod.kind = "Pod"
pod.api_version = "v1"
pod.metadata = V1ObjectMeta()
pod.metadata.name = name
pod.metadata.labels = labels.copy()
pod.spec = V1PodSpec()
security_context = V1PodSecurityContext()
if fs_gid is not None:
security_context.fs_group = int(fs_gid)
if run_as_uid is not None:
security_context.run_as_user = int(run_as_uid)
pod.spec.security_context = security_context
if image_pull_secret is not None:
pod.spec.image_pull_secrets = []
image_secret = V1LocalObjectReference()
image_secret.name = image_pull_secret
pod.spec.image_pull_secrets.append(image_secret)
if node_selector:
pod.spec.node_selector = node_selector
pod.spec.containers = []
notebook_container = V1Container()
notebook_container.name = "notebook"
notebook_container.image = image_spec
notebook_container.working_dir = working_dir
notebook_container.ports = []
port_ = V1ContainerPort()
port_.name = "notebook-port"
port_.container_port = port
notebook_container.ports.append(port_)
notebook_container.env = [V1EnvVar(k, v) for k, v in env.items()]
notebook_container.args = cmd
notebook_container.image_pull_policy = image_pull_policy
notebook_container.lifecycle = lifecycle_hooks
notebook_container.resources = V1ResourceRequirements()
notebook_container.resources.requests = {}
if cpu_guarantee:
notebook_container.resources.requests['cpu'] = cpu_guarantee
if mem_guarantee:
notebook_container.resources.requests['memory'] = mem_guarantee
notebook_container.resources.limits = {}
if cpu_limit:
notebook_container.resources.limits['cpu'] = cpu_limit
if mem_limit:
notebook_container.resources.limits['memory'] = mem_limit
notebook_container.volume_mounts = volume_mounts
pod.spec.containers.append(notebook_container)
pod.spec.init_containers = init_containers
pod.spec.volumes = volumes
return api_client.sanitize_for_serialization(pod) | 0be1782f91ab4de7a0baf0291eb3fcf9c1fc57a4 | 3,651,938 |
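# Minimal example call (hedged): all values below are illustrative placeholders showing the
# expected shape of each argument, not a production configuration; it also assumes the
# kubernetes client objects used above (ApiClient, V1Pod, ...) are importable.
pod_dict = make_pod_spec(
    name="jupyter-alice",
    image_spec="jupyterhub/singleuser:latest",
    image_pull_policy="IfNotPresent",
    image_pull_secret=None,
    port=8888,
    cmd=["jupyterhub-singleuser"],
    node_selector={},
    run_as_uid=1000,
    fs_gid=100,
    env={"JUPYTERHUB_API_TOKEN": "secret"},
    working_dir="/home/jovyan",
    volumes=[],
    volume_mounts=[],
    labels={"app": "jupyterhub"},
    cpu_limit=2.0,
    cpu_guarantee=0.5,
    mem_limit="2G",
    mem_guarantee="512M",
    lifecycle_hooks=None,
    init_containers=None,
)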
def preprocess(text, remove_punct=False, remove_num=True):
"""
preprocess text into clean text for tokenization
"""
# 1. normalize
text = normalize_unicode(text)
# 2. to lower
text = text.lower()
# 3. space
text = spacing_punctuation(text)
text = spacing_number(text)
# (optional)
if remove_punct:
text = remove_punctuation(text)
# 4. de-contract
text = decontracted(text)
# 5. handle number
if remove_num:
text = remove_number(text)
else:
text = clean_number(text)
# 6. remove space
text = remove_space(text)
return text | 289ed6c3032840191ea792b01cb4b3a17535ddf2 | 3,651,939 |
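# Example (hedged): the exact output depends on the helper functions above (normalize_unicode,
# spacing_punctuation, decontracted, ...), but a typical call looks like this:
cleaned = preprocess("I can't believe it's 2020!!", remove_punct=True, remove_num=True)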
def verify_ptp_calibration_states(
device, states, domain, max_time=15, check_interval=5
):
""" Verify ptp parent values in show ptp parent command
Args:
device (`obj`): Device object
states ('str): PTP calibration state
domain ('str): PTP domain
max_time (int): Maximum wait time for the trigger,
in second. Default: 15
check_interval (int): Wait time between iterations when looping is needed,
in second. Default: 5
Returns:
True
False
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = None
try:
out = device.parse("show ptp brief | ex FA")
except SchemaEmptyParserError:
pass
if out:
result = True
else:
result = False
if result:
return True
timeout.sleep()
return False | 648e9753b418365d8469ce17bc709ad67d814bf6 | 3,651,940 |
def get_auth_use_case():
"""Get use case instance."""
return auth_use_case | a01595d40d2693ff2b4023a8d7938b4af7734ca3 | 3,651,941 |
import networkx as nx
def pagerank(G, alpha=0.85, personalization=None,
max_iter=100, tol=1.0e-6, nstart=None, weight='weight',
dangling=None):
"""Returns the PageRank of the nodes in the graph.
PageRank computes a ranking of the nodes in the graph G based on
the structure of the incoming links. It was originally designed as
an algorithm to rank web pages.
Parameters
----------
G : graph
A NetworkX graph. Undirected graphs will be converted to a directed
graph with two directed edges for each undirected edge.
alpha : float, optional
Damping parameter for PageRank, default=0.85.
personalization: dict, optional
The "personalization vector" consisting of a dictionary with a
key some subset of graph nodes and personalization value each of those.
At least one personalization value must be non-zero.
      If not specified, a node's personalization value will be zero.
By default, a uniform distribution is used.
max_iter : integer, optional
Maximum number of iterations in power method eigenvalue solver.
tol : float, optional
Error tolerance used to check convergence in power method solver.
nstart : dictionary, optional
Starting value of PageRank iteration for each node.
weight : key, optional
Edge data key to use as weight. If None weights are set to 1.
dangling: dict, optional
The outedges to be assigned to any "dangling" nodes, i.e., nodes without
any outedges. The dict key is the node the outedge points to and the dict
value is the weight of that outedge. By default, dangling nodes are given
outedges according to the personalization vector (uniform if not
specified). This must be selected to result in an irreducible transition
matrix (see notes under google_matrix). It may be common to have the
dangling dict to be the same as the personalization dict.
Returns
-------
pagerank : dictionary
Dictionary of nodes with PageRank as value
Examples
--------
>>> G = nx.DiGraph(nx.path_graph(4))
>>> pr = nx.pagerank(G, alpha=0.9)
Notes
-----
The eigenvector calculation is done by the power iteration method
and has no guarantee of convergence. The iteration will stop after
an error tolerance of ``len(G) * tol`` has been reached. If the
number of iterations exceed `max_iter`, a
:exc:`networkx.exception.PowerIterationFailedConvergence` exception
is raised.
The PageRank algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
execute on undirected graphs by converting each edge in the
directed graph to two edges.
See Also
--------
pagerank_numpy, pagerank_scipy, google_matrix
Raises
------
PowerIterationFailedConvergence
If the algorithm fails to converge to the specified tolerance
within the specified number of iterations of the power iteration
method.
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
The PageRank citation ranking: Bringing order to the Web. 1999
http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
"""
if len(G) == 0:
return {}
if not G.is_directed():
D = G.to_directed()
else:
D = G
# Create a copy in (right) stochastic form
W = nx.stochastic_graph(D, weight=weight)
N = W.number_of_nodes()
# Choose fixed starting vector if not given
if nstart is None:
x = dict.fromkeys(W, 1.0 / N)
else:
# Normalized nstart vector
s = float(sum(nstart.values()))
x = dict((k, v / s) for k, v in nstart.items())
if personalization is None:
# Assign uniform personalization vector if not given
p = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(personalization.values()))
p = dict((k, v / s) for k, v in personalization.items())
if dangling is None:
# Use personalization vector if dangling vector not specified
dangling_weights = p
else:
s = float(sum(dangling.values()))
dangling_weights = dict((k, v / s) for k, v in dangling.items())
dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
# power iteration: make up to max_iter iterations
for _ in range(max_iter):
xlast = x
x = dict.fromkeys(xlast.keys(), 0)
danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
for n in x:
# this matrix multiply looks odd because it is
# doing a left multiply x^T=xlast^T*W
for nbr in W[n]:
x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
# check convergence, l1 norm
err = sum([abs(x[n] - xlast[n]) for n in x])
if err < N * tol:
return x
raise nx.PowerIterationFailedConvergence(max_iter) | 1d6e758275a3caf33049e5c042b7bde8f4cff17d | 3,651,943 |
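# Personalized PageRank example (hedged): biases the ranking towards node 0 of a small path
# graph; the resulting scores depend on the graph and damping factor.
G = nx.DiGraph(nx.path_graph(4))
pr = pagerank(G, alpha=0.85, personalization={0: 1.0, 1: 0.0, 2: 0.0, 3: 0.0})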
from pathlib import Path
from urllib.request import urlopen
import zlib
import tqdm
def fetch_file(name, chunksize=16 * 1024):
"""
Fetch a datafile from a compressed/gzipped URL source.
Parameters
----------
name : :class:`str`
Name of the file to fetch.
chunksize : :class:`int`
Number of bytes to read in a chunk.
"""
fp, url, compressed = [
(Path(k), url, compressed)
for (k, (url, compressed)) in MANIFEST.items()
if name.lower() in Path(k).name.lower()
][0]
if "1drv" in url:
url = get_onedrive_directlink(
url
) # allow direct access to file object for 1drv
# construct relative path from this file
local_target = (Path(__file__).parent / fp).resolve()
if not local_target.exists():
if not local_target.parent.exists():
local_target.parent.mkdir(parents=True)
if compressed:
dec = zlib.decompressobj(
32 + zlib.MAX_WBITS
) # offset 32 to skip the header
decompress = dec.decompress
else:
decompress = lambda x: x
with urlopen(url) as response:
pbar = tqdm.tqdm(
total=int(response.headers["content-length"]),
unit="b",
unit_scale=True,
unit_divisor=1024,
desc=str(fp.name),
)
CHUNKSIZE = 16 * 1024
with open(local_target, "wb") as f:
while True:
chunk = response.read(chunksize)
if chunk:
rv = decompress(chunk)
f.write(rv)
pbar.update(len(chunk))
else:
break
return fp | f22eb09220135b542bb3b0e599abe896664dffa3 | 3,651,944 |
def create_include(workflow_stat):
"""
Generates the html script include content.
@param workflow_stat the WorkflowInfo object reference
"""
include_str = """
<script type='text/javascript' src='bc_action.js'>
</script>
<script type='text/javascript' src='bc_""" + workflow_stat.wf_uuid +"""_data.js'>
</script>
"""
return include_str | 24151952c9dd5bc4034916dae90a3760fc06ca44 | 3,651,945 |
import random
def choose_sample_from_group(
group: general.ParameterListType,
) -> general.ParameterValuesType:
"""
Choose single sample from group DataFrame.
"""
# Make continous index from 0
indexes = [idx for idx in range(len(group))]
assert len(indexes) > 0
# Choose from indexes
choice = random.choices(population=indexes, k=1)[0]
# Get the dict at choice index
chosen_dict = group[choice]
assert isinstance(chosen_dict, dict)
return chosen_dict | 27f1c8a9ca4640b881f5bdd3faca0db4b1b882da | 3,651,946 |
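# Example (hedged): pick one hyper-parameter dict at random from a group of candidates.
group = [{"lr": 0.1, "depth": 2}, {"lr": 0.01, "depth": 4}, {"lr": 0.001, "depth": 8}]
chosen = choose_sample_from_group(group)  # one of the three dicts above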
def path_available(filepath):
# type: (str) -> bool
"""Return true if filepath is available"""
parent_directory = dirname(filepath)
if not exists(parent_directory):
raise ParentDirectoryDoesNotExist(parent_directory, filepath)
return not exists(filepath) | efd506d2028f2c55e88dfc618395620571205773 | 3,651,947 |
from typing import Dict
from typing import Any
from typing import Callable
def memory_item_to_resource(urn: URN, items: Dict[str, Any] = None, loader: Callable = None) -> CloudWandererResource:
"""Convert a resource and its attributes to a CloudWandererResource.
Arguments:
urn (URN): The URN of the resource.
        items (dict): The dictionary of items stored under this URN (Secondary Attributes, BaseResource).
loader (Callable): The method which can be used to fulfil the :meth:`CloudWandererResource.load`
"""
items = items or {}
attributes = [
attribute
for item_type, attribute in items.items()
if item_type not in ["SubresourceUrns", "BaseResource", "ParentUrn"]
]
base_resource: Dict[str, Any] = next(
iter(resource for item_type, resource in items.items() if item_type == "BaseResource"), {}
)
return CloudWandererResource(
urn=urn,
subresource_urns=items.get("SubresourceUrns"),
resource_data=base_resource,
secondary_attributes=attributes,
loader=loader,
) | 0bf680574f2ef3038d9d29c656a657e4e7a172ec | 3,651,948 |
def sample_user(email='[email protected]', password='testpass'):
"""Create a sample user"""
return get_user_model().objects.create_user(email, password) | deb5c45287a8ff546e2631c4409d10015b550e5c | 3,651,949 |
import PIL
import random
def ShearX(img: Image, magnitude: float) -> Image:
"""Shear the image on x-axis."""
return img.transform(
img.size,
PIL.Image.AFFINE,
(1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
PIL.Image.BICUBIC,
fillcolor=FILLCOLOR,
) | 9d534cfc8f7cc5497356b8e07115d42f666aac5d | 3,651,950 |
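# Example (hedged): FILLCOLOR is assumed to be defined elsewhere in the module (typically a
# neutral grey tuple); the blank image below is just a dummy input.
from PIL import Image
img = Image.new("RGB", (224, 224), color=(255, 255, 255))
sheared = ShearX(img, magnitude=0.3)  # shear by +/-0.3 along the x-axis, sign chosen at random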
import numpy
def read_onsets(onsets_path: PathLike) -> numpy.array:
"""
    Read a text file containing onsets. Return them as a 1-D numpy array of floats.
"""
with open(onsets_path, "r") as io:
lines = io.readlines()
onsets = numpy.array([float(line) for line in lines])
return onsets | cbfa38ce0a5e2ea4d6c465251ee8c3e6ec47d04f | 3,651,952 |
def format_specific_efficacy(method, type_1: str, type_2: str = None):
""" Format the efficacy string specifically for defense or attack. """
effective, ineffective, useless = format_damage(method, type_1, type_2)
type_name = format_type(type_1, type_2)
s = "**{}** \N{EN DASH} **{}**\n".format(type_name, "DEFENSE" if method is defense_method else "ATTACK")
if effective:
s += "Super effective: `{}`\n".format(", ".join(effective))
if ineffective:
s += "Not very effective: `{}`\n".format(", ".join(ineffective))
if useless:
s += "No effect: `{}`\n".format(", ".join(useless))
return s | 095f943cda0dfdf1803ae38b16c6b9d7f8fd3e1f | 3,651,953 |
def getSuffixes(algorithm, seqType) :
""" Get the suffixes for the right algorithm with the right
sequence type
"""
suffixes = {}
suffixes['LAST'] = {}
suffixes['BLAST'] = {}
suffixes['BLAST']['nucl'] = ['nhr', 'nsq', 'nin']
suffixes['BLAST']['prot'] = ['phr', 'psq', 'pin']
suffixes['LAST']['nucl'] = [ 'des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis' ]
suffixes['LAST']['prot'] = [ 'des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis' ]
if not algorithm in suffixes:
return None
if not seqType in suffixes[algorithm]:
return None
return suffixes[algorithm][seqType] | 9ab699a71be73381c4dff555f0ef19201589e82f | 3,651,955 |
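# Examples, taken directly from the lookup table above:
getSuffixes('BLAST', 'nucl')   # -> ['nhr', 'nsq', 'nin']
getSuffixes('LAST', 'prot')    # -> ['des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis']
getSuffixes('BWA', 'nucl')     # -> None (unknown algorithm)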
import tempfile
from robocorp_code.path_operations import get_user
from robocorp_code.path_operations import make_numbered_dir_with_cleanup
from robocorp_code.path_operations import LOCK_TIMEOUT
from typing import Optional
from pathlib import Path
def make_numbered_in_temp(
keep: int = 10,
lock_timeout: float = -1,
tmpdir: Optional[Path] = None,
register=None,
) -> Path:
"""
Helper to create a numbered directory in the temp dir with automatic disposal
of old contents.
"""
user = get_user() or "unknown"
temproot = tmpdir if tmpdir else Path(tempfile.gettempdir())
rootdir = temproot / f"robocorp-code-{user}"
rootdir.mkdir(exist_ok=True)
return make_numbered_dir_with_cleanup(
prefix="rcc-",
root=rootdir,
keep=keep,
lock_timeout=lock_timeout if lock_timeout > 0 else LOCK_TIMEOUT,
register=register,
) | 9ba3d08d933d961099d5169afc25c152177857b3 | 3,651,956 |
def handle_server_api(output, kwargs):
""" Special handler for API-call 'set_config' [servers] """
name = kwargs.get('keyword')
if not name:
name = kwargs.get('name')
if name:
server = config.get_config('servers', name)
if server:
server.set_dict(kwargs)
old_name = name
else:
config.ConfigServer(name, kwargs)
old_name = None
Downloader.do.update_server(old_name, name)
return name | 0c4396619c1aee1151642de3edeb4b28d76acb9c | 3,651,957 |
def compare_names(namepartsA, namepartsB):
"""Takes two name-parts lists (as lists of words) and returns a score."""
complement = set(namepartsA) ^ set(namepartsB)
intersection = set(namepartsA) & set(namepartsB)
score = float(len(intersection))/(len(intersection)+len(complement))
return score | 87cbceaaa0acce0b83b5faf66cbe909ad52382eb | 3,651,958 |
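# Example: this is a Jaccard-style score, so shared words raise it and unshared words lower it.
compare_names(["john", "smith"], ["john", "doe"])        # 1 shared / (1 shared + 2 unshared) = 0.333...
compare_names(["ada", "lovelace"], ["ada", "lovelace"])  # identical lists -> 1.0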
def Normal_VaR(return_matrix, theta, Horizon):  # 500 data points needed
"""
Compute the Value-at-Risk and Conditional Value-at-Risk
Parameters
----------
risk_returns : np.ndarray
theta : np.float64
Horizon : np.int16
Returns
----------
np.ndarray,np.ndarray VaR , CVaR
"""
mean_forecast,var_forecast,conditional_volatility = Arch_data(return_matrix , Horizon )
excess_innovations = Extract_Excess_Innovations(return_matrix , mean_forecast , conditional_volatility )
mu,scale = Dist_parameters(excess_innovations)
VaR,CVaR = Var_CVaR_extractor(mean_forecast,var_forecast,scale,mu,theta)
return VaR,CVaR | a2b911d647c942724dc30480bb90db7c83e200bb | 3,651,959 |
def oscillator_amplitude(state, ders, period, floquet, zero_phase_lc, phase_warmup_periods=5, thr=0.0, dt=0.005):
"""calculates the isostable amplitude of the oscillator from dynamical equations
:param state: state of the system
:param ders: a list of state variable derivatives
:param period: oscillator period
:param floquet: floquet exponent
:param zero_phase_lc: zero phase limit cycle state
:param phase_warmup_periods: how many periods to wait for evaluating the asymptotic phase shift (default 5)
:param thr: threshold determining zero phase (default 0.0)
:param dt: time step (default 0.005)
:return: isostable amplitude of state"""
# get phase
phase = oscillator_phase(state, ders, period, phase_warmup_periods, thr=thr, dt=dt)
# calculate time to evolve to zero isochron
time = (1-phase/(2*pi))*period
# evolve to 0 isochron
state = integrate_period(state, ders, time, dt)
# amplitude sign
if(inside_limit_cycle(state, ders, period)):
sign = -1
else:
sign = 1
return 0.5*sign*distance(state,zero_phase_lc)*exp(floquet*time) | b6a55d9965eea712be2f49dbbc1f186d268f82bf | 3,651,960 |
def commonprefix(a, b):
"""Find longest common prefix of `a` and `b`."""
pos = 0
length = min(len(a), len(b))
while pos < length and a[pos] == b[pos]:
pos += 1
return pos, b | 75e2f9ac6c3d0c38986cba5f8409ddc87fe8edbe | 3,651,961 |
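# Example: returns the length of the shared prefix together with the second argument.
commonprefix("foobar", "foobaz")  # -> (5, 'foobaz'), since "fooba" is shared
commonprefix("abc", "xyz")        # -> (0, 'xyz')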
def parse_datetime(strtime):
"""
Parse a string date, time & tz into a datetime object:
2003-03-20 05:00:00-07
"""
offset = int(strtime[-3:])
date_time = dt.strptime(strtime[:-4], '%Y-%m-%d %H:%M:%S')
offset = timedelta(hours=offset)
return (date_time + offset).replace(tzinfo=utc) | c7537ed913a4d0b20a71b7253725231c32c9f60b | 3,651,962 |
from typing import List
from typing import cast
from typing import Iterable
def traverse_depth_first(base: AnyDependency) -> List[AnyDependency]:
"""Performs a depth first traversal of the dependency tree.
"""
def _traverse_tree_2(base: AnyDependency) -> List[AnyDependency]:
queue: List[AnyDependency] = []
current_idx = 0
queue.append(base)
        # Nodes are visited in queue (level/breadth-first) order.
        while len(queue) != current_idx:
node = queue[current_idx]
if not isinstance(node, UnresolvedDependency):
queue.extend(cast(Iterable, node.dependencies))
current_idx += 1
return queue
deps = _traverse_tree_2(base)
return deps | 1172f3b97110cc41c68631d3e6a91a0ea8d20627 | 3,651,963 |
def update_config(
client,
key,
*,
value=None,
remove=False,
global_only=False,
commit_message=None
):
"""Add, update, or remove configuration values."""
section, section_key = _split_section_and_key(key)
if remove:
value = client.remove_value(
section, section_key, global_only=global_only
)
if value is None:
raise errors.ParameterError('Key "{}" not found.'.format(key))
else:
client.set_value(section, section_key, value, global_only=global_only)
return value | 59f71b2608ddcfb38cdf1845720d782b7858607f | 3,651,964 |
from datetime import datetime
def parse_time(t):
""" parse a date time string, or a negative number as
the number of seconds ago.
returns unix timestamp in MS
"""
try:
tint = int(t)
if tint <= 0:
return int(nowms() + (tint * 1000))
except ValueError:
pass
    # The parsed date may or may not have a tz; if it does not, localize it.
parsed = dtparse(t)
if not parsed.tzinfo:
parsed = parsed.replace(tzinfo=tzlocal())
    # Get the milliseconds by subtracting the epoch in the same tz, then multiplying by 1000.
return int((parsed - datetime.fromtimestamp(0, parsed.tzinfo)).total_seconds() * 1000) | 68189b1d0aa2f73152a77a1a790fc6a291e5ff25 | 3,651,965 |
import numpy as np
def _get_duration(tmin: np.datetime64, tmax: np.datetime64) -> str:
"""
Determine the duration of the given datetimes.
See also: `ISO 8601 Durations <https://en.wikipedia.org/wiki/ISO_8601#Durations>`_
:param tmin: Time minimum
:param tmax: Time maximum
:return: Temporal resolution formatted as an ISO 8601:2004 duration string
"""
delta = tmax - tmin
day = np.timedelta64(1, 'D')
days = (delta.astype('timedelta64[D]') / day) + 1
return 'P{}D'.format(int(days)) | e56c399402a1325bc519443ea4caea57be2806e7 | 3,651,966 |
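# Example: the duration is inclusive of both endpoints, hence the +1 above.
_get_duration(np.datetime64("2020-01-01"), np.datetime64("2020-01-03"))  # -> 'P3D'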
import math
def get_polyend_circle_angles(a, b, isLeft):
"""
theta0 = pi/2 + betta, theta1 = 2 * pi + betta;
betta = pi/2 - alpha;
alpha = atan(a)
"""
if a is None and b is None:
return None, None
alpha = math.pi / 2.0 if a is None else math.atan(a)
betta = math.pi / 2.0 - alpha
shift = 0.0 if isLeft else math.pi
theta0 = betta + shift
theta1 = theta0 + math.pi
return theta0, theta1 | 9547ba4ea9f74cba3d52d90bb24dc8c4b246fbff | 3,651,967 |
import re
def get_search_cache_key(prefix, *args):
""" Generate suitable key to cache twitter tag context
"""
key = '%s_%s' % (prefix, '_'.join([str(arg) for arg in args if arg]))
not_allowed = re.compile('[^%s]' % ''.join([chr(i) for i in range(33, 128)]))
key = not_allowed.sub('', key)
return key | f3ff5baa13e4e84deb5c13cd8d5b618ba75c8699 | 3,651,969 |
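# Examples: falsy args are skipped, and characters outside the printable ASCII range
# chr(33)-chr(127) (including spaces) are stripped from the key.
get_search_cache_key('twitter', 'user', 'timeline')  # -> 'twitter_user_timeline'
get_search_cache_key('twitter', 'my query', None)    # -> 'twitter_myquery'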
def main(argv=None):
"""Run pragma-no-mutate filter with specified command line arguments.
"""
return PragmaNoMutateFilter().main(argv) | f268a010b454fe28307e8e304dca3d57fe1e635a | 3,651,970 |
import math
import numpy as np
def independence_single_value(values, sigma=0.70):
"""
This calculates the independence of the models for a given metric
where the metric is single valued, e.g. the slope of a gradient.
------Input------
values (list) : The single values for each model.
sigma (float) : The value of sigma_s
-----Returns-----
S (np.array 2D) : The inter model similarity
W (np.array 1D) : The weight per model from the similarity calculation
"""
sigma_s = sigma
# Can first calculate inter model distances S and D
S = np.zeros((len(values), len(values)))
# Weightings W dims=num_models
W = np.zeros((len(values), 1))
for i, model_i in enumerate(values):
i_data = model_i
for j, model_j in enumerate(values):
if i != j:
j_data = model_j
s = math.exp(-((i_data - j_data) ** 2).sum() / (1 * sigma_s ** 2))
S[i, j] = s
for ii in range(len(values)):
w = 1 / (1 + np.nansum(S[ii], 0))
W[ii] = w
W /= np.nansum(W)
return S, W | 0802966fed4d9cb5b9e2d525d10593534c5c51a0 | 3,651,971 |
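# Example (hedged): three models where the first two give similar slopes; the third, more
# independent model should receive the largest weight. Exact numbers depend on sigma.
S, W = independence_single_value([1.00, 1.05, 2.50], sigma=0.70)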
from collections import defaultdict
import numpy as np
def extract_fingerprints(atoms, i_jbond_dict, radius):
"""Extract the r-radius subgraphs (i.e., fingerprints)
from a molecular graph using Weisfeiler-Lehman algorithm."""
if (len(atoms) == 1) or (radius == 0):
fingerprints = [fingerprint_dict[a] for a in atoms]
else:
nodes = atoms
i_jedge_dict = i_jbond_dict
for _ in range(radius):
"""Update each node ID considering its neighboring nodes and edges
(i.e., r-radius subgraphs or fingerprints)."""
fingerprints = []
for i, j_edge in i_jedge_dict.items():
neighbors = [(nodes[j], edge) for j, edge in j_edge]
fingerprint = (nodes[i], tuple(sorted(neighbors)))
fingerprints.append(fingerprint_dict[fingerprint])
nodes = fingerprints
"""Also update each edge ID considering two nodes
on its both sides."""
_i_jedge_dict = defaultdict(lambda: [])
for i, j_edge in i_jedge_dict.items():
for j, edge in j_edge:
both_side = tuple(sorted((nodes[i], nodes[j])))
edge = edge_dict[(both_side, edge)]
_i_jedge_dict[i].append((j, edge))
i_jedge_dict = _i_jedge_dict
return np.array(fingerprints) | beaa457e0eb514ca7fbfaca846378a0d23c2b94c | 3,651,972 |