content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def compact_axis_angle_from_matrix(R, strict_check=True):
"""Compute compact axis-angle from rotation matrix.
This operation is called logarithmic map. Note that there are two possible
solutions for the rotation axis when the angle is 180 degrees (pi).
We usually assume active rotations.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z). The angle is
constrained to [0, pi].
"""
a = axis_angle_from_matrix(R, strict_check)
return compact_axis_angle(a) | a7493a5ed1c622b9cbec6e9f0771e62f7f4712e2 | 3,656,600 |
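A minimal usage sketch for compact_axis_angle_from_matrix above; it assumes the pytransform3d-style helpers axis_angle_from_matrix and compact_axis_angle are in scope, so treat it as illustrative only:
import numpy as np
R = np.eye(3)  # identity rotation
a = compact_axis_angle_from_matrix(R)
print(a)  # expected: array([0., 0., 0.]) because the rotation angle is zero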
from netaddr import IPNetwork, iprange_to_cidrs
def _generate_IPRange(Range):
"""
IP range to CIDR and IPNetwork type
Args:
Range: IP range
Returns:
an array with CIDRs
"""
if len(Range.rsplit('.')) == 7 and '-' in Range and '/' not in Range:
if len(Range.rsplit('-')) == 2:
start_ip, stop_ip = Range.rsplit('-')
if isIP(start_ip) and isIP(stop_ip):
return iprange_to_cidrs(start_ip, stop_ip)
else:
return []
else:
return []
elif len(Range.rsplit('.')) == 4 and '-' not in Range and '/' in Range:
return IPNetwork(Range)
else:
return [] | d86f8db8e87313b12f35669ee25cc3f3d229c631 | 3,656,601 |
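A short usage sketch for _generate_IPRange above; it assumes netaddr is installed and that the project-local isIP validator is in scope:
cidrs = _generate_IPRange("192.168.1.1-192.168.1.3")
print(cidrs)  # e.g. [IPNetwork('192.168.1.1/32'), IPNetwork('192.168.1.2/31')]
print(_generate_IPRange("10.0.0.0/24"))  # note: returned as a single IPNetwork, not a list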
import numpy as np
def is_dict_homogeneous(data):
"""Returns True for homogeneous, False for heterogeneous.
An empty dict is homogeneous.
ndarray behaves like collection for this purpose.
"""
if len(data) == 0:
return True
k0, v0 = next(iter(data.items()))
ktype0 = type(k0)
vtype0 = type(v0)
if ktype0 in collection_types or ktype0 == np.ndarray or vtype0 in collection_types or vtype0 == np.ndarray:
return False
for k, v in data.items():
ktype = type(k)
vtype = type(v)
if (ktype != ktype0 or ktype in collection_types or ktype == np.ndarray) or \
(vtype != vtype0 or vtype in collection_types or vtype == np.ndarray):
return False
return True | 921e66639cd6a8584e99e14852158594b1001ef9 | 3,656,602 |
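An illustrative check for is_dict_homogeneous above, assuming the module-level collection_types contains the built-in containers (e.g. list, tuple, set, dict):
print(is_dict_homogeneous({}))                  # True: an empty dict counts as homogeneous
print(is_dict_homogeneous({"a": 1, "b": 2}))    # True: all str keys, all int values
print(is_dict_homogeneous({"a": 1, "b": 2.0}))  # False: mixed int/float values
print(is_dict_homogeneous({"a": [1, 2]}))       # False: list values are collections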
from contextlib import contextmanager
from functools import wraps
from itertools import chain
from typing import Any, Callable, Generator, TypeVar, Union
from typing_extensions import ParamSpec
from rest_framework.request import Request
from django.utils.translation import override
P = ParamSpec("P")
T = TypeVar("T")
def translate(item: Union[Callable[P, T], Request]) -> Union[Generator[Any, Any, None], Callable[P, T]]:
"""Override current language with one from language header or 'lang' parameter.
Can be used as a context manager or a decorator. If a function is decorated,
one of the parameters for the function must be a `rest_framework.Request` object.
"""
if not isinstance(item, Request):
@wraps(item)
def decorator(*args: P.args, **kwargs: P.kwargs) -> Any:
request = None
for arg in chain(args, kwargs.values()):
if isinstance(arg, Request):
request = arg
break
if request is None:
raise ValueError("No Request-object in function parameters.")
with override(get_language(request)):
return item(*args, **kwargs) # type: ignore
return decorator
@contextmanager
def context_manager(request: Request) -> Generator[Any, Any, None]:
with override(get_language(request)):
yield
return context_manager(item) | 5042cc77efb1477444f8f9611055fb3e183cf3d3 | 3,656,603 |
def get_all(isamAppliance, check_mode=False, force=False, ignore_error=False):
"""
Retrieving the current runtime template files directory contents
"""
return isamAppliance.invoke_get("Retrieving the current runtime template files directory contents",
"/mga/template_files?recursive=yes", ignore_error=ignore_error) | 9ff291b63471b57b110885c35939c8afe3d2f0d8 | 3,656,604 |
from os import path, chdir
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def build_path(dirpath, outputfile):
"""
Build function
"""
#some checks
if not path.exists(dirpath):
print("Path does not exist!")
return 1
if not path.isdir(dirpath):
print("Path is not folder")
return 1
#for now SQLite
try:
output = create_engine("sqlite:///{}".format(outputfile))
except Exception:
print("Cannot create output file")
return 1
SQLABase.metadata.create_all(output)
session = sessionmaker(bind=output)()
def record_wrapper(filename):
record = record_from_file(filename)
session.add(record)
session.commit()
chdir(dirpath)
recursiveListing(".", record_wrapper) | 01492296925a259873e327d8eae938710fe78f20 | 3,656,605 |
import os
def _fix_importname(mname):
"""
:param mname:
"""
mname = os.path.normpath(mname)
mname = mname.replace(".", "")
mname = mname.replace("-", "")
mname = mname.replace("_", "")
mname = mname.replace(os.path.sep, "")
mname = mname.replace(os.path.pathsep, "")
return mname | 22f8ab56800a593502822a612c3f642e8cec22ea | 3,656,606 |
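A quick illustration of _fix_importname above; the exact output assumes POSIX path separators:
print(_fix_importname("./my-pkg/sub_module.py"))  # -> "mypkgsubmodulepy": dots, dashes, underscores and separators stripped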
def main(args, out, err):
""" This wraps GURepair's real main function so
that we can handle exceptions and trigger our own exit
commands.
This is the entry point that should be used if you want
to use this file as a module rather than as a script.
"""
cleanUpHandler = BatchCaller(args.verbose, out)
gr_instance = GPURepairInstance(args, out, err, cleanUpHandler)
def handleTiming(exitCode):
if gr_instance.time:
print(gr_instance.getTiming(exitCode), file = out)
def doCleanUp(timing, exitCode):
if timing:
# We must call this before cleaning up globals
# because it depends on them
cleanUpHandler.register(handleTiming, exitCode)
# We should call this last.
cleanUpHandler.call()
try:
returnCode = gr_instance.invoke()
except Exception:
# Something went very wrong
doCleanUp(timing = False, exitCode = 0) # It doesn't matter what the exitCode is
raise
doCleanUp(timing = True, exitCode = returnCode) # Do this outside try block so we don't call twice!
return returnCode | c506306a93804ab60c1a6805e9c53a0fd9dd7cfd | 3,656,607 |
import requests
import os
import io
import datetime as dt
import pandas as pd
def getSymbolData(symbol, sDate=(2000,1,1), adjust=False, verbose=True, dumpDest=None):
"""
get data from Yahoo finance and return pandas dataframe
Parameters
-----------
symbol : str
Yahoo finance symbol
sDate : tuple , default (2000,1,1)
start date (y,m,d)
adjust : bool , default False
use adjusted close values to correct OHLC. adj_close will be omitted
verbose : bool , default True
print output
dumpDest : str, default None
dump raw data for debugging
Returns
---------
DataFrame
"""
period1 = int(dt.datetime(*sDate).timestamp()) # convert to seconds since epoch
period2 = int(dt.datetime.now().timestamp())
params = (symbol, period1, period2, _token['crumb'])
url = "https://query1.finance.yahoo.com/v7/finance/download/{0}?period1={1}&period2={2}&interval=1d&events=history&crumb={3}".format(*params)
data = requests.get(url, cookies={'B':_token['cookie']})
data.raise_for_status() # raise error in case of bad request
if dumpDest is not None:
fName = symbol+'_dump.csv'
with open(os.path.join(dumpDest, fName),'w') as fid:
fid.write(data.text)
buf = io.StringIO(data.text) # create a buffer
df = pd.read_csv(buf,index_col=0,parse_dates=True, na_values=['null']) # convert to pandas DataFrame
# rename columns
newNames = [c.lower().replace(' ','_') for c in df.columns]
renames = dict(zip(df.columns,newNames))
df = df.rename(columns=renames)
# remove duplicates
df = df[~df.index.duplicated(keep='first')]
if verbose:
print(('Got %i days of data' % len(df)))
if adjust:
df = _adjust(df,removeOrig=True)
return df | 24e06e34e4f5a4705d59a2ffb32c767bbc25adcf | 3,656,608 |
import networkx as nx
from igraph import Graph, ADJ_UNDIRECTED
def generateLouvainCluster(edgeList):
"""
Louvain Clustering using igraph
"""
Gtmp = nx.Graph()
Gtmp.add_weighted_edges_from(edgeList)
W = nx.adjacency_matrix(Gtmp)
W = W.todense()
graph = Graph.Weighted_Adjacency(
W.tolist(), mode=ADJ_UNDIRECTED, attr="weight", loops=False)  # IDE may flag these keyword arguments, but they are valid for igraph
louvain_partition = graph.community_multilevel(
weights=graph.es['weight'], return_levels=False)
size = len(louvain_partition)
hdict = {}
count = 0
for i in range(size):
tlist = louvain_partition[i]
for j in range(len(tlist)):
hdict[tlist[j]] = i
count += 1
listResult = []
for i in range(count):
listResult.append(hdict[i])
return listResult, size | c171474bdd81456cbbc488b0a8cb826f881419ec | 3,656,609 |
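A small usage sketch for generateLouvainCluster above, assuming networkx and python-igraph are installed; the exact cluster ids can vary between runs:
edges = [(0, 1, 1.0), (1, 2, 1.0), (3, 4, 1.0)]  # two weakly connected groups
labels, n_clusters = generateLouvainCluster(edges)
print(labels, n_clusters)  # e.g. [0, 0, 0, 1, 1] and 2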
def exprvars(name, *dims):
"""Return a multi-dimensional array of expression variables.
The *name* argument is passed directly to the
:func:`pyeda.boolalg.expr.exprvar` function,
and may be either a ``str`` or tuple of ``str``.
The variadic *dims* input is a sequence of dimension specs.
A dimension spec is a two-tuple: (start index, stop index).
If a dimension is given as a single ``int``,
it will be converted to ``(0, stop)``.
The dimension starts at index ``start``,
and increments by one up to, but not including, ``stop``.
This follows the Python slice convention.
For example, to create a 4x4 array of expression variables::
>>> vs = exprvars('a', 4, 4)
>>> vs
farray([[a[0,0], a[0,1], a[0,2], a[0,3]],
[a[1,0], a[1,1], a[1,2], a[1,3]],
[a[2,0], a[2,1], a[2,2], a[2,3]],
[a[3,0], a[3,1], a[3,2], a[3,3]]])
"""
return _vars(Expression, name, *dims) | 6b65872029de938d37c9e968f696587e2a03ff8c | 3,656,610 |
import skimage.filters
import skimage.measure
import skimage.morphology
import skimage.segmentation
def cell_segmenter(im, thresh='otsu', radius=20.0, image_mode='phase',
area_bounds=(0,1e7), ecc_bounds=(0, 1)):
"""
This function segments a given image via thresholding and returns
a labeled segmentation mask.
Parameters
----------
im : 2d-array
Image to be segmented. This may be of either float or integer
data type.
thresh : int, float, or 'otsu'
Value used during the thresholding operation. This can either be a value
('int' or 'float') or 'otsu', in which case the threshold value will be
determined automatically using Otsu's thresholding method.
radius : float
Radius for gaussian blur for background subtraction. Default value
is 20.
image_mode : 'phase' or 'fluorescence'
Mode of microscopy used to capture the image. If 'phase', objects with
intensity values *lower* than the provided threshold will be selected.
If 'fluorescence', values *greater* than the provided threshold will be
selected. Default value is 'phase'.
area_bounds : tuple of ints.
Range of areas of acceptable objects. This should be provided in units
of square pixels.
ecc_bounds : tuple of floats
Range of eccentricity values of acceptable objects. These values should
range between 0.0 and 1.0.
Returns
-------
im_labeled : 2d-array, int
Labeled segmentation mask.
"""
# Apply a median filter to remove hot pixels
med_selem = skimage.morphology.square(3)
im_filt = skimage.filters.median(im, selem=med_selem)
# Perform gaussian subtraction
im_sub = bg_subtract(im_filt, radius)
# Determine the thresholding method
if thresh == 'otsu':
thresh = skimage.filters.threshold_otsu(im_sub)
# Determine the image mode and apply threshold
if image_mode == 'phase':
im_thresh = im_sub < thresh
elif image_mode == 'fluorescence':
im_thresh = im_sub > thresh
else:
raise ValueError("Image mode not recognized. Must be 'phase'"
+ "or 'fluorescence'.")
# Label the objects
im_label = skimage.measure.label(im_thresh)
# Apply the area and eccentricity bounds
im_filt = area_ecc_filter(im_label, area_bounds, ecc_bounds)
# Remove objects touching the border
im_border = skimage.segmentation.clear_border(im_filt, buffer_size=5)
# Relabel the image
im_border = im_border > 0
im_label = skimage.measure.label(im_border)
return im_label | f9a8fa3c29cbb213ed67c3df93106a81f53ae985 | 3,656,611 |
import datetime
from pandas import read_sql
def generate_report(start_date, end_date):
"""Generate the text report"""
pgconn = get_dbconn('isuag', user='nobody')
days = (end_date - start_date).days + 1
totalobs = days * 24 * 17
df = read_sql("""
SELECT station, count(*) from sm_hourly WHERE valid >= %s
and valid < %s GROUP by station ORDER by station
""", pgconn, params=(start_date, end_date + datetime.timedelta(days=1)),
index_col='station')
performance = min([100, df['count'].sum() / float(totalobs) * 100.])
return """
Iowa Environmental Mesonet Data Delivery Report
===============================================
Dataset: ISU Soil Moisture Network
Performance Period: %s thru %s
Reported Performance: %.1f%%
Reporting Platforms: %.0f
Additional Details
==================
Total Required Obs: %.0f (24 hourly obs x 17 platforms x %.0f days)
Observations Delivered: %.0f
Report Generated: %s
.END
""" % (start_date.strftime("%d %b %Y"), end_date.strftime("%d %b %Y"),
performance, len(df.index), totalobs, days, df['count'].sum(),
datetime.datetime.now().strftime("%d %B %Y %H:%M %p")) | f71b5ab58922b9018abc1868661f88c268de8f94 | 3,656,612 |
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from astropy.io import fits
def whole(eventfile,par_list,tbin_size,mode,ps_type,oversampling,xlims,vlines):
"""
Plot the entire power spectrum without any cuts to the data.
eventfile - path to the event file. Will extract ObsID from this for the NICER files.
par_list - A list of parameters we'd like to extract from the FITS file
(e.g., from eventcl, PI_FAST, TIME, PI,)
tbin_size - the size of the time bins (in seconds!)
>> e.g., tbin_size = 2 means bin by 2s
>> e.g., tbin_size = 0.05 means bin by 0.05s!
mode - whether we want to show or save the plot.
ps_type - obtain power spectrum through the periodogram method ('period') or
the manual FFT way ('manual') or both ('both')
oversampling - whether to perform oversampling. Array will consist of
[True/False, oversampling factor]
xlims - a list or array: first entry = True/False as to whether to impose an
xlim; second and third entry correspond to the desired x-limits of the plot
vlines - a list or array: first entry = True/False as to whether to draw
a vertical line in the plot; second entry is the equation for the vertical line
"""
if type(eventfile) != str:
raise TypeError("eventfile should be a string!")
if 'TIME' not in par_list:
raise ValueError("You should have 'TIME' in the parameter list!")
if type(par_list) != list and type(par_list) != np.ndarray:
raise TypeError("par_list should either be a list or an array!")
if mode != 'show' and mode != 'save':
raise ValueError("Mode should either be 'show' or 'save'!")
if ps_type != 'period' and ps_type != 'manual' and ps_type != 'both':
raise ValueError("ps_type should either be 'period' or 'show' or 'save'!")
if type(oversampling) != list and type(oversampling) != np.ndarray:
raise TypeError("oversampling should either be a list or an array!")
if type(xlims) != list and type(xlims) != np.ndarray:
raise TypeError("xlims should either be a list or an array!")
if type(vlines) != list and type(vlines) != np.ndarray:
raise TypeError("vlines should either be a list or an array!")
parent_folder = str(pathlib.Path(eventfile).parent)
data_dict = Lv0_fits2dict.fits2dict(eventfile,1,par_list)
times = data_dict['TIME']
counts = np.ones(len(times))
shifted_t = times-times[0]
t_bins = np.linspace(0,np.ceil(shifted_t[-1]),int(np.ceil(shifted_t[-1])*1/tbin_size+1))
summed_data, bin_edges, binnumber = stats.binned_statistic(shifted_t,counts,statistic='sum',bins=t_bins) #binning the time values in the data
event_header = fits.open(eventfile)[1].header
obj_name = event_header['OBJECT']
obsid = event_header['OBS_ID']
if ps_type == 'period':
plt.figure()
pdgm_f,pdgm_ps = Lv2_ps_method.pdgm(t_bins,summed_data,xlims,vlines,True,oversampling)
plt.title('Power spectrum for ' + obj_name + ', ObsID: ' + str(obsid) + '\n Periodogram method' + '\n Includes whole time interval and energy range',fontsize=12)
if mode == 'show':
plt.show()
elif mode == 'save':
filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_pdgm.pdf'
plt.savefig(parent_folder+'/'+filename,dpi=900)
plt.close()
return pdgm_f, pdgm_ps
if ps_type == 'manual':
plt.figure()
manual_f,manual_ps = Lv2_ps_method.manual(t_bins,summed_data,xlims,vlines,True,oversampling)
plt.title('Power spectrum for ' + obj_name + ', ObsID ' + str(obsid) + '\n Manual FFT method' + '\n Includes whole time interval and energy range',fontsize=12)
if mode == 'show':
plt.show()
elif mode == 'save':
filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_manual.pdf'
plt.savefig(parent_folder+'/'+filename,dpi=900)
plt.close()
return manual_f, manual_ps
if ps_type == 'both':
pdgm_f,pdgm_ps = Lv2_ps_method.pdgm(t_bins,summed_data,xlims,vlines,False,oversampling)
manual_f,manual_ps = Lv2_ps_method.manual(t_bins,summed_data,xlims,vlines,False,oversampling)
fig, (ax1,ax2) = plt.subplots(2,1)
fig.suptitle('Power spectra for ' + obj_name + ', ObsID ' + str(obsid) + '\n both periodogram and manual FFT method' + '\n Includes whole time interval and energy range' , fontsize=12)
ax1.semilogy(pdgm_f,pdgm_ps,'b-')#/np.mean(pdgm_ps),'b-') #periodogram; arrays already truncated!
ax1.set_xlabel('Hz',fontsize=12)
ax1.set_ylabel('Normalized power spectrum',fontsize=10)
ax2.semilogy(manual_f,manual_ps,'r-')#/np.mean(manual_ps),'r-') #manual FFT; arrays already truncated!
ax2.set_xlabel('Hz',fontsize=12)
ax2.set_ylabel('Normalized power spectrum',fontsize=10)
if xlims[0] == True:
ax1.set_xlim([xlims[1],xlims[2]])
ax2.set_xlim([xlims[1],xlims[2]])
if vlines[0] == True:
ax1.axvline(x=vlines[1],color='k',alpha=0.5,lw=0.5)
ax2.axvline(x=vlines[1],color='k',alpha=0.5,lw=0.5)
ax2.axhline(y=2,color='k',alpha=0.3,lw=0.3)
plt.subplots_adjust(hspace=0.2)
if mode == 'show':
plt.show()
elif mode == 'save':
filename = 'ps_' + obsid + '_bin' + str(tbin_size) + 's_both.pdf'
plt.savefig(parent_folder+'/'+filename,dpi=900)
plt.close()
return pdgm_f, pdgm_ps, manual_f, manual_ps | 77b51cc8774bdb1b670e2a6b56a9cd65213f70de | 3,656,613 |
def handle_postback():
"""Handles a postback."""
# we need to set an Access-Control-Allow-Origin for use with the test AJAX postback sender
# in normal operations this is NOT needed
response.set_header('Access-Control-Allow-Origin', '*')
args = request.json
loan_id = args['request_token']
merchant_loan_id = args.get('merchant_transaction_id')
action = args['updates'].get('action')
if action == 'refund':
# process a refund
amount = args['updates']['amount']
return handle_refund(loan_id, amount)
loan_status = args['updates']['status']
return handle_status_update(loan_id, loan_status) | 59683921b7a21f50c2905c47c33036fd75ce54f4 | 3,656,614 |
def get_bb_bev_from_obs(dict_obs, pixor_size=128):
"""Input dict_obs with (B,H,W,C), return (B,H,W,3)"""
vh_clas = tf.squeeze(dict_obs['vh_clas'], axis=-1) # (B,H,W) after squeezing the trailing channel
# vh_clas = tf.gather(vh_clas, 0, axis=-1) # (B,H,W)
vh_regr = dict_obs['vh_regr'] # (B,H,W,6)
decoded_reg = decode_reg(vh_regr, pixor_size) # (B,H,W,8)
lidar = dict_obs['lidar']
B = vh_regr.shape[0]
images = []
for i in range(B):
corners, _ = pixor_postprocess(vh_clas[i], decoded_reg[i]) # (N,4,2)
image = get_bev(lidar, corners, pixor_size) # (H,W,3)
images.append(image)
images = tf.convert_to_tensor(images, dtype=np.uint8) # (B,H,W,3)
return images | d286ec0c3132c2dcb931cb941fd247810c0ce1cf | 3,656,615 |
def get_hard_edges(obj):
"""
:param str obj:
:returns: all hard edges from the given mesh in a flat list
:rtype: list of str
"""
return [obj + '.e[' + str(i) + ']'
for i, edgeInfo in enumerate(cmds.polyInfo(obj + '.e[*]', ev=True))
if edgeInfo.endswith('Hard\n')] | 67de22469a38e55e88d21f1853280138795a04cb | 3,656,616 |
def make_system(l=70):
"""
Making and finalizing a kwant.builder object describing the system
graph of a closed, one-dimensional wire with l number of sites.
"""
sys = kwant.Builder()
lat = kwant.lattice.chain()
sys[(lat(x) for x in range(l))] = onsite
sys[lat.neighbors()] = hopping
return sys.finalized() | fa3d25933fd086519569cbb24ff77bf3c86c1303 | 3,656,617 |
from typing import Type
from pathlib import Path
from typing import Dict
def _gen_test_methods_for_rule(
rule: Type[CstLintRule],
fixture_dir: Path,
rules_package: str
) -> TestCasePrecursor:
"""Aggregates all of the cases inside a single CstLintRule's VALID and INVALID attributes
and maps them to altered names with a `test_` prefix so that 'unittest' can discover them
later on and an index postfix so that individual tests can be selected from the command line.
:param CstLintRule rule:
:param Path fixture_dir:
:param str rules_package:
:returns:
:rtype: TestCasePrecursor
"""
valid_tcs = {}
invalid_tcs = {}
requires_fixtures = False
fixture_paths: Dict[str, Path] = {}
fixture_subdir: Path = get_fixture_path(fixture_dir, rule.__module__, rules_package)
if issubclass(rule, CstLintRule):
if rule.requires_metadata_caches():
requires_fixtures = True
if hasattr(rule, "VALID"):
for idx, test_case in enumerate(getattr(rule, "VALID")):
name = f"test_VALID_{idx}"
valid_tcs[name] = test_case
if requires_fixtures:
fixture_paths[name] = fixture_subdir / f"{rule.__name__}_VALID_{idx}.json"
if hasattr(rule, "INVALID"):
for idx, test_case in enumerate(getattr(rule, "INVALID")):
name = f"test_INVALID_{idx}"
invalid_tcs[name] = test_case
if requires_fixtures:
fixture_paths[name] = fixture_subdir / f"{rule.__name__}_INVALID_{idx}.json"
return TestCasePrecursor(
rule=rule,
test_methods={**valid_tcs, **invalid_tcs},
fixture_paths=fixture_paths,
) | 5a12d84bdcff039179ef9b9f1105e6beecccbf05 | 3,656,618 |
def evaluate_score_batch(
predicted_classes=[], # list, len(num_classes), str(code)
predicted_labels=[], # shape (num_examples, num_classes), T/F for each code
predicted_probabilities=[], # shape (num_examples, num_classes), prob. [0-1] for each code
raw_ground_truth_labels=[], # list(('dx1', 'dx2'), ('dx1', 'dx3'), ...)
weights_file="evaluation-2020/weights.csv",
normal_class="426783006",
equivalent_classes=[
["713427006", "59118001"],
["284470004", "63593006"],
["427172004", "17338001"],
],
):
"""This is a helper function for getting
auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure, challenge_metric
without needing the directories of labels and prediction outputs.
It is useful for directly calculating the scores given the
classes, predicted labels, and predicted probabilities.
"""
label_classes, labels = _load_labels(
raw_ground_truth_labels,
normal_class=normal_class,
equivalent_classes_collection=equivalent_classes,
)
output_classes, binary_outputs, scalar_outputs = _load_outputs(
predicted_classes,
predicted_labels,
predicted_probabilities,
normal_class=normal_class,
equivalent_classes_collection=equivalent_classes,
)
classes, labels, binary_outputs, scalar_outputs = organize_labels_outputs(
label_classes, output_classes, labels, binary_outputs, scalar_outputs
)
weights = load_weights(weights_file, classes)
# Only consider classes that are scored with the Challenge metric.
indices = np.any(weights, axis=0) # Find indices of classes in weight matrix.
classes = [x for i, x in enumerate(classes) if indices[i]]
labels = labels[:, indices]
scalar_outputs = scalar_outputs[:, indices]
binary_outputs = binary_outputs[:, indices]
weights = weights[np.ix_(indices, indices)]
auroc, auprc = compute_auc(labels, scalar_outputs)
accuracy = compute_accuracy(labels, binary_outputs)
f_measure = compute_f_measure(labels, binary_outputs)
f_beta_measure, g_beta_measure = compute_beta_measures(
labels, binary_outputs, beta=2
)
challenge_metric = compute_challenge_metric(
weights, labels, binary_outputs, classes, normal_class
)
return (
auroc,
auprc,
accuracy,
f_measure,
f_beta_measure,
g_beta_measure,
challenge_metric,
) | 314f94433704cc2986df9082a749caaf52738f08 | 3,656,619 |
import altair as alt
from itertools import combinations
def pair(data, color=None, tooltip=None, mark='point', width=150, height=150):
"""
Create pairwise scatter plots of all column combinations.
In contrast to many other pairplot tools,
this function creates a single scatter plot per column pair,
and no distribution plots along the diagonal.
Parameters
----------
data : DataFrame
pandas DataFrame with input data.
color : str
Column in **data** used for the color encoding.
tooltip: str
Column in **data** used for the tooltip encoding.
mark: str
Shape of the points. Passed to Chart.
One of "circle", "square", "tick", or "point".
width: int or float
Chart width.
height: int or float
Chart height.
Returns
-------
ConcatChart
Concatenated Chart of pairwise column scatter plots.
"""
# TODO support categorical?
col_dtype = 'number'
# color = 'species:N' # must be passed with a type, enh: autodetect
# tooltip = alt.Tooltip('species')
cols = data.select_dtypes(col_dtype).columns
# Setting a non-existing column with a specified type passes through without effect
# and eliminates the need to have a separate plotting section for colored bars below.
if color is None:
color = ':Q'
if tooltip is None:
tooltip = ':Q'
# Infer color data type if not specified
if color[-2:] in [':Q', ':T', ':N', ':O']:
color_alt = alt.Color(color, title=None, legend=alt.Legend(orient='left', offset=width * -1.6))
# The selection fields parameter does not work with the suffix
legend_color = color.split(':')[0]
else:
color_alt = alt.Color(color, title=None, type=alt.utils.infer_vegalite_type(data[color]))
legend_color = color
# Set up interactions
brush = alt.selection_interval()
color = alt.condition(brush, color_alt, alt.value('lightgrey'))
legend_click = alt.selection_multi(fields=[legend_color], bind='legend')
opacity = alt.condition(legend_click, alt.value(0.8), alt.value(0.2))
hidden_axis = alt.Axis(domain=False, title='', labels=False, ticks=False)
# Create corner of pair-wise scatters
i = 0
exclude_zero = alt.Scale(zero=False)
col_combos = list(combinations(cols, 2))[::-1]
subplot_row = []
while i < len(cols) - 1:
plot_column = []
for num, (y, x) in enumerate(col_combos[:i+1]):
if num == 0 and i == len(cols) - 2:
subplot = alt.Chart(data, mark=mark).encode(
alt.X(x, scale=exclude_zero),
alt.Y(y, scale=exclude_zero))
elif num == 0:
subplot = (
alt.Chart(data, mark=mark).encode(
alt.X(x, scale=exclude_zero, axis=hidden_axis),
alt.Y(y, scale=exclude_zero)))
elif i == len(cols) - 2:
subplot = (
alt.Chart(data, mark=mark).encode(
alt.X(x, scale=exclude_zero),
alt.Y(y, scale=exclude_zero, axis=hidden_axis)))
else:
subplot = (
alt.Chart(data, mark=mark).encode(
alt.X(x, scale=exclude_zero, axis=hidden_axis),
alt.Y(y, scale=exclude_zero, axis=hidden_axis)))
plot_column.append(
subplot
.encode(opacity=opacity, color=color, tooltip=tooltip)
.properties(width=width, height=height))
subplot_row.append(alt.hconcat(*plot_column))
i += 1
col_combos = col_combos[i:]
return (
alt.vconcat(*subplot_row)
.add_selection(brush, legend_click)) | ed712972e503795bbfaeac6844a225444d946018 | 3,656,620 |
import numpy as np
from scipy.spatial import distance
from tqdm import tqdm
def gauss_kernel(model_cell, x, y, z, sigma=1):
"""
Convolute aligned pixels given coordinates `x`, `y` and values `z` with a gaussian kernel to form the final image.
Parameters
----------
model_cell : :class:`~colicoords.cell.Cell`
Model cell defining output shape.
x : :class:`~numpy.ndarray`
Array with combined x-coordinates of aligned pixels.
y : :class:`~numpy.ndarray`
Array with combined y-coordinates of aligned pixels.
z : :class:`~numpy.ndarray`
Array with pixel values of aligned pixels.
sigma : :obj:`float`
Sigma of the gaussian kernel.
Returns
-------
output : :class:`~numpy.ndarray`
Output aligned image.
"""
output = np.empty(model_cell.data.shape)
coords = np.array([x, y])
for index in tqdm(np.ndindex(model_cell.data.shape), desc='Gaussian kernel', total=np.product(model_cell.data.shape)):
xi, yi = index
xp, yp = model_cell.coords.x_coords[xi, yi], model_cell.coords.y_coords[xi, yi]
dist = distance.cdist(np.array([[xp, yp]]), coords.T).squeeze()
bools = dist < 5*sigma
weights = gauss_2d(x[bools], y[bools], xp, yp, sigma=sigma)
avg = np.average(z[bools], weights=weights)
output[xi, yi] = avg
return output | 0ff61121fbf330e3e15862b82b0929ae3b8748f9 | 3,656,621 |
def get_configs_from_multiple_files():
"""Reads training configuration from multiple config files.
Reads the training config from the following files:
model_config: Read from --model_config_path
train_config: Read from --train_config_path
input_config: Read from --input_config_path
Returns:
model_config: model_pb2.DetectionModel
train_config: train_pb2.TrainConfig
input_config: input_reader_pb2.InputReader
"""
train_config = train_pb2.TrainConfig()
with tf.gfile.GFile(FLAGS.train_config_path, 'r') as f:
text_format.Merge(f.read(), train_config)
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(FLAGS.model_config_path, 'r') as f:
text_format.Merge(f.read(), model_config)
input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(FLAGS.input_config_path, 'r') as f:
text_format.Merge(f.read(), input_config)
return model_config, train_config, input_config | 4f561235568667a6fe71d77c23769ea8878ebe20 | 3,656,622 |
def line_to_numbers(line: str) -> t.List[int]:
"""Split a spreadsneet line into a list of numbers.
raises:
ValueError
"""
return list(map(int, line.split())) | fce9af5e1c213fd91f0edf8d7fa5877f15374908 | 3,656,623 |
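A one-line example of line_to_numbers above:
print(line_to_numbers("5 1 9 5"))  # -> [5, 1, 9, 5]
line_to_numbers("5 x 9")           # raises ValueError because 'x' is not an integer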
def bits_to_amps(bits):
"""helper function to convert raw data from usb device to amps"""
return bits*BITS_TO_AMPS_SLOPE + BITS_TO_AMPS_Y_INTERCEPT | 5653582987b6a7924c11f037badc1a61541c6ca2 | 3,656,624 |
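A worked example for bits_to_amps above; the slope and intercept are module-level calibration constants, so the numbers here are purely hypothetical:
BITS_TO_AMPS_SLOPE = 0.01         # hypothetical calibration slope
BITS_TO_AMPS_Y_INTERCEPT = -20.0  # hypothetical calibration offset
print(bits_to_amps(2048))         # 2048 * 0.01 - 20.0 = 0.48 A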
import re
def fields_to_dict(fields):
""" FIXME:
https://www.debuggex.com/r/24QPqzm5EsR0e2bt
https://www.debuggex.com/r/0SjmBL55ySna0kFF
https://www.debuggex.com/r/Vh9qvHkCV4ZquS14
"""
result = {}
if not fields or len(fields.strip()) == 0:
return result
# look_behind_keys = re.findall('{(\w*?),', fields)
# look_behind_pattern_list = ['(?<!{' + k + ')' for k in look_behind_keys]
# # FIXME: '(?<!{[^,]*),<look_forward_pattern>' will trigger "look-behind requires
# # fixed-width pattern"
# look_behind_pattern = ''.join(look_behind_pattern_list)
# # FIXME: not support nested bracket: field{id,name,description{abc,def}}
# look_forward_pattern = '(?![a-zA-Z0-9,\}:\[\]]*?})'
# # sample pattern: '(?<!{id)(?<!{email),<look_forward_pattern>'
# re_pattern = look_behind_pattern + ',' + look_forward_pattern
splited_fields = []
word_block = ''
bracket_counter = 0
field_len = len(fields)
for index, word in enumerate(fields):
if word == '{':
bracket_counter = bracket_counter + 1
if word == '}':
bracket_counter = bracket_counter - 1
# move to new word block
if word == ',' and bracket_counter == 0:
splited_fields.append(word_block)
word_block = ''
else:
word_block += word
# add remaining word_block
if word_block != '' and index==field_len-1:
splited_fields.append(word_block)
for key in splited_fields:
key = key.strip()
value = {}
if key.find('{') > -1:
# get sub fields: field{<sub_fields>} and assign its value
sub_field = re.findall('{(.*)}', key)
value = fields_to_dict(sub_field[0])
# clean key
key = re.sub('{(.*)}', '', key)
if key.find('[') > -1:
# get & set slide range: [a:b]
value['__slice'] = re.findall('\[(.*)\]', key)[0]
# clean key
key = re.sub('\[(.*)\]', '', key)
result[key] = value
return result | 2e7ebc2e9277ef693498d04c731f309e17fd4501 | 3,656,625 |
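A concrete example of fields_to_dict above, showing nested braces and a slice suffix:
print(fields_to_dict("id,name{first,last},items[0:10]"))
# -> {'id': {}, 'name': {'first': {}, 'last': {}}, 'items': {'__slice': '0:10'}}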
import os
import glob
def get_font_paths(fonts_dir):
"""
Load font path recursively from a folder
:param fonts_dir: folder containing ttf, otf or ttc format fonts
:return: path of all fonts
"""
print('Load fonts from %s' % os.path.abspath(fonts_dir))
fonts = glob.glob(fonts_dir + '/**/*', recursive=True)
fonts = list(filter(lambda x: os.path.isfile(x), fonts))
print("Total fonts num: %d" % len(fonts))
if len(fonts) == 0:
print("Not found fonts in fonts_dir")
exit(-1)
return fonts | bf6368f90023fd59d64d358e6dac919627feb9ab | 3,656,626 |
import numpy as np
import pandas as pd
def time_difference(t_early, t_later):
"""
Compute the time difference between t_early and t_later
Parameters:
t_early: np.datetime64, list or pandas series.
t_later: np.datetime64, list or pandas series.
"""
if type(t_early) == list:
t1 = np.array(t_early)
elif type(t_early) == pd.Series:
t1 = np.array(t_early.tolist())
else:
t1 = np.array([t_early])
if type(t_later) == list:
t2 = np.array(t_later)
elif type(t_later) == pd.Series:
t2 = np.array(t_later.tolist())
else:
t2 = np.array([t_later])
timedelta2float = np.vectorize(lambda x: x / np.timedelta64(3600, 's'))
t_diff = timedelta2float(t2 - t1)
return t_diff | 0d4e6bac3aed2e5a2848c4289dadc92120a4f7a1 | 3,656,627 |
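A short example of time_difference above, assuming numpy and pandas are imported as np and pd (as the function itself requires); the result is expressed in hours:
t1 = np.datetime64('2021-01-01T00:00')
t2 = np.datetime64('2021-01-01T06:30')
print(time_difference(t1, t2))  # -> array([6.5])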
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
""" Convolutional block with two convolutions followed by batch normalisation (if True) and with ReLU activations.
input_tensor: A tensor. Input tensor on which the convolutional block acts.
n_filters: An integer. Number of filters in this block.
kernel_size: An integer. Size of convolutional kernel.
batchnorm: A bool. Perform batch normalisation after each convolution if True.
:return: A tensor. The output of the operation.
"""
# first convolutional layer
x = layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal",
padding="same")(input_tensor)
if batchnorm:
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
# second convolutional layer
x = layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer="he_normal",
padding="same")(x)
if batchnorm:
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
return x | 8bb435ed1e091fff26d49290a8ca6d0c9c12ec67 | 3,656,628 |
def check_credentials(username):
"""
Function that check if a Credentials exists with that username and return true or false
"""
return Credentials.if_credential_exist(username) | 8515bbc39afd003fc193cbb80c97f5f718657fa6 | 3,656,629 |
def rpc_category_to_super_category(category_id, num_classes):
"""Map category to super-category id
Args:
category_id: list of category ids, 1-based
num_classes: 1, 17, 200
Returns:
super-category id, 0-based
"""
cat_id = -1
assert num_classes in RPC_SUPPORT_CATEGORIES, \
'Not support {} density categories'.format(num_classes)
if num_classes == 17:
cat_id = _categories[category_id]
elif num_classes == 1:
cat_id = 0
elif num_classes == 200:
cat_id = category_id - 1
assert 199 >= cat_id >= 0
return cat_id | 8056aea308f66a65a4135a6fc7f061873d990624 | 3,656,630 |
def setup_integration():
"""Set up a test resource."""
print('Setting up a test integration for an API')
return Integration(name='myapi',
base_url='https://jsonplaceholder.typicode.com') | d2720db6ae520e21edc555ad0c899652c6584406 | 3,656,631 |
def secondsToHMS(intervalInSeconds):
"""converts time in seconds to a string representing time in hours, minutes, and seconds
:param intervalInSeconds: a time measured in seconds
:returns: time in HH:MM:SS format
"""
interval = [0, 0, intervalInSeconds]
interval[0] = (interval[2] / 3600) - ((interval[2] % 3600) / 3600)
interval[1] = ((interval[2] % 3600) / 60) - ((interval[2] % 3600) % 60) / 60
interval[2] = interval[2] % 60
intervalString = '{0:02.0f}:{1:02.0f}:{2:02.0f}'.format(interval[0],
interval[1], interval[2])
return intervalString | b38d4b886eaabd1361c162b6b7f55e11493dfb60 | 3,656,632 |
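A worked example for secondsToHMS above:
print(secondsToHMS(3661))   # 1 hour, 1 minute, 1 second -> '01:01:01'
print(secondsToHMS(45296))  # -> '12:34:56'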
import itertools
import numpy as np
from sklearn.neighbors import BallTree
def build_rdn(coords, r, **kwargs):
"""
Reconstruct edges between nodes by radial distance neighbors (rdn) method.
An edge is drawn between each node and the nodes closer
than a threshold distance (within a radius).
Parameters
----------
coords : ndarray
Coordinates of points where each column corresponds to an axis (x, y, ...)
r : float, optional
Radius in which nodes are connected.
Examples
--------
>>> coords = make_simple_coords()
>>> pairs = build_rdn(coords, r=60)
Returns
-------
pairs : ndarray
The (n_pairs x 2) matrix of neighbors indices.
"""
tree = BallTree(coords, **kwargs)
ind = tree.query_radius(coords, r=r)
# clean arrays of neighbors from self referencing neighbors
# and aggregate at the same time
source_nodes = []
target_nodes = []
for i, arr in enumerate(ind):
neigh = arr[arr != i]
source_nodes.append([i]*(neigh.size))
target_nodes.append(neigh)
# flatten arrays of arrays
source_nodes = np.fromiter(itertools.chain.from_iterable(source_nodes), int).reshape(-1,1)
target_nodes = np.fromiter(itertools.chain.from_iterable(target_nodes), int).reshape(-1,1)
# remove duplicate pairs
pairs = np.hstack((source_nodes, target_nodes))
pairs = np.sort(pairs, axis=1)
pairs = np.unique(pairs, axis=0)
return pairs | 83f2d68fbb854e2ef25e03f5d58d6c96c02c0127 | 3,656,633 |
def find_layer(model, type, order=0):
"""
Given a model, find the Nth layer of the specified type.
:param model: the model that will be searched
:param type: the lowercase type, as it is automatically saved by keras in the layer's name (e.g. conv2d, dense)
:param order: 0 by default (the first matching layer will be returned)
:return: The index of the matching layer or None if it was not found.
"""
num_found = 0
for layer in model.layers:
if type + '_' in layer.get_config()['name']:
if order == num_found:
return layer
num_found += 1
return None | 6d4e08c181900774b9e5666a11df9767f68a10ca | 3,656,634 |
def _interpretable(model):
# type: (Union[str, h2o.model.ModelBase]) -> bool
"""
Returns True if model_id is easily interpretable.
:param model: model or a string containing a model_id
:returns: bool
"""
return _get_algorithm(model) in ["glm", "gam", "rulefit"] | 4ae73e5b7ed98b61b56920985128212e3051c789 | 3,656,635 |
def apply_pb_correction(obs,
pb_sensitivity_curve,
cutoff_radius):
"""
Updates the primary beam response maps for cleaned images in an ObsInfo object.
Args:
obs (ObsInfo): Observation to generate maps for.
pb_sensitivity_curve: Primary beam sensitivity as a function of radius
in units of image pixels. (Should be 1.0 at the exact centre).
cutoff_radius: Radius at which to mask the output image (avoids
extremely high corrected values for noise fluctuations at large
radii). Units: image pixels.
"""
assert isinstance(obs, ObsInfo)
def update_pb_map_for_img(flux_map_path):
pbmap = generate_primary_beam_response_map(flux_map_path,
pb_sensitivity_curve,
cutoff_radius)
return pbmap
def process_clean_maps(clean_maps):
pbmap = update_pb_map_for_img(clean_maps.flux)
img_path = clean_maps.image
pb_img_path = img_path+'.pbcor'
generate_pb_corrected_image(img_path, pb_img_path,
pbmap)
clean_maps.pbcor = pb_img_path
if obs.maps_masked.ms.image:
process_clean_maps(obs.maps_masked.ms)
if obs.maps_open.ms.image:
process_clean_maps(obs.maps_open.ms)
if obs.maps_hybrid.ms.image:
process_clean_maps(obs.maps_hybrid.ms) | 02ee2913ce781f4a02e85910c69cfe5b534e62f4 | 3,656,636 |
def makeLoadParams(args):
"""
Create load parameters for start load request out of command line arguments.
Args:
args (dict): Parsed command line arguments.
"""
load_params = {'target': {},
'format': {'date_time': {},
'boolean': {}},
'load_options': {},
'advanced_options': {}}
add_param(load_params['target'], 'database', args.target_database)
add_param(load_params['target'], 'schema', args.target_schema)
add_param(load_params['target'], 'table', args.target_table)
if len(load_params['target']) == 0:
del load_params['target']
add_param(load_params['format'], 'type', args.type)
add_param(load_params['format'], 'field_separator', args.field_separator)
add_param(load_params['format'], 'trailing_field_separator',
args.trailing_field_separator, False)
add_param(load_params['format'], 'enclosing_character',
args.enclosing_character)
add_param(load_params['format'], 'escape_character', args.escape_character)
add_param(load_params['format'], 'null_value', args.null_value)
add_param(load_params['format'], 'has_header_row',
args.has_header_row, False)
add_param(load_params['format'], 'flexible', args.flexible, False)
add_param(load_params['format']['date_time'], 'converted_to_epoch',
args.date_converted_to_epoch, False)
add_param(load_params['format']['date_time'], 'date_format',
args.date_format)
add_param(load_params['format']['date_time'], 'time_format',
args.time_format)
add_param(load_params['format']['date_time'], 'date_time_format',
args.date_time_format)
add_param(load_params['format']['date_time'], 'second_fraction_start',
args.second_fraction_start)
add_param(load_params['format']['date_time'], 'skip_second_fraction',
args.skip_second_fraction, False)
if len(load_params['format']['date_time']) == 0:
del load_params['format']['date_time']
add_param(load_params['format']['boolean'], 'use_bit_values',
args.use_bit_boolean_values, False)
add_param(load_params['format']['boolean'], 'true_format', args.true_format)
add_param(load_params['format']['boolean'], 'false_format',
args.false_format)
if len(load_params['format']['boolean']) == 0:
del load_params['format']['boolean']
if len(load_params['format']) == 0:
del load_params['format']
add_param(load_params['load_options'], 'empty_target',
args.empty_target, False)
add_param(load_params['load_options'], 'max_ignored_rows',
args.max_ignored_rows)
if len(load_params['load_options']) == 0:
del load_params['load_options']
add_param(load_params['advanced_options'], 'validate_only',
args.validate_only, False)
add_param(load_params['advanced_options'], 'file_target_dir',
args.file_target_dir)
if len(load_params['advanced_options']) == 0:
del load_params['advanced_options']
print('Created load params: ', load_params)
return load_params | f1c0e9297775305c36acbb950bfc05e785bde87c | 3,656,637 |
from hash import HashTable
def empty_hash():
"""Initialize empty hash table."""
test_hash = HashTable()
return test_hash | 02700169c89427af4d2db123e110ec383d9332eb | 3,656,638 |
def denoise_sim(image, std, denoiser):
"""Simulate denoising problem
Args:
image (torch.Tensor): image tensor with shape (C, H, W).
std (float): standard deviation of additive Gaussian noise
on the scale [0., 1.].
denoiser: a denoiser instance (as in algorithms.denoiser).
The std argument for this denoiser is already specified
if applicable.
Returns:
denoised_image (torch.Tensor): tensor of denoised image
noisy_image (torch.Tensor): tensor of noisy image
"""
print('deploy.sim.denoise_sim: Simulating noisy image...')
noisy_image = gutil.add_noise(image, std)
print('deploy.sim.denoise_sim: Begin image denoising...')
denoised_image = denoiser(noisy_image, std=std)
return denoised_image, noisy_image | 216944b26c3ca0e04b8b5801766321fe60ee7e02 | 3,656,639 |
def _find_weektime(datetime, time_type='min'):
"""
Finds the minutes/seconds away from midnight between Sunday and Monday.
Parameters
----------
datetime : datetime
The date and time that needs to be converted.
time_type : 'min' or 'sec'
States whether the time difference should be specified in seconds or minutes.
"""
if time_type == 'sec':
return datetime.weekday() * 24 * 60 * 60 + datetime.hour * 60 * 60 + datetime.minute * 60 + datetime.second
elif time_type == 'min':
return datetime.weekday() * 24 * 60 + datetime.hour * 60 + datetime.minute
else:
raise ValueError("Invalid time type specified.") | 2ed28166d239dabdc9f8811812e472810b10c7d7 | 3,656,640 |
import numpy as np
from typing import List
from typing import Tuple
def linear_to_image_array(pixels:List[List[int]], size:Tuple[int,int]) -> np.ndarray:
"""\
Converts a linear array ( shape=(width*height, channels) ) into an array
usable by PIL ( shape=(height, width, channels) )."""
a = np.array(pixels, dtype=np.uint8)
split = np.split(a, [i*size[0] for i in range(1, size[1])])  # split the flat uint8 array into rows
return np.array(split, dtype=np.uint8) | 431170c71a3d6464be5dd5b9d248b2866ba3ac6a | 3,656,641 |
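A small shape check for linear_to_image_array above (four RGB pixels reshaped into a 2x2 image):
pixels = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 255]]
img = linear_to_image_array(pixels, (2, 2))
print(img.shape)  # -> (2, 2, 3), ready for PIL.Image.fromarray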
def stop_processes(hosts, pattern, verbose=True, timeout=60):
"""Stop the processes on each hosts that match the pattern.
Args:
hosts (list): hosts on which to stop the processes
pattern (str): regular expression used to find process names to stop
verbose (bool, optional): display command output. Defaults to True.
timeout (int, optional): command timeout in seconds. Defaults to 60
seconds.
Returns:
dict: a dictionary of return codes keys and accompanying NodeSet
values indicating which hosts yielded the return code.
Return code keys:
0 No processes matched the criteria / No processes killed.
1 One or more processes matched the criteria and a kill was
attempted.
"""
result = {}
log = getLogger()
log.info("Killing any processes on %s that match: %s", hosts, pattern)
if hosts is not None:
commands = [
"rc=0",
"if pgrep --list-full {}".format(pattern),
"then rc=1",
"sudo pkill {}".format(pattern),
"if pgrep --list-full {}".format(pattern),
"then sleep 5",
"pkill --signal KILL {}".format(pattern),
"fi",
"fi",
"exit $rc",
]
result = pcmd(hosts, "; ".join(commands), verbose, timeout, None)
return result | 898a358b5e61952d72be15eecb10b00ce8bd2efd | 3,656,642 |
def field_as_table_row(field):
"""Prints a newforms field as a table row.
This function actually does very little, simply passing the supplied
form field instance in a simple context used by the _field_as_table_row.html
template (which is actually doing all of the work).
See soc/templates/soc/templatetags/_field_as_table_row.html for the CSS
styles used by this template tag.
Usage:
{% load forms_helpers %}
...
<table>
{% field_as_table_row form.fieldname %}
...
</table>
Args:
field: a Django newforms field instance
Returns:
a simple context containing the supplied newforms field instance:
{ 'field': field }
"""
return {'field': field} | 74d120e2a46ae8465832d98ddf02848b5b2cc936 | 3,656,643 |
def get_samples(select_samples: list, avail_samples: list) -> list:
"""Get while checking the validity of the requested samples
:param select_samples: The selected samples
:param avail_samples: The list of all available samples based on the range
:return: The selected samples, verified
"""
# Sample number has to be positive
if any(_ < 0 for _ in select_samples):
raise ValueError(
"Number of samples with -ns has to be strictly positive!")
# Sample numbers have to be within the available samples
elif not all(_ in avail_samples for _ in select_samples):
raise ValueError(
"Some or all selected samples are not available in the design")
return select_samples | e1c0c98697d2c504d315064cbdfbad379165d317 | 3,656,644 |
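A brief example of get_samples above:
print(get_samples([1, 3], [1, 2, 3, 4]))  # -> [1, 3]
get_samples([5], [1, 2, 3, 4])            # raises ValueError: sample not available in the design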
def createMemoLayer(type="", crs=4326, name="", fields={"id":"integer"}, index="no"):
"""
Create an in-memory layer according to the given parameters
:param type (string): the geometry type: "point", "linestring",
"polygon", "multipoint", "multilinestring", "multipolygon"
:param crs (int): CRS projection system (EPSG code)
:param fields (dict): {field_name : field_type(length)} field=name : type(length,precision)
types : "integer", "double", "string(length)"
:param name (string): the layer name as it will appear in the legend
:param index (string): whether to create a spatial index
:return (QgsVectorLayer): returns a QgsVectorLayer object
"""
# build the uri and add all the fields
uri="%s?crs=epsg:%s"%(type,crs)
for key, value in fields.items():
uri="%s&field=%s:%s"%(uri,key, value)
uri="%s&index=%s"%(uri,index)
# create the QgsVectorLayer object
memLayer = QgsVectorLayer(uri, name, "memory")
return memLayer | 713823d9b59b7c4ccf7bdd938a720d385629e02f | 3,656,645 |
import json
def load_templates(package):
"""
Returns a dictionary {name: template} for the given instrument.
Templates are defined as JSON objects, with stored in a file named
"<instrument>.<name>.json". All templates for an instrument should
be stored in a templates subdirectory, made into a package by inclusion
of an empty __init__.py file. They can then be loaded using::
from dataflow import core as df
from . import templates
...
instrument = df.Instrument(
...
templates=df.load_templates(templates),
)
"""
templates = {}
for filename in resources.contents(package):
if filename.endswith('.json'):
name = filename.split('.')[-2]
template = json.loads(resources.read_text(package, filename))
templates[name] = template
return templates | 6213eb6e8b7be0bb7057da49d02fe495d7db6660 | 3,656,646 |
def get_count_matrix(args):
"""首先获取数据库中全部文档的id,然后遍历id获取文档内容,再逐文档
进行分词,生成计数矩阵。"""
global DOC2IDX
with DocDB(args.db_path) as doc_db:
doc_ids = doc_db.get_doc_ids()
DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}
row, col, data = [], [], []
_count = partial(count, args)
for i in doc_ids:
b_row, b_col, b_data = _count(i)
row.extend(b_row)
col.extend(b_col)
data.extend(b_data)
# Build the sparse matrix in Compressed Sparse Row (CSR) format.
# For background on what a csr_matrix is, see:
# https://www.pianshen.com/article/7967656077/
# https://zhuanlan.zhihu.com/p/342942385
count_matrix = sp.csr_matrix((data, (row, col)), shape=(args.hash_size, len(doc_ids)))
count_matrix.sum_duplicates()
return count_matrix, (DOC2IDX, doc_ids) | 6279666c6dfdf66dba13edfe57e55525de15d894 | 3,656,647 |
def communication_round(model, clients, train_data, train_labels, train_people, val_data, val_labels, val_people,
val_all_labels, local_epochs, weights_accountant, individual_validation, local_operation):
"""
One round of communication between a 'server' and the 'clients'. Each client 'downloads' a global model and trains
a local model, updating its weights locally. When all clients have updated their weights, they are 'uploaded' to
the server and averaged.
:param model: Tensorflow Graph
:param clients: numpy array, array of unique client IDs
:param train_data: numpy array
:param train_labels: numpy array
:param train_people: numpy array
:param val_data: numpy array
:param val_labels: numpy array
:param val_people: numpy array
:param val_all_labels: numpy array
:param local_epochs: int, local epochs to be trained
:param weights_accountant: WeightsAccountant object
:param individual_validation: bool, if true, validation history for every local epoch in a federated setting
is stored (typically not necessary)
:param local_operation: string, valid arguments are "global_averaging", "localized_learning",
and "local_models"
:return:
Pandas DataFrame, training history
"""
# Split train and validation data into clients
train_data, train_labels = dL.split_data_into_clients_dict(train_people, train_data, train_labels)
if val_data is not None:
val_data, val_labels, val_people, val_all_labels = \
dL.split_data_into_clients_dict(val_people, val_data, val_labels, val_people, val_all_labels)
# Train each client
history = {}
for client in clients:
Output.print_client_id(client)
results = client_learning(model, client, local_epochs, train_data, train_labels, val_data, val_labels,
val_people, val_all_labels, weights_accountant, individual_validation)
# Append each client's results to the history dictionary
for key, val in results.items():
history.setdefault(key, []).extend(val)
# Pop general metrics from history as these are duplicated with client metrics, e.g. 'loss' == 'subject_43_loss'
for metric in model.metrics_names:
history.pop(metric, None)
history.pop("val_" + metric, None)
# If there is localization (e.g. the last layer of the model is not being averaged, indicated by less "shared
# weights" compared to total "default weights"), then we adapt local models to the new shared layers
if local_operation == 'localized_learning':
# Average all updates marked as "global"
weights_accountant.federated_averaging(layer_type='global')
# Decrease the learning rate for local adaptation only
K.set_value(model.optimizer.lr, K.get_value(model.optimizer.lr) / LR_FACTOR)
# Freeze the global layers
change_layer_status(model, 'global', 'freeze')
# Reconnect the Convolutional layers
for client in clients:
Output.print_client_id(client)
client_learning(model, client, local_epochs, train_data, train_labels, val_data, val_labels,
val_people, val_all_labels, weights_accountant, individual_validation)
# Unfreeze the global layers
change_layer_status(model, 'global', 'unfreeze')
# Increase the learning rate again
K.set_value(model.optimizer.lr, K.get_value(model.optimizer.lr) * LR_FACTOR)
elif local_operation == 'local_models':
print("No federated averaging.")
pass
elif local_operation == 'global_averaging':
weights_accountant.federated_averaging()
else:
raise ValueError('local_operation only accepts "global_averaging", "localized_learning", and "local_models"'
' as arguments. "{}" was given.'.format(local_operation))
return history | f8a8ef93845e09394cea6a2f6077a0ae2dfaed18 | 3,656,648 |
import collections
def _find_stop_area_mode(query_result, ref):
""" Finds the mode of references for each stop area.
The query results must have 3 columns: primary key, foreign key
reference and number of stop points within each area matching that
reference, in that order.
:param ref: Name of the reference column.
:returns: Two lists; one to be to be used with `bulk_update_mappings`
and the other strings for invalid areas.
"""
# Group by stop area and reference
stop_areas = collections.defaultdict(dict)
for row in query_result:
stop_areas[row[0]][row[1]] = row[2]
# Check each area and find mode matching reference
update_areas = []
invalid_areas = {}
for sa, count in stop_areas.items():
max_count = [k for k, v in count.items() if v == max(count.values())]
if len(max_count) == 1:
update_areas.append({"code": sa, ref: max_count[0]})
else:
invalid_areas[sa] = max_count
return update_areas, invalid_areas | e4677638b272e67d2ae21ee97f71f1f1700fd072 | 3,656,649 |
def get_all_funds_ranking(fund_type: str = 'all',
start_date: str = '-1y',
end_date: str = arrow.now(),
sort: str = 'desc',
subopts: str = '',
available: str = 1):
"""Get all funds ranking from 'fund.eastmoney.com'. (基金排行)
:param fund_type: (optional) fund type, default is `all`.
value: ct场内 gp股票 hh混合 zq债券 zs指数 bb保本 qdii lof fof
:param start_date: (optional) start date of the custom return, default is `-1y`.
value: -nd -nw -nm -ny cyear or YYYY-MM-DD
:param end_date: (optional) the end date of the results, default is `now`.
:param sort: (optional) results order, default is `desc`.
:param subopts: (optional) some suboptions. format is a list of options(`first,second`).
Suboptions for bonds(有关债券的子选项):
- first option is bonds type(债券类型).
value: cz长债 dz短债 hz混债 dkz定开债 kzz可转债
- second option is leverage ratio(杠杆比例).
value: 0-100 100-150 150-200 200+
Suboptions for stock index(有关指数的子选项):
- first option is index type(标的).
value: hs沪深 hy行业 dp大盘 zxp中小盘 gz股指 zz债指
- second option is stock index operation(运作方式).
value: bd被动 zq增强
Suboptions for QDII fonds.
- first option is fond type(基金类型).
vaule: qqgp全球股票 ytgp亚太股票 dzh大中华区 xxsc新兴市场 jzgj金砖国家
cssc成熟市场 us美国股票 qqidx全球指数 etf hh股债混合 zq债券 sp商品
:param available: (optional) `1` can buy, `0` including both, default is `1`.
:return: a list of the funds.
:rtype: `pd.DataFrame`.
"""
dtype = fund_type == 'ct' and 'fb' or 'kf'
begin = str2date(start_date).format('YYYY-MM-DD')
end = arrow.get(end_date).format('YYYY-MM-DD')
opt1, opt2 = _funds_ranking_subopts(fund_type, subopts)
params = dict(op='ph',dt=dtype,ft=fund_type,rs='',gs=0,sc='zzf',st=sort,pi=1,pn=10000) # exchange-traded funds
fund_type != 'ct' and params.update(dict(sd=begin,ed=end,qdii=opt1,tabSubtype=opt2,dx=available))
resp = sess.get(api.all_funds_rank, params=params)
obj = js2obj(resp.text, 'rankData')
# dataframe
if fund_type == 'ct': # exchange-traded funds
cols = 'code,name,1,date,nav,cnav,-1week,-1month,-3month,-6month,-1year,-2year,'\
'-3year,current_year,since_create,issue_date,,,,,,type'
newcols = cols.replace('1','type,issue_date',1).split(',issue_date,,')[0]
else: # fund ranking
cols = 'code,name,1,date,nav,cnav,percent,-1week,-1month,-3month,-6month,-1year,-2year,'\
'-3year,current_year,since_create,issue_date,,custom,2,,,,'
newcols = cols.replace('1','issue_date',1).replace('issue_date,,','').split(',2')[0]
df = pd.DataFrame([i.split(',')[:-1] for i in obj['datas']],
columns=cols.split(',')).ffill(None)[newcols.split(',')]
df['date'] = pd.to_datetime(df['date'])
df['issue_date'] = pd.to_datetime(df['issue_date'])
df[['nav','cnav']] = df[['nav','cnav']].applymap(lambda x:x and float(x) or None)
colnum = fund_type == 'ct'\
and range(df.columns.get_loc('-1week'), len(df.columns))\
or range(df.columns.get_loc('percent'), len(df.columns))
df.iloc[:,colnum] = df.iloc[:,colnum].applymap(lambda x:x and float(x)/100 or None)
return df | 55dd84c8f8830d6c60411de858a9aec1f14a30be | 3,656,650 |
from typing import Any, List, TypeVar
T = TypeVar("T")
def _conform_list(li: List[Any]) -> List[T]:
"""
Ensures that every element in *li* can conform to one type
:param li: list to conform
:return: conformed list
"""
conform_type = li[0].__class__
for i in li:
if isinstance(i, StrictType):
conform_type = i.__class__
break
base_type = (
conform_type.__base__ if conform_type.__base__ != object else None
) # do not let base_type be 'object'
if not all(type(i) == conform_type or type(i) == base_type for i in li):
raise Exception(f"{li} can not be conformed to the {conform_type}")
return [i if isinstance(i, conform_type) else conform_type(i) for i in li] | 29131a9f5979318e0fc50408b67938ffbd56fa5a | 3,656,651 |
def _255_to_tanh(x):
"""
range [0, 255] to range [-1, 1]
:param x:
:return:
"""
return (x - 127.5) / 127.5 | a60a67ee489093292fc58136a8f01387482fb162 | 3,656,652 |
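A worked example for _255_to_tanh above, mapping the 8-bit range onto [-1, 1]:
print(_255_to_tanh(0))      # -> -1.0
print(_255_to_tanh(127.5))  # ->  0.0
print(_255_to_tanh(255))    # ->  1.0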
import os
import re
import sys
import commonmark
def read_content(filename):
"""Read content and metadata from file into a dictionary."""
# Read file content.
text = fread(filename)
# Read metadata and save it in a dictionary.
date_slug = os.path.basename(filename).split('.')[0]
match = re.search('^(?:(\\d\\d\\d\\d-\\d\\d-\\d\\d)-)?(.+)$', date_slug)
content = {
'date': format_date(match.group(1) or '1970-01-01'),
'date_ymd': match.group(1) or '1970-01-01',
'date_rfc_2822': format_date(match.group(1) or '1970-01-01', date_format_override='%a, %d %b %Y %H:%M:%S +0000'),
'slug': match.group(2),
}
# Convert Markdown content to HTML.
if filename.endswith(('.md', '.mkd', '.mkdn', '.mdown', '.markdown')):
# Separate text and template variables
variables, text = separate_content_and_variables(text)
text = variables + "{% include 'md_header.html' %}" + \
commonmark.commonmark(text) + "{% include 'md_footer.html' %}"
# Optional additional parsing
if 'add_parser' in sys.modules:
text = add_parser.parse(text, filename)
# Update the dictionary with content text and summary text.
content.update({
'content': text,
})
return content | 5337830593959e0bf29b4d369789120a265badfc | 3,656,653 |
import torch
def train_one_epoch(train_loader, model, criterion, optimizer, epoch, opt, num_train_samples, no_acc_eval=False):
""" model training
:param train_loader: train dataset loader
:param model: model
:param criterion: loss criterion
    :param optimizer: optimizer used to update the model parameters
:param epoch: current epoch
:param num_train_samples: total number of samples in train_loader
    :param no_acc_eval (bool): if True, skip accuracy evaluation during training
:return:
"""
info = {}
losses = AverageMeter('Loss ', ':6.4g')
top1 = AverageMeter('Acc@1 ', ':6.2f')
top5 = AverageMeter('Acc@5 ', ':6.2f')
# switch to train mode
model.train()
lr_scheduler = global_utils.LearningRateScheduler(mode=opt.lr_mode,
lr=opt.lr,
num_training_instances=num_train_samples,
target_lr=opt.target_lr,
stop_epoch=opt.epochs,
warmup_epoch=opt.warmup,
stage_list=opt.lr_stage_list,
stage_decay=opt.lr_stage_decay)
lr_scheduler.update_lr(batch_size=epoch * num_train_samples)
optimizer.zero_grad()
batches_per_allreduce_count = 0
for i, (input_, target) in enumerate(train_loader):
if not opt.independent_training:
lr_scheduler.update_lr(batch_size=input_.shape[0] * opt.world_size)
else:
lr_scheduler.update_lr(batch_size=input_.shape[0])
current_lr = lr_scheduler.get_lr()
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr * opt.batches_per_allreduce
bool_label_smoothing = False
bool_mixup = False
if not opt.dist_mode == 'cpu':
input_ = input_.cuda(opt.gpu, non_blocking=True)
target = target.cuda(opt.gpu, non_blocking=True)
transformed_target = target
with torch.no_grad():
if hasattr(opt, 'label_smoothing') and opt.label_smoothing:
bool_label_smoothing = True
if hasattr(opt, 'mixup') and opt.mixup:
bool_mixup = True
if bool_label_smoothing and not bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes, smoothing_eps=0.1)
if not bool_label_smoothing and bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes)
input_, transformed_target = mixup(input_, transformed_target)
if bool_label_smoothing and bool_mixup:
transformed_target = one_hot(target, num_classes=opt.num_classes, smoothing_eps=0.1)
input_, transformed_target = mixup(input_, transformed_target)
# compute output
output = model(input_)
model_saved = model.module if hasattr(model, 'module') else model
logit_loss = criterion(output, transformed_target)
ts_feature_loss, ts_logit_loss = model_saved.compute_ts_distill_loss()
loss = logit_loss + opt.teacher_feature_weight * ts_feature_loss + opt.teacher_logit_weight * ts_logit_loss
# measure accuracy and record loss
input_size = int(input_.size(0))
if not no_acc_eval:
# pylint: disable=unbalanced-tuple-unpacking
acc1, acc5 = accuracy(output.data, target, topk=(1, 5))
top1.update(float(acc1[0]), input_size)
top5.update(float(acc5[0]), input_size)
else:
acc1 = [0]
acc5 = [0]
losses.update(float(loss), input_size)
if opt.apex:
if opt.dist_mode == 'horovod':
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
batches_per_allreduce_count += 1
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.synchronize()
with optimizer.skip_synchronize():
optimizer.step()
else:
# if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
# if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.step()
else:
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.zero_grad()
batches_per_allreduce_count = 0
loss.backward()
batches_per_allreduce_count += 1
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_value_(model_saved.parameters(), opt.grad_clip)
if batches_per_allreduce_count >= opt.batches_per_allreduce:
optimizer.step()
if i % opt.print_freq == 0:
print(
f'<rank {opt.rank}> Train epoch={epoch}, i={i}, loss={float(loss):4g}, \
logit_loss={float(logit_loss):4g}, ts_feature_loss={float(ts_feature_loss):4g}, \
ts_logit_loss={float(ts_logit_loss):4g}, \
acc1={float(acc1[0]):4g}%, acc5={float(acc5[0]):4g}%, lr={current_lr:4g}')
top1_acc_avg = top1.avg
top5_acc_avg = top5.avg
losses_acc_avg = losses.avg
# if distributed, sync
if opt.dist_mode == 'horovod' and (not opt.independent_training):
sync_tensor = torch.tensor([top1.sum, top1.count, top5.sum, top5.count,
losses.sum, losses.count], dtype=torch.float32)
hvd.allreduce(sync_tensor, name='sync_tensor_topk_acc')
top1_acc_avg = (sync_tensor[0] / sync_tensor[1]).item()
top5_acc_avg = (sync_tensor[2] / sync_tensor[3]).item()
losses_acc_avg = (sync_tensor[4] / sync_tensor[5]).item()
elif opt.dist_mode == 'apex' and opt.distributed:
sync_tensor = torch.tensor([top1.sum, top1.count, top5.sum, top5.count,
losses.sum, losses.count], dtype=torch.float32).cuda()
dist.all_reduce(sync_tensor, op=dist.ReduceOp.SUM)
top1_acc_avg = (sync_tensor[0] / sync_tensor[1]).item()
top5_acc_avg = (sync_tensor[2] / sync_tensor[3]).item()
losses_acc_avg = (sync_tensor[4] / sync_tensor[5]).item()
else:
pass
info['losses_acc'] = losses_acc_avg
info['top1_acc'] = top1_acc_avg
info['top5_acc'] = top5_acc_avg
return info | 5b5efd1292322090abcb795fc633638f478f0afa | 3,656,654 |
import datetime
def Write(Variable, f):
"""Function to Convert None Strings to Strings and Format to write to file with ,"""
if isinstance(Variable, str) == False:
if isinstance(Variable, datetime.datetime) == True:
return f.write(f"{Variable.strftime('%Y-%m-%d')},")
else:
Variable = round(Variable, 2)
return f.write(f"{str(Variable)},")
    else:
return f.write(f"{(Variable)},") | 9963c4117c7cc3f19d91331ed6c36e5733cffb56 | 3,656,655 |
def graphs_infos():
"""
Build and return a JSON file containing some information on all the graphs.
The json file is built with the following format:
[
For each graph in the database :
{
'graph_id': the id of the graph,
'name': the name of the graph,
            'iso': the string 'true' or 'false' depending on whether the graph belongs to J
}
]
:return: a JSON file containing some information on all the graphs.
"""
return jsonify(gdb.get_graph_infos()) | ab6fee49188ad422e1e3a5e2763510ae791a840b | 3,656,656 |
def collect_compare(left, right):
"""
returns a tuple of four lists describing the file paths that have
been (in order) added, removed, altered, or left the same
"""
return collect_compare_into(left, right, [], [], [], []) | 2a29d7b896fb037a8784e7c82794d9b67eb2924a | 3,656,657 |
import numpy as np
def _get_smallest_vectors(supercell, primitive, symprec):
"""
shortest_vectors:
Shortest vectors from an atom in primitive cell to an atom in
supercell in the fractional coordinates. If an atom in supercell
is on the border centered at an atom in primitive and there are
multiple vectors that have the same distance and different
directions, several shortest vectors are stored. The
multiplicity is stored in another array, "multiplicity".
[atom_super, atom_primitive, multiple-vectors, 3]
multiplicity:
Number of multiple shortest vectors (third index of "shortest_vectors")
[atom_super, atom_primitive]
"""
p2s_map = primitive.get_primitive_to_supercell_map()
size_super = supercell.get_number_of_atoms()
size_prim = primitive.get_number_of_atoms()
shortest_vectors = np.zeros((size_super, size_prim, 27, 3), dtype='double')
multiplicity = np.zeros((size_super, size_prim), dtype='intc')
reduced_bases = get_reduced_bases(supercell.get_cell(), symprec)
reduced_bases_inv = np.linalg.inv(reduced_bases)
primitive_lattice = primitive.get_cell()
primitive_lattice_inv = np.linalg.inv(primitive_lattice)
# matrix that converts fractional positions in the reduced bases into
# fractional positions in the primitive lattice
supercell_to_primitive_frac = reduced_bases.dot(primitive_lattice_inv)
# all positions are reduced into the cell formed by the reduced bases
supercell_fracs = np.dot(supercell.get_positions(), reduced_bases_inv)
supercell_fracs -= np.rint(supercell_fracs)
for s_index, s_pos in enumerate(supercell_fracs): # run in supercell
for j, p_index in enumerate(p2s_map): # run in primitive
p_pos = supercell_fracs[p_index]
# find smallest vectors equivalent under the supercell lattice
vectors = _get_equivalent_smallest_vectors_simple(s_pos - p_pos,
reduced_bases,
symprec)
# return primitive-cell-fractional vectors rather than supercell-fractional
vectors = [np.dot(v, supercell_to_primitive_frac) for v in vectors]
multiplicity[s_index][j] = len(vectors)
for k, elem in enumerate(vectors):
shortest_vectors[s_index][j][k] = elem
return shortest_vectors, multiplicity | 352d4e7ba9552fa4fe5abdb9eb45c4555dff603d | 3,656,658 |
def root():
"""Root endpoint that only checks if the server is running."""
return 'Server is running...' | ea9ecd1c736e9379795f361462ed54f464a4008b | 3,656,659 |
def clone_model(model, **new_values):
"""Clones the entity, adding or overriding constructor attributes.
The cloned entity will have exactly the same property values as the
original entity, except where overridden. By default, it will have no
parent entity or key name, unless supplied.
Args:
model: datastore_services.Model. Model to clone.
**new_values: dict(str: *). Keyword arguments to override when
invoking the cloned entity's constructor.
Returns:
datastore_services.Model. A cloned, and possibly modified, copy of self.
Subclasses of BaseModel will return a clone with the same type.
"""
# Reference implementation: https://stackoverflow.com/a/2712401/4859885.
cls = model.__class__
model_id = new_values.pop('id', model.id)
props = {k: v.__get__(model, cls) for k, v in cls._properties.items()} # pylint: disable=protected-access
props.update(new_values)
return cls(id=model_id, **props) | ed668632c8917ad685b86fb5c71146be7c9b3b96 | 3,656,660 |
import networkx as nx
import numpy as np
def learn_laterals(frcs, bu_msg, perturb_factor, use_adjaceny_graph=False):
"""Given the sparse representation of each training example,
learn perturbation laterals. See train_image for parameters and returns.
"""
if use_adjaceny_graph:
graph = make_adjacency_graph(frcs, bu_msg)
graph = adjust_edge_perturb_radii(frcs, graph, perturb_factor=perturb_factor)
else:
graph = nx.Graph()
graph.add_nodes_from(range(frcs.shape[0]))
graph = add_underconstraint_edges(frcs, graph, perturb_factor=perturb_factor)
graph = adjust_edge_perturb_radii(frcs, graph, perturb_factor=perturb_factor)
edge_factors = np.array(
[(edge_source, edge_target, edge_attrs['perturb_radius'])
for edge_source, edge_target, edge_attrs in graph.edges_iter(data=True)])
return graph, edge_factors | 68333bca0fc3231470268ece6478b372767a6648 | 3,656,661 |
import xml.etree.ElementTree as ETree
def get_info(ingest_ldd_src_dir):
"""Get LDD version and namespace id."""
# look in src directory for ingest LDD
ingest_ldd = find_primary_ingest_ldd(ingest_ldd_src_dir)
# get ingest ldd version
tree = ETree.parse(ingest_ldd[0])
root = tree.getroot()
ldd_version = root.findall(f'.//{{{PDS_NS}}}ldd_version_id')[0].text
ns_id = root.findall(f'.//{{{PDS_NS}}}namespace_id')[0].text
return ingest_ldd, ns_id, ldd_version | 92c4d6f8f18c4204d2a8483584b6f1409d9ee243 | 3,656,662 |
from gensim.models import TfidfModel
from gensim.matutils import corpus2csc
def generate_tfidf(corpus_df, dictionary):
"""Generates TFIDF matrix for the given corpus.
Parameters
----------
corpus_df : pd.DataFrame
The corpus dataframe.
dictionary : gensim.corpora.dictionary.Dictionary
Dictionary defining the vocabulary of the TFIDF.
Returns
-------
X : np.ndarray
TFIDF matrix with documents as rows and vocabulary as the columns.
"""
tfidf_model = TfidfModel(
corpus_df.bag_of_words.apply(lambda x: dictionary.doc2bow(x)))
model = tfidf_model[
corpus_df.bag_of_words.apply(lambda x: dictionary.doc2bow(x))]
X = corpus2csc(model, len(dictionary)).T
return X | 6c5cd6b569010c69b446223a099cfd745d51ce6c | 3,656,663 |
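# Illustrative usage sketch for generate_tfidf above, assuming gensim and pandas are available:
import pandas as pd
from gensim.corpora import Dictionary
example_docs = [["hello", "world"], ["hello", "there"]]
example_df = pd.DataFrame({"bag_of_words": example_docs})
example_dictionary = Dictionary(example_docs)
X_example = generate_tfidf(example_df, example_dictionary)  # sparse TFIDF matrix, shape (2, len(example_dictionary))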
import os
import logging
import yaml
def load_config_file(file_path, fallback_file_path):
"""Load YAML format configuration file
:param file_path: The path to config file
:type file_path: `str`
:param fallback_file_path: The fallback path to config file
:type fallback_file_path: `str`
:return: config_map
:rtype: dict
"""
file_path = file_path if os.path.isfile(file_path) else fallback_file_path
logging.info("Loading %s..", file_path)
if not os.path.isfile(file_path):
# check if the config file exists
abort_framework("Config file not found at: {}".format(file_path))
try:
        config_map = yaml.load(FileOperations.open(file_path, "r"), Loader=yaml.FullLoader)
return config_map
except yaml.YAMLError:
abort_framework("Error parsing config file at: {}".format(file_path)) | 1c756b10892f1a6cffa78dd80852049061b7521d | 3,656,664 |
from typing import Tuple
from typing import Optional
import numpy as np
import torch
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks;
            should be of size 2, where the first element is batch size and the second is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
        no_overlap: if True, switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask | 8ecd84ca805112312d43bd8ba3f4c0aa3918800d | 3,656,665 |
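# Illustrative usage sketch for compute_mask_indices above:
# mask roughly 50% of 100 timesteps for a batch of 2 sequences, with span length 10.
example_mask = compute_mask_indices((2, 100), None, mask_prob=0.5, mask_length=10)
print(example_mask.shape, example_mask.sum(axis=1))  # (2, 100) and up to ~50 masked steps per row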
import matplotlib.pyplot as plt
import numpy as np
def _rankingmap_mpl(countrymasksnc, ranking, x, scenario=None, method='number', title='', label=''):
"""
countrymasksnc : nc.Dataset instance of countrymasks.nc
ranking: Ranking instance
method: "number" (default) or "value"
"""
if method not in ['number', 'value']:
raise ValueError('method must be "number" or "value"')
areas = ranking.areas
ds = countrymasksnc
lat, lon = ds['lat'][:], ds['lon'][:]
ni, nj = lat.size, lon.size
data = np.empty((ni, nj), dtype=int if method == 'number' else float)
mask = np.ones((ni, nj), dtype=bool) # undefined areas
for area in areas:
if 'm_'+area not in ds.variables:
            print('! rankingmap::', area, 'not found in country masks')
continue
value = getattr(ranking, method)(area, x, scenario)
if value == 'undefined':
print('! rankingmap::value', area, 'undefined')
continue
m = ds['m_'+area][:] > 0
data[m] = value
mask[m] = False
fig, ax = plt.subplots(1,1)
h = ax.imshow(np.ma.array(data, mask=mask), extent=[-180, 180, -90, 90],
cmap=plt.cm.viridis_r if method == "number" else plt.cm.viridis,
vmax=len(areas) if method=='number' else None)
# default_title = getattr(ranking, 'plot_label_y','')+' :: ranking: '+method
if ranking.plot_type == 'indicator_vs_temperature':
details = 'warming level: {} {}'.format(x, ranking.plot_unit_x)
else:
details = 'period: {}, scenario: {}'.format(x, {'rcp26':'RCP 2.6', 'rcp45':'RCP 4.5', 'rcp60':'RCP 6', 'rcp85':'RCP 8.5'}.get(scenario, scenario))
default_title = getattr(ranking, 'plot_label_y','') + '\n' + details
default_label = 'ranking number' if method == 'number' else ('ranking value ({})'.format(getattr(ranking, 'plot_unit_y')))
ax.set_title(title or default_title)
plt.colorbar(h, ax=ax, orientation='horizontal', label=label or default_label)
return fig | d0e7006d832408fcd77b8b0fe219a6e9faf478e5 | 3,656,666 |
from typing import Optional
from typing import List
from typing import Dict
from typing import Any
import datetime as dt
def fetch_data(
property: Property,
start_date: dt.date,
*,
end_date: Optional[dt.date] = None,
dimensions: Optional[List[Dimension]] = None,
) -> List[Dict[str, Any]]:
"""Query Google Search Console API for data.
Args:
property (Property): Property to request data for.
start_date (dt.date): Earliest day to request information for.
end_date (Optional[dt.date]): Latest day to request information for. Default to
``None``. Will be set to ``start_date`` if ``None``.
dimensions (Optional[List[Dimension]], optional): Dimensions to request from
API. Defaults to ``None``. Will be set to ``["page", "device"]`` if
``None``.
Returns:
List[Dict[str, Any]]: Response from API.
"""
if end_date is None:
end_date = start_date
if dimensions is None:
dimensions = ["page", "device"]
results = []
start_row = 0
ROW_LIMIT = 25000
while True:
request = {
"startDate": start_date.isoformat(),
"endDate": end_date.isoformat(),
"dimensions": dimensions,
"rowLimit": ROW_LIMIT,
"startRow": start_row,
"dataState": "all",
}
response = (
searchconsole_service.searchanalytics()
.query(siteUrl=property.url, body=request)
.execute()
)
start_row += ROW_LIMIT
result = response.get("rows", [])
results.extend(result)
if len(result) == 0:
break
return results | cb871f6e269005db9a338c4bf75949b8ba9ea04a | 3,656,667 |
import os
def fileOpenDlg(tryFilePath="",
tryFileName="",
prompt=_translate("Select file to open"),
allowed=None):
"""A simple dialogue allowing read access to the file system.
:parameters:
tryFilePath: string
default file path on which to open the dialog
tryFileName: string
default file name, as suggested file
prompt: string (default "Select file to open")
can be set to custom prompts
allowed: string (available since v1.62.01)
a string to specify file filters.
e.g. "Text files (\\*.txt) ;; Image files (\\*.bmp \\*.gif)"
See http://pyqt.sourceforge.net/Docs/PyQt4/qfiledialog.html
#getOpenFileNames
for further details
If tryFilePath or tryFileName are empty or invalid then
current path and empty names are used to start search.
If user cancels, then None is returned.
"""
ensureQtApp()
if allowed is None:
allowed = ("All files (*.*);;"
"PsychoPy Data (*.psydat);;"
"txt (*.txt *.dlm *.csv);;"
"pickled files (*.pickle *.pkl);;"
"shelved files (*.shelf)")
fdir = os.path.join(tryFilePath, tryFileName)
filesToOpen = QtWidgets.QFileDialog.getOpenFileNames(parent=None,
caption=prompt,
directory=fdir,
filter=allowed)
if type(filesToOpen) == tuple: # some versions(?) of PyQt return (files, filter)
filesToOpen = filesToOpen[0]
filesToOpen = [str(fpath) for fpath in filesToOpen
if os.path.exists(fpath)]
if len(filesToOpen) == 0:
return None
return filesToOpen | 932625c6779a5738c23ff340c448fb594d38c7ca | 3,656,668 |
def inport(port_type, disconnected_value):
"""Marks this field as an inport"""
assert port_type in port_types, \
"Got %r, expected one of %s" % (port_type, port_types)
tag = "inport:%s:%s" % (port_type, disconnected_value)
return tag | a9335d99b65a4944ef58f06b90f8978e7478ec13 | 3,656,669 |
def Align(samInHandle, fa, id, position, varId, refseq, altseq, mapq = 20):
"""
    position is the left breakpoint of the variant.
    The position should be 1-based for convenience, because expressions like
    fa[id][position-1] are used to get bases from the reference string fa.
"""
if position < 1:
raise ValueError('[ERROR] The reference position is not 1-base: %r' % position)
if id not in fa:
raise ValueError('#[ERROR] The reference did not contain %s' % id)
rr,aa,com,diff = 0,0,0,0
for pileup in samInHandle.pileup(id, position-1, position):
pos = pileup.pos + 1 # 0-base index to 1-base index
if pos != position: continue
for read in [al for al in pileup.pileups if al.alignment.mapq >= mapq]:
refPos = read.alignment.pos - read.alignment.qstart # 0-base
# Next if the position is 2bp near the end of the reads
if position > refPos + read.alignment.rlen - 2: continue
q = Ref2QryPos(read.alignment.pos, position, read.alignment.cigar)
if q > read.alignment.rlen:
raise ValueError('#[BUG] The query position(%r) is > read length(%r)'
% (q, read.alignment.rlen))
if q == read.alignment.rlen: continue
refSeq = fa[id][position:refPos+read.alignment.rlen]
qrySeq = altseq + fa[id][position+len(refseq)-1:position+len(refseq)+read.alignment.rlen-q]
# [Debug]
# print '[POS]', id, pos, read.alignment.pos+1, '\n[QRY]', fa[id][refPos:position], qrySeq, read.alignment.qstart, q,'\n[TAR]',fa[id][refPos:position],refSeq,'\n[SEQ]', read.alignment.seq, read.alignment.cigar, read.alignment.cigarstring, read.alignment.is_secondary, '\n'
zr, _ = SumMismatchQuality(read.alignment.seq[q:], read.alignment.qual[q:], refSeq) # Reference
za, _ = SumMismatchQuality(read.alignment.seq[q:], read.alignment.qual[q:], qrySeq) # Alternate
if zr == 0 and za == 0:
com += 1 # Common perfect
elif zr == 0 and za > 0:
rr += 1 # Reference perfect
elif zr > 0 and za == 0:
aa += 1 # Alternate perfect
else:
diff += 1 # All im-perfect
#read.alignment.tags += [('ZJ', varId)] + [('ZR', zr)] + [('ZA', za)] # Not output to save the store
#samOutHandle.write(read.alignment) # Not output to save the store
return rr,aa,com,diff | aa7238f421472e1be24ce290ddd5c3b6f5e2cba4 | 3,656,670 |
def _empty_aggregate(*args: npt.ArrayLike, **kwargs) -> npt.ArrayLike:
"""Return unchaged array."""
return args[0] | c7f6ebc345517b10a3b65c5ac0f0bf060cdf7634 | 3,656,671 |
from functools import partial
def kfpartial(fun, *args, **kwargs):
""" Allows to create partial functions with arbitrary arguments/keywords """
return partial(keywords_first(fun), *args, **kwargs) | 7f7dbbdf484e36c2734e47b448f081812cb8a326 | 3,656,672 |
def power_state_update(system_id, state):
"""Report to the region about a node's power state.
:param system_id: The system ID for the node.
:param state: Typically "on", "off", or "error".
"""
client = getRegionClient()
return client(
UpdateNodePowerState,
system_id=system_id,
power_state=state) | b05730fe9e45b3ee81adb7e8047b0b87e3bf7556 | 3,656,673 |
from typing import Any
def build_post307_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
"""Post redirected with 307, resulting in a 200 after redirect.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
our example to find the input shape. Simple boolean value true.
:paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes,
a byte iterator, or stream input). Simple boolean value true.
:paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
json = True # Optional. Default value is True.
"""
content_type = kwargs.pop("content_type", None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", "/http/redirect/307")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=url, headers=header_parameters, json=json, content=content, **kwargs) | 2c26cfed95a33fe700b83d7e1fa4eb93ef312721 | 3,656,674 |
def rm_ssp_storage(ssp_wrap, lus, del_unused_images=True):
"""Remove some number of LogicalUnits from a SharedStoragePool.
The changes are flushed back to the REST server.
:param ssp_wrap: SSP EntryWrapper representing the SharedStoragePool to
modify.
:param lus: Iterable of LU ElementWrappers or LUEnt EntryWrappers
representing the LogicalUnits to delete.
:param del_unused_images: If True, and a removed Disk LU was the last one
linked to its backing Image LU, the backing Image
LU is also removed.
:return: The (possibly) modified SSP wrapper.
"""
if _rm_lus(ssp_wrap.logical_units, lus,
del_unused_images=del_unused_images):
# Flush changes
ssp_wrap = ssp_wrap.update()
return ssp_wrap | 0c61becd8f9e23ac269ef0546abb0857facd89de | 3,656,675 |
def urp_detail_view(request, pk):
"""Renders the URP detail page
"""
urp = get_object_or_404(URP, pk=pk)
ctx = {
'urp': urp,
}
# if user is logged in as a student, check if user has already applied
if request.user.is_authenticated:
if request.user.uapuser.is_student:
ctx['applied'] = Application.objects.filter(applicant=request.user, urp=urp).exists()
else:
ctx['applied'] = True
return render(request, 'post/urp_detail.html', context=ctx) | 15e7e86cf2e47bccda52682bdf205e43d8a03f5f | 3,656,676 |
import functools
def squeeze_excite(input_name, squeeze_factor):
"""Returns a squeeze-excite block."""
ops = []
append = functools.partial(append_op, ops)
append(op_name="se/pool0",
op_type=OpType.AVG_POOL,
input_kwargs={"window_shape": 0},
input_names=[input_name])
append(op_name="se/dense1",
op_type=OpType.DENSE,
op_kwargs={"features": f"S:-1%{squeeze_factor}"})
append(op_name="se/swish2",
op_type=OpType.SWISH)
append(op_name="se/dense3",
op_type=OpType.DENSE,
op_kwargs={"features": f"S:-1*{squeeze_factor}"})
append(op_name="se/sigmoid4",
op_type=OpType.SIGMOID)
append(op_name="se/mul5",
op_type=OpType.MUL,
input_names=[input_name, ops[-1].name])
return ops | 907acc7f31db9ab4d70f976320fdd779b66b7160 | 3,656,677 |
def get_code_v2(fl = r'C:\Users\bogdan\code_seurat\WholeGenome_MERFISH\Coordinates_code_1000region.csv'):
"""
Given a .csv file with header this returns 2 dictionaries: tad_to_PR,PR_to_tad
"""
lst = [(ln[:-1].split(',')[0].replace('__','_'),['R'+R for R in ln[:-1].split(',')[3].split('--')])
for ln in open(fl,'r')][1:]
tad_to_PR = dict(lst)
PR_to_tad = {Rs_to_Rnm(Rs):nm for nm,Rs in lst}
return tad_to_PR,PR_to_tad | f5a9e1bbd1f404819a700ee43cff826333ce736c | 3,656,678 |
import numpy as np
import pandas as pd
from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble
def run_source_lsq(vars, vs_list=vs_list):
"""
Script used to run_source and return the output file.
The function is called by AdaptiveLejaPCE.
"""
print('Read Parameters')
parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index')
# Define objective functions
# Use annual or monthly loads
def timeseries_sum(df, temp_scale = 'annual'):
"""
Obtain the sum of timeseries of different temporal scale.
        temp_scale: str, default is 'annual'; use 'monthly' for monthly sums
"""
assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.'
if temp_scale == 'monthly':
sum_126001A = df.resample('M').sum()
else:
month_126001A = df.resample('M').sum()
sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year),
columns=df.columns)
for i in range(sum_126001A.shape[0]):
sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum()
return sum_126001A
# End timeseries_sum()
# import observation if the output.txt requires the use of obs.
date_range = pd.to_datetime(['2017/07/01', '2018/06/30'])
observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date')
observed_din.index = pd.to_datetime(observed_din.index)
observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x)
# loop over the vars and try to use parallel
parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short)
for i in range(vars.shape[1]):
parameter_df.iloc[i] = vars[:, i]
# set the time period of the results
retrieve_time = [pd.Timestamp('2017-07-01'), pd.Timestamp('2018-06-30')]
# define the modeling period and the recording variables
_, _, criteria, start_date, end_date = modeling_settings()
din = generate_observation_ensemble(vs_list,
criteria, start_date, end_date, parameter_df, retrieve_time)
# obtain the sum at a given temporal scale
# din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]])
din_126001A = timeseries_sum(din, temp_scale = 'annual')
obs_din = timeseries_sum(observed_din, temp_scale = 'annual')
din_126001A = pd.DataFrame(din_126001A,dtype='float')
obs_din = pd.DataFrame(obs_din,dtype='float')
resid = (obs_din - din_126001A).values
lsq = np.sum(resid ** 2, axis=0)
lsq = lsq.reshape(lsq.shape[0], 1)
print(f'Finish {lsq.shape[0]} run')
return lsq | e43679a0808108560714e32def9399ce45a6bd8e | 3,656,679 |
def finnegans_wake_unicode_chars():
"""Data fixture that returns a string of all unicode characters in Finnegan's Wake."""
return '¤·àáãéìóôþŒŠŸˆ–—‘’‚“”‡…‹' | 78205c9181545544a61ef1eab6c2f51d212dac13 | 3,656,680 |
from fastapi import Path
import posixpath
import os
def get_upload(upload_key: UploadPath = Path(..., description="Upload file chunk location")):
    """
    Get the file upload directory for the given upload key.
    :param upload_key: key identifying the configured upload directory
    :return: a function that safely joins a sub-folder under that directory, creating it if needed
    """
root_path = posixpath.abspath(UPLOAD_PATH_DICT[upload_key])
def func(folder):
path = security.safe_join(root_path, folder)
os.makedirs(path, exist_ok=True)
return path
return func | 39d0d4055f2a9933b9578b74fb14d5fa637154f0 | 3,656,681 |
def kit(): # simpler version
"""Open communication with the dev-kit once for all tests."""
return usp.Devkit() | 3001cbfeaf212e9a09e512c102eae6bffa263375 | 3,656,682 |
import numpy as np
def givens_rotation(A):
"""Perform QR decomposition of matrix A using Givens rotation."""
(num_rows, num_cols) = np.shape(A)
# Initialize orthogonal matrix Q and upper triangular matrix R.
Q = np.identity(num_rows)
R = np.copy(A)
# Iterate over lower triangular matrix.
(rows, cols) = np.tril_indices(num_rows, -1, num_cols)
for (row, col) in zip(rows, cols):
# Compute Givens rotation matrix and
# zero-out lower triangular matrix entries.
if R[row, col] != 0:
(c, s) = _givens_rotation_matrix_entries(R[col, col], R[row, col])
G = np.identity(num_rows)
G[[col, row], [col, row]] = c
G[row, col] = s
G[col, row] = -s
R = np.dot(G, R)
Q = np.dot(Q, G.T)
return (Q, R) | 207cadc90c7c4aab76c7422d314b5470ce17251a | 3,656,683 |
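# Illustrative usage sketch for givens_rotation above; assumes the helper
# _givens_rotation_matrix_entries(a, b) -> (c, s) is defined alongside it.
A_example = np.array([[6.0, 5.0], [5.0, 1.0], [0.0, 4.0]])
Q_example, R_example = givens_rotation(A_example)
print(np.allclose(Q_example @ R_example, A_example))  # True: Q is orthogonal, R is upper triangular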
from typing import Union
from pathlib import Path
from typing import Optional
import json
def lex_from_str(
*,
in_str: Union[str, Path],
grammar: str = "standard",
ir_file: Optional[Union[str, Path]] = None,
) -> JSONDict:
"""Run grammar of choice on input string.
Parameters
----------
in_str : Union[str, Path]
The string to be parsed.
grammar : str
Grammar to be used. Defaults to "standard".
ir_file : Optional[Union[str, Path]]
File to write intermediate representation to (JSON format).
None by default, which means file is not written out.
Returns
-------
The contents of the input string as a dictionary.
Raises
------
:exc:`ParselglossyError`
"""
try:
lexer = dispatch_grammar(grammar)
except KeyError:
raise ParselglossyError(f"Grammar {grammar} not available.")
ir = parse_string_to_dict(lexer, in_str)
if ir_file is not None:
ir_file = path_resolver(ir_file)
with ir_file.open("w") as out:
json.dump(ir, out, cls=ComplexEncoder, indent=4)
return ir | 5416bd56426012c56050a0dba2835385fa4177e5 | 3,656,684 |
def e() -> ProcessBuilder:
"""
Euler's number (e)
:return: The numerical value of Euler's number.
"""
return process('e', ) | f984b5de5a0b95109c9ec2fe5a2b30c880226b28 | 3,656,685 |
def get_or_create_anonymous_cart_from_token(token,
cart_queryset=Cart.objects.all()):
"""Returns open anonymous cart with given token or creates new.
:type cart_queryset: saleor.cart.models.CartQueryset
:type token: string
:rtype: Cart
"""
return cart_queryset.open().filter(token=token, user=None).get_or_create(
defaults={'user': None})[0] | 8ffb1f64b77c97b260502f1d4c689e3a4edc4f36 | 3,656,686 |
import os
def outcar_parser(request):
"""A fixture that loads OUTCAR."""
try:
name = request.param
except AttributeError:
# Test not parametrized
name = 'OUTCAR'
testdir = os.path.dirname(__file__)
outcarfile = testdir + '/' + name
outcar = Outcar(file_path=outcarfile)
return outcar | 5fe8b1ddb7f55e233104cec9a5be94624bc77ce9 | 3,656,687 |
from typing import Any
def accept_data(x: Any) -> Any:
"""Accept any types of data and return it as convenient type.
Args:
x: Any type of data.
Returns:
Any: Accepted data.
"""
if isinstance(x, str):
return x
elif isinstance(x, list):
return x
elif isinstance(x, dict):
return x
elif isinstance(x, tuple):
return x
elif isinstance(x, set):
return x
elif isinstance(x, float):
return x
elif isinstance(x, int):
return x
elif isinstance(x, bool):
return x
elif isinstance(x, type(None)):
return x
else:
return x | 9862995eafb7015fc446466e2dbb7774be39f54b | 3,656,688 |
def custom_model_template(model_type: str, target: str, result0: str, result1: str) -> str:
"""Template for feature behaviour reason generated from DICE
Returns:
str: behaviour
"""
if model_type == 'classifier':
tipo = 'category'
elif model_type == 'regressor':
tipo = 'continuous'
behaviour = get_behaviour(tipo = tipo, result0 = result0, result1 = result1)
phrase = generic_type_template(tipo = tipo, name = target, behaviour = behaviour, result0 = result0, result1 = result1)
result = color.BLUE + f" the output of the model {phrase}." + color.END
return result | bbd43a462f6d9d65984dbd242c7fe8a5d2be5e39 | 3,656,689 |
def merge_dict_list(merged, x):
""" merge x into merged recursively.
x is either a dict or a list
"""
if type(x) is list:
return merged + x
for key in x.keys():
if key not in merged.keys():
merged[key] = x[key]
elif x[key] is not None:
merged[key] = merge_dict_list(merged[key], x[key])
return merged | 00685be39a0b1447c81ecd8de777ebab38aa9bfe | 3,656,690 |
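# Illustrative usage sketch for merge_dict_list above:
merged_example = merge_dict_list({"a": [1], "b": 2}, {"a": [3], "c": 4})
# lists are concatenated and missing keys are added: {"a": [1, 3], "b": 2, "c": 4}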
def is_ref(variant, exclude_alleles=None):
"""Returns true if variant is a reference record.
Variant protos can encode sites that aren't actually mutations in the
sample. For example, the record ref='A', alt='.' indicates that there is
no mutation present (i.e., alt is the missing value).
Args:
variant: nucleus.genomics.v1.Variant.
exclude_alleles: list(str). The alleles in this list will be ignored.
Returns:
True if there are no actual alternate alleles.
"""
relevant_alts = _non_excluded_alts(variant.alternate_bases, exclude_alleles)
return not relevant_alts | 2c762bbf070f375b546f0902e3567ca5542cc774 | 3,656,691 |
def gomc_sim_completed_properly(job, control_filename_str):
"""General check to see if the gomc simulation was completed properly."""
job_run_properly_bool = False
output_log_file = "out_{}.dat".format(control_filename_str)
if job.isfile(output_log_file):
# with open(f"workspace/{job.id}/{output_log_file}", "r") as fp:
with open(f"{output_log_file}", "r") as fp:
out_gomc = fp.readlines()
for i, line in enumerate(out_gomc):
if "Move" in line:
split_move_line = line.split()
if (
split_move_line[0] == "Move"
and split_move_line[1] == "Type"
and split_move_line[2] == "Mol."
and split_move_line[3] == "Kind"
):
job_run_properly_bool = True
else:
job_run_properly_bool = False
return job_run_properly_bool | 20635ba94b5176298216ad5807e6428a5fb957c2 | 3,656,692 |
from typing import Union
from typing import Optional
def rv_precision(
wavelength: Union[Quantity, ndarray],
flux: Union[Quantity, ndarray],
mask: Optional[ndarray] = None,
**kwargs,
) -> Quantity:
"""Calculate the theoretical RV precision achievable on a spectrum.
Parameters
----------
wavelength: array-like or Quantity
Wavelength of spectrum.
flux: array-like or Quantity
Flux of spectrum.
mask: array-like, Quantity or None
Masking function array to apply to the pixel weights.
kwargs:
Kwargs for sqrt_sum_wis
Returns
-------
RVrms: astropy.Quantity
Radial velocity precision of spectra in m/s.
"""
return c / sqrt_sum_wis(wavelength, flux, mask=mask, **kwargs) | 91d6a741d992bd915549becd371d29b6634b92ef | 3,656,693 |
def changenonetoNone(s):
"""Convert str 'None' to Nonetype
"""
if s=='None':
return None
else:
return s | 9f6af1580d8b47d2a7852e433f7ba8bbd5c7044d | 3,656,694 |
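# Illustrative usage sketch for changenonetoNone above:
assert changenonetoNone('None') is None
assert changenonetoNone('abc') == 'abc'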
import numpy as np
def quaternion_2_rotation_matrix(q):
    """
    Convert a quaternion to a rotation matrix.
    :param q: quaternion as (w, x, y, z)
    :return: 3x3 rotation matrix
    """
rotation_matrix = np.array([[np.square(q[0]) + np.square(q[1]) - np.square(q[2]) - np.square(q[3]),
2 * (q[1] * q[2] - q[0] * q[3]), 2 * (q[1] * q[3] + q[0] * q[2])],
[2 * (q[1] * q[2] + q[0] * q[3]),
np.square(q[0]) - np.square(q[1]) + np.square(q[2]) - np.square(q[3]),
2 * (q[2] * q[3] - q[0] * q[1])],
[2 * (q[1] * q[3] - q[0] * q[2]), 2 * (q[2] * q[3] + q[0] * q[1]),
np.square(q[0]) - np.square(q[1]) - np.square(q[2]) + np.square(q[3])]],
dtype=np.float32)
return rotation_matrix | f2e420a1e0b6838fb2ce5f9288842e1ae39134c9 | 3,656,695 |
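# Illustrative usage sketch for quaternion_2_rotation_matrix above:
# the identity quaternion (w, x, y, z) = (1, 0, 0, 0) maps to the identity matrix.
R_identity = quaternion_2_rotation_matrix(np.array([1.0, 0.0, 0.0, 0.0]))
print(np.allclose(R_identity, np.eye(3)))  # True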
def sum(mat, axis, target=None):
"""
Sum the matrix along the given dimension, where 0 represents the leading
dimension and 1 represents the non-leading dimension. If a target is
not prvided, a new vector is created for storing the result.
"""
m = _eigenmat.get_leading_dimension(mat.p_mat)
n = _eigenmat.get_nonleading_dimension(mat.p_mat)
if axis == 0:
# sum along leading dimension
if not target:
target = empty((1, n))
elif axis == 1:
# sum along non-leading dimension
if not target:
target = empty((m, 1))
err_code = _eigenmat.sum_by_axis(mat.p_mat, target.p_mat, ct.c_int(axis))
if err_code:
raise generate_exception(err_code)
return target | 426ba7b2673a52663e04d3c6f07fb2f4e001244b | 3,656,696 |
from datetime import datetime
import pandas as pd
def convert_created_time_to_datetime(datestring):
"""
Args:
datestring (str): a string object either as a date or
a unix timestamp
Returns:
a pandas datetime object
"""
if len(datestring) == 30:
return pd.to_datetime(datestring)
else:
return pd.to_datetime(datetime.fromtimestamp(int(datestring[:10]))) | 2559d079b5b7174d192e3a5d9178701ae7080d3b | 3,656,697 |
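# Illustrative usage sketch for convert_created_time_to_datetime above:
# a 30-character date string is parsed directly; anything else is treated as a unix timestamp.
print(convert_created_time_to_datetime("Wed Aug 27 13:08:45 +0000 2008"))  # parsed by pandas
print(convert_created_time_to_datetime("1514764800000"))  # first 10 digits used as a unix timestamp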
def identify_word_classes(tokens, word_classes):
"""
Match word classes to the token list
:param list tokens: List of tokens
:param dict word_classes: Dictionary of word lists to find and tag with the
respective dictionary key
:return: Matched word classes
:rtype: list
"""
if word_classes is None:
word_classes = []
classes = set()
for key in word_classes:
for token in tokens:
if token.lower() in word_classes[key]:
classes.add(key)
return classes | ca7aa602d19ac196321af19c42a60df415c7d115 | 3,656,698 |
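# Illustrative usage sketch for identify_word_classes above:
example_classes = identify_word_classes(
    ["The", "cat", "sat"], {"animal": ["cat", "dog"], "colour": ["red"]})
# only word classes with at least one matching token are returned: {"animal"}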
from typing import List
from typing import Tuple
def find_connecting_stops(routes) -> List[Tuple[Stop, List[Route]]]:
"""
Find all stops that connect more than one route.
Return [Stop, [Route]]
"""
stops = {}
for route in sorted(routes, key=Route.name):
for stop in route.stops():
id_ = stop.id()
if id_ not in stops:
stops[id_] = (stop, [])
last(stops[id_]).append(route)
return list(filter(lambda p: length(last(p)) > 1, stops.values())) | 599e9e5d3fc0a6d0de84a58f1549da9423f35af3 | 3,656,699 |