content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
async def async_setup_entry(hass, entry):
"""Set up the Samsung TV platform."""
# Initialize bridge
data = entry.data.copy()
bridge = _async_get_device_bridge(data)
if bridge.port is None and bridge.default_port is not None:
# For backward compat, set default port for websocket tv
data[CONF_PORT] = bridge.default_port
hass.config_entries.async_update_entry(entry, data=data)
bridge = _async_get_device_bridge(data)
def stop_bridge(event):
"""Stop SamsungTV bridge connection."""
bridge.stop()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_bridge)
)
hass.data[DOMAIN][entry.entry_id] = bridge
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | a5e411560c8f3f1e609d6675061a72000984e0ce | 3,016 |
def bring_contact_bonus_list(pb_client, obj_pb_ids, arm_pb_id, table_pb_id):
""" For some bring goals, may be useful to also satisfy an object touching table and
not touching arm condition. """
correct_contacts = []
for o in obj_pb_ids:
o2ee_contact = len(pb_client.getContactPoints(o, arm_pb_id)) > 0
o2t_contact = len(pb_client.getContactPoints(o, table_pb_id)) > 0
correct_contacts.append(not o2ee_contact and o2t_contact)
return correct_contacts | 6c0033b0bfb1d3f4d08c8ca114855e089fe852f7 | 3,017 |
def topic(**kwargs):
"""
:param to: Topic ID
:return:
"""
return api_request('topic', kwargs) | 2d2af8f74db1ffde7732ecff529911b7058154bf | 3,018 |
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to city daily temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
daily_temp_df = pd.read_csv(filename, parse_dates={'DayOfYear': ['Date']})
daily_temp_df = clean_data(daily_temp_df)
return daily_temp_df | 381eac22a1c3c0c9ad85d0c416fb1c182429153e | 3,019 |
def group_intents(input_file, intent_file, slot_file):
"""
Groups the dataset based on the intents and returns it.
Args:
input_file : The path to the input file
intent_file : The path to the intent file
slot_file : The path to the slot file
Returns:
A dict mapping each intent to a list of tuples. Each tuple contains an
input sentence and its corresponding slots for that intent.
"""
intent_groups = defaultdict(list)
with open(input_file, 'r') as input_fd, \
open(intent_file, 'r') as intent_fd, \
open(slot_file, 'r') as slot_fd:
for ip, intent, slot in zip(input_fd, intent_fd, slot_fd):
ip, intent, slot = ip.rstrip(), intent.rstrip(), slot.rstrip()
intent_groups[intent].append((ip, slot))
return intent_groups | 8db186fc91ddc3adfbc163ef63e66cc408f3b47d | 3,020 |
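A usage sketch (the three file names are hypothetical; each file is expected to hold one example per line, aligned across files, and `from collections import defaultdict` is assumed to be in scope for the function above):
groups = group_intents('train/seq.in', 'train/label', 'train/seq.out')
for intent, pairs in groups.items():
    print(intent, len(pairs))       # number of examples per intent
    sentence, slots = pairs[0]      # each pair is (input sentence, slot tags)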
def _get_path(string): # gets file path from variable name
"""
Gets the path that a variable holds, converts it to start from root (.),
resolves any symbolic links and returns the converted path.
"""
varname = string.replace("(long)", "")
try:
path = c.VAR_STACK[varname]
except KeyError:
if c.verbose:
print "[-] ERROR: {0} is not a variable holding path".format(varname)
return 1
path = _res_path(path)
return _abspath(path) | 794660ed9571dded27f5907c4ac1d9cdc99f41b6 | 3,021 |
def create_service_endpoint(service_endpoint_type, authorization_scheme, name,
github_access_token=None, github_url=None,
azure_rm_tenant_id=None, azure_rm_service_principal_id=None,
azure_rm_service_prinicipal_key=None, azure_rm_subscription_id=None,
azure_rm_subscription_name=None, organization=None,
project=None, detect=None):
"""Create a service endpoint
:param service_endpoint_type: Type of service endpoint
:type service_endpoint_type: str
:param name: Name of service endpoint to create
:type name: str
:param authorization_scheme: Authorization to be used in service endpoint creation
Github service endpoint supports PersonalAccessToken
AzureRm service endpoint supports ServicePrincipal
:type authorization_scheme: str
:param github_access_token: PAT token of github for creating github service endpoint
:type github_access_token: str
:param github_url: Url for github for creating service endpoint
:type github_url: str
:param azure_rm_tenant_id: tenant id for creating azure rm service endpoint
:type azure_rm_tenant_id: str
:param azure_rm_service_principal_id: service principal id for creating azure rm service endpoint
:type azure_rm_service_principal_id: str
:param azure_rm_service_prinicipal_key: key/password for service principal used to create azure rm service endpoint
:type azure_rm_service_prinicipal_key: str
:param azure_rm_subscription_id: subscription id for azure rm service endpoint
:type azure_rm_subscription_id: str
:param azure_rm_subscription_name: name of azure subscription for azure rm service endpoint
:type azure_rm_subscription_name: str
:param organization: Azure Devops organization URL. Example: https://dev.azure.com/MyOrganizationName/
:type organization: str
:param project: Name or ID of the project.
:type project: str
:param detect: Automatically detect organization. Default is "on".
:type detect: str
:rtype: :class:`ServiceEndpoint <service_endpoint.v4_1.models.ServiceEndpoint>`
"""
try:
organization, project = resolve_instance_and_project(detect=detect,
organization=organization,
project=project)
client = get_service_endpoint_client(organization)
if (service_endpoint_type == SERVICE_ENDPOINT_TYPE_GITHUB and
authorization_scheme == SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN):
service_endpoint_authorization = EndpointAuthorization(
parameters={'accessToken': github_access_token},
scheme=SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN)
service_endpoint_to_create = ServiceEndpoint(
authorization=service_endpoint_authorization,
name=name, type=SERVICE_ENDPOINT_TYPE_GITHUB, url=github_url)
return client.create_service_endpoint(service_endpoint_to_create, project)
if (service_endpoint_type == SERVICE_ENDPOINT_TYPE_AZURE_RM and
authorization_scheme == SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL):
service_endpoint_authorization = EndpointAuthorization(
parameters={'tenantid': azure_rm_tenant_id,
'serviceprincipalid': azure_rm_service_principal_id,
'authenticationType': 'spnKey',
'serviceprincipalkey': azure_rm_service_prinicipal_key},
scheme=SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL)
service_endpoint_data = {
'subscriptionId': azure_rm_subscription_id,
'subscriptionName': azure_rm_subscription_name,
'environment': 'AzureCloud',
'creationMode': 'Manual'
}
service_endpoint_to_create = ServiceEndpoint(
authorization=service_endpoint_authorization, data=service_endpoint_data,
name=name, type=SERVICE_ENDPOINT_TYPE_AZURE_RM, url='https://management.azure.com/')
return client.create_service_endpoint(service_endpoint_to_create, project)
raise CLIError('This combination of endpoint type is not supported with this authorization scheme.')
except VstsServiceError as ex:
raise CLIError(ex) | 75ad8fdb237d4dac9105bc33b96273f67482375f | 3,022 |
import math
def voronoi_diagram_interpolation(interpolationcellid, id0, id1, voronoiDataset0,
voronoiDataset1, centerlines, step,
clippingPoints):
"""Given two Voronoi datasets interpolate the data sets along the centerline.
Args:
interpolationcellid (int): LineID of the centerline
id0 (int): Start ID.
id1 (int): Stop ID.
voronoiDataset0 (vtkPolyData): First Voronoi dataset.
voronoiDataset1 (vtkPolyData): Second Voronoi dataset.
centerlines (vtkPolyData): Centerline to interpolate along.
step (int): Direction to interpolate
clippingPoints (vtkPoints): Location of clipping points.
Returns:
finalNewVoronoiPoints (vtkPoints): New points to the Voronoi diagram.
finalRadiusArray (vtkDoubleArray): Array to hold the radius for each point.
"""
cellLine = extract_single_line(centerlines, interpolationcellid)
startPoint = clippingPoints.GetPoint(id0)
endPoint = clippingPoints.GetPoint(id1)
startId = cellLine.FindPoint(startPoint)
endId = cellLine.FindPoint(endPoint)
gapStartId = startId + 1 * step
gapEndId = endId - 1 * step
arrivalId = gapEndId + 1 * step
endSavingInterval = gapEndId + 1 * step
numberOfGapPoints = int(math.fabs(gapEndId - gapStartId)) + 1
numberOfInterpolationPoints = voronoiDataset0.GetNumberOfPoints()
numberOfCenterlinesPoints = cellLine.GetNumberOfPoints()
numberOfAddedPoints = numberOfGapPoints * numberOfInterpolationPoints
finalNewVoronoiPoints = vtk.vtkPoints()
cellArray = vtk.vtkCellArray()
finalRadiusArray = get_vtk_array(radiusArrayName, 1, numberOfAddedPoints)
count = 0
for i in range(numberOfInterpolationPoints):
voronoiPoint = voronoiDataset0.GetPoint(i)
voronoiPointRadius = voronoiDataset0.GetPointData().GetArray(radiusArrayName).GetTuple1(i)
centerlinePointLocator = get_vtk_point_locator(cellLine)
closestPointId = centerlinePointLocator.FindClosestPoint(voronoiPoint)
closestPoint = cellLine.GetPoint(closestPointId)
voronoiVector = [0.0, 0.0, 0.0]
voronoiVector[0] = voronoiPoint[0] - closestPoint[0]
voronoiVector[1] = voronoiPoint[1] - closestPoint[1]
voronoiVector[2] = voronoiPoint[2] - closestPoint[2]
voronoiVectorNorm = vtk.vtkMath.Norm(voronoiVector)
rotationAngle = compute_voronoi_vector_to_centerline_angle(closestPointId, voronoiVector, cellLine)
PTPoints = vtk.vtkPoints()
range_step = 1 if closestPointId < arrivalId else -1
for j in range(closestPointId, arrivalId, range_step):
localtangent = [0.0, 0.0, 0.0]
newVoronoiVector = [0.0, 0.0, 0.0]
newVoronoiPoint = [0.0, 0.0, 0.0]
transform = vtk.vtkTransform()
point0 = cellLine.GetPoint(j)
if (j < numberOfCenterlinesPoints - 1):
point1 = [0.0, 0.0, 0.0]
cellLine.GetPoint(j + 1, point1)
localtangent[0] += point1[0] - point0[0]
localtangent[1] += point1[1] - point0[1]
localtangent[2] += point1[2] - point0[2]
if (j > 0):
point2 = [0.0, 0.0, 0.0]
cellLine.GetPoint(j - 1, point2)
localtangent[0] += point0[0] - point2[0]
localtangent[1] += point0[1] - point2[1]
localtangent[2] += point0[2] - point2[2]
localnormal = cellLine.GetPointData().GetArray(parallelTransportNormalsArrayName).GetTuple3(j)
localnormaldot = vtk.vtkMath.Dot(localtangent, localnormal)
localtangent[0] -= localnormaldot * localnormal[0]
localtangent[1] -= localnormaldot * localnormal[1]
localtangent[2] -= localnormaldot * localnormal[2]
vtk.vtkMath.Normalize(localtangent)
transform.RotateWXYZ(rotationAngle, localtangent)
transform.TransformNormal(localnormal, newVoronoiVector)
vtk.vtkMath.Normalize(newVoronoiVector)
newVoronoiPoint[0] = point0[0] + voronoiVectorNorm * newVoronoiVector[0]
newVoronoiPoint[1] = point0[1] + voronoiVectorNorm * newVoronoiVector[1]
newVoronoiPoint[2] = point0[2] + voronoiVectorNorm * newVoronoiVector[2]
PTPoints.InsertNextPoint(newVoronoiPoint)
numberOfPTPoints = PTPoints.GetNumberOfPoints()
lastPTPoint = PTPoints.GetPoint(PTPoints.GetNumberOfPoints() - 1)
voronoiPointLocator = get_vtk_point_locator(voronoiDataset1)
arrivalVoronoiPointId = voronoiPointLocator.FindClosestPoint(lastPTPoint)
arrivalVoronoiPoint = voronoiDataset1.GetPoint(arrivalVoronoiPointId)
arrivalVoronoiPointRadius = voronoiDataset1.GetPointData().GetArray(radiusArrayName).GetTuple1(
arrivalVoronoiPointId)
arrivalCenterlinePointLocator = get_vtk_point_locator(cellLine)
arrivalCenterlineClosestPointId = arrivalCenterlinePointLocator.FindClosestPoint(arrivalVoronoiPoint)
arrivalCenterlineClosestPoint = cellLine.GetPoint(arrivalCenterlineClosestPointId)
arrivalVoronoiVector = [0.0, 0.0, 0.0]
arrivalVoronoiVector[0] = arrivalVoronoiPoint[0] - arrivalCenterlineClosestPoint[0]
arrivalVoronoiVector[1] = arrivalVoronoiPoint[1] - arrivalCenterlineClosestPoint[1]
arrivalVoronoiVector[2] = arrivalVoronoiPoint[2] - arrivalCenterlineClosestPoint[2]
arrivalVoronoiVectorNorm = vtk.vtkMath.Norm(arrivalVoronoiVector)
radiusArray = compute_spline(voronoiPointRadius, arrivalVoronoiPointRadius, numberOfPTPoints)
vectorNormArray = compute_spline(voronoiVectorNorm, arrivalVoronoiVectorNorm, numberOfPTPoints)
pointsToGap = (gapStartId - closestPointId) * step
if pointsToGap < 0 or PTPoints.GetNumberOfPoints() <= pointsToGap:
continue
for k in range(gapStartId, endSavingInterval, step):
ptpoint = PTPoints.GetPoint(pointsToGap)
clpoint = cellLine.GetPoint(k)
vector = [0.0, 0.0, 0.0]
vector[0] = ptpoint[0] - clpoint[0]
vector[1] = ptpoint[1] - clpoint[1]
vector[2] = ptpoint[2] - clpoint[2]
vtk.vtkMath.Normalize(vector)
norm = vectorNormArray.GetTuple1(pointsToGap)
newvector = [0.0, 0.0, 0.0]
newvector[0] = norm * vector[0]
newvector[1] = norm * vector[1]
newvector[2] = norm * vector[2]
newpoint = [0.0, 0.0, 0.0]
newpoint[0] = clpoint[0] + newvector[0]
newpoint[1] = clpoint[1] + newvector[1]
newpoint[2] = clpoint[2] + newvector[2]
finalNewVoronoiPoints.InsertNextPoint(newpoint)
cellArray.InsertNextCell(1)
cellArray.InsertCellPoint(count)
if pointsToGap > 0:
finalRadiusArray.SetTuple1(count, radiusArray.GetTuple1(pointsToGap))
pointsToGap += 1
count += 1
return finalNewVoronoiPoints, finalRadiusArray | 13c719b89f737bac625cf76c7d64a5c34a856fdd | 3,023 |
def plot_kde_matrix(df, w, limits=None, colorbar=True, refval=None):
"""
Plot a KDE matrix.
Parameters
----------
df: Pandas Dataframe
The rows are the observations, the columns the variables.
w: np.ndarray
The corresponding weights.
colorbar: bool
Whether to plot the colorbars or not.
limits: dictionary, optional
Dictionary of the form ``{"name": (lower_limit, upper_limit)}``.
refval: dict, optional
A reference parameter to be shown in the plots (e.g. the
underlying ground truth parameter used to simulate the data
for testing purposes). Default: None.
"""
grid = sns.PairGrid(df, diag_sharey=False)
if limits is None:
limits = {}
default = (None, None)
def off_diagonal(x, y, **kwargs):
df = pd.concat((x, y), axis=1)
plot_kde_2d(df, w,
x.name, y.name,
xmin=limits.get(x.name, default)[0],
xmax=limits.get(x.name, default)[1],
ymin=limits.get(y.name, default)[0],
ymax=limits.get(y.name, default)[1],
ax=plt.gca(), title=False, colorbar=colorbar,
refval=refval)
def scatter(x, y, **kwargs):
alpha = w / w.max()
colors = np.zeros((alpha.size, 4))
colors[:, 3] = alpha
plt.gca().scatter(x, y, color="k")
if refval is not None:
plt.gca().scatter([refval[x.name]], [refval[y.name]], color='C1')
plt.gca().set_xlim(*limits.get(x.name, default))
plt.gca().set_ylim(*limits.get(y.name, default))
def diagonal(x, **kwargs):
df = pd.concat((x,), axis=1)
plot_kde_1d(df, w, x.name,
xmin=limits.get(x.name, default)[0],
xmax=limits.get(x.name, default)[1],
ax=plt.gca(), refval=refval)
grid.map_diag(diagonal)
grid.map_upper(scatter)
grid.map_lower(off_diagonal)
return grid | a0272a0f819fc5bca6144c9a8293f29f415327b8 | 3,024 |
import logging
def _submit_to_all_logs(log_list, certs_chain):
"""Submits the chain to all logs in log_list and validates SCTs."""
log_id_to_verifier = _map_log_id_to_verifier(log_list)
chain_der = [c.to_der() for c in certs_chain]
raw_scts_for_cert = []
for log_url in log_list.keys():
res = _submit_to_single_log(log_url, chain_der)
if res:
raw_scts_for_cert.append(res)
else:
logging.info("No SCT from log %s", log_url)
validated_scts = []
for raw_sct in raw_scts_for_cert:
key_id = raw_sct.id.key_id
try:
log_id_to_verifier[key_id].verify_sct(raw_sct, certs_chain)
validated_scts.append(raw_sct)
except error.SignatureError as err:
logging.warning(
'Discarding SCT from log_id %s which does not validate: %s',
key_id.encode('hex'), err)
except KeyError as err:
logging.warning('Could not find CT log validator for log_id %s. '
'The log key for this log is probably misconfigured.',
key_id.encode('hex'))
scts_for_cert = [tls_message.encode(proto_sct)
for proto_sct in validated_scts
if proto_sct]
sct_list = client_pb2.SignedCertificateTimestampList()
sct_list.sct_list.extend(scts_for_cert)
return tls_message.encode(sct_list) | 16081a2ddbc924c0490af5f7c3ffc625300486cd | 3,029 |
import sqlite3
def create_connection(db_file: str):
"""Create database file."""
conn = None
try:
conn = sqlite3.connect(db_file)
print(sqlite3.version)
except sqlite3.Error as e:
print(e)
return conn | a50dff80de36e391aeea7f6867cc85334f4bc690 | 3,031 |
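A brief usage sketch; the database file name is arbitrary and the file is created on first connect:
conn = create_connection("example.db")
if conn is not None:
    conn.execute("CREATE TABLE IF NOT EXISTS items (id INTEGER PRIMARY KEY, name TEXT)")
    conn.commit()
    conn.close()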
def _simplex_gradient(op, grad_wrt_weight):
"""Register gradient for SimplexInterpolationOp."""
grad_wrt_input = simplex_gradient(
input=op.inputs[0],
weight=op.outputs[0],
grad_wrt_weight=grad_wrt_weight,
lattice_sizes=op.get_attr('lattice_sizes'))
return [grad_wrt_input] | 58447f073cdf4feb6e1f115b039c8573ab1048ca | 3,032 |
def generate_experiment():
"""
Generate elastic scattering experiments which are reasonable but random
"""
exp_dict = {}
exp_keys = ['qmin', 'qmax', 'qbin', 'rmin', 'rmax', 'rstep']
exp_ranges = [(0, 1.5), (19., 25.), (.8, .12), (0., 2.5), (30., 50.),
(.005, .015)]
for n, k in enumerate(exp_keys):
exp_dict[k] = rs.uniform(exp_ranges[n][0], exp_ranges[n][1])
exp_dict['sampling'] = rs.choice(['full', 'ns'])
return exp_dict | f913cbfc6f871fa290dd6cdb3e5da874b06243b4 | 3,033 |
def connectDB():
"""function to start the database connection using MongoClient from pymongo and the connection link from .env file path. Using certifi to provide certificate in order to enable the connection
Returns:
Cursor: database white-shark
"""
try:
client = MongoClient(f"{MONGO_URI}", tlsCAFile=ca)
return client["white-shark"]
except:
print("Connection failed") | 6d04fbc03ed45d5ec2b868dd52270f7dd1d7339d | 3,034 |
import gzip
def load_dicom(filename):
"""Loads in a given dicom file using a pydicom library
:param filename: a path to the .dcm.gz or .dcm file
:type filename: Union[str, os.path]
:return: pydicom.dataset.FileDataset or pydicom.dicomdir.DicomDir
:raises TypeError: raised if the file extension does not end with .dcm nor .gz
"""
if filename.endswith('.dcm'):
ds = dicom.dcmread(filename)
elif filename.endswith('.gz'):
with gzip.open(filename) as fd:
ds = dicom.dcmread(fd, force=True)
else:
raise TypeError
ds.file_meta.TransferSyntaxUID = dicom.uid.ImplicitVRLittleEndian
return ds | 898b2003049dd91d53f57e28208ad82b5449632e | 3,035 |
import torch
def load_pytorch_policy(fpath, itr, deterministic=False):
""" Load a pytorch policy saved with Spinning Up Logger."""
fname = osp.join(fpath, 'pyt_save', 'model'+itr+'.pt')
print('\n\nLoading from %s.\n\n'%fname)
model = torch.load(fname)
# make function for producing an action given a single state
def get_action(x):
with torch.no_grad():
x = torch.as_tensor(x, dtype=torch.float32)
if deterministic:
action = model.pi(x)[0].mean.numpy()
else:
action = model.act(x)
return action
return get_action | d368f9b120c78c00e446ab6a4b2b63e893507de7 | 3,036 |
def make_server(dashboard):
"""
Creates the server by mounting various API endpoints and static file content for the dashboard
Parameters
----------
dashboard : plsexplain.Dashboard
The dashboard instance to server
Returns
-------
FastAPI
The application instance that hosts the dashboard instance.
"""
app = FastAPI()
asset_folder = join(abspath(dirname(dirname(__file__))), "client/dist/images")
app.add_api_route("/api/metadata", get_model_metadata(dashboard), methods=["get"])
app.add_api_route("/api/performance", get_model_performance(dashboard), methods=["get"])
app.add_api_route("/api/model/features", get_feature_importance(dashboard), methods=["get"])
app.add_api_route("/api/model/features/{name:str}", get_feature_profile(dashboard), methods=["get"])
app.add_api_route("/api/dataset", get_dataset(dashboard), methods=["get"])
app.add_api_route("/api/predictions/{index:int}/breakdown", get_prediction_breakdown(dashboard), methods=["get"])
app.add_api_route("/api/predictions/{index}/profile/{feature}", get_prediction_profile(dashboard), methods=["get"])
app.mount("/images", StaticFiles(directory=asset_folder), name="static")
app.add_api_route("/{sub_path:path}", get_client_app, methods=["get"], response_class=HTMLResponse)
return app | a7e7e599ba8166d4a27818dcc21da25bb66a4171 | 3,037 |
def bash_complete_line(line, return_line=True, **kwargs):
"""Provides the completion from the end of the line.
Parameters
----------
line : str
Line to complete
return_line : bool, optional
If true (default), will return the entire line, with the completion added.
If false, this will instead return the strings to append to the original line.
kwargs : optional
All other keyword arguments are passed to the bash_completions() function.
Returns
-------
rtn : set of str
Possible completions of prefix
"""
# set up for completing from the end of the line
split = line.split()
if len(split) > 1 and not line.endswith(" "):
prefix = split[-1]
begidx = len(line.rsplit(prefix)[0])
else:
prefix = ""
begidx = len(line)
endidx = len(line)
# get completions
out, lprefix = bash_completions(prefix, line, begidx, endidx, **kwargs)
# reformat output
if return_line:
preline = line[:-lprefix]
rtn = {preline + o for o in out}
else:
rtn = {o[lprefix:] for o in out}
return rtn | 571e0822cd7a4d44e19c969072d624123640d1f1 | 3,038 |
def use_board(name):
"""
Use Board.
"""
_init_pins()
return r_eval("pins::use_board(\"" + name + "\")") | 5f08450f48fca6ca827383f4a57f006ee6e50836 | 3,039 |
def add_parameter(name, initial_value=1.0, **kwargs):
"""Adds a new global parameter to the model.
:param name: the name for the new global parameter
:type name: str
:param initial_value: optional the initial value of the parameter (defaults to 1)
:type initial_value: float
:param kwargs: optional parameters, recognized are:
* | `model`: to specify the data model to be used (if not specified
| the one from :func:`.get_current_model` will be taken)
* all other parameters from :func:`set_parameters`.
:return: the newly created parameter
"""
dm = kwargs.get('model', model_io.get_current_model())
assert (isinstance(dm, COPASI.CDataModel))
model = dm.getModel()
assert (isinstance(model, COPASI.CModel))
parameter = model.createModelValue(name, initial_value)
if parameter is None:
raise ValueError('A global parameter named ' + name + ' already exists')
set_parameters(name, **kwargs)
return parameter | 8fa0839f1a38fa78add8ab35b2eb03f0c3d4bbd8 | 3,040 |
from samplesheets.models import GenericMaterial
def get_sample_libraries(samples, study_tables):
"""
Return libraries for samples.
:param samples: Sample object or a list of Sample objects within a study
:param study_tables: Rendered study tables
:return: GenericMaterial queryset
"""
if type(samples) not in [list, QuerySet]:
samples = [samples]
sample_names = [s.name for s in samples]
study = samples[0].study
library_names = []
for k, assay_table in study_tables['assays'].items():
sample_idx = get_index_by_header(
assay_table, 'name', obj_cls=GenericMaterial, item_type='SAMPLE'
)
for row in assay_table['table_data']:
if row[sample_idx]['value'] in sample_names:
last_name = get_last_material_name(row, assay_table)
if last_name not in library_names:
library_names.append(last_name)
return GenericMaterial.objects.filter(
study=study, name__in=library_names
).order_by('name') | 284d08b313d982a4b6d6fe9d780f1a668f036455 | 3,041 |
def parse_next_frame(data):
"""
Parse the next packet from this MQTT data stream.
"""
if not data:
return None, b''
if len(data) < 2:
# Not enough data yet
return None, data
packet_type, flag1, flag2, flag3, flag4 = bitstruct.unpack('u4b1b1b1b1', data[0:1])
length = None
# Figure out the length of the packet
seek_point = 0
seek_multiplier = 1
packet_length = 0
encoded_byte = -1
while (encoded_byte & 128) != 0:
seek_point += 1
if len(data) < 1 + seek_point:
# Not enough data
return None, data
encoded_byte, = bitstruct.unpack('u8', data[seek_point:seek_point+1])
packet_length += (encoded_byte & 127) * seek_multiplier
seek_multiplier = seek_multiplier * 128
if seek_multiplier > 128 * 128 * 128:
raise ParseFailure()
# Do we have the whole packet?
if len(data) < 1 + seek_point + packet_length:
# Not the whole packet yet
return None, data
# Build the frame
frame = Frame(
packet_type=PacketType(packet_type),
flags=(flag1, flag2, flag3, flag4),
body=data[1 + seek_point:packet_length + 1 + seek_point])
# Return the data we didn't consume
data = data[1 + seek_point + packet_length:]
return frame, data | ce725ce871fdbd45fbf5d7367049171e7001469b | 3,042 |
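The length-decoding loop above implements MQTT's variable-length Remaining Length field: each byte contributes 7 bits of the value and its high bit marks a continuation, for at most four bytes. A standalone sketch of the matching encoder (illustrative only, not part of the module above):
def encode_remaining_length(n):
    """Encode an MQTT Remaining Length value (0 .. 268435455)."""
    out = bytearray()
    while True:
        byte, n = n % 128, n // 128
        out.append(byte | 128 if n > 0 else byte)
        if n == 0:
            return bytes(out)

assert encode_remaining_length(321) == b'\xc1\x02'   # 65 + 0x80 continuation, then 2 * 128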
import random
def pick_glance_api_server():
"""Return which Glance API server to use for the request
This method provides a very primitive form of load-balancing suitable for
testing and sandbox environments. In production, it would be better to use
one IP and route that to a real load-balancer.
Returns (host, port)
"""
host_port = random.choice(FLAGS.glance_api_servers)
host, port_str = host_port.split(':')
port = int(port_str)
return host, port | e32e75b675f0b3e07c71ae172423b6393f213a4d | 3,043 |
from json import dumps
def remove_punctuation(transcriptions):
"""
:param: transcriptions is the dictionary containing text file that has been
converted into an array.
:return: cleaned string of words
This function removes punctuations from the story """
parsed_string = dumps(transcriptions)
punctuations = '''[],!.'"\\?'''
for char in parsed_string:
if char in punctuations:
parsed_string = parsed_string.replace(char, '')
return parsed_string | 5800a97a2a232f41161c9c8357cd826212d8302e | 3,044 |
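A usage sketch, assuming `dumps` above is `json.dumps` so the transcription dict is serialized before the listed punctuation characters are stripped:
transcriptions = {"story": ["Hello, world!", "How are you?"]}
cleaned = remove_punctuation(transcriptions)
print(cleaned)   # the JSON text with [] , ! . ' " \ ? characters removed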
def snakify(str_: str) -> str:
"""Convert a string to snake case
Args:
str_: The string to convert
"""
return str_.replace(" ", "_").lower() | c40d972fc99f2cb99f3c2b4a83296e793018c32b | 3,045 |
def search_images(
project,
image_name_prefix=None,
annotation_status=None,
return_metadata=False
):
"""Search images by name_prefix (case-insensitive) and annotation status
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param image_name_prefix: image name prefix for search
:type image_name_prefix: str
:param annotation_status: if not None, annotation statuses of images to filter,
should be one of NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param return_metadata: return metadata of images instead of names
:type return_metadata: bool
:return: metadata of found images or image names
:rtype: list of dicts or strs
"""
project, project_folder = get_project_and_folder_metadata(project)
team_id, project_id = project["team_id"], project["id"]
if annotation_status is not None:
annotation_status = common.annotation_status_str_to_int(
annotation_status
)
if project_folder is not None:
project_folder_id = project_folder["id"]
else:
project_folder_id = get_project_root_folder_id(project)
result_list = []
params = {
'team_id': team_id,
'project_id': project_id,
'annotation_status': annotation_status,
'offset': 0,
'folder_id': project_folder_id
}
if image_name_prefix is not None:
params['name'] = image_name_prefix
total_got = 0
total_images = 0
while True:
response = _api.send_request(
req_type='GET', path='/images-folders', params=params
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't search images " + response.text
)
response = response.json()
images = response["images"]
folders = response["folders"]
results_images = images["data"]
for r in results_images:
if return_metadata:
result_list.append(r)
else:
result_list.append(r["name"])
total_images += len(results_images)
if images["count"] <= total_images:
break
total_got += len(results_images) + len(folders["data"])
params["offset"] = total_got
if return_metadata:
def process_result(x):
x["annotation_status"] = common.annotation_status_int_to_str(
x["annotation_status"]
)
return x
return list(map(process_result, result_list))
else:
return result_list | dc5733e0c22419f850592ee2f8dd3b13e177c99b | 3,046 |
def bibtexNoteszotero(bibtex_names):
"""
params:
bibtex_names, {}
return: notes_dict, {}
"""
#
notes_dict = {}
notes_dict["itemType"] = "note"
notes_dict["relations"] = {}
notes_dict["tags"] = []
notes_dict["note"] = bibtex_names["notes"].strip()
#
return notes_dict | 97e30f746f59ee5e1cfed8581a2dc272fc4b477f | 3,047 |
def iliev_test_5(N=10000,
Ns=10,
L=15. | units.kpc,
dt=None):
"""
prepare iliev test and return SPH and simplex interfaces
"""
gas, sources = iliev_test_5_ic(N, Ns, L)
conv = nbody_system.nbody_to_si(1.0e9 | units.MSun, 1.0 | units.kpc)
sph = Fi(conv, use_gl=False, mode='periodic', redirection='none')
sph.initialize_code()
sph.parameters.use_hydro_flag = True
sph.parameters.radiation_flag = False
sph.parameters.self_gravity_flag = False
sph.parameters.gamma = 1
sph.parameters.isothermal_flag = True
sph.parameters.integrate_entropy_flag = False
sph.parameters.timestep = dt
sph.parameters.verbosity = 0
sph.parameters.pboxsize = 2*L
sph.commit_parameters()
sph.gas_particles.add_particles(gas)
sph.commit_particles()
# sph.start_viewer()
rad = SimpleX(number_of_workers=1, redirection='none')
rad.initialize_code()
rad.parameters.box_size = 2*L
rad.parameters.hilbert_order = 0
rad.commit_parameters()
gas.add_particles(sources)
rad.particles.add_particles(gas)
rad.commit_particles()
return sph, rad | f4675e8c7f51c18cb31644295a1d5945e453de5b | 3,048 |
def run(model, model_params, T,
method, method_params, num_iter,
tmp_path="/tmp/consistency_check.txt",
seed=None,
verbose=False,
simplified_interface=True):
"""
Wrapper around the full consistency check pipeline.
Parameters
----------
model : str
Name of the generative model.
Implemented models are: gn, generalized_gn.
model_params : dict
Parameters of the generative model.
T : int
Number of generative step.
method : str
Name of the inference method.
Implemented methods are: degree, OD, random_expand,
snowball_sampling, biased_snowball_sampling.
method_params : dict
Parameters of the inference method.
num_iter : int
Number of repetition of the inference method.
Note that all repetitions run on the same model instance.
tmp_path : str
Location where temporary files will be written.
verbose : bool
Output logs to stdout.
simplified_interface : bool
Assume that the generator is compiled with a simplified interface (i.e., not Boost).
Returns
-------
scores : list of dict
A list of scores (one per repetition).
Each entry of the list corresponds to a repetition of the method.
An entry in the list is a dictionary, whose key is the name of
the comparison measure.
Warning
-------
This function has side-effects. It writes and read from a temporary
location (defaulted to /tmp/) to communicate with pre-compiled modules.
If multiple instances run at the same time, make sure to pass different
temporary paths to each instances.
"""
# Tests
if {model} & available_models == set():
raise NotImplementedError("Model '" + str(model) +
"' not implemented.")
if {method} & available_methods == set():
raise NotImplementedError("Method '" + str(method) +
"' not implemented.")
# Generate history
generated_history = gn.run(model, model_params, T,
verbose=verbose, seed=seed,
simplified_interface=simplified_interface)
encoded_history, encoding, tag_encoding = obfuscate_history(generated_history, seed=seed)
_write_history(encoded_history, tmp_path)
# Infer and compute similarity
scores = []
for i in range(num_iter):
output = im.run(tmp_path, method, method_params, verbose=verbose)
if len(generated_history) != len(output):
RuntimeError("Length of generated and inferred data don't match.")
inferred = deobfuscate_history([x[0] for x in output], encoding, tag_encoding)
res = cp.corr(generated_history,
[(e, _[1]) for e, _ in zip(inferred, output)])
scores.append(res)
# Garbage collection
remove(tmp_path)
return scores | 152eb33e3e6c68306c3f965f734bc8d7ddeb4010 | 3,049 |
def clean_street(address: str) -> str:
"""
Function to clean street strings.
"""
address = address.lower()
address = _standardize_street(address)
address = _abb_replace(address)
address = _ordinal_rep(address)
if address in SPECIAL_CASES.keys(): # Special cases
address = SPECIAL_CASES[address]
return address | af166bd9ebd51e1a157135587c4efdb6469181ea | 3,050 |
def unroll_policy_for_eval(
sess,
env,
inputs_feed,
prev_state_feed,
policy_outputs,
number_of_steps,
output_folder,
):
"""unrolls the policy for testing.
Args:
sess: tf.Session
env: The environment.
inputs_feed: dictionary of placeholder for the input modalities.
prev_state_feed: placeholder for the input to the prev_state of the model.
policy_outputs: tensor that contains outputs of the policy.
number_of_steps: maximum number of unrolling steps.
output_folder: output_folder where the function writes a dictionary of
detailed information about the path. The dictionary keys are 'states' and
'distance'. The value for 'states' is the list of states that the agent
goes along the path. The value for 'distance' contains the length of
shortest path to the goal at each step.
Returns:
states: list of states along the path.
distance: list of distances along the path.
"""
prev_state = [
np.zeros((1, FLAGS.lstm_cell_size), dtype=np.float32) for _ in range(2)
]
prev_action = np.zeros((1, 1, FLAGS.action_size + 1), dtype=np.float32)
obs = env.reset()
distances_to_goal = []
states = []
unique_id = '{}_{}'.format(env.cur_image_id(), env.goal_string)
for _ in range(number_of_steps):
distances_to_goal.append(
np.min([
len(
nx.shortest_path(env.graph, env.pose_to_vertex(env.state()),
env.pose_to_vertex(target_view)))
for target_view in env.targets()
]))
states.append(env.state())
feed_dict = {inputs_feed[mtype]: [[obs[mtype]]] for mtype in inputs_feed}
feed_dict[prev_state_feed[0]] = prev_state[0]
feed_dict[prev_state_feed[1]] = prev_state[1]
action_values, prev_state = sess.run(policy_outputs, feed_dict=feed_dict)
chosen_action = np.argmax(action_values[0])
obs, _, done, info = env.step(np.int32(chosen_action))
prev_action[0][0][chosen_action] = 1.
prev_action[0][0][-1] = float(info['success'])
# If the agent chooses action stop or the number of steps exceeeded
# env._episode_length.
if done:
break
# logging.info('distance = %d, id = %s, #steps = %d', distances_to_goal[-1],
output_path = os.path.join(output_folder, unique_id + '.npy')
with tf.gfile.Open(output_path, 'w') as f:
print('saving path information to {}'.format(output_path))
np.save(f, {'states': states, 'distance': distances_to_goal})
return states, distances_to_goal | 59979b74f7ff7dcdaf7c875ce93d3333f467cd0d | 3,051 |
def is_iterable(o: any) -> bool:
"""
Checks if `o` is iterable
Parameters
----------
o : any
The value to be checked.
Examples
--------
>>> is_iterable(list(range(5)))
True
>>> is_iterable(5)
False
>>> is_iterable('hello world')
True
>>> is_iterable(None)
False
"""
try:
_ = iter(o)
except TypeError:
return False
return True | f3124d5ead76977c45899c589e0c6873abafd773 | 3,053 |
def fit(init_file, semipar=False):
""" """
check_presence_init(init_file)
dict_ = read(init_file)
# Perform some consistency checks given the user's request
check_presence_estimation_dataset(dict_)
check_initialization_dict(dict_)
# Semiparametric Model
if semipar is True:
quantiles, mte_u, X, b1_b0 = semipar_fit(init_file) # change to dict_
# Construct MTE
# Calculate the MTE component that depends on X
mte_x = np.dot(X, b1_b0)
# Put the MTE together
mte = mte_x.mean(axis=0) + mte_u
# Accounting for variation in X
mte_min = np.min(mte_x) + mte_u
mte_max = np.max(mte_x) + mte_u
rslt = {
"quantiles": quantiles,
"mte": mte,
"mte_x": mte_x,
"mte_u": mte_u,
"mte_min": mte_min,
"mte_max": mte_max,
"X": X,
"b1-b0": b1_b0,
}
# Parametric Normal Model
else:
check_par(dict_)
rslt = par_fit(dict_)
return rslt | 05d484c0aae6e739881714eb7ec81c982503cf15 | 3,054 |
def date2gpswd(date):
"""Convert date to GPS week and day of week, return int tuple (week, day).
Example:
>>> from datetime import date
>>> date2gpswd(date(2017, 5, 17))
(1949, 3)
>>> date2gpswd(date(1917, 5, 17))
Traceback (most recent call last):
...
ValueError: Invalid date: 1917-05-17, too early.
"""
return __date2weeksday(date, GPS_START_DATE) | 29000e900ffb743b29d41fa752fc4da5f470e1b8 | 3,055 |
import html
def __make_sliders(no, f):
"""Create dynamic sliders for a specific field"""
style = {'width':'20%', 'display': 'none'}
return html.Div(id={'index': f'Slider_{no}', 'type':'slider'},
children=[__make_slider(no, i) for i in range(1,f+1)], style=style) | b4fb97042e22d06e903f77a13381f9323acacacf | 3,056 |
def kurtosis(iterable, sample=False):
""" Returns the degree of peakedness of the given list of values:
> 0.0 => sharper peak around mean(list) = more infrequent, extreme values,
< 0.0 => wider peak around mean(list),
= 0.0 => normal distribution,
= -3 => flat
"""
a = iterable if isinstance(iterable, list) else list(iterable)
return moment(a, 4, sample) / (moment(a, 2, sample) ** 2.0 or 1) - 3 | ba53f2425de5ffbf6cff0724e7128953554c829b | 3,057 |
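As a cross-check, this should match SciPy's Fisher (excess) kurtosis with population moments, assuming `moment(a, k, sample)` above is the usual k-th central moment:
import numpy as np
from scipy.stats import kurtosis as scipy_kurtosis

data = np.random.normal(size=10_000).tolist()
print(kurtosis(data))                                  # close to 0.0 for normal data
print(scipy_kurtosis(data, fisher=True, bias=True))    # same definition in SciPy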
def gen_data(shape, dtype, epsilon):
"""Generate data for testing the op."""
var = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
m = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
v = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
grad = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
lr = np.random.rand(1).astype(dtype)
beta1 = np.random.rand(1).astype(dtype)
beta2 = np.random.rand(1).astype(dtype)
beta1_power = beta1 * beta1
inputs = [var, m, v, grad, lr, beta1, beta1_power, beta2]
one = np.array([1]).astype(dtype)
epsilon = np.array([epsilon]).astype(dtype)
out_m = beta1 * m + (one - beta1) * grad
out_v = np.maximum(beta2 * v, np.abs(grad))
out_var = var - lr * out_m / ((one - beta1_power) * (out_v + epsilon))
expects = [out_var, out_m, out_v]
args = inputs
return inputs, expects, args | 75f790eda84ab2e718d504d26a303a6639775829 | 3,058 |
def factor_tmom_T1_RTN_60(df: pd.DataFrame):
"""
factor example
"""
factor = df['return'].rolling(60).sum()
return factor | 53b5700902bf409015f4e1c2063f741cea3736ee | 3,059 |
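A small usage sketch with a toy DataFrame; the only assumption is a 'return' column of per-period returns:
import numpy as np
import pandas as pd

df = pd.DataFrame({'return': np.random.normal(0, 0.01, 250)})
momentum = factor_tmom_T1_RTN_60(df)   # NaN for the first 59 rows, then a 60-period rolling sum
print(momentum.tail())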
def get_dataset_info(dataset_name='mnist'):
"""Method to return dataset information for a specific dataset_name.
Args:
dataset_name: a string representing the dataset to be loaded using tfds
Returns:
A dictionary of relevant information for the loaded dataset.
"""
ds_info = tfds.builder(dataset_name).info
dataset_information = {
'num_classes': ds_info.features['label'].num_classes,
'data_shape': ds_info.features['image'].shape,
'train_num_examples': ds_info.splits['train'].num_examples,
}
return dataset_information | b4e36c966a34a3eacd327484e8d88d54303c0ea8 | 3,060 |
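For example, with TensorFlow Datasets installed, `get_dataset_info('mnist')` should report roughly the values shown in the comment:
info = get_dataset_info('mnist')
# {'num_classes': 10, 'data_shape': (28, 28, 1), 'train_num_examples': 60000}
print(info)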
def full_model(mode, hparams):
"""Make a clause search model including input pipeline.
Args:
mode: Either 'train' or 'eval'.
hparams: Hyperparameters. See default_hparams for details.
Returns:
logits, labels
Raises:
ValueError: If the model returns badly shaped tensors.
"""
if hparams.use_averages:
raise NotImplementedError('Figure out how to eval with Polyak averaging')
kind, model = all_models.make_model(name=hparams.model, mode=mode,
hparams=hparams, vocab=FLAGS.vocab)
batch_size = mode_batch_size(mode, hparams)
if kind == 'sequence':
# Read
_, conjectures, clauses, labels = inputs.sequence_example_batch(
mode=mode, batch_size=batch_size, shuffle=True)
clauses = tf.reshape(clauses, [2 * batch_size, -1])
labels = tf.reshape(labels, [2 * batch_size])
# Embed
vocab_size, _ = inputs.read_vocab(FLAGS.vocab)
conjectures, clauses = model_utils.shared_embedding_layer(
(conjectures, clauses), dim=hparams.embedding_size, size=vocab_size)
# Classify
conjectures = model.conjecture_embedding(conjectures)
conjectures = tf.reshape(
tf.tile(tf.reshape(conjectures, [batch_size, 1, -1]), [1, 2, 1]),
[2 * batch_size, -1])
clauses = model.axiom_embedding(clauses)
logits = model.classifier(conjectures, clauses)
elif kind == 'tree':
examples = inputs.proto_batch(mode=mode, batch_size=batch_size)
def weave(**ops):
return clause_loom.weave_clauses(
examples=examples, vocab=FLAGS.vocab, **ops)
logits, labels = model(weave)
elif kind == 'fast':
examples = inputs.proto_batch(mode=mode, batch_size=batch_size)
conjecture_sizes, conjecture_flat, clauses, labels = (
gen_clause_ops.random_clauses_as_fast_clause(
examples, vocab=FLAGS.vocab))
conjectures = jagged.Jagged(conjecture_sizes, conjecture_flat)
logits = model(conjectures, clauses)
# Done!
return fix_logits(kind, logits), labels | 3ab3a089628f9460b9f1ae9800ec9003fdae5d17 | 3,061 |
def aggregatePredictions(df_pred, threshold=0.8):
"""
Aggregates probabilistic predictions, choosing the
state with the largest probability, if it exceeds
the threshold.
:param pd.DataFrame df_pred:
columns: state
rows: instance
values: float
:param float threshold:
:return pd.Series:
index: instance
values: state or np.nan if below threshold
"""
MISSING = -1
columns = df_pred.columns
values = []
for idx, row in df_pred.iterrows():
row_list = row.tolist()
max_prob = max(row_list)
# assign the most probable state only if it meets the threshold
values.append(columns[row_list.index(max_prob)] if max_prob >= threshold else MISSING)
ser = pd.Series(values, index=df_pred.index)
ser = ser.apply(lambda v: np.nan if v == MISSING else v)
return ser | a5d8efbe24d45279e80ff461e900dd3ac4921659 | 3,062 |
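A usage sketch: rows are instances, columns are candidate states, values are predicted probabilities; per the docstring, instances whose best probability falls below the threshold come back as NaN:
import pandas as pd

df_pred = pd.DataFrame({"A": [0.9, 0.4], "B": [0.1, 0.6]}, index=["obs1", "obs2"])
print(aggregatePredictions(df_pred, threshold=0.8))
# obs1 -> "A"; obs2 -> NaN (no state reaches 0.8)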
from typing import Any
def is_input_element(obj: Any) -> bool:
"""
Returns True, if the given object is an :class:`.InputElement`, or a
subclass of InputElement.
"""
return isinstance(obj, InputElement) | c3fbaea9588d40e2fa370aab32688c7e926bd265 | 3,064 |
def line_intersects_grid((x0,y0), (x1,y1), grid, grid_cell_size=1):
""" Performs a line/grid intersection, finding the "super cover"
of a line and seeing if any of the grid cells are occupied.
The line runs between (x0,y0) and (x1,y1), and (0,0) is the
top-left corner of the top-left grid cell.
>>> line_intersects_grid((0,0),(2,2),[[0,0,0],[0,1,0],[0,0,0]])
True
>>> line_intersects_grid((0,0),(0.99,2),[[0,0,0],[0,1,0],[0,0,0]])
False
"""
grid_cell_size = float(grid_cell_size)
x0 = x0 / grid_cell_size
x1 = x1 / grid_cell_size
y0 = y0 / grid_cell_size
y1 = y1 / grid_cell_size
dx = abs(x1 - x0)
dy = abs(y1 - y0)
x = int(math.floor(x0))
y = int(math.floor(y0))
if dx != 0:
dt_dx = 1.0 / dx
else:
dt_dx = inf
if dy != 0:
dt_dy = 1.0 / dy
else:
dt_dy = inf
t = 0.0
n = 1
if (dx == 0):
x_inc = 0
t_next_horizontal = dt_dx
elif (x1 > x0):
x_inc = 1
n += int(math.floor(x1)) - x
t_next_horizontal = (math.floor(x0) + 1 - x0) * dt_dx
else:
x_inc = -1
n += x - int(math.floor(x1))
t_next_horizontal = (x0 - math.floor(x0)) * dt_dx
if (dy == 0):
y_inc = 0
t_next_vertical = dt_dy
elif (y1 > y0):
y_inc = 1
n += int(math.floor(y1)) - y
t_next_vertical = (math.floor(y0) + 1 - y0) * dt_dy
else:
y_inc = -1
n += y - int(math.floor(y1))
t_next_vertical = (y0 - math.floor(y0)) * dt_dy
while (n > 0):
if grid[y][x] == 1:
return True
if (t_next_vertical < t_next_horizontal):
y += y_inc
t = t_next_vertical
t_next_vertical += dt_dy
else:
x += x_inc
t = t_next_horizontal
t_next_horizontal += dt_dx
n -= 1
return False | f9710a61bcb101202295e50efb86800e855f00d5 | 3,065 |
def SimuGumbel(n, m, theta):
"""
# Gumbel copula
Requires:
n = number of variables to generate
m = sample size
theta = Gumbel copula parameter
"""
v = [np.random.uniform(0,1,m) for i in range(0,n)]
X = levy_stable.rvs(alpha=1/theta, beta=1,scale=(np.cos(np.pi/(2*theta)))**theta,loc=0, size=m)
phi_t = lambda t: np.exp(-t**(1/theta))
u = [phi_t(-np.log(v[i])/X) for i in range(0,n)]
return u | 55eba3c327b99b0bd6157b61dff9d161feda0519 | 3,066 |
import math
def Norm(x, y):
"""求一个二维向量模长"""
return math.pow(math.pow(x, 2) + math.pow(y, 2), 0.5) | 4c161ada3c446d996f6e33be602a9475948f5bf8 | 3,067 |
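A quick check with the classic 3-4-5 triangle:
assert Norm(3, 4) == 5.0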
from typing import Optional
from typing import Union
from typing import Callable
from typing import List
def make_roi(ms_experiment: ms_experiment_type, tolerance: float,
max_missing: int, min_length: int, min_intensity: float,
multiple_match: str, targeted_mz: Optional[np.ndarray] = None,
start: Optional[int] = None, end: Optional[int] = None,
mz_reduce: Union[str, Callable] = "mean",
sp_reduce: Union[str, Callable] = "sum",
mode: Optional[str] = None
) -> List[Roi]:
"""
Make Region of interest from MS data in centroid mode.
Used by MSData as the first step of the centWave algorithm.
Parameters
----------
ms_experiment: pyopenms.MSExperiment
max_missing : int
maximum number of missing consecutive values. when a row surpass this
number the roi is considered as finished and is added to the roi list if
it meets the length and intensity criteria.
min_length : int
The minimum length of a roi to be considered valid.
min_intensity : float
Minimum intensity in a roi to be considered valid.
tolerance : float
mz tolerance to connect values across scans
start : int, optional
First scan to analyze. If None starts at scan 0
end : int, optional
Last scan to analyze. If None, uses the last scan number.
multiple_match : {"closest", "reduce"}
How to match peaks when there is more than one match. If mode is
`closest`, then the closest peak is assigned as a match and the
others are assigned to no match. If mode is `reduce`, then unique
mz and intensity values are generated using the reduce function in
`mz_reduce` and `sp_reduce` respectively.
mz_reduce : "mean" or Callable
function used to reduce mz values. Can be a function accepting
numpy arrays and returning numbers. Only used when `multiple_match`
is reduce. See the following prototype:
.. code-block:: python
def mz_reduce(mz_match: np.ndarray) -> float:
pass
sp_reduce : {"mean", "sum"} or Callable
function used to reduce intensity values. Can be a function accepting
numpy arrays and returning numbers. Only used when `multiple_match`
is reduce. To use custom functions see the prototype shown on
`mz_reduce`.
targeted_mz : numpy.ndarray, optional
if a list of mz is provided, roi are searched only using this list.
mode : str, optional
mode used to create Roi objects.
Returns
-------
roi: list[Roi]
Notes
-----
To create a ROI, m/z values in consecutive scans are connected if they are
within the tolerance`. If there's more than one possible m/z value to
connect in the next scan, two different strategies are available, using the
`multiple_match` parameter: If "closest" is used, then m/z values are
matched to the closest ones, and the others are used to create new ROI. If
"reduce" is used, then all values within the tolerance are combined. m/z and
intensity values are combined using the `mz_reduce` and `sp_reduce`
parameters respectively. If no matching value has be found in a scan, a NaN
is added to the ROI. If no matching values are found in `max_missing`
consecutive scans the ROI is flagged as finished. In this stage, two
checks are made before the ROI is considered valid:
1. The number of non missing values must be higher than `min_length`.
2. The maximum intensity value in the ROI must be higher than
`min_intensity`.
If the two conditions are meet, the ROI is added to the list of valid ROI.
"""
if start is None:
start = 0
if end is None:
end = ms_experiment.getNrSpectra()
if targeted_mz is None:
mz_seed, _ = ms_experiment.getSpectrum(start).get_peaks()
targeted = False
else:
mz_seed = targeted_mz
targeted = True
size = end - start
rt = np.zeros(size)
processor = _RoiProcessor(mz_seed, max_missing=max_missing,
min_length=min_length,
min_intensity=min_intensity, tolerance=tolerance,
multiple_match=multiple_match,
mz_reduce=mz_reduce, sp_reduce=sp_reduce,
mode=mode)
for k_scan in range(start, end):
sp = ms_experiment.getSpectrum(k_scan)
rt[k_scan - start] = sp.getRT()
mz, spint = sp.get_peaks()
processor.add(mz, spint, targeted=targeted)
processor.append_to_roi(rt, targeted=targeted)
# add roi not completed during last scan
processor.flag_as_completed()
processor.append_to_roi(rt)
return processor.roi | f8b3edbe24091082d1d20af6fdd7875449716a43 | 3,068 |
def get_all_zcs_containers(session, start=None, limit=None, return_type=None,
**kwargs):
"""
Retrieves details for all Zadara Container Services (ZCS) containers
configured on the VPSA.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type start: int
:param start: The offset to start displaying ZCS containers from.
Optional.
:type: limit: int
:param limit: The maximum number of ZCS containers to return. Optional.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
parameters = verify_start_limit(start, limit)
path = '/api/containers.json'
return session.get_api(path=path, parameters=parameters,
return_type=return_type, **kwargs) | 6d2d6ba7037323174d93d2191aef93c072bd0030 | 3,069 |
def jordan_wigner(n):
"""
Generates the Jordan-Wigner representation of the fermionic creation, annihilation,
and Majorana operators for an n-mode system.
The convention for the Majorana operators is as follows:
c_j=aj^{dag}+aj
c_{n+j}=i(aj^{dag}-aj)
"""
s = ket(2, 0) @ dag(ket(2, 1))
S = su_generators(2)
a = {} # Dictionary for the annihilation operators
c = {} # Dictionary for the Majorana operators
for j in range(1, n + 1):
a[j] = tensor([S[3], j - 1], s, [S[0], n - j])
c[j] = dag(a[j]) + a[j]
c[n + j] = 1j * (dag(a[j]) - a[j])
return a, c | 193a2b91f84e2789b46a8767900938bcac8f83f9 | 3,070 |
import logging
def make_update(label: str, update_time: str, repeat: str, data: str, news: str) -> list[dict]:
"""Schedules an update with name 'label' to happen in 'interval' seconds. Updates saved covid
data, news and repeats the update depending on the content of the respective parameters. Adds
to global 'scheduled_updates' list and returns scheduler queue.
"""
# Check that at least one option has been chosen
if not data and not news:
logging.warning("Attempted to schedule update without selecting any options.")
return scheduler.queue
# Check update will be in at least 5 seconds from current time
interval = hhmm_to_seconds(update_time) - hhmm_to_seconds( current_time_hhmm() )
if interval < 5:
logging.warning("Attempted to schedule update too soon.")
return scheduler.queue
# Dictionary to store all information about the update
update = {
'title': label,
'content': f"At {update_time} this update will: "
}
if data:
# Schedule data update
update['data'] = schedule_covid_updates(interval, label)
update['content'] += "update covid data, "
logging.info("Covid data update has been scheduled for %s", update_time)
if news:
# Schedule news update
update['news'] = scheduler.enter(interval, 1, update_news, (label,))
update['content'] += "update covid news, "
logging.info("News update has been scheduled for %s", update_time)
if repeat:
# Schedule update to repeat in 24 hrs
update['repeat'] = scheduler.enter(
60*60*24, 1, make_update, (label, update_time, repeat, data, news)
)
update['content'] += "repeat in 24 hours, "
logging.info("Update %s has been scheduled to repeat itself in 24 hours", label)
# Clean up update content to be displayed
update['content'] = update['content'][ :len( update['content'] ) - 2 ]
scheduled_updates.append(update)
return scheduler.queue | d284d51695229005650eed58de10297ad200c8e4 | 3,071 |
def performTest(name, test): #{{{
"""
Given a series of writes in `test', generate a format string
and pass it to the vulnerable program. If the writes were successful
without destroying any other memory locations, return True.
Terminates after 2 seconds to handle infinite loops in libformatstr.
"""
f = FormatStr(maxbuf)
for (k,v) in test.items():
f[k] = v
(out, err, fill) = (None, None, None)
def sighandler(signum, frame):
raise Exception("Command timed out")
signal.signal(signal.SIGALRM, sighandler)
signal.alarm(2)
try:
payload = f.payload(offset, padding=shift)
if len(payload) > maxbuf:
print "[-] payload is longer than allowed (%d vs %s)" % (len(payload), maxbuf)
(out, err, fill) = checkOutput(payload)
except Exception,e:
print "[-] Exception occurred: %s" % e
signal.alarm(0)
if err == None or not checkMemoryDump(err, fill, f.mem):
print "[-] FAILED: Test \"%s\" failed" % name
return False
else:
print "[+] SUCCESS: Test \"%s\" succeeded" % name
return True | 829e19dbb45cfcc1788365f1c3a5459209c42e5e | 3,072 |
def readsignal_VEC(name , fa):
"""
Reads the time signal stored in the file var.txt and
written in a single column format. Returns the signal into the
single vector signal.
fa is an instrumental amplification factor
"""
path = '../data/'
channel = np.loadtxt(path + name + '.txt')
ndats = len(channel)
signal = np.zeros([ndats], dtype=float)
for i in range(ndats):
signal[i]=channel[i]*fa
#
return ndats , signal | 2365988aa8baf717f332a021e24c1a7ca6d24243 | 3,073 |
from socket import socket, AF_INET, SOCK_DGRAM
def connect(host="localhost", port=27450):
"""Connect to server."""
client = socket(AF_INET, SOCK_DGRAM)
client.connect((host, port))
return client | c60bd35b75ee9b3b5aee898b0ee58b95562627c1 | 3,075 |
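Usage sketch for the UDP client; host, port and payload are placeholders:
client = connect("localhost", 27450)
client.send(b"hello")   # datagram goes to the connected address
client.close()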
def is_coroutine_generator(obj):
"""
Returns whether the given `obj` is a coroutine generator created by an `async def` function, and can be used inside
of an `async for` loop.
Returns
-------
is_coroutine_generator : `bool`
"""
if isinstance(obj, AsyncGeneratorType):
code = obj.ag_code
elif isinstance(obj, CoroutineType):
code = obj.cr_code
elif isinstance(obj, GeneratorType):
code = obj.gi_code
else:
return False
if code.co_flags&CO_ASYNC_GENERATOR:
return True
return False | fea1d344f32a0fffe7fe0bb344299e5fd54a6baa | 3,076 |
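A quick check with the three kinds of objects the function distinguishes (an async generator, a plain coroutine, and an ordinary generator); the helper names below are only for illustration:
async def agen():
    yield 1

async def coro():
    return 1

def gen():
    yield 1

print(is_coroutine_generator(agen()))   # True  - created by an async generator function
print(is_coroutine_generator(coro()))   # False - plain coroutine (never awaited here)
print(is_coroutine_generator(gen()))    # False - ordinary generator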
def get_scheme(scheme_id):
"""
Retrieve the scheme dict identified by the supplied scheme ID
Returns: An scheme dict
"""
for node in sd["nodes"]:
if scheme_id == node["id"]:
return node | 41c4b30496656201c58563fd5cc3ab3abe7ecf95 | 3,077 |
def Axicon(phi, n1, x_shift, y_shift, Fin):
"""
Fout = Axicon(phi, n1, x_shift, y_shift, Fin)
:ref:`Propagates the field through an axicon. <Axicon>`
Args::
phi: top angle of the axicon in radians
n1: refractive index of the axicon material
x_shift, y_shift: shift from the center
Fin: input field
Returns::
Fout: output field (N x N square array of complex numbers).
Example:
:ref:`Bessel beam with axicon <BesselBeam>`
"""
Fout = Field.copy(Fin)
k = 2*_np.pi/Fout.lam
theta = _np.arcsin(n1*_np.cos(phi/2)+phi/2-_np.pi/2)
Ktheta = k * theta
yy, xx = Fout.mgrid_cartesian
xx -= x_shift
yy -= y_shift
fi = -Ktheta*_np.sqrt(xx**2+yy**2)
Fout.field *= _np.exp(1j*fi)
return Fout | a333d29c94e79bdee1ef17ceb3993b28f2e9bd5d | 3,078 |
import urllib
import types
def addrAndNameToURI(addr, sname):
"""addrAndNameToURI(addr, sname) -> URI
Create a valid corbaname URI from an address string and a stringified name"""
# *** Note that this function does not properly check the address
# string. It should raise InvalidAddress if the address looks
# invalid.
if type(addr) is not types.StringType or \
type(sname) is not types.StringType:
raise CORBA.BAD_PARAM(omniORB.BAD_PARAM_WrongPythonType, COMPLETED_NO)
if addr == "":
raise CosNaming.NamingContextExt.InvalidAddress()
if sname == "":
return "corbaname:" + addr
else:
stringToName(sname) # This might raise InvalidName
return "corbaname:" + addr + "#" + urllib.quote(sname) | fd54c23b4e3396b224341fa54c106e4523b55314 | 3,079 |
def blkdev_uname_to_taptype(uname):
"""Take a blkdev uname and return the blktap type."""
return parse_uname(uname)[1] | e7165a9bd987d4820ed5486a06a1ceceec9c5564 | 3,080 |
def detection_layer(inputs, n_classes, anchors, img_size, data_format):
"""Creates Yolo final detection layer.
Detects boxes with respect to anchors.
Args:
inputs: Tensor input.
n_classes: Number of labels.
anchors: A list of anchor sizes.
img_size: The input size of the model.
data_format: The input format.
Returns:
Tf value [box_centers, box_shapes, confidence, classes]
"""
n_anchors = len(anchors)
inputs = tf.keras.layers.Conv2D(filters=n_anchors * (5 + n_classes),
kernel_size=1, strides=1, use_bias=True,
data_format=data_format)(inputs)
# Shape of each cell in image
shape = inputs.get_shape().as_list()
grid_shape = shape[2:4] if data_format == 'channels_first' else shape[1:3]
if data_format == 'channels_first':
# Put the channel's dim to the last position
inputs = tf.transpose(inputs, [0, 2, 3, 1])
inputs = tf.reshape(inputs,
[-1, n_anchors * grid_shape[0] * grid_shape[1], 5 + n_classes])
# Strides = # of cells in an image
strides = (img_size[0] // grid_shape[0], img_size[1] // grid_shape[1])
box_centers, box_shapes, confidence, classes = \
tf.split(inputs, [2, 2, 1, n_classes], axis=-1)
x = tf.range(grid_shape[0], dtype=tf.float32)
y = tf.range(grid_shape[1], dtype=tf.float32)
x_offset, y_offset = tf.meshgrid(x, y)
x_offset = tf.reshape(x_offset, (-1, 1))
y_offset = tf.reshape(y_offset, (-1, 1))
x_y_offset = tf.concat([x_offset, y_offset], axis=-1)
x_y_offset = tf.tile(x_y_offset, [1, n_anchors])
x_y_offset = tf.reshape(x_y_offset, [1, -1, 2])
box_centers = tf.nn.sigmoid(box_centers)
box_centers = (box_centers + x_y_offset) * strides
anchors = tf.tile(anchors, [grid_shape[0] * grid_shape[1], 1])
box_shapes = tf.exp(box_shapes) * tf.cast(anchors, dtype=tf.float32)
confidence = tf.nn.sigmoid(confidence)
classes = tf.nn.sigmoid(classes)
inputs = tf.concat([box_centers, box_shapes,
confidence, classes], axis=-1)
return inputs | 85ada39e57c80eced3dbdc145759a1caa609607d | 3,081 |
from pyfolio import pos  # position-analysis helpers (pyfolio's pos module), not operator.pos
def create_position_tear_sheet(returns, positions,
show_and_plot_top_pos=2, hide_positions=False,
sector_mappings=None, transactions=None,
estimate_intraday='infer', return_fig=False):
"""
Generate a number of plots for analyzing a
strategy's positions and holdings.
- Plots: gross leverage, exposures, top positions, and holdings.
- Will also print the top positions held.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
show_and_plot_top_pos : int, optional
By default, this is 2, and both prints and plots the
top 10 positions.
If this is 0, it will only plot; if 1, it will only print.
hide_positions : bool, optional
If True, will not output any symbol names.
Overrides show_and_plot_top_pos to 0 to suppress text output.
sector_mappings : dict or pd.Series, optional
Security identifier to sector mapping.
Security ids as keys, sectors as values.
transactions : pd.DataFrame, optional
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
estimate_intraday: boolean or str, optional
Approximate returns for intraday strategies.
See description in create_full_tear_sheet.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
"""
positions = utils.check_intraday(estimate_intraday, returns,
positions, transactions)
if hide_positions:
show_and_plot_top_pos = 0
vertical_sections = 7 if sector_mappings is not None else 6
fig = plt.figure(figsize=(14, vertical_sections * 6))
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
ax_exposures = plt.subplot(gs[0, :])
ax_top_positions = plt.subplot(gs[1, :], sharex=ax_exposures)
ax_max_median_pos = plt.subplot(gs[2, :], sharex=ax_exposures)
ax_holdings = plt.subplot(gs[3, :], sharex=ax_exposures)
ax_long_short_holdings = plt.subplot(gs[4, :])
ax_gross_leverage = plt.subplot(gs[5, :], sharex=ax_exposures)
positions_alloc = pos.get_percent_alloc(positions)
plotting.plot_exposures(returns, positions, ax=ax_exposures)
plotting.show_and_plot_top_positions(
returns,
positions_alloc,
show_and_plot=show_and_plot_top_pos,
hide_positions=hide_positions,
ax=ax_top_positions)
plotting.plot_max_median_position_concentration(positions,
ax=ax_max_median_pos)
plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)
plotting.plot_long_short_holdings(returns, positions_alloc,
ax=ax_long_short_holdings)
plotting.plot_gross_leverage(returns, positions,
ax=ax_gross_leverage)
if sector_mappings is not None:
sector_exposures = pos.get_sector_exposures(positions,
sector_mappings)
if len(sector_exposures.columns) > 1:
sector_alloc = pos.get_percent_alloc(sector_exposures)
sector_alloc = sector_alloc.drop('cash', axis='columns')
ax_sector_alloc = plt.subplot(gs[6, :], sharex=ax_exposures)
plotting.plot_sector_allocations(returns, sector_alloc,
ax=ax_sector_alloc)
for ax in fig.axes:
plt.setp(ax.get_xticklabels(), visible=True)
if return_fig:
return fig | 87338d6acb2852f9d45fa21cb4005c602cbfc909 | 3,082 |
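# A minimal usage sketch with synthetic data (assumes a pyfolio installation that is
# compatible with your pandas/matplotlib versions; the tickers and values are illustrative).
import numpy as np
import pandas as pd
idx = pd.date_range('2020-01-01', periods=60, freq='B')
returns = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)
positions = pd.DataFrame({'AAPL': 60000.0, 'MSFT': 30000.0, 'cash': 10000.0}, index=idx)
create_position_tear_sheet(returns, positions)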
import csv
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
def statement_view(request, statement_id=None):
"""Send a CSV version of the statement with the given
``statement_id`` to the user's browser.
"""
statement = get_object_or_404(
Statement, pk=statement_id, account__user=request.user)
response = HttpResponse(mimetype='text/csv')
filename = "%s (%s).csv" % (statement.title,
statement.from_date.strftime('%B %Y'))
response['Content-Disposition'] = 'attachment; filename=%s' % (filename,)
writer = csv.writer(response)
headings = ["Tag pool name", "Tag name", "Message direction", "Total cost"]
writer.writerow(headings)
line_item_list = statement.lineitem_set.all()
for line_item in line_item_list:
writer.writerow([
line_item.tag_pool_name, line_item.tag_name,
line_item.message_direction, line_item.total_cost])
return response | c5a475086ee4a75fa76efae9cdf7bd185c8aa78a | 3,083 |
def get_proton_gamma(energy):
"""Returns relativistic gamma for protons."""
return energy / PROTON_MASS | 049e92cf85824561a50f559ef54045865da2a69b | 3,084 |
def demandNameItem(listDb,phrase2,mot):
"""
    Put the database names of all items into one string for a database insert.
    listDb: list with the database name of each item
    phrase2: string accumulating the database names
    mot: database name of a single item
    return a string with the database names of all items separated by ','
"""
for i in range(len(listDb)):
mot = str(listDb[i])
phrase2 += mot
if not i == len(listDb)-1:
phrase2 += ','
return phrase2 | 67af8c68f0ba7cd401067e07c5de1cd25de9e66c | 3,085 |
def escape_yaml(data: str) -> str:
"""
    Jinja2 filter for escaping strings in YAML output.
    It escapes `$` (as `$$`).
"""
return data.replace("$", "$$") | d1142af7447ad372e6b0df5848beb28e0dd84e68 | 3,086 |
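# A small usage sketch registering the function as a Jinja2 filter
# (assumes Jinja2 is installed; the filter name is an assumption).
from jinja2 import Environment
env = Environment()
env.filters["escape_yaml"] = escape_yaml
print(env.from_string("{{ 'echo $HOME' | escape_yaml }}").render())  # echo $$HOME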
import numpy as np
def stokes_linear(theta):
"""Stokes vector for light polarized at angle theta from the horizontal plane."""
if np.isscalar(theta):
return np.array([1, np.cos(2*theta), np.sin(2*theta), 0])
theta = np.asarray(theta)
return np.array([np.ones_like(theta),
np.cos(2*theta),
np.sin(2*theta),
np.zeros_like(theta)]).T | a35f342ee32cdf54e432ee52e1faefbfb3b24382 | 3,087 |
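# Quick checks (horizontal and 45-degree polarization):
print(stokes_linear(0))          # [1. 1. 0. 0.]
print(stokes_linear(np.pi / 4))  # [1. 0. 1. 0.] (up to floating-point error)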
import re
from datetime import datetime
def validate_item_field(attr_value, attr_form):
"""
    :param attr_value: the item's attribute value
    :param attr_form: the attribute rule defined by the item's category
:return:
"""
if not isinstance(attr_form, dict):
return -1, {"error": "attr_form is not a dict."}
required = attr_form.get('required')
if required == 'false':
return 0, {"msg": "success"}
field = attr_form.get('field')
if not field:
return -1, {"error": "field missed."}
if field == "string":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a string."}
if len(attr_value) < int(attr_form["min_length"]) or len(attr_value) > int(attr_form["max_length"]):
return -1, {"error": "invalid string length."}
if attr_form.get('valid_rule') == "none":
return 0, {"msg": "success"}
elif attr_form.get('valid_rule') == "IPaddress":
            pattern = re.compile(r'\d+\.\d+\.\d+\.\d+')  # IP-address matching could be made stricter
elif attr_form.get('valid_rule') == "email":
pattern = re.compile(r'^(\w)+(\.\w+)*@(\w)+((\.\w+)+)$')
elif attr_form.get('valid_rule') == "phone":
pattern = re.compile(r'^\d{11}$')
else:
return -1, {"error": "invalid valid_rule."}
match = pattern.match(attr_value)
if not match:
return -1, {"error": "did not match rule: %s" % attr_form.get('valid_rule')}
elif field == "text":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a string."}
if len(attr_value) < int(attr_form["min_length"]) or len(attr_value) > int(attr_form["max_length"]):
return -1, {"error": "invalid string length."}
elif field == "select":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a dict."}
if attr_value not in attr_form["choice"][1:-1].split("|"):
return -1, {"error": "invalid choice."}
elif field == "multiple_select":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a dict."}
for each in attr_value.split("|"):
if each not in attr_form["choice"][1:-1].split("|"):
return -1, {"error": "invalid choice."}
elif field == "integer":
if not isinstance(attr_value, int):
return -1, {"error": "attr_value is not a integer."}
if attr_value < int(attr_form["min_value"]) or attr_value > int(attr_form["max_value"]):
return -1, {"error": "invalid integer value."}
elif field == "datetime":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a string."}
try:
            date_object = datetime.strptime(attr_value, '%Y%m%d%H%M%S')
except ValueError:
return -1, {"error": "time data '%s' does not match format" % attr_value}
elif field == "reference":
if not isinstance(attr_value, str):
return -1, {"error": "attr_value is not a string."}
item_obj = Item.objects(id=attr_value)
if not item_obj:
return -1, {"error": "unknown item."}
if item_obj.category.id != attr_form["reference"]:
return -1, {"error": "wrong category."}
return 0, {"msg": "success"} | ac4687b576bb29707f55a2cb4627dc67ff07b2fa | 3,088 |
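# A minimal usage sketch; the rule dict below only contains the keys this validator reads,
# and its exact shape is an assumption based on the checks above.
rule = {"required": "true", "field": "string", "min_length": "1",
        "max_length": "64", "valid_rule": "email"}
print(validate_item_field("user@example.com", rule))  # (0, {'msg': 'success'})
print(validate_item_field("not-an-email", rule))      # (-1, {'error': 'did not match rule: email'})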
import cv2
import numpy as np
# random_colors() and apply_mask() are visualization helpers defined elsewhere in this module.
def display_instances(image, boxes, masks, ids, names, scores):
    """
    take the image and results and apply the mask, box, and label
    """
n_instances = boxes.shape[0]
colors = random_colors(n_instances)
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i, color in enumerate(colors):
            # we want the colours to be in only one color: SIFR orange ff5722
# color = (255, 87, 34)
if not np.any(boxes[i]):
continue
y1, x1, y2, x2 = boxes[i]
label = names[ids[i]]
score = scores[i] if scores is not None else None
caption = '{} {:.2f}'.format(label, score) if score else label
mask = masks[:, :, i]
image = apply_mask(image, mask, color)
image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
image = cv2.putText(
image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
)
return image | 4268d08a7e413a0558e2b0386cbd184ffaba05ba | 3,089 |
import json
import tensorflow as tf
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.io.gfile.GFile(input_file, "r") as reader:
input_data = json.load(reader)["data"]
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if FLAGS.train_version == "v2":
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
start_position = answer["answer_start"]
else:
start_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
paragraph_text=paragraph_text,
orig_answer_text=orig_answer_text,
start_position=start_position,
is_impossible=is_impossible)
examples.append(example)
return examples | 1c893c8f443bca9c707498650142ecd5262d619d | 3,090 |
import numpy as np
from decimal import Decimal
def qarange(start, end, step):
    """
    Build an inclusive, step-based list of values for cyclic measurement and control sweeps,
    using Decimal arithmetic to avoid floating-point step errors.
    :param start:
    :param end:
    :param step:
    :return: list of rounded floats
    """
if Decimal(str(end)) - Decimal(str(start)) < Decimal(str(step)) or step == 0:
return [start]
start_decimal = str(start)[::-1].find('.')
step_decimal = str(step)[::-1].find('.')
data_decimal = max([step_decimal, start_decimal])
if data_decimal == -1:
data_decimal = 0
data_number = int((Decimal(str(end)) - Decimal(str(start))) / Decimal(str(step)))
end_data = round(start + data_number * step, data_decimal)
data_np = np.linspace(start, end_data, data_number + 1)
data_list = [round(data, data_decimal) for data in data_np]
return data_list | 6e0331160f6501b4106c9e6379762f9c4bf87f1b | 3,092 |
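# Example sweeps:
print(qarange(0, 1, 0.2))   # [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
print(qarange(1, 2, 0.25))  # [1.0, 1.25, 1.5, 1.75, 2.0]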
def get_default_render_layer():
"""Returns the default render layer
:return:
"""
return pm.ls(type='renderLayer')[0].defaultRenderLayer() | b134b52bf35a46c10460ab612b14fcea44895a45 | 3,093 |
def translation(shift):
"""Translation Matrix for 2D"""
return np.asarray(planar.Affine.translation(shift)).reshape(3, 3) | 2b77265545194cabfc44728dbc7c5c95d808da38 | 3,094 |
import numpy as np
def pad(adjacency_matrices, size):
    """Pads adjacency matrices to the desired size
    This will pad the adjacency matrices to the specified size, appending
    zeros as required. The output adjacency matrices will all be of size
    'size' x 'size'.
    Args:
        adjacency_matrices: The input list of adjacency matrices.
        size: The desired dimension of the output matrices.
    Returns:
        The resulting list of adjacency matrices.
"""
padding = size - adjacency_matrices.shape[1]
return np.pad(adjacency_matrices,
[(0, 0), (0, padding), (0, padding)],
mode='constant') | dc015eb4dd41dcf3f88ef6317dab2e3f57709453 | 3,095 |
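# Example: pad a batch of 2x2 adjacency matrices up to 5x5.
batch = np.ones((3, 2, 2))
print(pad(batch, 5).shape)  # (3, 5, 5)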
def mapping_activities_from_log(log, name_of_activity):
"""
Returns mapping activities of activities.
:param name_of_activity:
:param log:
:return: mapping
"""
mapping_activities = dict()
unique_activities = unique_activities_from_log(log, name_of_activity)
for index, activity in enumerate(unique_activities):
mapping_activities[activity] = index
return mapping_activities | 82fc23f08e9ae3629c654a5c04bcfcecb76a8cb3 | 3,096 |
import numpy as np
def labels_to_1hotmatrix(labels, dtype=int):
"""
Maps restricted growth string to a one-hot flag matrix. The input and
the output are equivalent representations of a partition of a set of
    n elements.
labels: restricted growth string: n-vector with entries in {0,...,n-1}.
The first entry is 0. Other entries cannot exceed any previous
entry by more than 1.
dtype: optional, default=int. Element data type for returned matrix. bool
or float can also be used.
Returns (m,n) matrix, with 0/1 entries, where m is the number of blocks in
            the partition and n is the number of elements in the partitioned set.
Columns are one-hot. If return_matrix[i,j], then element j is in
block i.
"""
m = 1 + labels.max()
B = np.arange(m).reshape(-1,1) == labels
return B.astype(dtype,copy=False) | eef80548e340477bf6881d0d14e434e0ee2f44da | 3,098 |
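# Example: the partition {0, 1} | {2, 4} | {3} encoded as a restricted growth string.
labels = np.array([0, 0, 1, 2, 1])
print(labels_to_1hotmatrix(labels))
# [[1 1 0 0 0]
#  [0 0 1 0 1]
#  [0 0 0 1 0]]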
import torch as th
# `select` below is assumed to be a helper defined elsewhere in this module (it filters the
# logits/targets and returns entity counts and type indicators), not the stdlib `select` module.
def recall(logits, target, topk=[1,5,10], typeN=8):
"""Compute top K recalls of a batch.
Args:
logits (B x max_entities, B x max_entities x max_rois):
target (B x max_entities, B x max_entities x max_rois):
topk: top k recalls to compute
Returns:
N: number of entities in the batch
TPs: topk true positives in the batch
bound: max number of groundable entities
"""
logits, target, N, types = select(logits, target)
topk = [topk] if isinstance(topk, int) else sorted(topk)
TPs = [0] * len(topk)
bound = target.max(-1, False)[0].sum().item() # at least one detected
typeTPs = th.zeros(typeN, device=types.device)
typeN = th.zeros_like(typeTPs)
#print("target entity type count: ", types.shape, types.sum(dim=0), target.shape)
if max(topk) == 1:
top1 = th.argmax(logits, dim=1)
one_hots = th.zeros_like(target)
one_hots.scatter_(1, top1.view(-1, 1), 1)
TPs = (one_hots * target).sum().item()
hits = (one_hots * target).sum(dim=1) >= 1
typeTPs += types[hits].sum(dim=0)
typeN += types.sum(dim=0)
else:
logits = th.sort(logits, 1, descending=True)[1]
for i, k in enumerate(topk):
one_hots = th.zeros_like(target)
one_hots.scatter_(1, logits[:, :k], 1)
TPs[i] = ((one_hots * target).sum(dim=1) >= 1).float().sum().item() # hit if at least one matched
if i == 0:
hits = (one_hots * target).sum(dim=1) >= 1
typeTPs += types[hits].sum(dim=0)
typeN += types.sum(dim=0)
#print(TPs, N)
#print(typeTPs)
#print(typeN)
return N, th.Tensor(TPs + [bound]), (typeTPs.cpu(), typeN.cpu()) | ea3ec996808e25566e5bd3dd33f1a56232e5ba7a | 3,099 |
def n(request) -> int:
"""A test fixture enumerate values for `n`."""
return request.param | faec9637483670bec5d2bc687f2ee03d8c3839ea | 3,100 |
def decode(ciphered_text):
"""
Decodes the ciphered text into human readable text.
Returns a string.
"""
text = ciphered_text.replace(' ', '') # We remove all whitespaces
return ''.join([decode_map[x] if decode_map.get(x) else x for x in text]) | 717e837a4750d4c281e2ca635e141f40cf1e30ee | 3,101 |
from datetime import datetime
def parse(s):
""" Date parsing tool.
    Changing the formats here changes date parsing across the whole application.
"""
formats = ['%Y-%m-%dT%H:%M:%S.%fZ','%d/%m/%Y %H:%M:%S','%d/%m/%Y%H:%M:%S', '%d/%m/%Y','%H:%M:%S']
d = None
for format in formats:
try:
d = datetime.strptime(s, format)
break
except ValueError:
pass
return d | c665dd91a03a6d9876b8c36a46699b813c540cea | 3,102 |
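# Examples of the accepted formats:
print(parse('25/12/2020 10:30:00'))  # 2020-12-25 10:30:00
print(parse('14:45:00'))             # 1900-01-01 14:45:00
print(parse('not a date'))           # None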
def openstack(request):
""" Context processor necessary for OpenStack Dashboard functionality.
The following variables are added to the request context:
``authorized_tenants``
A list of tenant objects which the current user has access to.
``regions``
A dictionary containing information about region support, the current
region, and available regions.
"""
context = {}
# Auth/Keystone context
context.setdefault('authorized_tenants', [])
current_dash = request.horizon['dashboard']
needs_tenants = getattr(current_dash, 'supports_tenants', False)
if request.user.is_authenticated() and needs_tenants:
context['authorized_tenants'] = request.user.authorized_tenants
# Region context/support
available_regions = getattr(settings, 'AVAILABLE_REGIONS', [])
regions = {'support': len(available_regions) > 1,
'current': {'endpoint': request.session.get('region_endpoint'),
'name': request.session.get('region_name')},
'available': [{'endpoint': region[0], 'name':region[1]} for
region in available_regions]}
context['regions'] = regions
context['cluster'] = {'title': "Cluster"}
return context | c914beb55a8609f2c363ac5e070f5531d7ce6abc | 3,103 |
def get_align_mismatch_pairs(align, ref_genome_dict=None) -> list:
"""input a pysam AlignedSegment object
Args:
align (pysam.AlignedSeqment object): pysam.AlignedSeqment object
ref_genome_dict (dict, optional): returned dict from load_reference_fasta_as_dict(). Defaults to None.
Returns:
list/None:
it returns mismatch_pair_list, just like [ref_index, align_index, ref_base, align_base];
and the "ref_index" is the same coordinate with UCSC genome browser;
When NM == 0, it returns None.
"""
# No mismatch
try:
if align.get_tag("NM") == 0:
return None
except:
return None
MD_tag_state = align.has_tag("MD")
if MD_tag_state:
# parse softclip, insertion and deletion
info_index_list = []
accu_index = 0
for cigar_type, cigar_len in align.cigartuples:
if cigar_type == 1 or cigar_type == 4:
info_index_list.append((accu_index + 1, cigar_len))
elif cigar_type == 2:
info_index_list.append((accu_index + 1, -cigar_len))
accu_index += cigar_len
# parse MD tag
mismatch_pair_list = []
cur_base = ""
cur_index = 0
bases = align.get_tag("MD")
i = 0
while i < len(bases):
base = bases[i]
if base.isdigit():
cur_base += base
i += 1
else:
cur_index += int(cur_base)
cur_base = ""
if base == "^":
i += 1
del_str = ""
while (bases[i].isalpha()) and (i < len(bases)):
del_str += bases[i]
i += 1
cur_index += len(del_str)
del_str = ""
elif base.isalpha():
cur_index += 1
ref_base = base
i += 1
# add into list
fix_index = cur_index + back_indel_shift(info_index_list, cur_index)
if fix_index < len(align.query_sequence):
mismatch_pair_list.append(
[
cur_index + align.reference_start,
cur_index - 1,
ref_base,
align.query_sequence[fix_index - 1],
]
)
else:
return None
return mismatch_pair_list
else:
mismatch_pair_list = []
for align_idx, ref_idx in align.get_aligned_pairs():
if (align_idx is not None) and (ref_idx is not None):
align_base = align.query_sequence[align_idx]
ref_base = ref_genome_dict[align.reference_name][ref_idx]
if align_base != ref_base:
mismatch_pair_list.append(
[ref_idx + 1, align_idx, ref_base, align_base]
)
return mismatch_pair_list | 79886dbbdc764e115a72728060faaf155f3fea7a | 3,104 |
def get_int(name, default=None):
"""
:type name: str
:type default: int
:rtype: int
"""
return int(get_parameter(name, default)) | 4a07f1286e54fd9e55b97868af1aa1bae595b795 | 3,105 |
def run_test(series: pd.Series, randtest_name, **kwargs) -> TestResult:
"""Run a statistical test on RNG output
Parameters
----------
series : ``Series``
Output of the RNG being tested
randtest_name : ``str``
Name of statistical test
**kwargs
Keyword arguments to pass to statistical test
Returns
-------
result : ``TestResult`` or ``MultiTestResult``
Data containers of the test's result(s).
Raises
------
TestNotFoundError
If `randtest_name` does not match any available statistical tests
TestError
Errors raised when running ``randtest_name``
"""
try:
func = getattr(_randtests, randtest_name)
except AttributeError as e:
raise TestNotFoundError() from e
with Progress(*columns, console=console, transient=True) as progress:
abbrv = f_randtest_abbreviations[randtest_name]
task = progress.add_task(abbrv)
try:
result = func(series, ctx=(progress, task), **kwargs)
color = "yellow" if result.failures else "green"
print_randtest_name(randtest_name, color)
console.print(result)
return result
except TestError as e:
print_randtest_name(randtest_name, "red")
print_error(e)
raise e | 045ebe4756c24672cffdb3c43d6f0158809967d1 | 3,106 |
def radians(x):
"""
Convert degrees to radians
"""
if isinstance(x, UncertainFunction):
mcpts = np.radians(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.radians(x) | 49facfcfbeac91e9ac40b91ed8dc43b25ce157a6 | 3,107 |
def screen_poisson_objective(pp_image, hp_w,hp_b, data):
"""Objective function."""
return (stencil_residual(pp_image, hp_w,hp_b, data) ** 2).sum() | 43a1ff6594cd493a0122e8c1305b84f25550fb59 | 3,108 |
def learn(request, artwork_genre=None):
"""
Returns an art genre.
"""
_genre = get_object_or_404(Genre, slug=artwork_genre)
return render_to_response('t_learn.html',
{'genre': _genre},
context_instance=RequestContext(request)) | fe4e4477e7d2764ac41a58967ad1bc5296f10715 | 3,109 |
def plot_column(path: str, column: str, outpath: str = ""):
"""Plot a single column and save to file."""
df = to_df(path)
col_df = df.set_index(["name", "datetime"])[column].unstack("name")
ax = col_df.plot(grid=True)
ax.set_xlabel("Time")
ax.set_ylabel(LABEL_MAP[column])
if outpath:
ax.get_figure().savefig(outpath, bbox_inches="tight")
return ax | 58d5033a3bb86986e30582bf3fedf36842aeded9 | 3,110 |
def get_loader():
"""Returns torch.utils.data.DataLoader for custom Pypipes dataset. """
data_loader = None
return data_loader | 0e3b0107e355169049dbdfa45cba9abdf479dcbe | 3,111 |
def attention_decoder_cell_fn(decoder_rnn_cell, memories, attention_type,
decoder_type, decoder_num_units, decoder_dropout,
mode, batch_size, beam_width=1, decoder_initial_state=None, reuse=False):
"""Create an decoder cell with attention. It takes decoder cell as argument
Args:
- memories: (encoder_outputs, encoder_state, input_length) tuple
- attention_type: "luong", "bahdanau"
- mode: "train", "test"
"""
if mode == "train":
beam_width = 1
with tf.variable_scope('attention_decoder_cell', reuse=reuse):
attention_mechanisms = []
attention_layers = []
for idx, (encoder_outputs, encoder_state, input_length) in enumerate(memories):
# Tile batch for beam search, if beam_width == 1, then nothing happens
encoder_outputs, input_length, encoder_state, beam_batch_size = prepare_beam_search_decoder_inputs(
beam_width, encoder_outputs, input_length, encoder_state, batch_size)
# Temporal attention along time step
if attention_type == "luong":
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
decoder_num_units, memory=encoder_outputs, memory_sequence_length=input_length)
elif attention_type == "bahdanau":
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
decoder_num_units, memory=encoder_outputs, memory_sequence_length=input_length)
attention_layer = tf.layers.Dense(decoder_num_units, name="{}th_attention".format(idx),
use_bias=False, dtype=tf.float32, _reuse=reuse)
attention_mechanisms.append(attention_mechanism)
attention_layers.append(attention_layer)
#decoder_rnn_cell = single_rnn_cell(decoder_type, decoder_num_units, decoder_dropout, mode, reuse=reuse)
attention_rnn_cell = tf.contrib.seq2seq.AttentionWrapper(
decoder_rnn_cell, attention_mechanisms, attention_layer=attention_layers,
initial_cell_state=None, name="AttentionWrapper")
# Set decoder initial state
initial_state = attention_rnn_cell.zero_state(dtype=tf.float32, batch_size=beam_batch_size)
if decoder_initial_state:
decoder_initial_state = tf.contrib.seq2seq.tile_batch(decoder_initial_state, multiplier=beam_width)
initial_state = initial_state.clone(cell_state=decoder_initial_state)
return attention_rnn_cell, initial_state | ffd0199d7c0bf9f9bfb5d4a6e1d7eac4767e84d3 | 3,112 |
async def post_user_income(user: str, income: Income):
"""
    This function creates a new income in the DB. It checks whether
    the user exists and returns a message if no user exists.
    Otherwise, it creates a new document in the DB with the user's
    new Income.
    user: the user's uuid.
    income: Income (see the pydantic model) parameters to save in DB.
"""
user_bool = user_exist(user)
if user_bool:
income_created = await create_new_income(income)
if income_created:
return {"Message": "sucesful", "payload": "Income creates sucessfully."}
else:
return {"Message": "error", "payload": "There was an error creating Income."}
else:
return {"Message": "error", "payload": "User not found."} | 3d155d72fc1e00f45ca93b804d25d1051e7c47ab | 3,113 |
from typing import Optional
import jax
import jax.numpy as jnp
def bind_rng_to_host_device(rng: jnp.ndarray,
axis_name: str,
bind_to: Optional[str] = None) -> jnp.ndarray:
"""Binds a rng to the host/device we are on.
Must be called from within a pmapped function. Note that when binding to
"device", we also bind the rng to hosts, as we fold_in the rng with axis_index
which is unique for devices across all hosts.
Args:
rng: A jax.random.PRNGKey.
axis_name: The axis of the devices we are binding rng across.
bind_to: Must be one of the 'host' or 'device'. None means no binding.
Returns:
jax.random.PRNGKey specialized to host/device.
"""
if bind_to is None:
return rng
if bind_to == 'host':
return jax.random.fold_in(rng, jax.process_index())
elif bind_to == 'device':
return jax.random.fold_in(rng, jax.lax.axis_index(axis_name))
else:
raise ValueError(
"`bind_to` should be one of the `[None, 'host', 'device']`") | a7b50e6be3fd88f6a1341e0e43017baea305c31c | 3,114 |
def get_child_ids(pid, models, myself=True, ids: set = None) -> set:
"""
    Get the set of child ids for the given models model.
    :param pid: id of the models model class instance
    :param models: the models model object
    :param myself: whether to include pid itself
    :param ids: set of all ids (defaults to None)
    :return: ids (the set of all ids)
"""
if ids is None:
ids = set()
queryset = models.objects.filter(pid=pid)
for instance in queryset:
ids.add(instance.id)
get_child_ids(instance.id, models, myself, ids)
if myself:
ids.add(pid)
return ids | b5d9b10497eada8b3cafc32f4260ace091bbc0bf | 3,115 |
def get_tenant_id(khoros_object, community_details=None):
"""This function retrieves the tenant ID of the environment.
.. versionadded:: 2.1.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros]
:param community_details: Dictionary containing community details (optional)
:type community_details: dict, None
:returns: The tenant ID in string format
:raises: :py:exc:`khoros.errors.exceptions.GETRequestError`
"""
return get_community_field(khoros_object, 'id', community_details) | 7fe29990d7b6b99e4b677edfdb1cd32ca785654a | 3,116 |
import tokenize
def obfuscatable_variable(tokens, index):
"""
Given a list of *tokens* and an *index* (representing the current position),
returns the token string if it is a variable name that can be safely
obfuscated.
Returns '__skipline__' if the rest of the tokens on this line should be skipped.
Returns '__skipnext__' if the next token should be skipped.
    If *ignore_length* were ``True``, even variables that are already a single
    character would be obfuscated (typically only used with the ``--nonlatin``
    option); note that this length check is currently commented out in the body below.
"""
tok = tokens[index]
token_type = tok[0]
token_string = tok[1]
line = tok[4]
if token_type != tokenize.NAME:
return None # Skip this token
if token_string in analyze.storageLocation_scope_words:
return None # Skip this token
if token_string == "pragma" or token_string == "import":
return "__skipline__"
if token_string == '_':
return None
# skipnext = ['(', ')', '{', '}', ';']
# if token_string in skipnext:
# return '__skipnext__'
if index > 0:
prev_tok = tokens[index-1]
else: # Pretend it's a newline (for simplicity)
prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
prev_tok_type = prev_tok[0]
prev_tok_string = prev_tok[1]
if index > 1:
pre_prev_tok = tokens[index-2]
else: # Pretend it's a newline (for simplicity)
pre_prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
pre_prev_tok_type = pre_prev_tok[0]
pre_prev_tok_string = pre_prev_tok[1]
try:
next_tok = tokens[index+1]
except IndexError: # Pretend it's a newline
next_tok = (54, '\n', (1, 1), (1, 2), '#\n')
next_tok_string = next_tok[1]
# if token_string == "=":
# return '__skipline__'
if prev_tok_string == '.' and pre_prev_tok_string in ('msg', 'abi', 'block', 'tx'):
return None
if prev_tok_string == '.' and token_string in ('balance', 'send', 'transfer'):
return None
#if token_string.startswith('__'):
# return None
if next_tok_string == ".":
if token_string in analyze.global_variable:
return None
#if prev_tok_string == 'import':
# return '__skipline__'
# if prev_tok_string == ".":
# return '__skipnext__'
if prev_tok_string in analyze.type_words: # declare variable
return token_string
if prev_tok_string in analyze.storageLocation_scope_words and pre_prev_tok_string in analyze.type_words: # declare variable
return token_string
if token_string[0:5] == 'fixed' or token_string[0:6] =='ufixed':
return None
if prev_tok_string[0:5] =='fixed' or prev_tok_string[0:6] =='ufixed': # declare variable
return token_string
if pre_prev_tok_string[0:5] =='fixed' or pre_prev_tok_string[0:6] =='ufixed':
if prev_tok_string in analyze.storageLocation_scope_words: # declare variable
return token_string
# if token_string == ']' and prev_tok_string == '[':
# if next_tok_string in analyze.storageLocation_scope_words:
# return '__skipnext__'
# if prev_tok_string == "for":
# if len(token_string) > 2:
# return token_string
if token_string in analyze.reserved_words:
return None
# if token_string in keyword_args.keys():
# return None
# if prev_tok_type != tokenize.INDENT and next_tok_string != '=':
# return '__skipline__'
# if not ignore_length:
# if len(token_string) < 3:
# return None
# if token_string in RESERVED_WORDS:
# return None
return token_string | 487bfd926b77260980875496991a7bcc2bc8df3f | 3,117 |
import csv
def concat_data(labelsfile, notes_file):
"""
INPUTS:
labelsfile: sorted by hadm id, contains one label per line
notes_file: sorted by hadm id, contains one note per line
"""
with open(labelsfile, 'r') as lf:
print("CONCATENATING")
with open(notes_file, 'r') as notesfile:
outfilename = '%s/notes_labeled.csv' % MIMIC_3_DIR
with open(outfilename, 'w') as outfile:
w = csv.writer(outfile)
w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])
labels_gen = next_labels(lf)
notes_gen = next_notes(notesfile)
for i, (subj_id, text, hadm_id) in enumerate(notes_gen):
if i % 10000 == 0:
print(str(i) + " done")
cur_subj, cur_labels, cur_hadm = next(labels_gen)
if cur_hadm == hadm_id:
w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)])
else:
print("couldn't find matching hadm_id. data is probably not sorted correctly")
break
return outfilename | b6403c4ec7797cd7d08e01e7b9a9365708bdee6f | 3,118 |
def replace_text_comment(comments, new_text):
"""Replace "# text = " comment (if any) with one using new_text instead."""
new_text = new_text.replace('\n', ' ') # newlines cannot be represented
new_text = new_text.strip(' ')
new_comments, replaced = [], False
for comment in comments:
if comment.startswith('# text ='):
new_comments.append('# text = {}'.format(new_text))
replaced = True
else:
new_comments.append(comment)
if not replaced:
new_comments.append('# text = {}'.format(new_text))
return new_comments | 4b1284966eb02ca2a6fd80f8f639adcb4f1fde6c | 3,119 |
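# Example with CoNLL-U style sentence comments:
comments = ['# sent_id = 1', '# text = Old sentence.']
print(replace_text_comment(comments, 'New sentence.\n'))
# ['# sent_id = 1', '# text = New sentence.']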
def init_show_booking_loader(response, item=None):
"""
init ShowingBookingLoader with optional ShowingBooking item
"""
loader = ShowingBookingLoader(response=response)
if item:
loader.add_value(None, item)
return loader | 2d9c790e487ab7009c70e83a8ecb5d5e93732ff7 | 3,120 |
def get_dpifac():
"""get user dpi, source: node_wrangler.py"""
prefs = bpy.context.preferences.system
return prefs.dpi * prefs.pixel_size / 72 | dc598635eb8fdf0b3fe8b6acc3f497a65a18f099 | 3,121 |
from typing import List
def seq_row(
repeats: int = 1,
trigger: str = Trigger.IMMEDIATE,
position: int = 0,
half_duration: int = MIN_PULSE,
live: int = 0,
dead: int = 0,
) -> List:
"""Create a 50% duty cycle pulse with phase1 having given live/dead values"""
row = [
repeats,
trigger,
position,
# Phase1
half_duration,
live,
dead,
0,
0,
0,
0,
# Phase2
half_duration,
0,
0,
0,
0,
0,
0,
]
return row | 5d331f1f67f5799165f3966249e199ca43e0ec27 | 3,122 |
import numpy as np
def call_nelder_mead_method(
f,
verts,
x_tolerance=1e-6,
y_tolerance=1e-6,
computational_budget=1000,
f_difference=10,
calls=0,
terminate_criterion=terminate_criterion_x,
alpha=1,
gamma=2,
rho=0.5,
sigma=0.5,
values=[],
):
"""Return an approximation of a local optimum.
Args:
        f: a real-valued n-dimensional function
        verts: an array with n+1 n-dimensional vectors
        f_difference: the difference between the last and second-to-last best approximation
        calls: the number of evaluations of f so far
        terminate_criterion: the termination criterion we are using (a function that returns a boolean)
        x_tolerance: A positive real number
        y_tolerance: A positive real number
        computational_budget: An integer: the maximum number of function evaluations
        alpha, gamma, rho, sigma: positive real numbers that influence how the algorithm behaves
values: previously evaluated function values
Returns:
out_1: an approximation of a local optimum of the function
out_2: number of evaluations of f
"""
# Pseudo code can be found on: https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method
# 0 Order
if values == []:
values = np.array([f(vert) for vert in verts])
calls = calls + len(verts)
indexes = np.argsort(values)
x_0 = np.array([0, 0])
for index in indexes[:-1]:
x_0 = x_0 + verts[index]
x_0 = x_0 / (len(verts) - 1)
x_r = x_0 + alpha * (x_0 - verts[indexes[-1]])
x_e = x_0 + gamma * (x_r - x_0)
x_c = x_0 + rho * (verts[indexes[-1]] - x_0)
# 1 Termination
if (
terminate_criterion(verts, f_difference, x_tolerance, y_tolerance)
or f_difference < y_tolerance
or calls >= computational_budget
):
return [np.array(np.round(verts[indexes[0]])), calls]
# 3 Reflection
f_x_r = f(x_r)
calls += 1
if values[indexes[0]] <= f_x_r:
if f_x_r < values[indexes[-2]]:
f_difference = abs(f_x_r - values[indexes[0]])
values[indexes[-1]] = f_x_r
return call_nelder_mead_method(
f,
nm_replace_final(verts, indexes, x_r),
x_tolerance,
y_tolerance,
computational_budget,
f_difference,
calls,
terminate_criterion,
alpha,
gamma,
rho,
sigma,
values,
)
# 4 Expansion
if f_x_r < values[indexes[0]]:
# x_e = x_0 + gamma * (x_r - x_0)
f_x_e = f(x_e)
calls += 1
if f_x_e < f_x_r:
f_difference = abs(f_x_e - values[indexes[0]])
values[indexes[-1]] = f_x_e
return call_nelder_mead_method(
f,
nm_replace_final(verts, indexes, x_e),
x_tolerance,
y_tolerance,
computational_budget,
f_difference,
calls,
terminate_criterion,
alpha,
gamma,
rho,
sigma,
values,
)
else:
f_difference = abs(f_x_r - values[indexes[0]])
values[indexes[-1]] = f_x_r
return call_nelder_mead_method(
f,
nm_replace_final(verts, indexes, x_r),
x_tolerance,
y_tolerance,
computational_budget,
f_difference,
calls,
terminate_criterion,
alpha,
gamma,
rho,
sigma,
values,
)
# 5 Contraction
# x_c = x_0 + rho * (verts[indexes[-1]] - x_0)
f_x_c = f(x_c)
if f_x_c < f(verts[indexes[-1]]):
calls += 1
f_difference = abs(f_x_c - values[indexes[0]])
values[indexes[-1]] = f_x_c
return call_nelder_mead_method(
f,
nm_replace_final(verts, indexes, x_c),
x_tolerance,
y_tolerance,
computational_budget,
f_difference,
calls,
terminate_criterion,
alpha,
gamma,
rho,
sigma,
values,
)
# 6 Shrink
return call_nelder_mead_method(
f,
nm_shrink(verts, indexes, sigma),
x_tolerance,
y_tolerance,
computational_budget,
f_difference,
calls,
terminate_criterion,
alpha,
gamma,
rho,
sigma,
values,
) | 02e16b477a977239a8dc256150cdbc983235f81c | 3,123 |
def get_auth_claims_from_request(request):
"""Authenticates the request and returns claims about its authorizer.
Oppia specifically expects the request to have a Subject Identifier for the
user (Claim Name: 'sub'), and an optional custom claim for super-admin users
(Claim Name: 'role').
Args:
request: webapp2.Request. The HTTP request to authenticate.
Returns:
AuthClaims|None. Claims about the currently signed in user. If no user
is signed in, then returns None.
"""
claims = _verify_id_token(request.headers.get('Authorization', ''))
auth_id = claims.get('sub', None)
email = claims.get('email', None)
role_is_super_admin = (
claims.get('role', None) == feconf.FIREBASE_ROLE_SUPER_ADMIN)
if auth_id:
return auth_domain.AuthClaims(auth_id, email, role_is_super_admin)
return None | 1e2aaf4f26b11defea65796331f160fd22267cf2 | 3,124 |
import io
def decrypt(**kwargs):
"""
Returns a CryptoResult containing decrypted bytes.
This function requires that 'data' is in the format generated by the
encrypt functionality in this SDK as well as other OCI SDKs that support
client side encryption.
Note this function cannot decrypt data encrypted by the KMS 'encrypt' APIs.
:param oci.encryption.MasterKeyProvider master_key_provider: (required)
A MasterKeyProvider to use for decrypting the data.
:param bytes data: (required)
The data to be decrypted. If a string is passed, it will be converted to
bytes using UTF-8 encoding. Note that this conversion will require creating
a copy of the data which may be undesirable for large payloads.
:rtype: oci.encryption.CryptoResult
"""
_ensure_required_kwargs_present(required_kwargs=['master_key_provider', 'data'], provided_kwargs=kwargs)
    # leaves input alone if it is already bytes, otherwise converts to bytes using default encoding
# this is for convenience of the caller, but will create a copy of the data if it is not already a
# bytes-like object
data = convert_to_bytes(kwargs.get('data'))
# as long as we only read from the stream, BytesIO does not create a copy of the data so this doesn't
# add memory overhead
with io.BytesIO(data) as stream_to_decrypt:
decryptor = StreamDecryptor(
stream_to_decrypt=stream_to_decrypt, master_key_provider=kwargs.get('master_key_provider')
)
return CryptoResult(data=decryptor.read(), encryption_context=decryptor.get_encryption_context()) | ba27e39abd0c72db5acd2530b7a4128b3f073dc6 | 3,125 |