content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def mean_absolute_deviation(curve1: np.ndarray, curve2: np.ndarray, *args):
    """Calculate the mean absolute deviation between two curves."""
diff = np.abs(curve1 - curve2)
return np.mean(diff) | 687fda24399bc71e1d1f0ca8b880a0eafc9a1a7d | 3,653,590 |
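A quick usage sketch for the function above (assuming numpy is imported as np, as the body implies):

import numpy as np

curve_a = np.array([1.0, 2.0, 3.0])
curve_b = np.array([1.5, 2.0, 2.0])
# |differences| are 0.5, 0.0, 1.0, so the mean absolute deviation is 0.5
print(mean_absolute_deviation(curve_a, curve_b))  # 0.5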
def get_segtype_hops(seg_type, connector=None): # pragma: no cover
"""
Request list of segments by type used to construct paths.
:param seg_type: The type of PathSegmentType requested.
:returns: List of SCIONDSegTypeHopReplyEntry objects.
"""
global _connector
if not connector:
connector = _connector
if not connector:
raise SCIONDLibNotInitializedError
return connector.get_segtype_hops(seg_type) | 525e9feb6ea5b0692ef67ec20aa967a60f65519b | 3,653,591 |
def build_moderation_channel_embed(ctx, channel, action):
"""
    Builds a moderation embed which displays some information about the mod channel being created/removed
:param ctx: The discord context
:param channel: The channel to be created/removed
:param action: either "Added" or "Removed" to tell the user what happened to the mod channel
:return embed: The moderation embed to be sent to the user
"""
embed = create_default_embed(ctx)
embed.title = "Koala Moderation - Mod Channel " + action
embed.add_field(name="Channel Name", value=channel.mention)
embed.add_field(name="Channel ID", value=channel.id)
return embed | de0f32a29019a05125f2389286140bc6dcfff198 | 3,653,592 |
def print_settings(settings):
    """
    Return pretty-printed settings for the current Quantas equation of state (EoS) fitting run.
    Returns
    -------
    text: str
        Pretty-printed settings for the current Quantas run.
    """
text = '\nCalculator: Equation of state (EoS) fitting\n'
text += '\nMeasurement units\n'
text += '-------------------------------------\n'
text += ' - {:12} {}\n'.format('pressure:', settings['pressure_unit'])
    # note: the settings key keeps its original 'lenght_unit' spelling
    text += ' - {:12} {}\n'.format('length:', settings['lenght_unit'])
return text | 4e64353e0c519a26ac210de1df39ce09fbf54045 | 3,653,593 |
def run_analysis(output, stimtype="gabors", comp="surp", ctrl=False,
CI=0.95, alg="sklearn", parallel=False, all_scores_df=None):
"""
run_analysis(output)
Calculates statistics on scores from runs for each specific analysis
criteria and saves them in the summary scores dataframe.
Overwrites any existing dataframe of analysed data.
Required args:
- output (str): general directory in which summary dataframe is saved.
Optional args:
- stimtype (str) : stimulus type
default: "gabors"
- comp (str) : type of comparison
default: "surp"
- ctrl (bool) : if True, control comparisons are analysed
default: False
- CI (num) : CI for shuffled data
default: 0.95
- alg (str) : algorithm used to run logistic regression
("sklearn" or "pytorch")
default: "sklearn"
- parallel (bool) : if True, run information is collected in
parallel
default: False
- all_scores_df (pd df): already collated scores dataframe
default: None
Returns:
- scores_summ (pd DataFrame): dataframe with analysed scores
"""
if all_scores_df is None:
all_scores_df = run_collate(output, stimtype, comp, ctrl, alg, parallel)
stats = "mean" # across runs for shuffle CIs
if all_scores_df is None:
return
scores_summ = pd.DataFrame()
ext_test = sess_str_util.ext_test_str(
("q1v4" in output), ("rvs" in output), comp)
if ext_test == "":
ext_test = None
# common labels
comm_labs = gen_util.remove_if(info_dict(),
["uniqueid", "run_n", "epoch_n"])
# get all unique comb of labels
for acr_shuff in [False, True]:
if not acr_shuff:
df_unique = all_scores_df[comm_labs].drop_duplicates()
else:
df_unique = all_scores_df[gen_util.remove_if(comm_labs,
["mouse_n", "n_rois"])].drop_duplicates()
for _, df_row in df_unique.iterrows():
if acr_shuff and not df_row["shuffle"]:
# second pass, only shuffle
continue
vals = [df_row[x] for x in comm_labs]
curr_lines = gen_util.get_df_vals(all_scores_df, comm_labs, vals)
# assign values to current line in summary df
curr_idx = len(scores_summ)
gen_util.set_df_vals(scores_summ, curr_idx, comm_labs, vals)
# calculate stats
scores_summ = calc_stats(scores_summ, curr_lines, curr_idx, CI,
ext_test, stats=stats, shuffle=acr_shuff)
savename = get_df_name("analyse", stimtype, comp, ctrl, alg)
file_util.saveinfo(scores_summ, savename, output, overwrite=True)
return scores_summ | 5f6c44fcfc482c66ee318be3787419ee2e811962 | 3,653,594 |
from typing import List
from typing import Dict
def decrypt_ballots_with_all_guardians(
ballots: List[Dict], guardians: List[Dict], context: Dict
) -> Dict:
"""
Decrypt all ballots using the guardians.
Runs the decryption in batches, rather than all at once.
"""
ballots_per_batch = 2
decrypted_ballots: Dict = {}
for batch in batch_list(ballots, ballots_per_batch):
ballot_shares: Dict[str, List[Dict]] = {}
# Each guardian should decrypt their own shares independently...
for guardian in guardians:
response = guardian_api.decrypt_ballot_shares(batch, guardian, context)
shares: List[Dict] = response["shares"]
ballot_shares[guardian["id"]] = shares
# These shares are then gathered by the mediator and used to fully decrypt the ballots!
decrypted_batch = mediator_api.decrypt_ballots(batch, ballot_shares, context)
# The decrypted ballots are keyed by ballot ID. Merge them into the full dictionary.
decrypted_ballots = {**decrypted_ballots, **decrypted_batch}
return decrypted_ballots | 6aebf29e7d0b41fd3da23d6bbecf7e40c56e1c9f | 3,653,595 |
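The function above relies on a batch_list helper that is not shown in this snippet; a minimal sketch of what such a helper could look like (hypothetical, shown only to illustrate the batching step):

from typing import Iterator, List

def batch_list(items: List, batch_size: int) -> Iterator[List]:
    """Yield consecutive slices of at most batch_size elements."""
    for start in range(0, len(items), batch_size):
        yield items[start:start + batch_size]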
def getRealItemScenePos(item):
"""
Returns item's real position in scene space. Mostly for e.g. stranditems.
This will change as the root item is moved round the scene,
but should not change when zooming.
"""
view = pathview()
try:
vhitem = item.virtualHelixItem()
linepos = linecenter(item.line()) # StrandItem lines are drawn in the virtual-helix space.
except AttributeError:
# E.g. EndPointItems, caps, etc, has no VhItem, position should be in scene coordinates:
return item.scenePos()
# Should I map to scene space or maybe use pathrootitem, i.e. vhitem.mapToItem(pathroot(), *linepos) ?
# mapping to pathroot produces constant result independent of zoom and transform.
# mapping to scene produces variable results.
return vhitem.mapToScene(*linepos) | cef292e25cd99886841bb5aa5d521c8630284210 | 3,653,596 |
def get_default_upload_mode():
"""
Returns the string for the default upload mode
:return: Default upload mode string
"""
return api.MODE_DEFAULT | 003672d478dc5754ba8b62833d9c8706b482bd0f | 3,653,597 |
def remove_multi_whitespace(string_or_list):
""" Cleans redundant whitespace from extracted data """
if type(string_or_list) == str:
return ' '.join(string_or_list.split())
return [' '.join(string.split()) for string in string_or_list] | a284eb1ea685fb55afeefe78d863a716475a9182 | 3,653,598 |
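Example usage of the cleaner above:

print(remove_multi_whitespace("  too   many \t spaces "))    # 'too many spaces'
print(remove_multi_whitespace(["a  b", " c   d "]))           # ['a b', 'c d']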
def validate_board(board: list) -> bool:
"""
Checks if board fits the rules. If fits returns True, else False.
>>> validate_board(["**** ****","***1 ****","** 3****","* 4 1****",\
" 9 5 "," 6 83 *","3 1 **"," 8 2***"," 2 ****"])
False
"""
if check_rows(board) and\
check_columns(board) and\
check_color(board):
return True
return False | 8f6b2cdf9e7cecd456378a11b580b6a69d52a308 | 3,653,599 |
def get_H(m, n):
"""Calculate the distance of each point of the m, n matrix from the center"""
u = np.array([i if i <= m / 2 else m - i for i in range(m)],
dtype=np.float32)
    v = np.array([i if i <= n / 2 else n - i for i in range(n)],
                 dtype=np.float32)
v.shape = n, 1
return (u - m/2)**2 + (v - n/2)**2 | 23ea3f28816283c42f4722a6a5044772f2c0d2c3 | 3,653,600 |
def create_users(xml_filename, test_mode=False, verbose=False):
"""
Import OET cruise record XML file and create django auth users from the list of participants
    :param xml_filename: the name of the XML file
:return: the number of users created
"""
num_created = 0
cruise_record = xml2struct(xml_filename)
participant_list = cruise_record['oet:oetcruise']['r2r:cruise']['r2r:cruiseParticipants']['r2r:cruiseParticipant']
names = [participant['r2r:cruiseParticipantName']['text'] for participant in participant_list]
for name in names:
split_name = name.split()
first_name = split_name[0]
last_name = "".join(split_name[1:])
new_user = create_user(first_name, last_name, save=not test_mode, verbose=verbose)
if new_user:
            print('Created user', new_user.username, '(%s)' % name)
num_created += 1
return num_created | c15bf515f482b7b82bfa94e96708ec4d4caf96be | 3,653,601 |
import json
def writeJSONFile(filename,JSONDocument):
""" Writes a JSON document to a named file
Parameters
----------
filename : str
name of the file
JSONDocument : str
JSON document to write to the file
Returns
-------
True
"""
filename='data/'+filename
with open(filename, 'w') as outfile:
json.dump(JSONDocument, outfile)
return True | 4f20b42a5f38554589a7bb03039ba348e3b0bb15 | 3,653,602 |
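A usage sketch (note that the function prepends 'data/' to the filename, so that directory must already exist):

import os

os.makedirs('data', exist_ok=True)                  # make sure the target directory exists
writeJSONFile('example.json', {"name": "test"})     # written to data/example.json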
def read_readme():
"""Read README content.
If the README.rst file does not exist yet
(this is the case when not releasing)
only the short description is returned.
"""
try:
return local_file('README.rst')
except IOError:
return __doc__ | ed3c00a1f6e05072b59895efc93dd2380d590553 | 3,653,603 |
def get_data_loader(dataset, dataset_dir, batch_size, workers=8, is_training=False):
""" Create data loader. """
return data.DataLoader(
get_dataset(dataset, is_training=is_training, dataset_dir=dataset_dir),
batch_size=batch_size,
shuffle=is_training,
num_workers=workers,
pin_memory=True,
) | c7a126f37a78ef527a3e51136ffd9fbacbb5ddec | 3,653,604 |
def listwhom(detailed=False):
    """Return the list of currently available databases for covid19
data in PyCoA.
The first one is the default one.
If detailed=True, gives information location of each given database.
"""
try:
if int(detailed):
df = pd.DataFrame(get_db_list_dict())
df = df.T.reset_index()
df.index = df.index+1
df = df.rename(columns={'index':'Database',0: "WW/iso3",1:'Granularité',2:'WW/Name'})
return df
else:
return _db.get_available_database()
except:
raise CoaKeyError('Waiting for a boolean !') | a912113ee5f713522b5abfbdd8bc77cea54a5b10 | 3,653,605 |
def project(s):
"""Maps (x,y,z) coordinates to planar-simplex."""
# Is s an appropriate sequence or just a single point?
try:
return unzip(map(project_point, s))
except TypeError:
return project_point(s)
except IndexError: # for numpy arrays
return project_point(s) | 1039e29da4b1a7c733449354d507525d746fc389 | 3,653,606 |
def point_at_angle_on_ellipse(
phi: ArrayLike, coefficients: ArrayLike
) -> NDArray:
"""
Return the coordinates of the point at angle :math:`\\phi` in degrees on
the ellipse with given canonical form coefficients.
Parameters
----------
phi
Point at angle :math:`\\phi` in degrees to retrieve the coordinates
of.
coefficients
General form ellipse coefficients as follows: the center coordinates
:math:`x_c` and :math:`y_c`, semi-major axis length :math:`a_a`,
semi-minor axis length :math:`a_b` and rotation angle :math:`\\theta`
in degrees of its semi-major axis :math:`a_a`.
Returns
-------
:class:`numpy.ndarray`
Coordinates of the point at angle :math:`\\phi`
Examples
--------
>>> coefficients = np.array([0.5, 0.5, 2, 1, 45])
>>> point_at_angle_on_ellipse(45, coefficients) # doctest: +ELLIPSIS
array([ 1., 2.])
"""
phi = np.radians(phi)
x_c, y_c, a_a, a_b, theta = tsplit(coefficients)
theta = np.radians(theta)
cos_phi = np.cos(phi)
sin_phi = np.sin(phi)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
x = x_c + a_a * cos_theta * cos_phi - a_b * sin_theta * sin_phi
y = y_c + a_a * sin_theta * cos_phi + a_b * cos_theta * sin_phi
return tstack([x, y]) | 223e38a209280754538c5e1141317463ae4f4b98 | 3,653,607 |
def bmm(tensor1, tensor2):
"""
Performs a batch matrix-matrix product of this tensor
and tensor2. Both tensors must be 3D containing equal number
of matrices.
    If tensor1 is a (b x n x m) Tensor and tensor2 is a (b x m x p) Tensor,
Result will be a (b x n x p) Tensor.
Parameters
----------
tensor1 : TensorBase
The first operand in the bmm operation
tensor2 : TensorBase
The second operand in the bmm operation
Returns
-------
TensorBase:
Output Tensor; with bmm operation
"""
_ensure_tensorbase(tensor1)
_ensure_tensorbase(tensor2)
if tensor2.data.ndim != 3:
print("dimension of tensor2 is not 3")
elif tensor1.data.ndim != 3:
print("dimension of tensor1 is not 3")
elif tensor1.encrypted or tensor2.encrypted:
return NotImplemented
else:
out = np.matmul(tensor1.data, tensor2.data)
return TensorBase(out) | f3663c612024195cda85b11019423cdb71d75da4 | 3,653,608 |
def get_monotask_from_macrotask(monotask_type, macrotask):
""" Returns a Monotask of the specified type from the provided Macrotask. """
return next((monotask for monotask in macrotask.monotasks if isinstance(monotask, monotask_type))) | 46d4516327c89755eaa3ba6f6fa3503aae0c5bd9 | 3,653,609 |
def get_services_by_type(service_type, db_session):
# type: (Str, Session) -> Iterable[models.Service]
"""
Obtains all services that correspond to requested service-type.
"""
ax.verify_param(service_type, not_none=True, not_empty=True, http_error=HTTPBadRequest,
msg_on_fail="Invalid 'service_type' value '" + str(service_type) + "' specified")
services = db_session.query(models.Service).filter(models.Service.type == service_type)
return sorted(services, key=lambda svc: svc.resource_name) | 7352eb1e126af170f0c460edd9b69c77e07e3e0a | 3,653,611 |
def abstractable(cls):
"""
A class decorator that scoops up AbstractValueRange class properties in order
to create .validate and .abstract methods for the class. Note that properties
added after the class is defined aren't counted. Each AbstractValueRange
found is is also replaced with a class instance constructed from it.
"""
cls._ranges = []
for prp in dir(cls):
a = getattr(cls, prp)
if isinstance(a, AbstractValueRange):
cls._ranges.append((prp, a))
setattr(cls, prp, cls(a.val))
cls._ranges = sorted(cls._ranges, key=lambda nr: nr[1].mn)
@classmethod
def validate(cls, val):
ovn = min(r.mn for (n, r) in cls._ranges)
ovx = max(r.mx for (n, r) in cls._ranges)
return (isinstance(val, float) and val >= ovn and val <= ovx)
@classmethod
def abstract(cls, val):
found = None
for (n, r) in cls._ranges[:-1]:
if (
( r.mn == r.mx and val == r.mn )
or (val >= r.mn and val < r.mx)
):
found = r.val
elif val < r.mn:
break
# check final range including top
if found == None:
(n, r) = cls._ranges[-1]
if (r.mn == r.mx and val == r.mn) or (val >= r.mn and val <= r.mx):
found = r.val
if found == None:
raise ValueError(
"Can't abstract value '{}' as a {}: outside acceptable range.".format(
val,
cls.__name__
)
)
return cls(found)
def _pack_(self):
for (n, r) in type(self)._ranges:
if self == r.val:
return n
return self
@classmethod
def _unpack_(cls, obj):
return cls(obj)
cls.validate = validate
cls.abstract = abstract
cls._pack_ = _pack_
cls._unpack_ = _unpack_
return cls | ac14a1148d74a38618a8adc58df5a251296e72ee | 3,653,614 |
def summary1c(sequence):
"""
What comes in: A sequence of integers, all >= 2.
What goes out:
-- Returns the sum of INDICES of the items in the sequence
that are prime.
Side effects: None.
Examples:
-- If the given sequence is [20, 23, 29, 30, 33, 29, 100, 2, 4],
then the returned value is 15, since the primes in the sequence
are at INDICES 1, 2, 5 and 7, and 1 + 2 + 5 + 7 = 15.
"""
total = 0
for k in range(len(sequence)):
if is_prime(sequence[k]):
total += k
return total
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# ------------------------------------------------------------------------- | c2c2f60fecafc883899942b389ce1780638342da | 3,653,615 |
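summary1c depends on an is_prime helper that is not included in this snippet; a minimal sketch of one possible implementation (assuming inputs are integers >= 2, as the docstring states):

def is_prime(n):
    """Return True if the integer n (>= 2) is prime."""
    if n % 2 == 0:
        return n == 2
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True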
from typing import List
from typing import Tuple
def choose_page(btn_click_list: List[Tuple[int, str]]) -> str:
"""
Given a list of tuples of (num_clicks, next_page) choose the next_page that
corresponds to exactly 1 num_clicks.
This is to help with deciding which page to go to next when clicking on one
of many buttons on a page.
The expectation is that exactly one button will have been clicked, so we get
a deterministic next page.
:param btn_click_list: List of tuples of (num_clicks, next_page).
:return: The id of the next page.
"""
for tup in btn_click_list:
if tup[0] == 1:
return tup[1]
raise ValueError(
"No clicks were detected, or the click list is misconfigured: {}".format(
btn_click_list
)
) | e61bc1e52c6531cf71bc54faea0d03976eb137ad | 3,653,616 |
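Example usage: each tuple pairs a button's click count with its target page, and exactly one button is expected to have been clicked once:

clicks = [(0, "home-page"), (1, "settings-page"), (0, "help-page")]
print(choose_page(clicks))  # 'settings-page'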
import datetime
def get_content(request, path=''):
"""Get content from datastore as requested on the url path
Args:
path - comes without leading slash. / added in code
"""
content = StaticContent.get_by_key_name("/%s" % path)
if not content:
if path == '':
# Nothing generated yet. Inform user to create some content
return render_to_response("blog/themes/%s/listing.html" % config.theme,
{'config': config, 'no_post': True,})
else:
raise NotFound
serve = True
# check modifications and etag
if 'If-Modified-Since' in request.headers:
last_seen = datetime.datetime.strptime(
request.headers['If-Modified-Since'], HTTP_DATE_FMT)
if last_seen >= content.last_modified.replace(microsecond=0):
serve = False
if 'If-None-Match' in request.headers:
etags = [x.strip('" ')
for x in request.headers['If-None-Match'].split(',')]
if content.etag in etags:
serve = False
response = _output(content, serve)
return response | b7bb9550b78cb723ef669ad1c0df597f02d9d673 | 3,653,617 |
def reconstruct_entity(input_examples, entitys_iter):
    """ the entitys_iter contains the predicted entities of the split examples.
    We need to reconstruct the complete entities for each example in input_examples
    and return the results as a list.
input_examples: each should contains (start, end) indice.
entitys_iter: iterator of entitys
Overlaps follows first in first set order:
--------------------------------------
O O O B-PER I-PER
O O O O B-GPE I-GPE
O B-LOC I-LOC O O
--------------------------------------
O O O B-PER I-PER O B-GPE I-GPE O O
--------------------------------------
return: the complete entitys of each input example.
"""
predict_entitys = []
for i, example in enumerate(input_examples):
_entity = []
for span in example.sentence_spans:
_, _, start, end = span
# +1 to skip the first padding
_entity.extend(next(entitys_iter)[start : end])
predict_entitys.append(_entity)
assert len(predict_entitys) == len(input_examples)
return predict_entitys | 520acff8bfd0616a045ca1286c51d75ea9465f0e | 3,653,618 |
from typing import Dict
from typing import List
import yaml
def ensure_valid_schema(spec: Dict) -> List[str]:
"""
Ensure that the provided spec has no schema errors.
Returns a list with all the errors found.
"""
error_messages = []
validator = cerberus.Validator(yaml.safe_load(SNOWFLAKE_SPEC_SCHEMA))
validator.validate(spec)
for entity_type, err_msg in validator.errors.items():
if isinstance(err_msg[0], str):
error_messages.append(f"Spec error: {entity_type}: {err_msg[0]}")
continue
for error in err_msg[0].values():
error_messages.append(f"Spec error: {entity_type}: {error[0]}")
if error_messages:
return error_messages
schema = {
"databases": yaml.safe_load(SNOWFLAKE_SPEC_DATABASE_SCHEMA),
"roles": yaml.safe_load(SNOWFLAKE_SPEC_ROLE_SCHEMA),
"users": yaml.safe_load(SNOWFLAKE_SPEC_USER_SCHEMA),
"warehouses": yaml.safe_load(SNOWFLAKE_SPEC_WAREHOUSE_SCHEMA),
}
validators = {
"databases": cerberus.Validator(schema["databases"]),
"roles": cerberus.Validator(schema["roles"]),
"users": cerberus.Validator(schema["users"]),
"warehouses": cerberus.Validator(schema["warehouses"]),
}
entities_by_type = []
for entity_type, entities in spec.items():
if entities and entity_type in ["databases", "roles", "users", "warehouses"]:
entities_by_type.append((entity_type, entities))
for entity_type, entities in entities_by_type:
for entity_dict in entities:
for entity_name, config in entity_dict.items():
validators[entity_type].validate(config)
for field, err_msg in validators[entity_type].errors.items():
error_messages.append(
VALIDATION_ERR_MSG.format(
entity_type, entity_name, field, err_msg[0]
)
)
return error_messages | 216ce1189a66e83cf1b73cf5e2834434dcd73c9b | 3,653,619 |
def realord(s, pos=0):
"""
Returns the unicode of a character in a unicode string, taking surrogate pairs into account
"""
if s is None:
return None
code = ord(s[pos])
if code >= 0xD800 and code < 0xDC00:
if len(s) <= pos + 1:
print("realord warning: missing surrogate character")
return 0
code2 = ord(s[pos + 1])
if code2 >= 0xDC00 and code < 0xE000:
code = 0x10000 + ((code - 0xD800) << 10) + (code2 - 0xDC00)
return hex(code).replace("x", "") | 6683725d24a984ecf4feb2198e29a3b68c7f1d5b | 3,653,620 |
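A usage sketch: for a surrogate pair the two code units are combined into the full code point, which is returned as a hex string with the 'x' stripped:

print(realord("A"))             # '041'    (hex(65) with the 'x' removed)
print(realord("\ud83d\ude00"))  # '01f600' (surrogate pair encoding U+1F600)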
import numpy
def evaluateSpectral(left_state,right_state,xy):
"""Use this method to compute the Roe Average.
q(state)
q[0] = rho
q[1] = rho*u
q[2] = rho*v
q[3] = rho*e
"""
spec_state = numpy.zeros(left_state.shape)
rootrhoL = numpy.sqrt(left_state[0])
rootrhoR = numpy.sqrt(right_state[0])
tL = left_state/left_state[0] #Temporary variable to access e, u, v, and w - Left
tR = right_state/right_state[0] #Temporary variable to access e, u, v, and w - Right
#Calculations
denom = 1/(rootrhoL+rootrhoR)
spec_state[0] = rootrhoL*rootrhoR
spec_state[1] = (rootrhoL*tL[1]+rootrhoR*tR[1])*denom
spec_state[2] = (rootrhoL*tL[2]+rootrhoR*tR[2])*denom
spec_state[3] = (rootrhoL*tL[3]+rootrhoR*tR[3])*denom
spvec = (spec_state[0],spec_state[0]*spec_state[1],spec_state[0]*spec_state[2],spec_state[0]*spec_state[3])
P = getPressure(spvec)
dim = 1 if xy else 2 #if true provides u dim else provides v dim
spectralRadius = (numpy.sqrt(gamma*P/spec_state[0])+abs(spec_state[dim]))
spectralRadius = 0 if numpy.isnan(spectralRadius) else spectralRadius #sets spectral radius to zero if it's nan
return spectralRadius*(left_state-right_state) | f0c5d23396f486250de0e92f1abde4d03545f4f7 | 3,653,621 |
def get_multidata_bbg(requests):
"""function for multiple asynchronous refdata requests, returns a
dictionary of the form correlationID:result.
Function Parameters
----------
requests : dictionary of correlationID:request pairs. CorrelationIDs
are unique integers (cannot reuse until previous requests have
returned). Requests can be either dicts of named arguments or
list-likes of ordered arguments. Although technically anything
can be made into a blpapi.CorrelationId, integers simplify usage.
Request Parameters
----------
identifiers : list-like object of bloomberg identifiers of the form
'symbol [exchange] <yellow key>'. Symbol can be ticker/name/
cusip/etc.
fields : list-like object of bloomberg field mnemonics or CALCRT ID.
Although either can be input, only the mnemonic will be output.
overrides : list-like object of tuples or dictionary. Tuples must be of
the form [(fieldId, value), ], while dictionaries are
{fieldId: value, }.
FieldId(s) are mnemonics or CALCRT IDs, values will be converted
to the proper type if possible.
"""
with bs.Session() as session:
try:
if not isinstance(requests, dict):
raise be.InputError('request_mult_refdata requires a '
'dictionary of correlationId:input pairs')
for corr_id, req in requests.items():
if isinstance(req, dict):
inputs = req
elif hasattr(req, '__iter__'):
if len(req) == 3:
pass
elif len(req) == 2:
req = list(req)
req.append(None)
else:
raise be.InputError('Request {0} has {1} items'
', expected 2-3.'.format(corr_id, len(req)))
inputs = dict(zip((IDS, FLDS, OVDS), req))
else:
raise be.InputError('Request {0} is of type: {0}, '
'expected dict or list-like'.format(corr_id,
type(req)))
_ref_req_queue(session, corr_id, inputs)
except be.InputError as err:
            print(err)
_refdata_to_bloomberg(session)
session.queue.join()
rtn = session.correlation_ids
return rtn | d0580910ac74fe7ac85795caa6b5321122626986 | 3,653,622 |
def specific_kinetic_energy(particles):
"""
Returns the specific kinetic energy of each particle in the set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.vx = [1.0, 1.0] | units.ms
>>> particles.vy = [0.0, 0.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.specific_kinetic_energy()
quantity<[0.5, 0.5] m**2 * s**-2>
"""
return 0.5*(particles.vx**2+particles.vy**2+particles.vz**2) | 89a126c23b291a526401a00f812b40a5283319f4 | 3,653,623 |
def parse_loot_percentage(text):
"""Use to parse loot percentage string, ie: Roubo: 50% becomes 0.5"""
percentage = float(text.split(':')[1].strip("%")) / 100
return percentage | 97dc4f20f02ef0e5d3e592d3084dce80549777ce | 3,653,624 |
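Example usage with a loot string in the expected 'label: percentage' format:

print(parse_loot_percentage("Roubo: 50%"))  # 0.5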
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
major_mismatch = old_version.major != new_version.major
minor_mismatch = old_version.minor != new_version.minor
if major_mismatch or minor_mismatch:
return True
return False | effa9f55c82a9edcacd79e07716527f314e41f39 | 3,653,625 |
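The function only needs version objects exposing .major and .minor attributes; packaging.version.Version is used below purely as an illustrative assumption:

from packaging.version import Version

print(major_minor_change(Version("1.2.3"), Version("1.3.0")))  # True  (minor bumped)
print(major_minor_change(Version("1.2.3"), Version("1.2.9")))  # False (only the patch changed)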
from typing import Optional
from typing import List
from typing import Dict
def list_all_queues(path: str, vhost: Optional[str] = '/') -> List[Dict]:
"""Send a request to RabbitMQ api to list all the data queues.
Args:
path: Path to the RabbitMQ management api to send the request to.
vhost: Virtual host of the RabbitMQ.
Returns:
List of all the data queues.
"""
quoted_vhost = parse.quote_plus(vhost)
queues_path = path + f'api/queues/{quoted_vhost}'
queues = request_sender.make_request('GET', queues_path)
return queues | 9b57721509fdc6ec31eebb0ca8a0f28797419d95 | 3,653,626 |
def get_tf_model_variables(config_path, init_checkpoint):
"""Return tf model parameters in a dictionary format.
Args:
config_path: path to TF model configuration file
init_checkpoint: path to saved TF model checkpoint
Returns:
tf_config: dictionary tf model configurations
tf_variables: dictionary of tf variables
tf_model: tensorflow BERT model generated using input config and checkpoint
"""
# Load saved model configuration
config = configs.BertConfig.from_json_file(config_path)
# Generate BERT TF model and initiate variable update from checkpoint
seq_len = 20
_, tf_model = bert_models.squad_model(config, seq_len)
checkpoint = tf.train.Checkpoint(model=tf_model)
checkpoint.restore(init_checkpoint).assert_existing_objects_matched()
tf_config = config.__dict__
tf_variables = {v.name: v.numpy() for v in tf_model.variables}
return tf_config, tf_variables, tf_model | 5c9a1c138f3c12460668464d2c865787d7720e95 | 3,653,627 |
def org_unit_type_filter(queryset, passed_in_org_types):
"""Get specific Organisational units based on a filter."""
for passed_in_org_type in passed_in_org_types:
queryset = queryset.filter(org_unit_type_id=passed_in_org_type)
return queryset | 0495cabe121f8d6fdb584538f13764bd81d978c5 | 3,653,628 |
def is_str_digit(n: str) -> bool:
    """Check whether the given string can be parsed as a number (int or float)."""
try:
float(n)
return True
except ValueError:
return False | 0e3b4c38cfd9fe2024bde2b63502c74dce307533 | 3,653,629 |
import operator
from functools import partial
def range_check_function(bottom, top):
"""Returns a function that checks if bottom <= arg < top, allowing bottom and/or top to be None"""
if top is None:
if bottom is None:
# Can't currently happen (checked before calling this), but let's do something reasonable
return lambda _: True
else:
return partial(operator.le, bottom)
elif bottom is None:
return partial(operator.gt, top)
else:
def range_f(v):
return v >= bottom and v < top
return range_f | 95e22a544633f166b275d548fd4a07383e3ea098 | 3,653,632 |
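A usage sketch covering the three cases (both bounds, lower bound only, upper bound only):

in_range = range_check_function(0, 10)
print(in_range(5), in_range(10))     # True False  (the top bound is exclusive)

at_least_3 = range_check_function(3, None)
print(at_least_3(2), at_least_3(7))  # False True

below_4 = range_check_function(None, 4)
print(below_4(3), below_4(4))        # True False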
def filter_employee():
""" When the client requests a specific employee.
Valid queries:
?employeeid=<employeeid>
Returns: json representation of product.
"""
query_parameters = request.args
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = conn.cursor()
lookup_code = query_parameters.get('employeeid')
base_query = "SELECT * FROM employee WHERE"
if lookup_code:
query = "{} employeeid = '{}'".format(base_query, lookup_code)
cursor.execute(query)
record_list = cursor.fetchall()
conn.close()
data_list = parse_employee_info(record_list)
return jsonify(data_list)
else:
conn.close()
return "<h1>404</h1><p>The employeeid was not found.</p>" | 2238b10ad528a1ce523ff206d9f41f04e369adb4 | 3,653,633 |
import chunk
def ParallelLSTDQ(D,env,w,damping=0.001,ncpus=None):
"""
D : source of samples (s,a,r,s',a')
env: environment contianing k,phi,gamma
w : weights for the linear policy evaluation
damping : keeps the result relatively stable
ncpus : the number of cpus to use
"""
if ncpus:
nprocess = ncpus
else:
nprocess = cpu_count()
pool = Pool(nprocess)
indx = chunk(len(D),nprocess)
results = []
for (i,j) in indx:
r = pool.apply_async(dict_loop,(D[i:j],env,w,0.0)) # note that damping needs to be zero here
results.append(r)
k = len(w)
A = sp.identity(k,format='csr') * damping
b = sp_create(k,1,'csr')
for r in results:
T,t = r.get()
A = A + T
b = b + t
# close out the pool of workers
pool.close()
pool.join()
w,info = solve(A,b,method="spsolve")
return A,b,w,info | 9a9ca1247fccf45c523d64e9dbff2313c6c9572b | 3,653,634 |
def get_value_from_settings_with_default_string(wf, value, default_value):
"""Returns either a value as set in the settings file or a default as specified by caller"""
try:
ret = wf.settings[value]['value']
return str(ret)
except KeyError:
return default_value | 7a08ac33073b451a6000931c6bd5b41f33b0c486 | 3,653,635 |
def jsonify(records):
"""
Parse asyncpg record response into JSON format
"""
return [dict(r.items()) for r in records] | 618cb538331c4eb637aa03f0ba857da3f2fa4c1c | 3,653,636 |
def smoothing_cross_entropy(logits,
labels,
vocab_size,
confidence,
gaussian=False,
zero_pad=True):
"""Cross entropy with label smoothing to limit over-confidence.
Args:
logits: Tensor of size [batch_size, ?, vocab_size]
labels: Tensor of size [batch_size, ?]
vocab_size: Tensor representing the size of the vocabulary.
confidence: Used to determine on and off values for label smoothing.
If `gaussian` is true, `confidence` is the variance to the gaussian
distribution.
gaussian: Uses a gaussian distribution for label smoothing
    zero_pad: use 0 as the probability of the padding
in the smoothed labels. By setting this, we replicate the
numeric calculation of tensor2tensor, which doesn't set the
<BOS> token in the vocabulary.
Returns:
the cross entropy loss.
"""
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
# Low confidence is given to all non-true labels, uniformly.
if zero_pad:
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 2)
else:
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
if gaussian and confidence > 0.0:
labels = tf.cast(labels, tf.float32)
normal_dist = tf.distributions.Normal(loc=labels, scale=confidence)
soft_targets = normal_dist.prob(
tf.cast(tf.range(vocab_size), tf.float32)\
[:, None, None])
# Reordering soft_targets from [vocab_size, batch_size, ?]
# to match logits: [batch_size, ?, vocab_size]
soft_targets = tf.transpose(soft_targets, perm=[1, 2, 0])
else:
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence,
dtype=logits.dtype)
if zero_pad:
soft_targets = tf.concat([tf.expand_dims(\
tf.zeros_like(labels, dtype=tf.float32), 2),\
soft_targets[:, :, 1:]], -1)
if hasattr(tf.nn, 'softmax_cross_entropy_with_logits_v2'):
cross_entropy_fn = tf.nn.softmax_cross_entropy_with_logits_v2
else:
cross_entropy_fn = tf.nn.softmax_cross_entropy_with_logits
return cross_entropy_fn(
logits=logits, labels=soft_targets) | d0374cb850d25975c5e882335933c18da9647382 | 3,653,637 |
def get_db_matching_location_interurban(latitude, longitude) -> dict:
"""
extracts location from db by closest geo point to location found, using road number if provided and limits to
requested resolution
:param latitude: location latitude
:param longitude: location longitude
"""
def get_bounding_box(latitude, longitude, distance_in_km):
latitude = math.radians(latitude)
longitude = math.radians(longitude)
radius = 6371
# Radius of the parallel at given latitude
parallel_radius = radius * math.cos(latitude)
lat_min = latitude - distance_in_km / radius
lat_max = latitude + distance_in_km / radius
lon_min = longitude - distance_in_km / parallel_radius
lon_max = longitude + distance_in_km / parallel_radius
rad2deg = math.degrees
return rad2deg(lat_min), rad2deg(lon_min), rad2deg(lat_max), rad2deg(lon_max)
    try:
        from anyway.app_and_db import db
    except ModuleNotFoundError:
        pass
distance_in_km = 5
lat_min, lon_min, lat_max, lon_max = get_bounding_box(latitude, longitude, distance_in_km)
baseX = lon_min
baseY = lat_min
distanceX = lon_max
distanceY = lat_max
polygon_str = "POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))".format(
baseX, baseY, distanceX, distanceY
)
query_obj = (
db.session.query(AccidentMarkerView)
.filter(AccidentMarkerView.geom.intersects(polygon_str))
.filter(AccidentMarkerView.accident_year >= 2014)
.filter(AccidentMarkerView.provider_code != BE_CONST.RSA_PROVIDER_CODE)
.filter(not_(AccidentMarkerView.road_segment_name == None))
)
markers = pd.read_sql_query(query_obj.statement, query_obj.session.bind)
geod = Geodesic.WGS84
# relevant_fields = resolution_dict[resolution]
# markers = db.get_markers_for_location_extraction()
markers["geohash"] = markers.apply( # pylint: disable=maybe-no-member
lambda x: geohash.encode(x["latitude"], x["longitude"], precision=4), axis=1
) # pylint: disable=maybe-no-member
markers_orig = markers.copy() # pylint: disable=maybe-no-member
markers = markers.loc[(markers["road1"] != None)] # pylint: disable=maybe-no-member
if markers.count()[0] == 0:
markers = markers_orig
# FILTER BY GEOHASH
curr_geohash = geohash.encode(latitude, longitude, precision=4)
if markers.loc[markers["geohash"] == curr_geohash].count()[0] > 0:
markers = markers.loc[markers["geohash"] == curr_geohash].copy()
# CREATE DISTANCE FIELD
markers["dist_point"] = markers.apply(
lambda x: geod.Inverse(latitude, longitude, x["latitude"], x["longitude"])["s12"], axis=1
).replace({np.nan: None})
most_fit_loc = (
markers.loc[markers["dist_point"] == markers["dist_point"].min()].iloc[0].to_dict()
)
final_loc = {}
for field in ["road1", "road_segment_name"]:
loc = most_fit_loc[field]
if loc not in [None, "", "nan"]:
if not (isinstance(loc, np.float64) and np.isnan(loc)):
final_loc[field] = loc
return final_loc | 6e3bd7153cc555954768dd34a5a1b0090510a834 | 3,653,638 |
import re
def get(settings_obj, key, default=None, callback=None):
"""
Return a Sublime Text plugin setting value.
Parameters:
settings_obj - a sublime.Settings object or a dictionary containing
settings
key - the name of the setting
default - the default value to return if the key value is not found.
callback - a callback function that, if provided, will be called with
the found and default values as parameters.
"""
# Parameter validation
if not isinstance(settings_obj, (dict, sublime.Settings)):
raise AttributeError("Invalid settings object")
    if not isinstance(key, str):
        raise AttributeError("Invalid key; expected a string")
if callback is not None and not hasattr(callback, '__call__'):
raise AttributeError("Invalid callback function")
setting = settings_obj.get(key, default)
final_val = None
if isinstance(setting, dict) and "#multiconf#" in setting:
reject_item = False
for entry in setting["#multiconf#"]:
reject_item = False if isinstance(entry, dict) and len(entry) else True
k, v = entry.popitem()
if reject_item:
continue
for qual in re.compile(QUALIFIERS).finditer(k):
if Qualifications.exists(qual.group(1)):
reject_item = not Qualifications.eval_qual(qual.group(1), qual.group(2))
else:
reject_item = True
if reject_item:
break
if not reject_item:
final_val = v
break
if reject_item:
final_val = default
else:
final_val = setting
return callback(final_val, default) if callback else final_val | b1bab5380cb94fb6493431b8732d9c963e9f1f14 | 3,653,639 |
def site():
"""Main front-end web application"""
html = render.html("index")
return html | 0b8e144a6c366692c51a3fb5431d73fb9ed0e8c1 | 3,653,641 |
def parse_vad_label(line, frame_size: float = 0.032, frame_shift: float = 0.008):
"""Parse VAD information in each line, and convert it to frame-wise VAD label.
Args:
line (str): e.g. "0.2,3.11 3.48,10.51 10.52,11.02"
        frame_size (float): frame size (in seconds) that is used when
            extracting spectral features
        frame_shift (float): frame shift / hop length (in seconds) that
            is used when extracting spectral features
Returns:
frames (List[int]): frame-wise VAD label
Examples:
>>> label = parse_vad_label("0.3,0.5 0.7,0.9")
[0, ..., 0, 1, ..., 1, 0, ..., 0, 1, ..., 1]
>>> print(len(label))
110
NOTE: The output label length may vary according to the last timestamp in `line`,
which may not correspond to the real duration of that sample.
For example, if an audio sample contains 1-sec silence at the end, the resulting
VAD label will be approximately 1-sec shorter than the sample duration.
Thus, you need to pad zeros manually to the end of each label to match the number
of frames in the feature. E.g.:
>>> feature = extract_feature(audio) # frames: 320
        >>> frames = feature.shape[1] # here assumes the frame dimension is 1
>>> label = parse_vad_label(vad_line) # length: 210
>>> import numpy as np
>>> label_pad = np.pad(label, (0, np.maximum(frames - len(label), 0)))[:frames]
"""
frame2time = lambda n: n * frame_shift + frame_size / 2
frames = []
frame_n = 0
for time_pairs in line.split():
start, end = map(float, time_pairs.split(","))
assert end > start, (start, end)
while frame2time(frame_n) < start:
frames.append(0)
frame_n += 1
while frame2time(frame_n) <= end:
frames.append(1)
frame_n += 1
return frames | 658a2a00b8b0b2cfdb83b649d2f87fcf23cbb6b4 | 3,653,642 |
def preprocess_image(img, img_width, img_height):
"""Preprocesses the image before feeding it into the ML model"""
    x = get_square_image(img)  # crop/pad the input to a square first
    x = np.asarray(x.resize((img_width, img_height))).astype(np.float32)
x_transposed = x.transpose((2,0,1))
x_batchified = np.expand_dims(x_transposed, axis=0)
return x_batchified | 50540e81da95651d22dec83271257657d7978f79 | 3,653,643 |
def Pose_2_KUKA(H):
"""Converts a pose (4x4 matrix) to an XYZABC KUKA target (Euler angles), required by KUKA KRC controllers.
:param H: pose
:type H: :class:`.Mat`
.. seealso:: :class:`.Mat`, :func:`~robodk.TxyzRxyz_2_Pose`, :func:`~robodk.Pose_2_TxyzRxyz`, :func:`~robodk.Pose_2_ABB`, :func:`~robodk.Pose_2_Adept`, :func:`~robodk.Pose_2_Comau`, :func:`~robodk.Pose_2_Fanuc`, :func:`~robodk.Pose_2_KUKA`, :func:`~robodk.Pose_2_Motoman`, :func:`~robodk.Pose_2_Nachi`, :func:`~robodk.Pose_2_Staubli`, :func:`~robodk.Pose_2_UR`, :func:`~robodk.quaternion_2_pose`
"""
x = H[0, 3]
y = H[1, 3]
z = H[2, 3]
if (H[2, 0]) > (1.0 - 1e-10):
p = -pi / 2
r = 0
w = atan2(-H[1, 2], H[1, 1])
elif (H[2, 0]) < (-1.0 + 1e-10):
p = pi / 2
r = 0
w = atan2(H[1, 2], H[1, 1])
else:
p = atan2(-H[2, 0], sqrt(H[0, 0] * H[0, 0] + H[1, 0] * H[1, 0]))
w = atan2(H[1, 0], H[0, 0])
r = atan2(H[2, 1], H[2, 2])
return [x, y, z, w * 180 / pi, p * 180 / pi, r * 180 / pi] | 5c15c450b9be728e1c0c8727066485ec0176711c | 3,653,644 |
from typing import Optional
def skip_regenerate_image(request: FixtureRequest) -> Optional[str]:
"""Enable parametrization for the same cli option"""
return _request_param_or_config_option_or_default(request, 'skip_regenerate_image', None) | 5d621202d0b72da53994b217f570bd86ccd5ada2 | 3,653,645 |
def parse_config(tool_name, key_col_name, value_col_name):
"""Parses the "execute" field for the given tool from installation config
file.
Parameters:
tool_name: Tool name to search from file.
Raises:
STAPLERerror if config file does not exists.
STAPLERerror if tool value can not be read from file.
STAPLERerror if tool value was an empty string.
Returns:
String containing the user specified run command, None if no special
command has been defined.
"""
# Return None for the generic_base class, as it should not be in the
# config file in any case
try:
run_command = read_value_from_multi_table(CONFIG_FILE_PATH,
tool_name,
key_col_name,
value_col_name)
except STAPLERerror:
        print('Error when reading installation configuration file for '
              'tool {0}'.format(tool_name))
logging.error('Error when reading installation configuration file '
'for the tool {0}'.format(tool_name))
raise
if run_command == 'none':
raise NotConfiguredError()
if run_command == '':
raise STAPLERerror('Error! Empty value for tool {0} was found from '
'installation configuration file !):\n{1}'.format(tool_name,
CONFIG_FILE_PATH))
return run_command | bd80078cbd488bafb8ba9ba46464460a12761b2f | 3,653,646 |
import ntpath
def path_leaf(path):
"""
Extracts file name from given path
:param str path: Path be extracted the file name from
:return str: File name
"""
head, tail = ntpath.split(path)
return tail or ntpath.basename(head) | 98ef27b218fdb5003ac988c42aff163d1067021f | 3,653,647 |
import torch
def define_styleGenerator(content_nc: int, style_nc: int, n_c: int, n_blocks=4, norm='instance', use_dropout=False, padding_type='zero', cbam=False, gpu_ids=[]):
"""
This ResNet applies the encoded style from the style tensor onto the given content tensor.
Parameters:
----------
- content_nc (int): number of channels in the content tensor
- style_nc (int): number of channels in the style tensor
- n_c (int): number of channels used inside the network
- n_blocks (int): number of Resnet blocks
- norm_layer: normalization layer
    - use_dropout (boolean): whether to use dropout layers
- padding_type (str): the name of padding layer in conv layers: reflect | replicate | zero
- cbam (boolean): If true, use the Convolution Block Attention Module
- gpu_ids: [int]: GPU ids available to this network. Default = []
"""
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
    styleGenerator = StyleGenerator(content_nc, style_nc, n_c, n_blocks=n_blocks, norm_layer=norm_layer, use_dropout=use_dropout, padding_type=padding_type, cbam=cbam)
init_weights(styleGenerator, "kaiming", activation='leaky_relu')
if len(gpu_ids):
return nn.DataParallel(styleGenerator, device_ids=gpu_ids)
else:
return styleGenerator | ed996a2dbd1d2375a248582db21397ee051b5f25 | 3,653,649 |
def answer():
"""
answer
"""
# logger
M_LOG.info("answer")
if "answer" == flask.request.form["type"]:
# save answer
gdct_data["answer"] = {"id": flask.request.form["id"],
"type": flask.request.form["type"],
"sdp": flask.request.form["sdp"]}
# return ok
return flask.Response(status=200)
# return
return flask.Response(status=400) | 055a590107e30e4cd582e658e2f62fcba975f3dc | 3,653,650 |
def load_requirements():
""" Helps to avoid storing requirements in more than one file"""
reqs = parse_requirements('requirements-to-freeze.txt', session=False)
reqs_list = [str(ir.req) for ir in reqs]
return reqs_list | 4dcde55604cc8fc08a4b57ad1e776612eed18808 | 3,653,651 |
def next_permutation(a):
"""Generate the lexicographically next permutation inplace.
https://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
Return false if there is no next permutation.
"""
# Find the largest index i such that a[i] < a[i + 1]. If no such
# index exists, the permutation is the last permutation
for i in reversed(range(len(a) - 1)):
if a[i] < a[i + 1]:
break # found
else: # no break: not found
a.reverse()
return False # no next permutation
# Find the largest index j greater than i such that a[i] < a[j]
j = next(j for j in reversed(range(i + 1, len(a))) if a[i] < a[j])
# Swap the value of a[i] with that of a[j]
a[i], a[j] = a[j], a[i]
# Reverse sequence from a[i + 1] up to and including the final element a[n]
a[i + 1:] = reversed(a[i + 1:])
return True | b6246d53b5e0ac0e28aa5afda03d7756657a40bf | 3,653,653 |
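Example usage: the list is permuted in place and the return value signals whether a lexicographically larger permutation existed:

a = [1, 2, 3]
print(next_permutation(a), a)  # True [1, 3, 2]

b = [3, 2, 1]
print(next_permutation(b), b)  # False [1, 2, 3]  (wrapped around to the first permutation)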
from numpy.linalg import norm
def normalize(v):
"""
Calculate normalized vector
:param v: input vector
:return: normalized vector
"""
return v/norm(v) | 0ade14b6136e5f55410f6d4cc3fb5b466fa60566 | 3,653,654 |
import re
def replace_hyphen_by_romaji(text):
    """
    Replace long-vowel marks such as "ー" with kana.
    """
# error check
if len(text) < 2:
return ""
while "-" in list(text) or "~" in list(text):
text_ = text
if (text[0] == "-" or text[0] == "~") and len(text) >= 2:
text = text[2:]
continue
        text = re.sub(r"(?P<vowel>[aeiou])[-~][-~]", r"\g<vowel>x\g<vowel>", text)  # two consecutive "-"/"~" marks
text = re.sub(r"A[-~][-~]", r"Axa", text)
text = re.sub(r"E[-~][-~]", r"Exe", text)
text = re.sub(r"O[-~][-~]", r"Oxo", text)
text = re.sub(r"U[-~][-~]", r"Uxu", text)
if text_ == text:
            break  # stop once no further change occurs
return text | 9e2d7216bbd751f49ed54519f5eaf8d516ae8025 | 3,653,655 |
def aucroc_ic50(df,threshold=500):
"""
Compute AUC ROC for predictions and targets in DataFrame, based on a given threshold
Parameters
----------
df : pandas.DataFrame with predictons in column "preds" and targets in column "targs" in nM
    threshold: float, binding affinity threshold for binders in nM
Returns
--------
numpy.nan or float
"""
df =df[~df["preds"].isnull()]
is_binder = df["targs"] >= threshold
if is_binder.mean()==1.0 or is_binder.mean()==0.0 or np.isnan(is_binder.mean()):
return np.nan
else:
return roc_auc_score(1.0*is_binder,df["preds"]) | d4535bc493bcaca45fa9ba739135261b9d514aa2 | 3,653,656 |
def infer_getattr(node, context=None):
"""Understand getattr calls
If one of the arguments is an Uninferable object, then the
result will be an Uninferable object. Otherwise, the normal attribute
lookup will be done.
"""
obj, attr = _infer_getattr_args(node, context)
if (
obj is util.Uninferable
or attr is util.Uninferable
or not hasattr(obj, "igetattr")
):
return util.Uninferable
try:
return next(obj.igetattr(attr, context=context))
except (StopIteration, InferenceError, AttributeInferenceError):
if len(node.args) == 3:
# Try to infer the default and return it instead.
try:
return next(node.args[2].infer(context=context))
except InferenceError as exc:
raise UseInferenceDefault from exc
raise UseInferenceDefault | 593435273bf57430ab96034772ef38694a491813 | 3,653,657 |
def get_plugin(molcapsule: 'PyObject *', plug_no: 'int') -> "PyObject *":
"""get_plugin(molcapsule, plug_no) -> PyObject *"""
return _libpymolfile.get_plugin(molcapsule, plug_no) | b66687947619808a410d603df70895845afb4d16 | 3,653,658 |
def fake_redis_con():
"""
Purpose:
Create Fake Redis Connection To Test With
Args:
N/A
Return:
fake_redis_con (Pytest Fixture (FakeRedis Connection Obj)): Fake redis connection
that simulates redis functionality for testing
"""
return fakeredis.FakeStrictRedis() | 10d8e340e60e3d591473e942b2273871e6dcaebe | 3,653,659 |
import inspect
def verbose(function, *args, **kwargs):
"""Improved verbose decorator to allow functions to override log-level
Do not call this directly to set global verbosrity level, instead use
set_log_level().
Parameters
----------
function - function
Function to be decorated to allow for overriding global verbosity
level
Returns
-------
dec - function
The decorated function
"""
try:
arg_names = [parameter.name for parameter in
inspect.signature(function).parameters.values() if
(parameter.kind == parameter.POSITIONAL_OR_KEYWORD)]
except:
arg_names = inspect.getargspec(function).args
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
else:
default_level = None
if('verbose' in arg_names):
verbose_level = args[arg_names.index('verbose')]
else:
verbose_level = default_level
if verbose_level is not None:
old_level = set_log_level(verbose_level, True)
# set it back if we get an exception
try:
ret = function(*args, **kwargs)
except:
set_log_level(old_level)
raise
set_log_level(old_level)
return ret
else:
ret = function(*args, **kwargs)
return ret | 7c2b2d8e827b6d60120b764fe964aa7e9c7b3f41 | 3,653,660 |
import torch
def bittensor_dtype_to_torch_dtype(bdtype):
""" Translates between bittensor.dtype and torch.dtypes.
Args:
bdtype (bittensor.dtype): bittensor.dtype to translate.
Returns:
dtype: (torch.dtype): translated torch.dtype.
"""
if bdtype == bittensor.proto.DataType.FLOAT32:
dtype = torch.float32
elif bdtype == bittensor.proto.DataType.FLOAT64:
dtype = torch.float64
elif bdtype == bittensor.proto.DataType.INT32:
dtype = torch.int32
elif bdtype == bittensor.proto.DataType.INT64:
dtype = torch.int64
else:
raise DeserializationException(
'Unknown bittensor.Dtype or no equivalent torch.dtype for bittensor.dtype = {}'
.format(bdtype))
return dtype | b0d6ccae56ed871224c8c45bd8aaff61846c99fa | 3,653,661 |
def read_all(dataset, table):
"""Read all data from the API, convert to pandas dataframe"""
return _read_from_json(
CFG.path.replace("data", dataset=dataset, table=table, converter="path")
) | d40016f8d8356795b9f6451b165410c25a79627c | 3,653,662 |
def compute_spectrum_welch(sig, fs, avg_type='mean', window='hann',
nperseg=None, noverlap=None,
f_range=None, outlier_percent=None):
"""Compute the power spectral density using Welch's method.
Parameters
-----------
sig : 1d or 2d array
Time series.
fs : float
Sampling rate, in Hz.
avg_type : {'mean', 'median'}, optional
Method to average across the windows:
* 'mean' is the same as Welch's method, taking the mean across FFT windows.
* 'median' uses median across FFT windows instead of the mean, to minimize outlier effects.
window : str or tuple or array_like, optional, default: 'hann'
Desired window to use. See scipy.signal.get_window for a list of available windows.
If array_like, the array will be used as the window and its length must be nperseg.
nperseg : int, optional
Length of each segment, in number of samples.
If None, and window is str or tuple, is set to 1 second of data.
If None, and window is array_like, is set to the length of the window.
noverlap : int, optional
Number of points to overlap between segments.
If None, noverlap = nperseg // 8.
f_range : list of [float, float], optional
Frequency range to sub-select from the power spectrum.
outlier_percent : float, optional
The percentage of outlier values to be removed. Must be between 0 and 100.
Returns
-------
freqs : 1d array
Frequencies at which the measure was calculated.
spectrum : 1d or 2d array
Power spectral density.
Examples
--------
Compute the power spectrum of a simulated time series using Welch's method:
>>> from neurodsp.sim import sim_combined
>>> sig = sim_combined(n_seconds=10, fs=500,
... components={'sim_powerlaw': {}, 'sim_oscillation': {'freq': 10}})
>>> freqs, spec = compute_spectrum_welch(sig, fs=500)
"""
# Calculate the short time Fourier transform with signal.spectrogram
nperseg, noverlap = check_spg_settings(fs, window, nperseg, noverlap)
freqs, _, spg = spectrogram(sig, fs, window, nperseg, noverlap)
# Throw out outliers if indicated
if outlier_percent is not None:
spg = discard_outliers(spg, outlier_percent)
# Average across windows
spectrum = get_avg_func(avg_type)(spg, axis=-1)
# Trim spectrum, if requested
if f_range:
freqs, spectrum = trim_spectrum(freqs, spectrum, f_range)
return freqs, spectrum | e7856e370d7783628afdea9777a693c4c72e2dfd | 3,653,663 |
def _function_set_name(f):
"""
return the name of a function (not the module)
@param f function
@return name
.. versionadded:: 1.1
"""
name = f.__name__
return name.split(".")[-1] | e1b73fbc520c7d9745872b0cd19766d42c027d15 | 3,653,664 |
from typing import Sequence
from pathlib import Path
from typing import Optional
from typing import Callable
from typing import Set
def _notes_from_paths(
paths: Sequence[Path],
wiki_name: str,
callback: Optional[Callable[[int, int], None]]) -> Set[TwNote]:
"""
Given an iterable of paths, compile the notes found in all those tiddlers.
:param paths: The paths of the tiddlers to generate notes for.
:param wiki_name: The name/id of the wiki these notes are from.
:param callback: Optional callable passing back progress. See :func:`find_notes`.
:return: A set of all the notes found in the tiddler files passed.
"""
notes = set()
for index, tiddler in enumerate(paths, 0):
with open(tiddler, 'rb') as f:
tid_text = f.read().decode()
tid_name = tiddler.name[:tiddler.name.find(f".{RENDERED_FILE_EXTENSION}")]
notes.update(_notes_from_tiddler(tid_text, wiki_name, tid_name))
if callback is not None and not index % 50:
callback(index+1, len(paths))
if callback is not None:
callback(len(paths), len(paths))
return notes | d522aaf2db500864eba78a4f2bd0fdfbf83051f0 | 3,653,665 |
def load_matrix(file_matrix, V):
"""load matrix
:param file_matrix: path of pre-trained matrix (output file)
:param V: vocab size
:return: matrix(list)
"""
matrix = [[0 for _ in range(V)] for _ in range(V)]
with open(file_matrix) as fp:
for line in fp:
target_id, context_id_values = line.strip().split("\t")
context_id_values = context_id_values.split()
for context_id_value in context_id_values:
context_id, value = context_id_value.split(":")
matrix[int(target_id)][int(context_id)] += float(value)
return matrix | 0a7aa27638bdc223d9860b9e39aa9b6089e59a0f | 3,653,666 |
def add(*args):
"""Adding list of values"""
return sum(args) | 9bc68771c10b537f0727e76cc07297e7d0311a5d | 3,653,667 |
import pytz
from datetime import date, datetime, timedelta
def get_chart_dates(df, start_date=None, end_date=None, utc=True, auto_start=None, auto_end=None):
"""
Get dates for chart functions.
More info on date string formats at: https://strftime.org/
Parameters:
        df : The dataframe for the chart, needed to ascertain start and end dates, if none are provided.
start_date : The start date for the entire series to be contained in the chart (start of max range).
end_date : The end date for the entire series to be contained in the chart (end of max range).
        auto_start : The start of the default range to display on charts, until a user clicks a different range.
        auto_end : The end of the default range to display on charts, until a user clicks a different range.
"""
if utc:
utc_now = pytz.utc.localize(datetime.utcnow())
utc_now.isoformat()
utc_td_dmy_str = utc_now.strftime("%d-%m-%Y")
utc_td_ymd_str = utc_now.strftime('%Y-%m-%d')
t = utc_now
t_dmy_str = utc_td_dmy_str
t_ymd_str = utc_td_ymd_str
elif not utc:
now = datetime.now()
td_dmy_str = now.strftime("%d-%m-%Y")
td_ymd_str = now.strftime('%Y-%m-%d')
t = now
t_dmy_str = td_dmy_str
t_ymd_str = td_ymd_str
# End date:
if end_date == None:
end = df.index.max()
chart_end = end.strftime("%d-%m-%Y")
elif (end_date != None) and (isinstance(end_date, str)):
end = datetime.strptime(end_date, '%Y-%m-%d')
chart_end = end.strftime("%d-%m-%Y")
elif (end_date != None) and (type(end_date) == datetime):
end = end_date
chart_end = end.strftime("%d-%m-%Y")
elif (end_date != None) and (type(end_date) == date):
end = end_date
chart_end = end.strftime("%d-%m-%Y")
elif isinstance(end_date, pd.Timestamp):
end = pd.to_datetime(end_date)
chart_end = end.strftime("%d-%m-%Y")
# Start date:
if start_date == None:
start = df.index.min()
chart_start = start.strftime("%d-%m-%Y")
    elif (start_date != None) and (isinstance(start_date, str)):
        start = datetime.strptime(start_date, '%Y-%m-%d')
        chart_start = start.strftime("%d-%m-%Y")
    elif (start_date != None) and (type(start_date) == datetime):
        start = start_date
        chart_start = start.strftime("%d-%m-%Y")
    elif (start_date != None) and (type(start_date) == date):
        start = start_date
        chart_start = start.strftime("%d-%m-%Y")
    elif isinstance(start_date, pd.Timestamp):
        start = pd.to_datetime(start_date)
        chart_start = start.strftime("%d-%m-%Y")
# Auto end
if auto_end == None:
auto_end = t_ymd_str
elif auto_end == 'yst':
at_end = t - timedelta(days=1)
auto_end = at_end.strftime('%Y-%m-%d')
elif (auto_end != None) and (isinstance(auto_end, str)):
at_end = datetime.strptime(auto_end, '%Y-%m-%d')
auto_end = at_end.strftime('%Y-%m-%d')
elif (auto_end != None) and (type(auto_end) == datetime):
at_end = auto_end
auto_end = at_end.strftime('%Y-%m-%d')
elif (auto_end != None) and (type(auto_end) == date):
at_end = auto_end
auto_end = at_end.strftime('%Y-%m-%d')
elif isinstance(auto_end, pd.Timestamp):
at_end = pd.to_datetime(auto_end)
auto_end = at_end.strftime('%Y-%m-%d')
# Auto start
if auto_start == None or auto_start == 'ytd':
at_st = first_day_of_current_year(time=False, utc=False)
auto_start = at_st.strftime('%Y-%m-%d')
elif auto_start == '1yr':
at_st = t - timedelta(days=365)
auto_start = at_st.strftime('%Y-%m-%d')
elif (auto_start != None) and (isinstance(auto_start, str)):
at_start = datetime.strptime(auto_start, '%Y-%m-%d')
auto_start = at_start.strftime('%Y-%m-%d')
elif (auto_start != None) and (type(auto_start) == datetime):
at_start = auto_start
auto_start = at_start.strftime('%Y-%m-%d')
elif (auto_start != None) and (type(auto_start) == date):
at_start = auto_start
auto_start = at_start.strftime('%Y-%m-%d')
elif isinstance(auto_start, pd.Timestamp):
at_start = pd.to_datetime(auto_start)
auto_start = at_start.strftime('%Y-%m-%d')
return chart_start, chart_end, auto_start, auto_end | 603b8e2cea59a52104941da7f3526e4c38b94c16 | 3,653,668 |
def Eip1(name, ospaces, index_key=None):
"""
Return the tensor representation of a Fermion ionization
name (string): name of the tensor
ospaces (list): list of occupied spaces
"""
terms = []
for os in ospaces:
i = Idx(0, os)
sums = [Sigma(i)]
tensors = [Tensor([i], name)]
operators = [FOperator(i, False)]
e1 = Term(1, sums, tensors, operators, [], index_key=index_key)
terms.append(e1)
return Expression(terms) | 3b118106e0c0839549edb5556215241bd3b5f8d4 | 3,653,670 |
import logging
def load_rtma_data(rtma_data, bbox):
"""
Load relevant RTMA fields and return them
:param rtma_data: a dictionary mapping variable names to local paths
:param bbox: the bounding box of the data
:return: a tuple containing t2, rh, lats, lons
"""
gf = GribFile(rtma_data['temp'])[1]
lats, lons = gf.latlons()
# bbox format: minlat, minlon, maxlat, maxlon
i1, i2, j1, j2 = find_region_indices(lats, lons, bbox[0], bbox[2], bbox[1], bbox[3])
t2 = np.ma.array(gf.values())[i1:i2,j1:j2] # temperature at 2m in K
td = np.ma.array(GribFile(rtma_data['td'])[1].values())[i1:i2,j1:j2] # dew point in K
precipa = np.ma.array(GribFile(rtma_data['precipa'])[1].values())[i1:i2,j1:j2] # precipitation
hgt = np.ma.array(GribFile('static/ds.terrainh.bin')[1].values())[i1:i2,j1:j2]
logging.info('t2 min %s max %s' % (np.min(t2),np.max(t2)))
logging.info('td min %s max %s' % (np.min(td),np.max(td)))
logging.info('precipa min %s max %s' % (np.min(precipa),np.max(precipa)))
logging.info('hgt min %s max %s' % (np.min(hgt),np.max(hgt)))
# compute relative humidity
    rh = 100*np.exp(17.625*243.04*(td - t2) / (243.04 + t2 - 273.15) / (243.04 + td - 273.15))
return td, t2, rh, precipa, hgt, lats[i1:i2,j1:j2], lons[i1:i2,j1:j2] | 1e97228b613dc42fb51c29ace44c306ea81052cb | 3,653,671 |
import traceback
import six
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data | 2ba794797362b7761a0dc6cbf58851a60a50cc0c | 3,653,672 |
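# Illustrative usage sketch for serialize_remote_exception above (not part of the
# original snippet). It assumes the module-level names the function relies on
# (LOG, _, jsonutils, _REMOTE_POSTFIX) are defined as in oslo-style code.
import sys

def example_serialize():
    try:
        raise ValueError("boom")
    except ValueError:
        # sys.exc_info() provides the (type, value, traceback) tuple expected here.
        return serialize_remote_exception(sys.exc_info(), log_failure=False)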
import itertools
import shlex
def combine_arg_list_opts(opt_args):
"""Helper for processing arguments like impalad_args. The input is a list of strings,
each of which is the string passed into one instance of the argument, e.g. for
--impalad_args="-foo -bar" --impalad_args="-baz", the input to this function is
["-foo -bar", "-baz"]. This function combines the argument lists by tokenised each
string into separate arguments, if needed, e.g. to produce the output
["-foo", "-bar", "-baz"]"""
return list(itertools.chain(*[shlex.split(arg) for arg in opt_args])) | 77cfc6fa54201083c2cb058b8a9493b7d020273e | 3,653,673 |
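# Illustrative usage sketch for combine_arg_list_opts above (not part of the
# original snippet); it only uses the standard library.
example_opts = ["-foo -bar", "-baz"]      # e.g. from repeated --impalad_args flags
assert combine_arg_list_opts(example_opts) == ["-foo", "-bar", "-baz"]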
def in_data():
"""Na funçao `in_data` é tratado os dados da matriz lida do arquivo txt."""
points = {}
i, j = map(int, file.readline().split(' '))
for l in range(i):
line = file.readline().split(' ')
if len(line)==j:
for colun in range(len(line)):
                if line[colun].find("\n") != -1:
                    # strip the trailing newline character
                    line[colun] = line[colun][:-1]
if line[colun] not in '0' :
points[line[colun]] = (l, colun)
else:
            raise ValueError('Inconsistent number of columns in line.')
return points | 423b96cda6802fdfb23a36aa486b7e067999a60d | 3,653,674 |
def doom_action_space_extended():
"""
This function assumes the following list of available buttons:
TURN_LEFT
TURN_RIGHT
MOVE_FORWARD
MOVE_BACKWARD
MOVE_LEFT
MOVE_RIGHT
ATTACK
"""
space = gym.spaces.Tuple((
Discrete(3), # noop, turn left, turn right
Discrete(3), # noop, forward, backward
Discrete(3), # noop, strafe left, strafe right
Discrete(2), # noop, attack
))
return space | 27ceab538f9a7102724a81ae1f692340c3b5e2e6 | 3,653,675 |
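# Illustrative usage sketch for doom_action_space_extended above (not part of the
# original snippet); assumes the gym package used by the original module is installed
# and that Discrete is imported from gym.spaces there.
space = doom_action_space_extended()
sample = space.sample()      # e.g. (1, 2, 0, 1): turn left, move backward, no strafe, attack
assert len(sample) == 4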
def svn_auth_provider_invoke_first_credentials(*args):
"""
svn_auth_provider_invoke_first_credentials(svn_auth_provider_t _obj, void provider_baton, apr_hash_t parameters,
char realmstring, apr_pool_t pool) -> svn_error_t
"""
return _core.svn_auth_provider_invoke_first_credentials(*args) | 951d2554df8efa4e392668c743f2b3f51cab2f48 | 3,653,676 |
def kill_process(device, process="tcpdump", pid=None, sync=True, port=None):
"""Kill any active process
:param device: lan or wan
:type device: Object
:param process: process to kill, defaults to tcpdump
:type process: String, Optional
:param pid: process id to kill, defaults to None
:type pid: String, Optional
    :param sync: Set to False if sync should not be executed; defaults to True
    :type sync: Boolean, optional
    :param port: port number whose owning process should be killed
    :type port: int
:return: Console output of sync sendline command after kill process
:rtype: string
"""
if pid:
device.sudo_sendline("kill %s" % pid)
elif port:
device.sudo_sendline(r"kill $(lsof -t -i:%s)" % str(port))
else:
device.sudo_sendline("killall %s" % process)
device.expect(device.prompt)
if sync:
device.sudo_sendline("sync")
retry_on_exception(device.expect, (device.prompt,), retries=5, tout=60)
return device.before | be3947e624d1d2e8ca4015480a07bde67475c721 | 3,653,677 |
def get_sentence(soup, ets_series, cache, get_verb=False):
"""
Given an ETS example `ets_series`, find the corresponding fragment, and
retrieve the sentence corresponding to the ETS example.
"""
frg = load_fragment(soup, ets_series.text_segment_id, cache)
sentence = frg.find('s', {'n': ets_series.sentence_number})
if get_verb:
tokenized, raw_tokens = tokenize_vuamc(sentence, raw=True)
# Offset starts from 1
verb = raw_tokens[ets_series['word_offset'] - 1].lower()
return tokenized, raw_tokens, verb
tokenized, raw_tokens = tokenize_vuamc(sentence, raw=True)
return tokenized, raw_tokens | 1a39307d973a5fb93fea7b100f03d0797af1f1ef | 3,653,678 |
def PreAuiNotebook(*args, **kwargs):
"""PreAuiNotebook() -> AuiNotebook"""
val = _aui.new_PreAuiNotebook(*args, **kwargs)
val._setOORInfo(val)
return val | 29400857cdca1fa42058d4200111bd7eeae8410b | 3,653,679 |
def get_nsx_security_group_id(session, cluster, neutron_id):
"""Return the NSX sec profile uuid for a given neutron sec group.
First, look up the Neutron database. If not found, execute
a query on NSX platform as the mapping might be missing.
NOTE: Security groups are called 'security profiles' on the NSX backend.
"""
nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id)
if not nsx_id:
# Find security profile on backend.
# This is a rather expensive query, but it won't be executed
# more than once for each security group in Neutron's lifetime
nsx_sec_profiles = secgrouplib.query_security_profiles(
cluster, '*',
filters={'tag': neutron_id,
'tag_scope': 'q_sec_group_id'})
# Only one result expected
# NOTE(salv-orlando): Not handling the case where more than one
# security profile is found with the same neutron port tag
if not nsx_sec_profiles:
LOG.warn(_("Unable to find NSX security profile for Neutron "
"security group %s"), neutron_id)
return
elif len(nsx_sec_profiles) > 1:
LOG.warn(_("Multiple NSX security profiles found for Neutron "
"security group %s"), neutron_id)
nsx_sec_profile = nsx_sec_profiles[0]
nsx_id = nsx_sec_profile['uuid']
with session.begin(subtransactions=True):
# Create DB mapping
nsx_db.add_neutron_nsx_security_group_mapping(
session, neutron_id, nsx_id)
return nsx_id | 0b02a7f90d2e9e9d5917612280ed00ebfcab7f93 | 3,653,680 |
def customiseGlobalTagForOnlineBeamSpot(process):
"""Customisation of GlobalTag for Online BeamSpot
- edits the GlobalTag ESSource to load the tags used to produce the HLT beamspot
- these tags are not available in the Offline GT, which is the GT presently used in HLT+RECO tests
- not loading these tags (i.e. not using this customisation) does not result in a runtime error,
but it leads to an HLT beamspot different to the one obtained when running HLT alone
"""
if hasattr(process, 'GlobalTag'):
if not hasattr(process.GlobalTag, 'toGet'):
process.GlobalTag.toGet = cms.VPSet()
process.GlobalTag.toGet += [
cms.PSet(
record = cms.string('BeamSpotOnlineLegacyObjectsRcd'),
tag = cms.string('BeamSpotOnlineLegacy')
),
cms.PSet(
record = cms.string('BeamSpotOnlineHLTObjectsRcd'),
tag = cms.string('BeamSpotOnlineHLT')
)
]
return process | 8d0a8a0fa8e48e597dc4be910c6d9281e5ab4ae2 | 3,653,681 |
def path_to_filename(username, path_to_file):
""" Converts a path formated as path/to/file.txt to a filename, ie. path_to_file.txt """
filename = '{}_{}'.format(username, path_to_file)
filename = filename.replace('/','_')
print(filename)
return filename | a29e98db8ac4cd7f39e0f0e7fc1f76e72f5fa398 | 3,653,682 |
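# Illustrative usage sketch for path_to_filename above (not part of the original snippet).
assert path_to_filename("alice", "path/to/file.txt") == "alice_path_to_file.txt"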
from typing import List
def _convert_artist_format(artists: List[str]) -> str:
"""Returns converted artist format"""
formatted = ""
for x in artists:
formatted += x + ", "
return formatted[:-2] | 66f8afb0eb09e9a66eaa728c28576bb0e5a496d3 | 3,653,683 |
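# Illustrative usage sketch for _convert_artist_format above (not part of the original snippet).
assert _convert_artist_format(["Miles Davis", "John Coltrane"]) == "Miles Davis, John Coltrane"
assert _convert_artist_format([]) == ""      # slicing off ", " from an empty string leaves ""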
def slerp(val, low, high):
"""
Spherical interpolation. val has a range of 0 to 1.
From Tom White 2016
:param val: interpolation mixture value
:param low: first latent vector
:param high: second latent vector
:return:
"""
if val <= 0:
return low
elif val >= 1:
return high
elif np.allclose(low, high):
return low
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high | 499b192a90475fc3b4a888270159e98cbfa449fd | 3,653,684 |
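# Illustrative usage sketch for slerp above (not part of the original snippet);
# assumes numpy is available as np, as in the original module.
low_vec = np.array([1.0, 0.0])
high_vec = np.array([0.0, 1.0])
mid_vec = slerp(0.5, low_vec, high_vec)      # halfway along the arc between the two unit vectors
assert np.allclose(np.linalg.norm(mid_vec), 1.0)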
import json
async def verify_input_body_is_json(
request: web.Request, handler: Handler
) -> web.StreamResponse:
"""
Middleware to verify that input body is of json format
"""
if request.can_read_body:
try:
await request.json()
except json.decoder.JSONDecodeError:
raise web.HTTPBadRequest(reason="Malformed JSON.")
return await handler(request) | 7c424b941d3a86e95029f60759b0f47c3d1c44d3 | 3,653,685 |
def svn_repos_get_logs4(*args):
"""
svn_repos_get_logs4(svn_repos_t repos, apr_array_header_t paths, svn_revnum_t start,
svn_revnum_t end, int limit, svn_boolean_t discover_changed_paths,
svn_boolean_t strict_node_history,
svn_boolean_t include_merged_revisions,
apr_array_header_t revprops,
svn_repos_authz_func_t authz_read_func,
svn_log_entry_receiver_t receiver, apr_pool_t pool) -> svn_error_t
"""
return _repos.svn_repos_get_logs4(*args) | 6363e6846e7a1788eef769b529e641c14b4f0525 | 3,653,686 |
def linreg_predict(model, X, v=False):
"""
    Prediction with linear regression.
    Returns the predictive mean yhat[i] = E[y | X[i, :], model].
    (The predictive variance v is not computed by this implementation.)
"""
if 'preproc' in model:
X = preprocessor_apply_to_test(model['preproc'], X)
yhat = X.dot(model['w'])
return yhat | 5b326bf06b8061e86c5b4ebc5cf2d5e43cadcd1c | 3,653,687 |
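# Illustrative usage sketch for linreg_predict above (not part of the original
# snippet); the model dict and data are made up for demonstration and assume numpy as np.
demo_model = {'w': np.array([2.0, -1.0])}        # no 'preproc' key, so X is used directly
demo_X = np.array([[1.0, 1.0], [0.0, 3.0]])
demo_yhat = linreg_predict(demo_model, demo_X)
assert np.allclose(demo_yhat, [1.0, -3.0])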
def parse_hostportstr(hostportstr):
""" Parse hostportstr like 'xxx.xxx.xxx.xxx:xxx'
"""
host = hostportstr.split(':')[0]
port = int(hostportstr.split(':')[1])
return host, port | 7d67b548728d8cc159a7baa3e5f419bf7cbbc4d3 | 3,653,688 |
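# Illustrative usage sketch for parse_hostportstr above (not part of the original snippet).
demo_host, demo_port = parse_hostportstr("127.0.0.1:8080")
assert demo_host == "127.0.0.1" and demo_port == 8080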
def sigmoid_grad_input(x_input, grad_output):
"""sigmoid nonlinearity gradient.
Calculate the partial derivative of the loss
with respect to the input of the layer
# Arguments
x_input: np.array of size `(n_objects, n_in)`
grad_output: np.array of size `(n_objects, n_in)`
dL / df
# Output
the partial derivative of the loss
with respect to the input of the function
np.array of size `(n_objects, n_in)`
dL / dh
"""
    # dsigmoid/dx = sigmoid(x) * (1 - sigmoid(x)); the chain rule then
    # multiplies this elementwise by the upstream gradient grad_output.
output = []
for x in x_input:
one = (1/(1+np.exp(-x)))
two = (np.exp(-x)/(1+np.exp(-x)))
output.append(one*two)
output = np.asarray(output*grad_output)
return output | f397cdb3c9608fa09c5053e27e57525e2a8e3ba5 | 3,653,689 |
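# Illustrative usage sketch for sigmoid_grad_input above (not part of the original
# snippet); assumes numpy as np. At x = 0 the sigmoid derivative is 0.25.
x_demo = np.zeros((2, 3))
grad_demo = np.ones((2, 3))
assert np.allclose(sigmoid_grad_input(x_demo, grad_demo), 0.25 * np.ones((2, 3)))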
def f_is3byte(*args):
"""f_is3byte(flags_t F, void ?) -> bool"""
return _idaapi.f_is3byte(*args) | 9fb9f351a4d595c7ecde83492d92911cd646bc0a | 3,653,690 |
def make_desired_disp(vertices, DeformType = DispType.random, num_of_vertices = -1):
"""
DispType.random: Makes a random displacement field. The first 3 degrees of freedom are assumed to
be zero in order to fix rotation and translation of the lattice.
DispType.isotropic: Every point moves towards the origin with an amount propotional to the distance from the origin
"""
    if(num_of_vertices < 1):
        num_of_vertices = get_num_of_verts(vertices)
if(DeformType == DispType.random):
return normalizeVec(npr.rand(2*num_of_vertices))
elif(DeformType == DispType.isotropic):
return normalizeVec(vertices.flatten())
elif(DeformType == DispType.explicit_1):
return np.vstack ((np.array([[0.0, 0.0], [0, -2], [-1, -1]]), npr.rand(num_of_vertices - 3, 2))).flatten()
elif(DeformType == DispType.explicit_2):
return np.vstack ((np.array([[0.0, 0.0], [0, 0], [-0.5 + 1.5*np.sin(np.pi/6), 0.3 - 1.5*np.cos(np.pi/6)]]),
npr.rand(num_of_vertices - 3, 2))).flatten() | 90697baa3879f22cb400c1da0923fc611d43a72c | 3,653,691 |
def do_filter():
"""Vapoursynth filtering"""
opstart_ep10 = 768
ncop = JPBD_NCOP.src_cut
ep10 = JPBD_10.src_cut
ncop = lvf.rfs(ncop, ep10[opstart_ep10:], [(0, 79), (1035, 1037)])
return ncop | 30d605e2267875eaaa4506bc27b0df380a0e48d1 | 3,653,693 |
def test_returns_less_than_expected_errors(configured_test_manager):
"""A function that doesn't return the same number of objects as specified in the stage outputs should throw an OutputSignatureError."""
@stage([], ["test1", "test2"])
def output_stage(record):
return "hello world"
record = Record(configured_test_manager, None)
with pytest.raises(OutputSignatureError):
output_stage(record) | 6f88de961911f6bc862619e67f2d72a520f2ca90 | 3,653,694 |
import pickle
def xgb(validate = True):
"""
Load XGB language detection model.
Parameters
----------
validate: bool, optional (default=True)
if True, malaya will check model availability and download if not available.
Returns
-------
LANGUAGE_DETECTION : malaya._models._sklearn_model.LANGUAGE_DETECTION class
"""
if validate:
check_file(PATH_LANG_DETECTION['xgb'], S3_PATH_LANG_DETECTION['xgb'])
else:
if not check_available(PATH_LANG_DETECTION['xgb']):
raise Exception(
'language-detection/xgb is not available, please `validate = True`'
)
try:
with open(PATH_LANG_DETECTION['xgb']['vector'], 'rb') as fopen:
vector = pickle.load(fopen)
with open(PATH_LANG_DETECTION['xgb']['model'], 'rb') as fopen:
model = pickle.load(fopen)
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('language-detection/xgb') and try again"
)
return LANGUAGE_DETECTION(model, lang_labels, vector, mode = 'xgb') | 87de42a5957facbc057ecf024334b307df09b19f | 3,653,695 |
import csv
import math
def get_current_data(csv_file):
"""
Gathers and returns list of lists of current information based in hourly data from NOAA's National Data Buoy Center
archived data. Returned list format is [current depths, current speeds, current directions].
Input parameter is any CSV or text file with the same formatting at the NDBC website.
"""
current_speed = []
current_dir = []
with open(csv_file) as data_file:
reader = csv.reader(data_file, delimiter=' ')
next(reader) # skips header line of CSV file
next(reader)
for row in reader:
while '' in row:
row.remove('')
current_depth = float(row[5])
try:
current_current_speed = float(row[7])
except ValueError:
current_current_speed = np.nan
current_current_dir = 360 - int(row[6])
if math.isclose(current_current_speed, 99.):
current_current_speed = np.nan
if math.isclose(current_current_dir, -639) or current_current_dir == 'MM':
current_current_dir = np.nan
current_speed.append(float(current_current_speed))
current_dir.append(float(current_current_dir))
current_data = {'Current Speed': current_speed, 'Current Direction': current_dir}
current_data = pd.DataFrame(data=current_data)
return current_data, current_depth | f217d66a40466f8bcc590f0cc61fc8c3687b63da | 3,653,696 |
from typing import Optional
from typing import Union
from typing import Sequence
from typing import List
def _get_batched_jittered_initial_points(
model: Model,
chains: int,
initvals: Optional[Union[StartDict, Sequence[Optional[StartDict]]]],
random_seed: int,
jitter: bool = True,
jitter_max_retries: int = 10,
) -> Union[np.ndarray, List[np.ndarray]]:
"""Get jittered initial point in format expected by NumPyro MCMC kernel
Returns
-------
out: list of ndarrays
list with one item per variable and number of chains as batch dimension.
Each item has shape `(chains, *var.shape)`
"""
random_seed = np.random.default_rng(random_seed).integers(2**30, size=chains)
assert len(random_seed) == chains
initial_points = _init_jitter(
model,
initvals,
seeds=random_seed,
jitter=jitter,
jitter_max_retries=jitter_max_retries,
)
initial_points = [list(initial_point.values()) for initial_point in initial_points]
if chains == 1:
initial_points = initial_points[0]
else:
initial_points = [np.stack(init_state) for init_state in zip(*initial_points)]
return initial_points | 2ba3573f26922cec0cd4a76646bc1d5ad96051b4 | 3,653,697 |
import warnings
import copy
def load_schema(url, resolver=None, resolve_references=False,
resolve_local_refs=False):
"""
Load a schema from the given URL.
Parameters
----------
url : str
The path to the schema
resolver : callable, optional
A callback function used to map URIs to other URIs. The
callable must take a string and return a string or `None`.
This is useful, for example, when a remote resource has a
mirror on the local filesystem that you wish to use.
resolve_references : bool, optional
If `True`, resolve all `$ref` references.
resolve_local_refs : bool, optional
If `True`, resolve all `$ref` references that refer to other objects
within the same schema. This will automatically be handled when passing
`resolve_references=True`, but it may be desirable in some cases to
control local reference resolution separately.
This parameter is deprecated.
"""
if resolve_local_refs is True:
warnings.warn(
"The 'resolve_local_refs' parameter is deprecated.",
AsdfDeprecationWarning
)
if resolver is None:
# We can't just set this as the default in load_schema's definition
# because invoking get_default_resolver at import time leads to a circular import.
resolver = extension.get_default_resolver()
# We want to cache the work that went into constructing the schema, but returning
# the same object is treacherous, because users who mutate the result will not
# expect that they're changing the schema everywhere.
return copy.deepcopy(
_load_schema_cached(url, resolver, resolve_references, resolve_local_refs)
) | b937d56eb7b23a530758327fbf463adb63be4cf4 | 3,653,698 |
def fastaDecodeHeader(fastaHeader):
"""Decodes the fasta header
"""
return fastaHeader.split("|") | 06f0af70765670dafa0b558867e2d9094c3d928b | 3,653,699 |
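# Illustrative usage sketch for fastaDecodeHeader above (not part of the original snippet).
demo_header = "sp|P69905|HBA_HUMAN"
assert fastaDecodeHeader(demo_header) == ["sp", "P69905", "HBA_HUMAN"]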