def parse_table_row(self, markup, row):
""" Parses a row of cells in a Wikipedia table.
Cells in the row are separated by "||".
A "!" indicates a row of heading columns.
Each cell can contain properties before a "|",
# e.g. align="right" | Cell 2 (right aligned).
"""
if row is None:
row = WikipediaTableRow()
markup = markup.replace("!!", "||")
for cell in markup.lstrip("|!").split("||"):
# The "|" after the properties can't be part of a link.
i = cell.find("|")
j = cell.find("[[")
if i > 0 and (j < 0 or i < j):
data = self.plain(cell[i+1:])
properties = cell[:i].strip()
else:
data = self.plain(cell)
properties = u""
cell = WikipediaTableCell(data)
cell.properties = properties
row.append(cell)
return row
def on_session_end(self, session):
"""Triggered by the given session object when the session is about
to close normally.
in session of type :class:`ISession`
Session that is being closed
return progress of type :class:`IProgress`
Used to wait until the corresponding machine is actually
dissociated from the given session on the server.
Returned only when this session is a direct one.
"""
if not isinstance(session, ISession):
raise TypeError("session can only be an instance of type ISession")
progress = self._call("onSessionEnd",
in_p=[session])
progress = IProgress(progress)
return progress
def td_taper(out, start, end, beta=8, side='left'):
"""Applies a taper to the given TimeSeries.
A half-kaiser window is used for the roll-off.
Parameters
----------
out : TimeSeries
The ``TimeSeries`` to taper.
start : float
The time (in s) to start the taper window.
end : float
The time (in s) to end the taper window.
beta : int, optional
The beta parameter to use for the Kaiser window. See
``scipy.signal.kaiser`` for details. Default is 8.
side : {'left', 'right'}
The side to apply the taper to. If ``'left'`` (``'right'``), the taper
will roll up (down) between ``start`` and ``end``, with all values
before ``start`` (after ``end``) set to zero. Default is ``'left'``.
Returns
-------
TimeSeries
The tapered time series.
"""
out = out.copy()
width = end - start
winlen = 2 * int(width / out.delta_t)
window = Array(signal.get_window(('kaiser', beta), winlen))
xmin = int((start - out.start_time) / out.delta_t)
xmax = xmin + winlen//2
if side == 'left':
out[xmin:xmax] *= window[:winlen//2]
if xmin > 0:
out[:xmin].clear()
elif side == 'right':
out[xmin:xmax] *= window[winlen//2:]
if xmax < len(out):
out[xmax:].clear()
else:
raise ValueError("unrecognized side argument {}".format(side))
return out
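A minimal usage sketch, assuming the TimeSeries here is PyCBC's pycbc.types.TimeSeries (which the Array/delta_t usage above suggests); the sample rate and times are illustrative only:
import numpy
from pycbc.types import TimeSeries  # assumed container providing delta_t/start_time/copy()
ts = TimeSeries(numpy.ones(4096), delta_t=1.0 / 256)
# roll the data up from zero between t = 1 s and t = 3 s; samples before 1 s are zeroed
tapered = td_taper(ts, start=1.0, end=3.0, side='left')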
def hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
Port_channel = ET.SubElement(interfacetype, "Port-channel")
Port_channel = ET.SubElement(Port_channel, "Port-channel")
Port_channel.text = kwargs.pop('Port_channel')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def slice_graph(graph, node, frontier, include_frontier=False):
"""
Generate a slice of the graph from the head node to the given frontier.
:param networkx.DiGraph graph: The graph to work on.
:param node: The starting node in the graph.
:param frontier: A list of frontier nodes.
:param bool include_frontier: Whether the frontier nodes are included in the slice or not.
:return: A subgraph.
:rtype: networkx.DiGraph
"""
subgraph = networkx.DiGraph()
for frontier_node in frontier:
for simple_path in networkx.all_simple_paths(graph, node, frontier_node):
for src, dst in zip(simple_path, simple_path[1:]):
if include_frontier or (src not in frontier and dst not in frontier):
subgraph.add_edge(src, dst)
if not list(subgraph.nodes):
# HACK: FIXME: for infinite loop nodes, this would return an empty set, so we include the loop body itself
# Make sure this makes sense (EDG thinks it does)
if (node, node) in graph.edges:
subgraph.add_edge(node, node)
return subgraph
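A small illustrative use, assuming networkx is imported at module level as the code above requires:
import networkx
g = networkx.DiGraph()
g.add_edges_from([(1, 2), (2, 3), (2, 4), (4, 5)])
# keep only edges on simple paths from node 1 towards the frontier {4};
# with include_frontier left False, edges touching node 4 itself are dropped
sub = slice_graph(g, 1, {4})
assert set(sub.edges) == {(1, 2)}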
def output_summary(self, output_stream=sys.stdout):
"""outputs a usage tip and the list of acceptable commands.
This is useful as the output of the 'help' option.
parameters:
output_stream - an open file-like object suitable for use as the
target of a print function
"""
if self.app_name or self.app_description:
print('Application: ', end='', file=output_stream)
if self.app_name:
print(self.app_name, self.app_version, file=output_stream)
if self.app_description:
print(self.app_description, file=output_stream)
if self.app_name or self.app_description:
print('', file=output_stream)
names_list = self.get_option_names()
print(
"usage:\n%s [OPTIONS]... " % self.app_invocation_name,
end='', file=output_stream
)
bracket_count = 0
# this section prints the non-switch command line arguments
for key in names_list:
an_option = self.option_definitions[key]
if an_option.is_argument:
if an_option.default is None:
# there's no option, assume the user must set this
print(an_option.name, end='', file=output_stream)
elif (
inspect.isclass(an_option.value)
or inspect.ismodule(an_option.value)
):
# this is already set and it could have expanded, most
# likely this is a case where a sub-command has been
# loaded and we're looking to show the help for it.
# display it as a constant already provided rather
# than as an option the user must provide
print(an_option.default, end='', file=output_stream)
else:
# this is an argument that the user may alternatively
# provide
print("[ %s" % an_option.name, end='', file=output_stream)
bracket_count += 1
print(']' * bracket_count, '\n', file=output_stream)
names_list.sort()
if names_list:
print('OPTIONS:', file=output_stream)
pad = ' ' * 4
for name in names_list:
if name in self.options_banned_from_help:
continue
option = self._get_option(name)
line = ' ' * 2 # always start with 2 spaces
if option.short_form:
line += '-%s, ' % option.short_form
line += '--%s' % name
line += '\n'
doc = option.doc if option.doc is not None else ''
if doc:
line += '%s%s\n' % (pad, doc)
try:
value = option.value
type_of_value = type(value)
converter_function = to_string_converters[type_of_value]
default = converter_function(value)
except KeyError:
default = option.value
if default is not None:
if (
(option.secret or 'password' in name.lower()) and
not self.option_definitions.admin.expose_secrets.default
):
default = '*********'
if name not in ('help',):
# don't bother with certain dead obvious ones
line += '%s(default: %s)\n' % (pad, default)
print(line, file=output_stream)
def assign(name, value):
'''
Assign a single sysctl parameter for this minion
CLI Example:
.. code-block:: bash
salt '*' sysctl.assign net.inet.icmp.icmplim 50
'''
ret = {}
cmd = 'sysctl {0}="{1}"'.format(name, value)
data = __salt__['cmd.run_all'](cmd, python_shell=False)
if data['retcode'] != 0:
raise CommandExecutionError('sysctl failed: {0}'.format(
data['stderr']))
new_name, new_value = data['stdout'].split(':', 1)
ret[new_name] = new_value.split(' -> ')[-1]
return ret
def substructure(self, atoms, meta=False, as_view=True):
"""
create substructure containing atoms from nbunch list
:param atoms: list of atoms numbers of substructure
:param meta: if True metadata will be copied to substructure
:param as_view: If True, the returned graph-view provides a read-only view
of the original structure scaffold without actually copying any data.
"""
s = super().substructure(atoms, meta, as_view)
if as_view:
s.check_valence = s.explicify_hydrogens = s.implicify_hydrogens = s.reset_query_marks = frozen
s.standardize = s.aromatize = frozen
return s
def playerJoin(config, agentCallBack, lobbyTimeout=c.INITIAL_TIMEOUT, debug=True):
"""cause an agent to join an already hosted game"""
FLAGS(sys.argv) # ignore pysc2 command-line handling (eww)
log = protocol.logging.logging
log.disable(log.CRITICAL) # disable pysc2 logging
amHosting = not bool(config.host)
thisPlayer = config.whoAmI()
operPrefix = "HOST" if amHosting else "JOIN"
operType = "%sGAME"%operPrefix
createReq = config.requestCreateDetails() if amHosting else None
joinReq = config.requestJoinDetails()
selectedIP = config.clientInitHost()
selectPort = config.clientInitPort()
controller = None # the object that manages the application process
finalResult = rh.playerSurrendered(config) # default to this player losing if somehow a result wasn't acquired normally
replayData = "" # complete raw replay data for the match
if debug: print("[%s] Starcraft2 game process is launching (fullscreen=%s)."%(operType, config.fullscreen))
with config.launchApp(ip_address=selectedIP, port=selectPort, connect=False):
try: # WARNING: if port equals the same port of the host on the same machine, this subsequent process closes!
controller = ClientController()
controller.connect(url=selectedIP, port=selectPort, timeout=lobbyTimeout) # establish socket connection
if amHosting:
if debug:
print("[%s] Starcraft2 host application is live. (%s)"%(operType, controller.status)) # status: launched
print("[%s] Creating Starcraft Game at %s"%(operType, controller))
controller.create_game(createReq)
if debug:
print("[%s] Starcraft2 is waiting for %d player(s) to join. (%s)"%(operType, config.numAgents, controller.status)) # status: init_game
print("[%s] sending request to join game. (%s)"%(operType, controller.status)) # status: init_game
else: # joining clients must wait for host to perform its join request
timeToWait = c.DEFAULT_HOST_DELAY
for i in range(timeToWait): # WARNING: the host must perform its join action with its client before any joining players issue join requests to their clients
if debug: print("[%s] waiting %d seconds for the host to finish its init sequence."%(operType, timeToWait-i))
time.sleep(1)
joinResp = controller.join_game(joinReq) # SC2APIProtocol.RequestJoinGame
print("[%s] connection to %s:%d was successful. Game is starting! (%s)"%(operType, selectedIP, selectPort, controller.status)) # status: in_game
thisPlayer.playerID = int(joinResp.player_id) # update playerID; response to join game request is authority
if debug: print("[%s] joined match as %s."%(operType, thisPlayer)) # all players have actually joined already to advance beyond join_game (init_game)
config.updateIDs(controller.game_info(), tag=operType, debug=debug) # SC2APIProtocol.ResponseGameInfo object
if debug: print("[%s] all %d player(s) found; game has started! (%s)"%(operType, config.numGameClients, controller.status)) # status: init_game
config.save() # "publish" the configuration file for other procs
try: agentCallBack(config.name) # send the configuration to the controlling agent
except Exception as e:
print("ERROR: agent %s crashed during init: %s (%s)"%(thisPlayer.initCmd, e, type(e)))
return (rh.playerCrashed(config), "") # no replay information to get
getGameState = controller.observe # function that observes what's changed since the prior gameloop(s)
startWaitTime = now()
while True: # wait for game to end while players/bots do their thing
obs = getGameState()
result = obs.player_result
if result: # match end condition was supplied by the client
finalResult = rh.idPlayerResults(config, result)
break
try: agentCallBack(obs) # do developer's creative stuff
except Exception as e:
print("%s ERROR: agent callback %s of %s crashed during game: %s"%(type(e), agentCallBack, thisPlayer.initCmd, e))
finalResult = rh.playerCrashed(config)
break
newNow = now() # periodically acquire the game's replay data (in case of abnormal termination)
if newNow - startWaitTime > c.REPLAY_SAVE_FREQUENCY:
replayData = controller.save_replay()
startWaitTime = newNow
replayData = controller.save_replay() # one final attempt to get the complete replay data
#controller.leave() # the connection to the server process is (cleanly) severed
except (protocol.ConnectionError, protocol.ProtocolError, remote_controller.RequestError) as e:
if "Status.in_game" in str(e): # state was previously in game and then exited that state
finalResult = rh.playerSurrendered(config) # rage quit is losing
else:
finalResult = rh.playerDisconnected(config)
print("%s Connection to game host has ended, even intentionally by agent. Message:%s%s"%(type(e), os.linesep, e))
except KeyboardInterrupt:
if debug: print("caught command to forcibly shutdown Starcraft2 client.")
finalResult = rh.playerSurrendered(config)
finally:
if replayData: # ensure replay data can be transmitted over http
replayData = base64.encodestring(replayData).decode() # convert raw bytes into str
if controller: controller.quit() # force the sc2 application to close
return (finalResult, replayData)
def format_h1(s, format="text", indents=0):
"""
Encloses string in format text
Args:
s: string
format: string starting with "text", "markdown", or "rest"
indents: number of leading indenting spaces
Returns: list
>>> print("\\n".join(format_h2("Header 1", indents=10)))
Header 1
--------
>>> print("\\n".join(format_h2("Header 1", "markdown", 0)))
## Header 1
"""
_CHAR = "="
if format.startswith("text"):
return format_underline(s, _CHAR, indents)
elif format.startswith("markdown"):
return ["# {}".format(s)]
elif format.startswith("rest"):
return format_underline(s, _CHAR, 0)
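The markdown branch is self-contained, so a quick illustrative call (the text and rest branches additionally rely on the module's format_underline helper):
format_h1("Introduction", format="markdown")  # -> ['# Introduction']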
def join_ops(ops1, ops2):
"""For internal use."""
i = len(ops1) - 1
j = 0
while i >= 0 and j < len(ops2):
if ops1[i] == ops2[j]:
i -= 1
j += 1
else:
break
return ops1[:i + 1] + ops2[j:]
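An illustrative call with placeholder op values: a matching suffix of ops1 is cancelled against a matching prefix of ops2, and both copies are dropped:
merged = join_ops(['up', 'left', 'left'], ['left', 'left', 'down'])
# the two trailing 'left' ops cancel the two leading 'left' ops -> ['up', 'down']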
def get_bound(pts):
"""Compute a minimal rectangle that covers all the points."""
(x0, y0, x1, y1) = (INF, INF, -INF, -INF)
for (x, y) in pts:
x0 = min(x0, x)
y0 = min(y0, y)
x1 = max(x1, x)
y1 = max(y1, y)
return (x0, y0, x1, y1)
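For example (INF is assumed to be a module-level alias for float('inf')):
x0, y0, x1, y1 = get_bound([(0, 1), (3, -2), (2, 5)])
# bounding box -> (0, -2, 3, 5)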
def location_id(self, location_id):
"""
Sets the location_id of this Order.
The ID of the merchant location this order is associated with.
:param location_id: The location_id of this Order.
:type: str
"""
if location_id is None:
raise ValueError("Invalid value for `location_id`, must not be `None`")
if len(location_id) < 1:
raise ValueError("Invalid value for `location_id`, length must be greater than or equal to `1`")
self._location_id = location_id
def status(self, return_json=False):
"""
Describe the status of the current deployment.
"""
def tabular_print(title, value):
"""
Convenience function for printing formatted table items.
"""
click.echo('%-*s%s' % (32, click.style("\t" + title, fg='green') + ':', str(value)))
return
# Lambda Env Details
lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
if not lambda_versions:
raise ClickException(click.style("No Lambda %s detected in %s - have you deployed yet?" %
(self.lambda_name, self.zappa.aws_region), fg='red'))
status_dict = collections.OrderedDict()
status_dict["Lambda Versions"] = len(lambda_versions)
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
conf = function_response['Configuration']
self.lambda_arn = conf['FunctionArn']
status_dict["Lambda Name"] = self.lambda_name
status_dict["Lambda ARN"] = self.lambda_arn
status_dict["Lambda Role ARN"] = conf['Role']
status_dict["Lambda Handler"] = conf['Handler']
status_dict["Lambda Code Size"] = conf['CodeSize']
status_dict["Lambda Version"] = conf['Version']
status_dict["Lambda Last Modified"] = conf['LastModified']
status_dict["Lambda Memory Size"] = conf['MemorySize']
status_dict["Lambda Timeout"] = conf['Timeout']
status_dict["Lambda Runtime"] = conf['Runtime']
if 'VpcConfig' in conf.keys():
status_dict["Lambda VPC ID"] = conf.get('VpcConfig', {}).get('VpcId', 'Not assigned')
else:
status_dict["Lambda VPC ID"] = None
# Calculated statistics
try:
function_invocations = self.zappa.cloudwatch.get_metric_statistics(
Namespace='AWS/Lambda',
MetricName='Invocations',
StartTime=datetime.utcnow()-timedelta(days=1),
EndTime=datetime.utcnow(),
Period=1440,
Statistics=['Sum'],
Dimensions=[{'Name': 'FunctionName',
'Value': '{}'.format(self.lambda_name)}]
)['Datapoints'][0]['Sum']
except Exception as e:
function_invocations = 0
try:
function_errors = self.zappa.cloudwatch.get_metric_statistics(
Namespace='AWS/Lambda',
MetricName='Errors',
StartTime=datetime.utcnow()-timedelta(days=1),
EndTime=datetime.utcnow(),
Period=1440,
Statistics=['Sum'],
Dimensions=[{'Name': 'FunctionName',
'Value': '{}'.format(self.lambda_name)}]
)['Datapoints'][0]['Sum']
except Exception as e:
function_errors = 0
try:
error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100)
except Exception:
error_rate = "Error calculating"
status_dict["Invocations (24h)"] = int(function_invocations)
status_dict["Errors (24h)"] = int(function_errors)
status_dict["Error Rate (24h)"] = error_rate
# URLs
if self.use_apigateway:
api_url = self.zappa.get_api_url(
self.lambda_name,
self.api_stage)
status_dict["API Gateway URL"] = api_url
# Api Keys
api_id = self.zappa.get_api_id(self.lambda_name)
for api_key in self.zappa.get_api_keys(api_id, self.api_stage):
status_dict["API Gateway x-api-key"] = api_key
# There literally isn't a better way to do this.
# AWS provides no way to tie an APIGW domain name to its Lambda function.
domain_url = self.stage_config.get('domain', None)
base_path = self.stage_config.get('base_path', None)
if domain_url:
status_dict["Domain URL"] = 'https://' + domain_url
if base_path:
status_dict["Domain URL"] += '/' + base_path
else:
status_dict["Domain URL"] = "None Supplied"
# Scheduled Events
event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn)
status_dict["Num. Event Rules"] = len(event_rules)
if len(event_rules) > 0:
status_dict['Events'] = []
for rule in event_rules:
event_dict = {}
rule_name = rule['Name']
event_dict["Event Rule Name"] = rule_name
event_dict["Event Rule Schedule"] = rule.get(u'ScheduleExpression', None)
event_dict["Event Rule State"] = rule.get(u'State', None).title()
event_dict["Event Rule ARN"] = rule.get(u'Arn', None)
status_dict['Events'].append(event_dict)
if return_json:
# Putting the status in machine readable format
# https://github.com/Miserlou/Zappa/issues/407
print(json.dumpsJSON(status_dict))
else:
click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ")
for k, v in status_dict.items():
if k == 'Events':
# Events are a list of dicts
for event in v:
for item_k, item_v in event.items():
tabular_print(item_k, item_v)
else:
tabular_print(k, v)
# TODO: S3/SQS/etc. type events?
return True
def _nodeSetValuesFromDict(self, dct):
""" Sets values from a dictionary in the current node.
Non-recursive auxiliary function for setValuesFromDict
"""
if 'data' in dct:
qFont = QtGui.QFont()
success = qFont.fromString(dct['data'])
if not success:
msg = "Unable to create QFont from string {!r}".format(dct['data'])
logger.warn(msg)
if DEBUGGING:
raise ValueError(msg)
self.data = qFont
def _process_all(self, limit):
"""
This takes the list of omim identifiers from the omim.txt.Z file,
and iteratively queries the omim api for the json-formatted data.
This will create OMIM classes, with the label,
definition, and some synonyms.
If an entry is "removed",
it is added as a deprecated class.
If an entry is "moved",
it is deprecated and consider annotations are added.
Additionally, we extract:
*phenotypicSeries ids as superclasses
*equivalent ids for Orphanet and UMLS
If set to testMode,
it will write only those items in the test_ids to the testgraph.
:param limit:
:return:
"""
omimids = self._get_omim_ids()
LOG.info('Have %i omim numbers to fetch records from their API', len(omimids))
LOG.info('Have %i omim types ', len(self.omim_type))
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
tax_label = 'Homo sapiens'
tax_id = self.globaltt[tax_label]
# add genome and taxon
geno.addGenome(tax_id, tax_label) # tax label can get added elsewhere
model.addClassToGraph(tax_id, None) # label added elsewhere
includes = set()
includes.add('all')
self.process_entries(
omimids, self._transform_entry, includes, graph, limit, self.globaltt)
def can_invite_others(self, user):
"""Determine if user can invite people to a group.
Be aware that this check is independent from the people (users) which
are going to be invited. The checked user is the one who invites
someone, NOT who is going to be invited.
:param user: User to be checked.
:returns: True or False.
"""
if self.is_managed:
return False
elif self.is_admin(user):
return True
elif self.subscription_policy != SubscriptionPolicy.CLOSED:
return True
else:
return False
def generate_content_media_type(self):
"""
Means loading value when it's specified as JSON.
.. code-block:: python
{
'contentMediaType': 'application/json',
}
"""
if self._definition['contentMediaType'] == 'application/json':
with self.l('if isinstance({variable}, bytes):'):
with self.l('try:'):
self.l('{variable} = {variable}.decode("utf-8")')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must encoded by utf8")')
with self.l('if isinstance({variable}, str):'):
with self.l('try:'):
self.l('import json')
self.l('{variable} = json.loads({variable})')
with self.l('except Exception:'):
self.l('raise JsonSchemaException("{name} must be valid JSON")')
def init_weights(self, w, n=-1):
"""
This function initialises the adaptive weights of the filter.
**Args:**
* `w` : initial weights of filter. Possible values are:
* array with initial weights (1 dimensional array) of filter size
* "random" : create random weights
* "zeros" : create zero value weights
**Kwargs:**
* `n` : size of filter (int) - number of filter coefficients.
**Returns:**
* `y` : output value (float) calculated from input array.
"""
if n == -1:
n = self.n
if type(w) == str:
if w == "random":
w = np.random.normal(0, 0.5, n)
elif w == "zeros":
w = np.zeros(n)
else:
raise ValueError('Impossible to understand the w')
elif len(w) == n:
try:
w = np.array(w, dtype="float64")
except:
raise ValueError('Impossible to understand the w')
else:
raise ValueError('Impossible to understand the w')
self.w = w
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=''):
"""Calculate usMaxContext based on a contextual feature subtable."""
if st.Format == 1:
for ruleset in getattr(st, '%s%sRuleSet' % (chain, ruleType)):
if ruleset is None:
continue
for rule in getattr(ruleset, '%s%sRule' % (chain, ruleType)):
if rule is None:
continue
maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
elif st.Format == 2:
for ruleset in getattr(st, '%s%sClassSet' % (chain, ruleType)):
if ruleset is None:
continue
for rule in getattr(ruleset, '%s%sClassRule' % (chain, ruleType)):
if rule is None:
continue
maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
elif st.Format == 3:
maxCtx = maxCtxContextualRule(maxCtx, st, chain)
return maxCtx
def normalize(W, copy=True):
'''
Normalizes an input weighted connection matrix. If copy is not set, this
function will *modify W in place.*
Parameters
----------
W : np.ndarray
weighted connectivity matrix
copy : bool
if True, returns a copy of the matrix. Otherwise, modifies the matrix
in place. Default value=True.
Returns
-------
W : np.ndarray
normalized connectivity matrix
'''
if copy:
W = W.copy()
W /= np.max(np.abs(W))
return W
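A short example of both modes, with numpy imported as np as in the module:
import numpy as np
W = np.array([[0.0, 2.0], [-4.0, 1.0]])
W_scaled = normalize(W)   # copy: the largest absolute weight becomes 1.0, W is unchanged
normalize(W, copy=False)  # in place: W itself is rescaled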
def _to_rest_includes(models, includes):
""" Fetch the models to be included
The includes should follow a few basic rules:
* the include MUST not already be an array member
of the included array (no dupes)
* the include MUST not be the same as the primary
data if the primary data is a single resource
object (no dupes)
* the include MUST not be an array member of the
primary data if the primary data an array of
resource objects (no dupes)
Basically, each included array member should be the only
instance of that resource object in the entire restified
data.
"""
included = []
includes = includes or []
if not isinstance(models, list):
models = [models]
for include in includes:
for model in models:
rel = getattr(model, include)
if hasattr(rel, 'model') and rel.model:
rel_models = [rel.model]
elif hasattr(rel, 'models') and rel.models:
rel_models = rel.models
else:
# guard against an include with no related models (avoids reusing a stale or undefined rel_models)
rel_models = []
for rel_model in rel_models:
if rel_model in models or rel_model in included:
continue
else:
included.append(rel_model)
for idx, val in enumerate(included):
included[idx] = _to_rest(val)
return included
def split_params(sym, params):
"""Helper function to split params dictionary into args and aux params
Parameters
----------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
Returns
-------
arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
"""
arg_params = {}
aux_params = {}
for args in sym.list_arguments():
if args in params:
arg_params.update({args: nd.array(params[args])})
for aux in sym.list_auxiliary_states():
if aux in params:
aux_params.update({aux: nd.array(params[aux])})
return arg_params, aux_params
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, STRING_TYPES):
return to_unicode(x)
try:
l = list(x)
except TypeError as e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def create(self, data):
"""Create a new component
"""
response = self.http.post(str(self), json=data, auth=self.auth)
response.raise_for_status()
return response.json()
def top(self, objects: Set[Object]) -> Set[Object]:
"""
Return the topmost objects (i.e. minimum y_loc). The comparison is done separately for each
box.
"""
objects_per_box = self._separate_objects_by_boxes(objects)
return_set: Set[Object] = set()
for _, box_objects in objects_per_box.items():
min_y_loc = min([obj.y_loc for obj in box_objects])
return_set.update(set([obj for obj in box_objects if obj.y_loc == min_y_loc]))
return return_set
def token(self):
'''Attempt to return the auth header token.
:return: token related to request
'''
auth_header = self.headers.get('Authorization', '')
if 'Token ' in auth_header:
return auth_header.partition('Token ')[-1]
else:
return auth_header
def watch(self, key, pipeline=False):
"""Watch the given key.
Marks the given key to be watched for conditional execution
of a transaction.
Args:
key (str): Key that needs to be watched
pipeline (bool): True, start a transaction block. Default false.
"""
if pipeline:
self._pipeline.watch(key)
else:
self._db.watch(key)
def _py_ex_argtype(executable):
"""Returns the code to create the argtype to assign to the methods argtypes
attribute.
"""
result = []
for p in executable.ordered_parameters:
atypes = p.argtypes
if atypes is not None:
result.extend(p.argtypes)
else:
print(("No argtypes for: {}".format(p.definition())))
if type(executable).__name__ == "Function":
result.extend(executable.argtypes)
return result
def p_expression_uxnor(self, p):
'expression : XNOR expression %prec UXNOR'
p[0] = Uxnor(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1))
def bundle_stylesheets(context: Context):
"""
Compiles stylesheets
"""
args = [
'--output', context.app.scss_build_path,
'--output-style', 'compressed',
]
if context.verbosity == 0:
args.append('--quiet')
if not context.use_colour:
args.append('--no-color')
for path in context.app.scss_include_paths:
args.append('--include-path')
args.append(path)
return_code = 0
for source_file in context.app.scss_source_file_set.paths_for_shell(separator=None):
return_code = context.node_tool('node-sass', *args + [source_file]) or return_code
return return_code
def Run(self, unused_args):
"""Does the segfaulting."""
if flags.FLAGS.pdb_post_mortem:
logging.warning("Segfault action requested :(")
print(ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents)
else:
logging.warning("Segfault requested but not running in debug mode.") | Does the segfaulting. |
def run_training(sub_id: int, run_seed: int, run_options, process_queue):
"""
Launches training session.
:param process_queue: Queue used to send signal back to main.
:param sub_id: Unique id for training session.
:param run_seed: Random seed used for training.
:param run_options: Command line arguments for training.
"""
# Docker Parameters
docker_target_name = (run_options['--docker-target-name']
if run_options['--docker-target-name'] != 'None' else None)
# General parameters
env_path = (run_options['--env']
if run_options['--env'] != 'None' else None)
run_id = run_options['--run-id']
load_model = run_options['--load']
train_model = run_options['--train']
save_freq = int(run_options['--save-freq'])
keep_checkpoints = int(run_options['--keep-checkpoints'])
base_port = int(run_options['--base-port'])
num_envs = int(run_options['--num-envs'])
curriculum_folder = (run_options['--curriculum']
if run_options['--curriculum'] != 'None' else None)
lesson = int(run_options['--lesson'])
fast_simulation = not bool(run_options['--slow'])
no_graphics = run_options['--no-graphics']
trainer_config_path = run_options['<trainer-config-path>']
# Recognize and use docker volume if one is passed as an argument
if not docker_target_name:
model_path = './models/{run_id}-{sub_id}'.format(run_id=run_id, sub_id=sub_id)
summaries_dir = './summaries'
else:
trainer_config_path = \
'/{docker_target_name}/{trainer_config_path}'.format(
docker_target_name=docker_target_name,
trainer_config_path=trainer_config_path)
if curriculum_folder is not None:
curriculum_folder = \
'/{docker_target_name}/{curriculum_folder}'.format(
docker_target_name=docker_target_name,
curriculum_folder=curriculum_folder)
model_path = '/{docker_target_name}/models/{run_id}-{sub_id}'.format(
docker_target_name=docker_target_name,
run_id=run_id,
sub_id=sub_id)
summaries_dir = '/{docker_target_name}/summaries'.format(
docker_target_name=docker_target_name)
trainer_config = load_config(trainer_config_path)
env_factory = create_environment_factory(
env_path,
docker_target_name,
no_graphics,
run_seed,
base_port + (sub_id * num_envs)
)
env = SubprocessUnityEnvironment(env_factory, num_envs)
maybe_meta_curriculum = try_create_meta_curriculum(curriculum_folder, env)
# Create controller and begin training.
tc = TrainerController(model_path, summaries_dir, run_id + '-' + str(sub_id),
save_freq, maybe_meta_curriculum,
load_model, train_model,
keep_checkpoints, lesson, env.external_brains,
run_seed, fast_simulation)
# Signal that environment has been launched.
process_queue.put(True)
# Begin training
tc.start_learning(env, trainer_config)
def transcodeImage(self, media, height, width, opacity=100, saturation=100):
""" Returns the URL for a transcoded image from the specified media object.
Returns None if no media specified (needed if user tries to pass thumb
or art directly).
Parameters:
height (int): Height to transcode the image to.
width (int): Width to transcode the image to.
opacity (int): Opacity of the resulting image (possibly deprecated).
saturation (int): Saturation of the resulting image.
"""
if media:
transcode_url = '/photo/:/transcode?height=%s&width=%s&opacity=%s&saturation=%s&url=%s' % (
height, width, opacity, saturation, media)
return self.url(transcode_url, includeToken=True)
def get_episode_ids(self, show_id, season):
"""Get episode ids from the show id and the season.
:param int show_id: show id.
:param int season: season of the episode.
:return: episode ids per episode number.
:rtype: dict
"""
# get the page of the season of the show
logger.info('Getting the page of show id %d, season %d', show_id, season)
r = self.session.get(self.server_url + 'tvshow-%d-%d.html' % (show_id, season), timeout=10)
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# loop over episode rows
episode_ids = {}
for row in soup.select('table#table5 tr'):
# skip rows that do not have a link to the episode page
if not row('a', href=episode_id_re):
continue
# extract data from the cells
cells = row('td')
episode = int(cells[0].text.split('x')[1])
episode_id = int(cells[1].a['href'][8:-5])
episode_ids[episode] = episode_id
if episode_ids:
logger.debug('Found episode ids %r', episode_ids)
else:
logger.warning('No episode ids found')
return episode_ids
def get_pk_value_on_save(self, instance):
"""Generate ID if required."""
value = super(AleaIdField, self).get_pk_value_on_save(instance)
if not value:
value = self.get_seeded_value(instance)
return value
def bpp2newick(bppnewick):
"converts bpp newick format to normal newick"
regex1 = re.compile(r" #[-+]?[0-9]*\.?[0-9]*[:]")
regex2 = re.compile(r" #[-+]?[0-9]*\.?[0-9]*[;]")
regex3 = re.compile(r": ")
new = regex1.sub(":", bppnewick)
new = regex2.sub(";", new)
new = regex3.sub(":", new)
return new
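An illustrative conversion on a made-up BPP-style string (the ' #value' annotations in front of branch lengths are stripped; re is assumed to be imported, as the function requires):
bpp = "((a #0.1:1.0,b #0.2:1.0) #0.05:0.5);"
newick = bpp2newick(bpp)  # -> "((a:1.0,b:1.0):0.5);"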
def get_nendo ():
"""今は何年度?"""
y, m = map(int, time.strftime("%Y %m").split())
return y if m >= 4 else y - 1
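As a quick illustration (the module is assumed to import time; Japanese fiscal years start in April):
# called in March 2024 this returns 2023; called in April 2024 it returns 2024
current_nendo = get_nendo()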
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name):
u"""
Returns string with the name of the kwargs dict if the params after the first star need fixing
Otherwise returns empty string
"""
found_kwargs = False
needs_fix = False
for t in raw_params[2:]:
if t.type == token.COMMA:
# Commas are irrelevant at this stage.
continue
elif t.type == token.NAME and not found_kwargs:
# Keyword-only argument: definitely need to fix.
needs_fix = True
elif t.type == token.NAME and found_kwargs:
# Return 'foobar' of **foobar, if needed.
return t.value if needs_fix else u''
elif t.type == token.DOUBLESTAR:
# Found the '**' of **foobar.
found_kwargs = True
else:
# Never found **foobar. Return a synthetic name, if needed.
return kwargs_default if needs_fix else u''
def rsa_decrypt_base64_encoded_key(rsaprivatekey, enckey):
# type: (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey,
# str) -> bytes
"""Decrypt an RSA encrypted key encoded as base64
:param rsaprivatekey: RSA private key
:type rsaprivatekey:
cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey
:param str enckey: base64-encoded key
:rtype: bytes
:return: decrypted key
"""
return rsaprivatekey.decrypt(
base64.b64decode(enckey),
cryptography.hazmat.primitives.asymmetric.padding.OAEP(
mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(
algorithm=cryptography.hazmat.primitives.hashes.SHA1()
),
algorithm=cryptography.hazmat.primitives.hashes.SHA1(),
label=None,
)
)
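A hypothetical round trip using the cryptography package (key size and payload are placeholders, not values from the original project):
import base64
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
priv = rsa.generate_private_key(public_exponent=65537, key_size=2048)
oaep = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(), label=None)
enckey = base64.b64encode(priv.public_key().encrypt(b'symmetric-key-bytes', oaep))
assert rsa_decrypt_base64_encoded_key(priv, enckey) == b'symmetric-key-bytes'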
def step2_exchange(self, code=None, http=None, device_flow_info=None):
"""Exchanges a code for OAuth2Credentials.
Args:
code: string, a dict-like object, or None. For a non-device
flow, this is either the response code as a string, or a
dictionary of query parameters to the redirect_uri. For a
device flow, this should be None.
http: httplib2.Http, optional http instance to use when fetching
credentials.
device_flow_info: DeviceFlowInfo, return value from step1 in the
case of a device flow.
Returns:
An OAuth2Credentials object that can be used to authorize requests.
Raises:
FlowExchangeError: if a problem occurred exchanging the code for a
refresh_token.
ValueError: if code and device_flow_info are both provided or both
missing.
"""
if code is None and device_flow_info is None:
raise ValueError('No code or device_flow_info provided.')
if code is not None and device_flow_info is not None:
raise ValueError('Cannot provide both code and device_flow_info.')
if code is None:
code = device_flow_info.device_code
elif not isinstance(code, (six.string_types, six.binary_type)):
if 'code' not in code:
raise FlowExchangeError(code.get(
'error', 'No code was supplied in the query parameters.'))
code = code['code']
post_data = {
'client_id': self.client_id,
'code': code,
'scope': self.scope,
}
if self.client_secret is not None:
post_data['client_secret'] = self.client_secret
if self._pkce:
post_data['code_verifier'] = self.code_verifier
if device_flow_info is not None:
post_data['grant_type'] = 'http://oauth.net/grant_type/device/1.0'
else:
post_data['grant_type'] = 'authorization_code'
post_data['redirect_uri'] = self.redirect_uri
body = urllib.parse.urlencode(post_data)
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
if self.authorization_header is not None:
headers['Authorization'] = self.authorization_header
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
if http is None:
http = transport.get_http_object()
resp, content = transport.request(
http, self.token_uri, method='POST', body=body, headers=headers)
d = _parse_exchange_token_response(content)
if resp.status == http_client.OK and 'access_token' in d:
access_token = d['access_token']
refresh_token = d.get('refresh_token', None)
if not refresh_token:
logger.info(
'Received token response with no refresh_token. Consider '
"reauthenticating with prompt='consent'.")
token_expiry = None
if 'expires_in' in d:
delta = datetime.timedelta(seconds=int(d['expires_in']))
token_expiry = delta + _UTCNOW()
extracted_id_token = None
id_token_jwt = None
if 'id_token' in d:
extracted_id_token = _extract_id_token(d['id_token'])
id_token_jwt = d['id_token']
logger.info('Successfully retrieved access token')
return OAuth2Credentials(
access_token, self.client_id, self.client_secret,
refresh_token, token_expiry, self.token_uri, self.user_agent,
revoke_uri=self.revoke_uri, id_token=extracted_id_token,
id_token_jwt=id_token_jwt, token_response=d, scopes=self.scope,
token_info_uri=self.token_info_uri)
else:
logger.info('Failed to retrieve access token: %s', content)
if 'error' in d:
# you never know what those providers got to say
error_msg = (str(d['error']) +
str(d.get('error_description', '')))
else:
error_msg = 'Invalid response: {0}.'.format(str(resp.status))
raise FlowExchangeError(error_msg)
def update(self, docs=None, split=0, parallelism=None, progress_bar=True):
"""Update the features of the specified candidates.
:param docs: If provided, apply features to all the candidates in these
documents.
:param split: If docs is None, apply features to the candidates in this
particular split.
:type split: int
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Featurizer if
it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
"""
self.apply(
docs=docs,
split=split,
train=True,
clear=False,
parallelism=parallelism,
progress_bar=progress_bar,
)
def urlencode_utf8(params):
"""
UTF-8 safe variant of urllib.urlencode.
http://stackoverflow.com/a/8152242
"""
if hasattr(params, 'items'):
params = params.items()
params = (
'='.join((
quote_plus(k.encode('utf8'), safe='/'),
quote_plus(v.encode('utf8'), safe='/')
)) for k, v in params
)
return '&'.join(params)
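For example, on Python 3 (where dict order is preserved and quote_plus is assumed to come from urllib.parse):
query = urlencode_utf8({'q': 'café', 'lang': 'fr'})
# -> 'q=caf%C3%A9&lang=fr'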
def create_parser(subparsers):
""" create parser """
metrics_parser = subparsers.add_parser(
'metrics',
help='Display info of a topology\'s metrics',
usage="%(prog)s cluster/[role]/[env] topology-name [options]",
add_help=False)
args.add_cluster_role_env(metrics_parser)
args.add_topology_name(metrics_parser)
args.add_verbose(metrics_parser)
args.add_tracker_url(metrics_parser)
args.add_config(metrics_parser)
args.add_component_name(metrics_parser)
metrics_parser.set_defaults(subcommand='metrics')
containers_parser = subparsers.add_parser(
'containers',
help='Display info of a topology\'s containers metrics',
usage="%(prog)s cluster/[role]/[env] topology-name [options]",
add_help=False)
args.add_cluster_role_env(containers_parser)
args.add_topology_name(containers_parser)
args.add_verbose(containers_parser)
args.add_tracker_url(containers_parser)
args.add_config(containers_parser)
args.add_container_id(containers_parser)
containers_parser.set_defaults(subcommand='containers')
return subparsers
def after(self, i, sibling, name=None):
"""Adds siblings after the current tag."""
self.parent._insert(sibling, idx=self._own_index + 1 + i, name=name)
return self
def entropy(data=None, prob=None, tol=1e-5):
'''
given a probability distribution (prob) or an iterable of symbols (data) compute and
return its entropy
inputs:
------
data: iterable of symbols
prob: iterable with probabilities
tol: if prob is given, 'entropy' checks that the sum is about 1.
It raises an error if abs(sum(prob)-1) >= tol
'''
if prob is None and data is None:
raise ValueError("%s.entropy requires either 'prob' or 'data' to be defined" % __name__)
if prob is not None and data is not None:
raise ValueError("%s.entropy requires only 'prob' or 'data to be given but not both" % __name__)
if prob is not None and not isinstance(prob, np.ndarray):
raise TypeError("'entropy' in '%s' needs 'prob' to be an ndarray" % __name__)
if prob is not None and abs(prob.sum()-1) > tol:
raise ValueError("parameter 'prob' in '%s.entropy' should sum to 1" % __name__)
if data is not None:
prob = symbols_to_prob(data).prob()
# compute the log2 of the probability and change any -inf by 0s
logProb = np.log2(prob)
logProb[logProb == -np.inf] = 0
# return dot product of logProb and prob
return -float(np.dot(prob, logProb))
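A quick sanity check with an explicit probability vector (the data path additionally needs the module's symbols_to_prob helper):
import numpy as np
h = entropy(prob=np.array([0.25, 0.25, 0.25, 0.25]))  # uniform over 4 symbols -> 2.0 bits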
def install(self, name=None, prefix=None, pkgs=None, dep=True,
channels=None, token=None):
"""
Install a set of packages into an environment by name or path.
If token is specified, the channels different from the defaults will
get the token appended.
"""
logger.debug(str((prefix, pkgs, channels)))
# TODO: Fix temporal hack
if not pkgs or not isinstance(pkgs, (list, tuple, str)):
raise TypeError('must specify a list of one or more packages to '
'install into existing environment')
cmd_list = ['install', '--yes', '--json', '--force-pscheck']
if name:
cmd_list.extend(['--name', name])
elif prefix:
cmd_list.extend(['--prefix', prefix])
else:
# Just install into the current environment, whatever that is
pass
# TODO: Check if correct
if channels:
cmd_list.extend(['--override-channels'])
for channel in channels:
cmd_list.extend(['--channel'])
channel = self.parse_token_channel(channel, token)
cmd_list.extend([channel])
# TODO: Fix temporal hack
if isinstance(pkgs, (list, tuple)):
cmd_list.extend(pkgs)
elif isinstance(pkgs, str):
cmd_list.extend(['--file', pkgs])
if not dep:
cmd_list.extend(['--no-deps'])
return self._call_and_parse(cmd_list)
def _create_class(rule, index):
# type: (Type[Rule], int) -> Type[SplitRule]
"""
Create subtype of SplitRule based on rule.
:param rule: Rule from which the SplitRule derive.
:param index: Index of the rule (in original Rule class) to use for SplitRule.
:return: Class inherited from SplitRule representing rule at index.
"""
name = 'SplitRule[' + rule.__name__ + ';' + str(index) + ']'
created = type(name, (SplitRule,), SplitRule.__dict__.copy()) # type: Type[SplitRule]
created.rule = rule.rules[index]
created.rule_index = index
created.from_rule = rule
return created
def _allocate_address_neutron(self, instance, network_ids):
"""
Allocates a floating/public ip address to the given instance,
using the OpenStack Network ('Neutron') API.
:param instance: instance to assign address to
:param list network_id:
List of IDs (as strings) of networks where to
request allocation the floating IP.
:return: public ip address
"""
self._init_os_api()
with OpenStackCloudProvider.__node_start_lock:
# Note: to return *all* addresses, all parameters to
# `neutron_client.list_floatingips()` should be left out;
# setting them to `None` (e.g., `fixed_ip_address=None`)
# results in an empty list...
free_ips = [
ip for ip in
self.neutron_client.list_floatingips().get('floatingips')
if (ip['floating_network_id'] in network_ids
# keep only unallocated IP addrs
and ip['fixed_ip_address'] is None
and ip['port_id'] is None)
]
if free_ips:
floating_ip = free_ips.pop()
log.debug("Using existing floating IP %r", floating_ip)
else:
# FIXME: OpenStack Network API v2 requires that we specify
# a network ID along with the request for a floating IP.
# However, ElastiCluster configuration allows for multiple
# networks to be connected to a VM, but does not give any
# hint as to which one(s) should be used for such requests.
# So we try them all, ignoring errors until one request
# succeeds and hope that it's OK. One can imagine
# scenarios where this is *not* correct, but: (1) these
# scenarios are unlikely, and (2) the old novaclient code
# above has not even had the concept of multiple networks
# for floating IPs and no-one has complained in 5 years...
for network_id in network_ids:
log.debug(
"Trying to allocate floating IP on network %s ...", network_id)
try:
floating_ip = self.neutron_client.create_floatingip({
'floatingip': {
'floating_network_id':network_id,
}}).get('floatingip')
log.debug(
"Allocated IP address %s on network %s",
floating_ip['floating_ip_address'], network_id)
break # stop at first network where we get a floating IP
except BadNeutronRequest as err:
raise RuntimeError(
"Failed allocating floating IP on network {0}: {1}"
.format(network_id, err))
if floating_ip.get('floating_ip_address', None) is None:
raise RuntimeError(
"Could not allocate floating IP for VM {0}"
.format(instance.name))
# wait until at least one interface is up
interfaces = []
# FIXME: no timeout!
while not interfaces:
interfaces = instance.interface_list()
sleep(2) ## FIXME: hard-coded value
# get port ID
for interface in interfaces:
log.debug(
"Instance %s (ID: %s):"
" Checking if floating IP can be attached to interface %r ...",
instance.name, instance.id, interface)
# if interface.net_id not in network_ids:
# log.debug(
# "Instance %s (ID: %s):"
# " Skipping interface %r:"
# " not attached to any of the requested networks.",
# instance.name, instance.id, interface)
# continue
port_id = interface.port_id
if port_id is None:
log.debug(
"Instance %s (ID: %s):"
" Skipping interface %r: no port ID!",
instance.name, instance.id, interface)
continue
log.debug(
"Instance `%s` (ID: %s):"
" will assign floating IP to port ID %s (state: %s),"
" already running IP addresses %r",
instance.name, instance.id,
port_id, interface.port_state,
[item['ip_address'] for item in interface.fixed_ips])
if interface.port_state != 'ACTIVE':
log.warn(
"Instance `%s` (ID: %s):"
" port `%s` is in state %s (epected 'ACTIVE' instead)",
instance.name, instance.id,
port_id, interface.port_state)
break
else:
raise RuntimeError(
"Could not find port on network(s) {0}"
" for instance {1} (ID: {2}) to bind a floating IP to."
.format(network_ids, instance.name, instance.id))
# assign floating IP to port
floating_ip = self.neutron_client.update_floatingip(
floating_ip['id'], {
'floatingip': {
'port_id': port_id,
},
}
).get('floatingip')
ip_address = floating_ip['floating_ip_address']
log.debug("Assigned IP address %s to port %s", ip_address, port_id)
log.info("Waiting 300s until floating IP %s is ACTIVE", ip_address)
for i in range(300):
_floating_ip = self.neutron_client.show_floatingip(floating_ip['id'])
if _floating_ip['floatingip']['status'] != 'DOWN':
break
sleep(1)
# Invalidate cache for this VM, as we just assigned a new IP
if instance.id in self._cached_instances:
del self._cached_instances[instance.id]
return ip_address | Allocates a floating/public ip address to the given instance,
using the OpenStack Network ('Neutron') API.
:param instance: instance to assign address to
:param list network_id:
List of IDs (as strings) of networks where to
      request allocation of the floating IP.
:return: public ip address |
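A minimal sketch (not the project's code) of how the unbounded interface wait above could be given a timeout; it assumes only that `instance` exposes the same `interface_list()` call used in the method.

from time import sleep, time

def wait_for_interfaces(instance, timeout=120, poll=2):
    # Poll instance.interface_list() until it returns something, but give up
    # after `timeout` seconds instead of looping forever.
    deadline = time() + timeout
    while time() < deadline:
        interfaces = instance.interface_list()
        if interfaces:
            return interfaces
        sleep(poll)
    raise RuntimeError("No network interface appeared on instance {0} within {1}s"
                       .format(instance.id, timeout))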
def delete(self, force=False):
"""
        Deletes this hosted zone. After this method is run, you won't be able
to add records, or do anything else with the zone. You'd need to
re-create it, as zones are read-only after creation.
:keyword bool force: If ``True``, delete the
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it
means nuking all associated record sets. If ``False``, an
exception is raised if this
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
has record sets.
:rtype: dict
:returns: A dict of change info, which contains some details about
the request.
"""
self._halt_if_already_deleted()
if force:
# Forcing deletion by cleaning up all record sets first. We'll
# do it all in one change set.
cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id)
for rrset in self.record_sets:
# You can delete a HostedZone if there are only SOA and NS
# entries left. So delete everything but SOA/NS entries.
if rrset.rrset_type not in ['SOA', 'NS']:
cset.add_change('DELETE', rrset)
if cset.deletions or cset.creations:
# Bombs away.
self.connection._change_resource_record_sets(cset)
# Now delete the HostedZone.
retval = self.connection.delete_hosted_zone_by_id(self.id)
# Used to protect against modifying a deleted HostedZone.
self._is_deleted = True
        return retval | Deletes this hosted zone. After this method is run, you won't be able
to add records, or do anything else with the zone. You'd need to
re-create it, as zones are read-only after creation.
:keyword bool force: If ``True``, delete the
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it
means nuking all associated record sets. If ``False``, an
exception is raised if this
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
has record sets.
:rtype: dict
:returns: A dict of change info, which contains some details about
the request. |
def com_google_fonts_check_metadata_valid_post_script_name_values(font_metadata,
font_familynames):
"""METADATA.pb font.post_script_name field
contains font name in right format?
"""
for font_familyname in font_familynames:
psname = "".join(str(font_familyname).split())
if psname in "".join(font_metadata.post_script_name.split("-")):
yield PASS, ("METADATA.pb postScriptName field"
" contains font name in right format.")
else:
yield FAIL, ("METADATA.pb postScriptName (\"{}\")"
" does not match correct font name format (\"{}\")."
"").format(font_metadata.post_script_name,
font_familyname) | METADATA.pb font.post_script_name field
contains font name in right format? |
def fromtif(path, ext='tif', start=None, stop=None, recursive=False, nplanes=None, npartitions=None, labels=None, engine=None, credentials=None, discard_extra=False):
"""
Loads images from single or multi-page TIF files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
discard_extra : boolean, optional, default = False
        If True and nplanes doesn't evenly divide the number of pages in a multi-page tiff, the remainder will
        be discarded and a warning will be shown. If False, an error will be raised.
"""
from tifffile import TiffFile
if nplanes is not None and nplanes <= 0:
raise ValueError('nplanes must be positive if passed, got %d' % nplanes)
def getarray(idx_buffer_filename):
idx, buf, fname = idx_buffer_filename
fbuf = BytesIO(buf)
tfh = TiffFile(fbuf)
ary = tfh.asarray()
pageCount = ary.shape[0]
if nplanes is not None:
extra = pageCount % nplanes
if extra:
if discard_extra:
pageCount = pageCount - extra
logging.getLogger('thunder').warn('Ignored %d pages in file %s' % (extra, fname))
else:
raise ValueError("nplanes '%d' does not evenly divide '%d in file %s'" % (nplanes, pageCount,
fname))
values = [ary[i:(i+nplanes)] for i in range(0, pageCount, nplanes)]
else:
values = [ary]
tfh.close()
if ary.ndim == 3:
values = [val.squeeze() for val in values]
nvals = len(values)
keys = [(idx*nvals + timepoint,) for timepoint in range(nvals)]
return zip(keys, values)
recount = False if nplanes is None else True
data = frompath(path, accessor=getarray, ext=ext, start=start, stop=stop,
recursive=recursive, npartitions=npartitions, recount=recount,
labels=labels, engine=engine, credentials=credentials)
if engine is not None and npartitions is not None and data.npartitions() < npartitions:
data = data.repartition(npartitions)
return data | Loads images from single or multi-page TIF files.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
ext : string, optional, default = 'tif'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching 'path' and 'ext'. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
discard_extra : boolean, optional, default = False
    If True and nplanes doesn't evenly divide the number of pages in a multi-page tiff, the remainder will
    be discarded and a warning will be shown. If False, an error will be raised. |
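The `nplanes` splitting and `discard_extra` handling above boil down to simple slicing; a self-contained sketch on a synthetic 7-page stack (the array stands in for a multi-page TIFF):

import numpy as np

pages = np.zeros((7, 4, 4))      # pretend this came from TiffFile.asarray()
nplanes, discard_extra = 3, True
count = pages.shape[0]
extra = count % nplanes
if extra:
    if discard_extra:
        count -= extra           # drop the trailing pages, as the loader warns it will
    else:
        raise ValueError("nplanes %d does not evenly divide %d pages" % (nplanes, pages.shape[0]))
values = [pages[i:i + nplanes] for i in range(0, count, nplanes)]
print(len(values), values[0].shape)   # 2 (3, 4, 4)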
def add_environment(self, environment, sync=True):
"""
add an environment to this OS instance.
:param environment: the environment to add on this OS instance
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the environment object on list to be added on next save().
:return:
"""
LOGGER.debug("OSInstance.add_environment")
if not sync:
self.environment_2_add.append(environment)
else:
if environment.id is None:
environment.save()
if self.id is not None and environment.id is not None:
params = {
'id': self.id,
'environmentID': environment.id
}
args = {'http_operation': 'GET', 'operation_path': 'update/environments/add', 'parameters': params}
response = OSInstanceService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'OSInstance.add_environment - Problem while updating OS instance ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.environment_ids.append(environment.id)
environment.osi_ids.append(self.id)
else:
LOGGER.warning(
'OSInstance.add_environment - Problem while updating OS instance ' +
                    self.name + '. Reason: environment ' + environment.name + ' id is None'
) | add an environment to this OS instance.
:param environment: the environment to add on this OS instance
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the environment object on list to be added on next save().
:return: |
def create_event(self, institute, case, user, link, category, verb,
subject, level='specific', variant=None, content=None,
panel=None):
"""Create a Event with the parameters given.
Arguments:
institute (dict): A institute
case (dict): A case
user (dict): A User
link (str): The url to be used in the event
category (str): case or variant
verb (str): What type of event
subject (str): What is operated on
level (str): 'specific' or 'global'. Default is 'specific'
variant (dict): A variant
content (str): The content of the comment
Returns:
event(dict): The inserted event
"""
variant = variant or {}
event = dict(
institute=institute['_id'],
case=case['_id'],
user_id=user['_id'],
user_name=user['name'],
link=link,
category=category,
verb=verb,
subject=subject,
level=level,
variant_id=variant.get('variant_id'),
content=content,
panel=panel,
created_at=datetime.now(),
updated_at=datetime.now(),
)
LOG.debug("Saving Event")
self.event_collection.insert_one(event)
LOG.debug("Event Saved")
return event | Create a Event with the parameters given.
Arguments:
institute (dict): A institute
case (dict): A case
user (dict): A User
link (str): The url to be used in the event
category (str): case or variant
verb (str): What type of event
subject (str): What is operated on
level (str): 'specific' or 'global'. Default is 'specific'
variant (dict): A variant
content (str): The content of the comment
Returns:
event(dict): The inserted event |
def random(self: 'ErrorValue') -> np.ndarray:
"""Sample a random number (array) of the distribution defined by
mean=`self.val` and variance=`self.err`^2.
"""
if isinstance(self.val, np.ndarray):
# IGNORE:E1103
            return np.random.randn(*self.val.shape) * self.err + self.val
else:
return np.random.randn() * self.err + self.val | Sample a random number (array) of the distribution defined by
mean=`self.val` and variance=`self.err`^2. |
def get_gae_versions():
"""Gets a list of all of the available Python SDK versions, sorted with
the newest last."""
r = requests.get(SDK_RELEASES_URL)
r.raise_for_status()
releases = r.json().get('items', {})
# We only care about the Python releases, which all are in the format
# "featured/google_appengine_{version}.zip". We'll extract the version
# number so we can sort the list by version, and finally get the download
# URL.
versions_and_urls = []
for release in releases:
match = PYTHON_RELEASE_RE.match(release['name'])
if not match:
continue
versions_and_urls.append(
([int(x) for x in match.groups()], release['mediaLink']))
return sorted(versions_and_urls, key=lambda x: x[0]) | Gets a list of all of the available Python SDK versions, sorted with
the newest last. |
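The sort step is ordinary tuple comparison on the extracted version numbers; an offline sketch with a made-up release-name pattern (the module's real PYTHON_RELEASE_RE is not shown here):

import re

release_re = re.compile(r'featured/google_appengine_(\d+)\.(\d+)\.(\d+)\.zip')
names = ['featured/google_appengine_1.9.40.zip',
         'featured/google_appengine_1.9.4.zip',
         'featured/google_appengine_1.10.0.zip']
versions = []
for name in names:
    match = release_re.match(name)
    if match:
        versions.append(([int(x) for x in match.groups()], name))
versions.sort(key=lambda item: item[0])
print(versions[-1][0])   # [1, 10, 0] -- the newest release sorts last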
def parse_bytes(self, bytestr, isfinal=True):
"""
        Parse a byte string. If the string is very large, split it in chunks
and parse each chunk with isfinal=False, then parse an empty chunk
with isfinal=True.
"""
with self._context():
self.filename = None
self.p.Parse(bytestr, isfinal)
            return self._root | Parse a byte string. If the string is very large, split it in chunks
and parse each chunk with isfinal=False, then parse an empty chunk
with isfinal=True. |
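The chunked-feeding pattern the docstring describes maps directly onto the stdlib expat API; a quick self-contained illustration:

from xml.parsers.expat import ParserCreate

doc = b'<root>' + b'<item/>' * 1000 + b'</root>'
parser = ParserCreate()
seen = []
parser.StartElementHandler = lambda name, attrs: seen.append(name)
for i in range(0, len(doc), 4096):
    parser.Parse(doc[i:i + 4096], False)   # isfinal=False for every chunk
parser.Parse(b'', True)                    # empty final chunk flushes the parser
print(len(seen))                           # 1001 start tags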
def _start_keep_alive(self):
'''
Start the keep alive thread as a daemon
'''
keep_alive_thread = threading.Thread(target=self.keep_alive)
keep_alive_thread.daemon = True
keep_alive_thread.start() | Start the keep alive thread as a daemon |
def get(self, name: str) -> Union[None, str, List[str]]:
"""
        Get a header value.
"""
name = name.casefold()
if name == "referer" or name == "referrer":
if "referrer" in self._headers:
return self._headers["referrer"]
elif "referer" in self._headers:
return self._headers["referer"]
else:
return None
elif name in self._headers:
return self._headers[name]
else:
            return None | Get a header value.
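A standalone sketch of the same referer/referrer fallback over a plain dict of already-casefolded header names (an assumption; the class's internal storage is only shown as `self._headers` above):

from typing import Dict, List, Union

def get_header(headers: Dict[str, Union[str, List[str]]],
               name: str) -> Union[None, str, List[str]]:
    name = name.casefold()
    if name in ("referer", "referrer"):
        # accept either spelling, preferring "referrer" when both are present
        return headers.get("referrer", headers.get("referer"))
    return headers.get(name)

print(get_header({"referer": "https://example.org"}, "Referrer"))   # https://example.org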
def onStart(self):
""" Override onStart method for npyscreen """
curses.mousemask(0)
self.paths.host_config()
version = Version()
# setup initial runtime stuff
if self.first_time[0] and self.first_time[1] != 'exists':
system = System()
thr = Thread(target=system.start, args=(), kwargs={})
thr.start()
countdown = 60
while thr.is_alive():
npyscreen.notify_wait('Completing initialization:...' + str(countdown),
title='Setting up things...')
time.sleep(1)
countdown -= 1
thr.join()
quit_s = '\t'*4 + '^Q to quit'
tab_esc = '\t'*4 + 'ESC to close menu popup'
self.addForm('MAIN',
MainForm,
name='Vent ' + version +
'\t\t\t\t\t^T for help' + quit_s + tab_esc,
color='IMPORTANT')
self.addForm('HELP',
HelpForm,
name='Help\t\t\t\t\t\t\t\t^T to toggle previous' +
quit_s,
color='DANGER')
self.addForm('TUTORIALINTRO',
TutorialIntroForm,
name='Vent Tutorial' + quit_s,
color='DANGER')
self.addForm('TUTORIALBACKGROUND',
TutorialBackgroundForm,
name='About Vent' + quit_s,
color='DANGER')
self.addForm('TUTORIALTERMINOLOGY',
TutorialTerminologyForm,
name='About Vent' + quit_s,
color='DANGER')
self.addForm('TUTORIALGETTINGSETUP',
TutorialGettingSetupForm,
name='About Vent' + quit_s,
color='DANGER')
self.addForm('TUTORIALSTARTINGCORES',
TutorialStartingCoresForm,
name='Working with Cores' + quit_s,
color='DANGER')
self.addForm('TUTORIALADDINGPLUGINS',
TutorialAddingPluginsForm,
name='Working with Plugins' + quit_s,
color='DANGER')
self.addForm('TUTORIALADDINGFILES',
TutorialAddingFilesForm,
name='Files' + quit_s,
color='DANGER')
self.addForm('TUTORIALTROUBLESHOOTING',
TutorialTroubleshootingForm,
name='Troubleshooting' + quit_s,
color='DANGER') | Override onStart method for npyscreen |
def generic_commit_and_try_merge2master_wf(git_action,
file_content,
doc_id,
auth_info,
parent_sha,
commit_msg='',
merged_sha=None,
doctype_display_name="document"):
"""Actually make a local Git commit and push it to our remote
"""
# _LOG.debug('generic_commit_and_try_merge2master_wf: doc_id="{s}" \
# parent_sha="{p}" merged_sha="{m}"'.format(
# s=doc_id, p=parent_sha, m=merged_sha))
merge_needed = False
fc = tempfile.NamedTemporaryFile()
# N.B. we currently assume file_content is text/JSON, or should be serialized from a dict
try:
if is_str_type(file_content):
fc.write(file_content)
else:
write_as_json(file_content, fc)
fc.flush()
try:
max_file_size = git_action.max_file_size
except:
max_file_size = None
if max_file_size is not None:
file_size = os.stat(fc.name).st_size
if file_size > max_file_size:
m = 'Commit of {t} "{i}" had a file size ({a} bytes) which ' \
'exceeds the maximum size allowed ({b} bytes).'
m = m.format(t=doctype_display_name, i=doc_id, a=file_size, b=max_file_size)
raise GitWorkflowError(m)
f = "Could not acquire lock to write to %s #%s" % (doctype_display_name, doc_id)
acquire_lock_raise(git_action, fail_msg=f)
try:
try:
commit_resp = git_action.write_doc_from_tmpfile(doc_id,
fc,
parent_sha,
auth_info,
commit_msg,
doctype_display_name)
except Exception as e:
_LOG.exception('write_doc_from_tmpfile exception')
raise GitWorkflowError("Could not write to %s #%s ! Details: \n%s" %
(doctype_display_name, doc_id, e.message))
written_fp = git_action.path_for_doc(doc_id)
branch_name = commit_resp['branch']
new_sha = commit_resp['commit_sha']
_LOG.debug('write of {t} {i} on parent {p} returned = {c}'.format(t=doctype_display_name,
i=doc_id,
p=parent_sha,
c=str(commit_resp)))
m_resp = _do_merge2master_commit(git_action,
new_sha,
branch_name,
written_fp,
merged_sha=merged_sha,
prev_file_sha=commit_resp.get('prev_file_sha'))
new_sha, branch_name, merge_needed = m_resp
finally:
git_action.release_lock()
finally:
fc.close()
# What other useful information should be returned on a successful write?
r = {
"error": 0,
"resource_id": doc_id,
"branch_name": branch_name,
"description": "Updated %s #%s" % (doctype_display_name, doc_id),
"sha": new_sha,
"merge_needed": merge_needed,
}
_LOG.debug('returning {r}'.format(r=str(r)))
return r | Actually make a local Git commit and push it to our remote |
async def _build_state(self,
request: Request,
message: BaseMessage,
responder: Responder) \
-> Tuple[
Optional[BaseState],
Optional[BaseTrigger],
Optional[bool],
]:
"""
Build the state for this request.
"""
trigger, state_class, dnr = await self._find_trigger(request)
if trigger is None:
if not message.should_confuse():
return None, None, None
state_class = self._confused_state(request)
logger.debug('Next state: %s (confused)', state_class.name())
else:
logger.debug('Next state: %s', state_class.name())
state = state_class(request, responder, trigger, trigger)
return state, trigger, dnr | Build the state for this request. |
def RawBytesToScriptHash(raw):
"""
Get a hash of the provided raw bytes using the ripemd160 algorithm.
Args:
raw (bytes): byte array of raw bytes. e.g. b'\xAA\xBB\xCC'
Returns:
UInt160:
"""
rawh = binascii.unhexlify(raw)
rawhashstr = binascii.unhexlify(bytes(Crypto.Hash160(rawh), encoding='utf-8'))
return UInt160(data=rawhashstr) | Get a hash of the provided raw bytes using the ripemd160 algorithm.
Args:
raw (bytes): byte array of raw bytes. e.g. b'\xAA\xBB\xCC'
Returns:
UInt160: |
def make_contiguous(im, keep_zeros=True):
r"""
Take an image with arbitrary greyscale values and adjust them to ensure
all values fall in a contiguous range starting at 0.
    This function will handle negative numbers such that the most negative number
will become 0, *unless* ``keep_zeros`` is ``True`` in which case it will
become 1, and all 0's in the original image remain 0.
Parameters
----------
im : array_like
An ND array containing greyscale values
keep_zeros : Boolean
If ``True`` (default) then 0 values remain 0, regardless of how the
other numbers are adjusted. This is mostly relevant when the array
contains negative numbers, and means that -1 will become +1, while
0 values remain 0.
Returns
-------
image : ND-array
An ND-array the same size as ``im`` but with all values in contiguous
orders.
Example
-------
>>> import porespy as ps
>>> import scipy as sp
>>> im = sp.array([[0, 2, 9], [6, 8, 3]])
>>> im = ps.tools.make_contiguous(im)
>>> print(im)
[[0 1 5]
[3 4 2]]
"""
im = sp.copy(im)
if keep_zeros:
mask = (im == 0)
im[mask] = im.min() - 1
im = im - im.min()
im_flat = im.flatten()
im_vals = sp.unique(im_flat)
im_map = sp.zeros(shape=sp.amax(im_flat) + 1)
im_map[im_vals] = sp.arange(0, sp.size(sp.unique(im_flat)))
im_new = im_map[im_flat]
im_new = sp.reshape(im_new, newshape=sp.shape(im))
im_new = sp.array(im_new, dtype=im_flat.dtype)
return im_new | r"""
Take an image with arbitrary greyscale values and adjust them to ensure
all values fall in a contiguous range starting at 0.
This function will handle negative numbers such that the most negative number
will become 0, *unless* ``keep_zeros`` is ``True`` in which case it will
become 1, and all 0's in the original image remain 0.
Parameters
----------
im : array_like
An ND array containing greyscale values
keep_zeros : Boolean
If ``True`` (default) then 0 values remain 0, regardless of how the
other numbers are adjusted. This is mostly relevant when the array
contains negative numbers, and means that -1 will become +1, while
0 values remain 0.
Returns
-------
image : ND-array
An ND-array the same size as ``im`` but with all values in contiguous
orders.
Example
-------
>>> import porespy as ps
>>> import scipy as sp
>>> im = sp.array([[0, 2, 9], [6, 8, 3]])
>>> im = ps.tools.make_contiguous(im)
>>> print(im)
[[0 1 5]
[3 4 2]] |
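Ignoring the `keep_zeros` handling, the relabelling itself can be sketched with numpy alone; `np.unique(return_inverse=True)` yields contiguous labels ordered by value and reproduces the docstring example:

import numpy as np

im = np.array([[0, 2, 9], [6, 8, 3]])
_, inverse = np.unique(im, return_inverse=True)
print(inverse.reshape(im.shape))
# [[0 1 5]
#  [3 4 2]]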
def rgb_to_yiq(r, g=None, b=None):
"""Convert the color from RGB to YIQ.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (y, i, q) tuple in the range:
y[0...1],
i[0...1],
q[0...1]
>>> '(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0)
'(0.592263, 0.458874, -0.0499818)'
"""
if type(r) in [list,tuple]:
r, g, b = r
    y = (r * 0.29895808) + (g * 0.58660979) + (b * 0.11443213)
    i = (r * 0.59590296) - (g * 0.27405705) - (b * 0.32184591)
    q = (r * 0.21133576) - (g * 0.52263517) + (b * 0.31129940)
return (y, i, q) | Convert the color from RGB to YIQ.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (y, i, q) tuple in the range:
y[0...1],
i[0...1],
q[0...1]
>>> '(%g, %g, %g)' % rgb_to_yiq(1, 0.5, 0)
'(0.592263, 0.458874, -0.0499818)' |
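The conversion is a fixed 3x3 matrix product; this numpy sketch uses the same coefficients and reproduces the doctest value:

import numpy as np

M = np.array([[0.29895808,  0.58660979,  0.11443213],
              [0.59590296, -0.27405705, -0.32184591],
              [0.21133576, -0.52263517,  0.31129940]])
y, i, q = M @ np.array([1.0, 0.5, 0.0])
print('(%g, %g, %g)' % (y, i, q))   # (0.592263, 0.458874, -0.0499818)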
def get_nonparametric_sources(self):
"""
:returns: list of non parametric sources in the composite source model
"""
return [src for sm in self.source_models
for src_group in sm.src_groups
for src in src_group if hasattr(src, 'data')] | :returns: list of non parametric sources in the composite source model |
def K_r2(self, r2):
"""
Returns the kernel evaluated on `r2`, which is the scaled squared distance.
Will call self.K_r(r=sqrt(r2)), or can be overwritten directly (and should operate element-wise on r2).
"""
r = self._clipped_sqrt(r2)
return self.K_r(r) | Returns the kernel evaluated on `r2`, which is the scaled squared distance.
Will call self.K_r(r=sqrt(r2)), or can be overwritten directly (and should operate element-wise on r2). |
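A sketch of the same pattern with a concrete exponential kernel K_r(r) = exp(-r); the epsilon floor stands in for the class's `_clipped_sqrt` (an assumption) and keeps the square root, and hence gradients, finite at r2 = 0:

import numpy as np

def K_r(r):
    return np.exp(-r)

def K_r2(r2, eps=1e-36):
    r = np.sqrt(np.maximum(r2, eps))   # clipped sqrt: avoids the derivative blowing up at r2 = 0
    return K_r(r)

print(K_r2(np.array([0.0, 1.0, 4.0])))   # ~[1.0, 0.3679, 0.1353]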
def _db_pre_transform(self, train_tfm:List[Callable], valid_tfm:List[Callable]):
"Call `train_tfm` and `valid_tfm` after opening image, before converting from `PIL.Image`"
self.train_ds.x.after_open = compose(train_tfm)
self.valid_ds.x.after_open = compose(valid_tfm)
return self | Call `train_tfm` and `valid_tfm` after opening image, before converting from `PIL.Image` |
def list(ctx, scenario_name, format): # pragma: no cover
""" Lists status of instances. """
args = ctx.obj.get('args')
subcommand = base._get_subcommand(__name__)
command_args = {
'subcommand': subcommand,
'format': format,
}
statuses = []
s = scenarios.Scenarios(
base.get_configs(args, command_args), scenario_name)
for scenario in s:
statuses.extend(base.execute_subcommand(scenario.config, subcommand))
headers = [util.title(name) for name in status.get_status()._fields]
if format == 'simple' or format == 'plain':
table_format = 'simple'
if format == 'plain':
headers = []
table_format = format
_print_tabulate_data(headers, statuses, table_format)
else:
_print_yaml_data(headers, statuses) | Lists status of instances. |
def is_git_file(cls, path, name):
"""Determine if file is known by git."""
os.chdir(path)
p = subprocess.Popen(['git', 'ls-files', '--error-unmatch', name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
return p.returncode == 0 | Determine if file is known by git. |
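A variant of the same check without the process-wide os.chdir side effect, using subprocess.run's cwd argument (Python 3.5+):

import subprocess

def is_git_file(path, name):
    result = subprocess.run(['git', 'ls-files', '--error-unmatch', name],
                            cwd=path,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
    return result.returncode == 0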
def _kill(self, variable, code_loc): # pylint:disable=no-self-use
"""
Kill previous defs. addr_list is a list of normalized addresses.
"""
# Case 1: address perfectly match, we kill
# Case 2: a is a subset of the original address
# Case 3: a is a superset of the original address
# the previous definition is killed. mark it in data graph.
if variable in self._live_defs:
for loc in self._live_defs.lookup_defs(variable):
pv = ProgramVariable(variable, loc, arch=self.project.arch)
self._data_graph_add_edge(pv, ProgramVariable(variable, code_loc, arch=self.project.arch), type='kill')
self._live_defs.kill_def(variable, code_loc) | Kill previous defs. addr_list is a list of normalized addresses. |
def remove(name=None, pkgs=None, **kwargs):
'''
Remove specified package. Accepts full or partial FMRI.
    In case of multiple matches, the command fails and won't modify the OS.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a list containing the removed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove tcsh
salt '*' pkg.remove pkg://solaris/shell/tcsh
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug('Removing these packages instead of %s: %s', name, targets)
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Remove the package(s)
cmd = ['/bin/pkg', 'uninstall', '-v'] + targets
out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
# Get a list of the packages after the uninstall
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if out['retcode'] != 0:
raise CommandExecutionError(
'Error occurred removing package(s)',
info={
'changes': ret,
'retcode': ips_pkg_return_values[out['retcode']],
'errors': [out['stderr']]
}
)
return ret | Remove specified package. Accepts full or partial FMRI.
In case of multiple matches, the command fails and won't modify the OS.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a list containing the removed packages.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove tcsh
salt '*' pkg.remove pkg://solaris/shell/tcsh
salt '*' pkg.remove pkgs='["foo", "bar"]' |
def _gql(cls, query_string, *args, **kwds):
"""Run a GQL query."""
from .query import gql # Import late to avoid circular imports.
return gql('SELECT * FROM %s %s' % (cls._class_name(), query_string),
*args, **kwds) | Run a GQL query. |
def identify_ibids(line):
"""Find IBIDs within the line, record their position and length,
and replace them with underscores.
@param line: (string) the working reference line
    @return: (tuple) containing a dictionary and a string:
Dictionary: matched IBID text: (Key: position of IBID in
line; Value: matched IBID text)
String: working line with matched IBIDs removed
"""
ibid_match_txt = {}
# Record details of each matched ibid:
for m_ibid in re_ibid.finditer(line):
ibid_match_txt[m_ibid.start()] = m_ibid.group(0)
# Replace matched text in line with underscores:
line = line[0:m_ibid.start()] + \
"_" * len(m_ibid.group(0)) + \
line[m_ibid.end():]
return ibid_match_txt, line | Find IBIDs within the line, record their position and length,
and replace them with underscores.
@param line: (string) the working reference line
@return: (tuple) containing a dictionary and a string:
Dictionary: matched IBID text: (Key: position of IBID in
line; Value: matched IBID text)
String: working line with matched IBIDs removed |
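The record-and-mask idea (replace each match with same-length underscores so later offsets stay valid) in a standalone sketch; the pattern below is a placeholder, not the module's actual re_ibid:

import re

re_marker = re.compile(r'\bibid\.?', re.IGNORECASE)   # placeholder pattern

def mask_matches(line):
    found = {}
    for m in re_marker.finditer(line):
        found[m.start()] = m.group(0)
        # same-length replacement keeps every later match position valid
        line = line[:m.start()] + '_' * len(m.group(0)) + line[m.end():]
    return found, line

print(mask_matches('see Ibid. p. 12'))   # ({4: 'Ibid.'}, 'see _____ p. 12')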
def _format_name(self, name, surname, snake_case=False):
"""Format a first name and a surname into a cohesive string.
Note that either name or surname can be empty strings, and
formatting will still succeed.
:param str name: A first name.
:param str surname: A surname.
:param bool snake_case: If True, format the name as
"snake_case", also stripping diacritics if any. (default:
False)
:return str: The formatted name.
"""
if not name or not surname:
sep = ''
elif snake_case:
sep = '_'
else:
sep = ' '
if snake_case:
name = self._snakify_name(name)
surname = self._snakify_name(surname)
disp_name = '{}{}{}'.format(name, sep, surname)
return disp_name | Format a first name and a surname into a cohesive string.
Note that either name or surname can be empty strings, and
formatting will still succeed.
:param str name: A first name.
:param str surname: A surname.
:param bool snake_case: If True, format the name as
"snake_case", also stripping diacritics if any. (default:
False)
:return str: The formatted name. |
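A standalone sketch of the snake_case branch; the diacritic stripping is an assumption standing in for the class's `_snakify_name`, which is not shown here:

import unicodedata

def snakify(part):
    # strip diacritics, then lowercase (assumed behaviour of _snakify_name)
    return unicodedata.normalize('NFKD', part).encode('ascii', 'ignore').decode().lower()

def format_name(name, surname, snake_case=False):
    if not name or not surname:
        sep = ''
    elif snake_case:
        sep = '_'
    else:
        sep = ' '
    if snake_case:
        name, surname = snakify(name), snakify(surname)
    return '{}{}{}'.format(name, sep, surname)

print(format_name('Jose', 'Núñez', snake_case=True))   # jose_nunez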
def expr_stmt(self, lhs, rhs):
"""
(2.6, 2.7, 3.0, 3.1)
expr_stmt: testlist (augassign (yield_expr|testlist) |
('=' (yield_expr|testlist))*)
(3.2-)
expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
"""
if isinstance(rhs, ast.AugAssign):
if isinstance(lhs, ast.Tuple) or isinstance(lhs, ast.List):
error = diagnostic.Diagnostic(
"fatal", "illegal expression for augmented assignment", {},
rhs.op.loc, [lhs.loc])
self.diagnostic_engine.process(error)
else:
rhs.target = self._assignable(lhs)
rhs.loc = rhs.target.loc.join(rhs.value.loc)
return rhs
elif rhs is not None:
rhs.targets = list(map(self._assignable, [lhs] + rhs.targets))
rhs.loc = lhs.loc.join(rhs.value.loc)
return rhs
else:
return ast.Expr(value=lhs, loc=lhs.loc) | (2.6, 2.7, 3.0, 3.1)
expr_stmt: testlist (augassign (yield_expr|testlist) |
('=' (yield_expr|testlist))*)
(3.2-)
expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*) |
def add(self, resource, replace=False):
"""Add just a single resource."""
uri = resource.uri
if (uri in self and not replace):
raise ResourceListDupeError(
"Attempt to add resource already in resource_list")
self[uri] = resource | Add just a single resource. |
def parse_mbox(filepath):
"""Parse a mbox file.
This method parses a mbox file and returns an iterator of dictionaries.
Each one of this contains an email message.
:param filepath: path of the mbox to parse
:returns : generator of messages; each message is stored in a
dictionary of type `requests.structures.CaseInsensitiveDict`
"""
mbox = _MBox(filepath, create=False)
for msg in mbox:
message = message_to_dict(msg)
        yield message | Parse an mbox file.
This method parses an mbox file and returns an iterator of dictionaries.
Each of these contains an email message.
:param filepath: path of the mbox to parse
:returns : generator of messages; each message is stored in a
dictionary of type `requests.structures.CaseInsensitiveDict` |
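A stdlib-only sketch of the same iteration; `_MBox` and `message_to_dict` are project-specific, so this just yields plain header dicts via `mailbox.mbox`:

import mailbox

def iter_mbox_headers(filepath):
    for msg in mailbox.mbox(filepath, create=False):
        yield dict(msg.items())   # header name -> value

# Example (assuming archive.mbox exists):
# for headers in iter_mbox_headers('archive.mbox'):
#     print(headers.get('Subject'))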
def find_shadowed(self, extra=()):
"""Find all the shadowed names. extra is an iterable of variables
    that may be defined with `add_special` which may occur scoped.
"""
i = self.identifiers
return (i.declared | i.outer_undeclared) & \
(i.declared_locally | i.declared_parameter) | \
set(x for x in extra if i.is_declared(x)) | Find all the shadowed names. extra is an iterable of variables
that may be defined with `add_special` which may occur scoped.
def addFileAnnot(self, point, buffer, filename, ufilename=None, desc=None):
"""Add a 'FileAttachment' annotation at location 'point'."""
CheckParent(self)
val = _fitz.Page_addFileAnnot(self, point, buffer, filename, ufilename, desc)
if not val: return
val.thisown = True
val.parent = weakref.proxy(self)
self._annot_refs[id(val)] = val
return val | Add a 'FileAttachment' annotation at location 'point'. |
def is_filtered(self, require=None, ignore=None):
"""Return ``True`` for filtered calls
:param iterable ignore: if set, the filters to ignore, make sure to
include 'PASS', when setting, default is ``['PASS']``
:param iterable require: if set, the filters to require for returning
``True``
"""
ignore = ignore or ["PASS"]
if "FT" not in self.data or not self.data["FT"]:
return False
for ft in self.data["FT"]:
if ft in ignore:
continue # skip
if not require:
return True
elif ft in require:
return True
return False | Return ``True`` for filtered calls
:param iterable ignore: if set, the filters to ignore, make sure to
include 'PASS', when setting, default is ``['PASS']``
:param iterable require: if set, the filters to require for returning
``True`` |
def td_tr(points, dist_threshold):
""" Top-Down Time-Ratio Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
dist_threshold (float): max distance error, in meters
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory
"""
if len(points) <= 2:
return points
else:
max_dist_threshold = 0
found_index = 0
delta_e = time_dist(points[-1], points[0]) * I_3600
d_lat = points[-1].lat - points[0].lat
d_lon = points[-1].lon - points[0].lon
for i in range(1, len(points)-1):
delta_i = time_dist(points[i], points[0]) * I_3600
di_de = delta_i / delta_e
point = Point(
points[0].lat + d_lat * di_de,
points[0].lon + d_lon * di_de,
None
)
dist = loc_dist(points[i], point)
if dist > max_dist_threshold:
max_dist_threshold = dist
found_index = i
if max_dist_threshold > dist_threshold:
one = td_tr(points[:found_index], dist_threshold)
two = td_tr(points[found_index:], dist_threshold)
one.extend(two)
return one
else:
return [points[0], points[-1]] | Top-Down Time-Ratio Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
dist_threshold (float): max distance error, in meters
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory |
def save():
"""
save function
"""
results = {}
cpu_number = 0
while True:
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_governor'.format(cpu_number))
except:
break
governor = _file.read().strip()
results.setdefault(cpu_number, {})['governor'] = governor
_file.close()
try:
_file = open(
CPU_PREFIX + 'cpu{}/cpufreq/scaling_cur_freq'.format(cpu_number))
except:
break
results[cpu_number]['freq'] = _file.read().strip()
_file.close()
cpu_number += 1
return results | save function |
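A pathlib sketch of the same scan that globs the cpufreq directories instead of counting upwards until open() fails; it assumes CPU_PREFIX above points at /sys/devices/system/cpu/:

from pathlib import Path

def read_cpufreq(prefix='/sys/devices/system/cpu'):
    results = {}
    for gov in sorted(Path(prefix).glob('cpu[0-9]*/cpufreq/scaling_governor')):
        cpu = int(gov.parts[-3][3:])   # 'cpu12' -> 12
        results[cpu] = {
            'governor': gov.read_text().strip(),
            'freq': (gov.parent / 'scaling_cur_freq').read_text().strip(),
        }
    return results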
def checkMultipleFiles(input):
""" Evaluates the input to determine whether there is 1 or more than 1 valid input file.
"""
f,i,o,a=buildFileList(input)
return len(f) > 1 | Evaluates the input to determine whether there is 1 or more than 1 valid input file. |
def get_activities(self):
"""Gets all ``Activities``.
In plenary mode, the returned list contains all known activites
or an error results. Otherwise, the returned list may contain
only those activities that are accessible through this session.
return: (osid.learning.ActivityList) - a ``ActivityList``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('learning',
collection='Activity',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.ActivityList(result, runtime=self._runtime, proxy=self._proxy) | Gets all ``Activities``.
In plenary mode, the returned list contains all known activites
or an error results. Otherwise, the returned list may contain
only those activities that are accessible through this session.
return: (osid.learning.ActivityList) - a ``ActivityList``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def handle_nextPageTemplate(self, pt):
'''
if pt has also templates for even and odd page convert it to list
'''
has_left_template = self._has_template_for_name(pt + '_left')
has_right_template = self._has_template_for_name(pt + '_right')
if has_left_template and has_right_template:
pt = [pt + '_left', pt + '_right']
'''On endPage change to the page template with name or index pt'''
if isinstance(pt, str):
if hasattr(self, '_nextPageTemplateCycle'):
del self._nextPageTemplateCycle
for t in self.pageTemplates:
if t.id == pt:
self._nextPageTemplateIndex = self.pageTemplates.index(t)
return
raise ValueError("can't find template('%s')" % pt)
elif isinstance(pt, int):
if hasattr(self, '_nextPageTemplateCycle'):
del self._nextPageTemplateCycle
self._nextPageTemplateIndex = pt
elif isinstance(pt, (list, tuple)):
#used for alternating left/right pages
#collect the refs to the template objects, complain if any are bad
c = PTCycle()
for ptn in pt:
#special case name used to short circuit the iteration
if ptn == '*':
c._restart = len(c)
continue
for t in self.pageTemplates:
if t.id == ptn.strip():
c.append(t)
break
if not c:
raise ValueError("No valid page templates in cycle")
elif c._restart > len(c):
raise ValueError("Invalid cycle restart position")
        # ensure we start on the first one
self._nextPageTemplateCycle = c.cyclicIterator()
else:
raise TypeError("Argument pt should be string or integer or list") | if pt has also templates for even and odd page convert it to list |
def compute_fw_at_frac_max_1d_simple(Y, xc, X=None, f=0.5):
"""Compute the full width at fraction f of the maximum"""
yy = np.asarray(Y)
if yy.ndim != 1:
raise ValueError('array must be 1-d')
if yy.size == 0:
raise ValueError('array is empty')
if X is None:
xx = np.arange(yy.shape[0])
else:
xx = X
xpix = coor_to_pix_1d(xc - xx[0])
try:
peak = yy[xpix]
except IndexError:
raise ValueError('peak is out of array')
fwhm_x, _codex, _msgx = compute_fwhm_1d(xx, yy - f * peak, xc, xpix)
return peak, fwhm_x | Compute the full width at fraction f of the maximum |
def mapping_create(index, doc_type, body=None, hosts=None, profile=None, source=None):
'''
Create a mapping in a given index
index
Index for the mapping
doc_type
Name of the document type
body
Mapping definition as specified in https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html
source
URL to file specifying mapping definition. Cannot be used in combination with ``body``.
CLI example::
salt myminion elasticsearch.mapping_create testindex user '{ "user" : { "properties" : { "message" : {"type" : "string", "store" : true } } } }'
'''
es = _get_instance(hosts, profile)
if source and body:
message = 'Either body or source should be specified but not both.'
raise SaltInvocationError(message)
if source:
body = __salt__['cp.get_file_str'](
source,
saltenv=__opts__.get('saltenv', 'base'))
try:
result = es.indices.put_mapping(index=index, doc_type=doc_type, body=body)
return result.get('acknowledged', False)
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot create mapping {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error)) | Create a mapping in a given index
index
Index for the mapping
doc_type
Name of the document type
body
Mapping definition as specified in https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-mapping.html
source
URL to file specifying mapping definition. Cannot be used in combination with ``body``.
CLI example::
salt myminion elasticsearch.mapping_create testindex user '{ "user" : { "properties" : { "message" : {"type" : "string", "store" : true } } } }' |
def configure(cls, api_token,
api_url="https://api.qubole.com/api/", version="v1.2",
poll_interval=5, skip_ssl_cert_check=False, cloud_name="AWS"):
"""
Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events
"""
cls._auth = QuboleAuth(api_token)
cls.api_token = api_token
cls.version = version
cls.baseurl = api_url
if poll_interval < Qubole.MIN_POLL_INTERVAL:
log.warn("Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL))
cls.poll_interval = Qubole.MIN_POLL_INTERVAL
else:
cls.poll_interval = poll_interval
cls.skip_ssl_cert_check = skip_ssl_cert_check
cls.cloud_name = cloud_name.lower()
cls.cached_agent = None | Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events |
def set_parent(self, new_parent, init=False):
"Associate the header to the control (it could be recreated)"
self._created = False
SubComponent.set_parent(self, new_parent, init)
# if index not given, append the column at the last position:
if self.index == -1 or self.index > self._parent.wx_obj.GetColumnCount():
self.index = self._parent.wx_obj.GetColumnCount()
# insert the column in the listview:
self._parent.wx_obj.InsertColumn(self.index, self.text, self._align,
self.width)
self._created = True | Associate the header to the control (it could be recreated) |
def get_order(membersuite_id, client=None):
"""Get an Order by ID.
"""
if not membersuite_id:
return None
client = client or get_new_client(request_session=True)
if not client.session_id:
client.request_session()
object_query = "SELECT Object() FROM ORDER WHERE ID = '{}'".format(
membersuite_id)
result = client.execute_object_query(object_query)
msql_result = result["body"]["ExecuteMSQLResult"]
if msql_result["Success"]:
membersuite_object_data = (msql_result["ResultValue"]
["SingleObject"])
else:
raise ExecuteMSQLError(result=result)
return Order(membersuite_object_data=membersuite_object_data) | Get an Order by ID. |
def setup_job(manager, job_id, tool_id, tool_version, use_metadata=False):
""" Setup new job from these inputs and return dict summarizing state
(used to configure command line).
"""
job_id = manager.setup_job(job_id, tool_id, tool_version)
if use_metadata:
manager.enable_metadata_directory(job_id)
return build_job_config(
job_id=job_id,
job_directory=manager.job_directory(job_id),
system_properties=manager.system_properties(),
tool_id=tool_id,
tool_version=tool_version
) | Setup new job from these inputs and return dict summarizing state
(used to configure command line). |
def filter_conflicts(conflicts_list, fields):
"""Use this function to automatically filter all the entries defined for a
given rule.
Params:
conflicts_list(List[Conflict]): the list of conflicts to filter.
fields(List[str]): fields to filter out, using an accessor syntax of
the form ``field.subfield.subsubfield``.
Return:
List[Conflict]: the given list filtered by `fields`
"""
for field in fields:
conflicts_list = filter_conflicts_by_path(conflicts_list, field)
return conflicts_list | Use this function to automatically filter all the entries defined for a
given rule.
Params:
conflicts_list(List[Conflict]): the list of conflicts to filter.
fields(List[str]): fields to filter out, using an accessor syntax of
the form ``field.subfield.subsubfield``.
Return:
List[Conflict]: the given list filtered by `fields` |
def get_config_variable(self, config_id, offset):
"""Get a chunk of a config variable's value."""
config = self._config_variables.get(config_id)
if config is None:
return [b""]
return [bytes(config.current_value[offset:offset + 20])] | Get a chunk of a config variable's value. |
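The offset/20-byte windowing above is plain slicing; a quick illustration:

value = bytes(range(50))
chunks = [value[off:off + 20] for off in range(0, len(value), 20)]
print([len(c) for c in chunks])   # [20, 20, 10] -- slicing past the end just truncates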
def build_reportnum_kb(fpath):
"""Given the path to a knowledge base file containing the details
of institutes and the patterns that their preprint report
numbering schemes take, create a dictionary of regexp search
patterns to recognise these preprint references in reference
lines, and a dictionary of replacements for non-standard preprint
categories in these references.
The knowledge base file should consist only of lines that take one
of the following 3 formats:
#####Institute Name####
(the name of the institute to which the preprint reference patterns
belong, e.g. '#####LANL#####', surrounded by 5 # on either side.)
<pattern>
(numeration patterns for an institute's preprints, surrounded by
< and >.)
seek-term --- replace-term
(i.e. a seek phrase on the left hand side, a replace phrase on the
right hand side, with the two phrases being separated by 3 hyphens.)
E.g.:
ASTRO PH ---astro-ph
The left-hand side term is a non-standard version of the preprint
reference category; the right-hand side term is the standard version.
If the KB file cannot be read from, or an unexpected line is
encountered in the KB, an error message is output to standard error
and execution is halted with an error-code 0.
@param fpath: (string) the path to the knowledge base file.
@return: (tuple) containing 2 dictionaries. The first contains regexp
search patterns used to identify preprint references in a line. This
dictionary is keyed by a tuple containing the line number of the
pattern in the KB and the non-standard category string.
E.g.: (3, 'ASTRO PH').
The second dictionary contains the standardised category string,
and is keyed by the non-standard category string. E.g.: 'astro-ph'.
"""
def _add_institute_preprint_patterns(preprint_classifications,
preprint_numeration_ptns,
preprint_reference_search_regexp_patterns,
standardised_preprint_reference_categories,
kb_line_num):
"""For a list of preprint category strings and preprint numeration
patterns for a given institute, create the regexp patterns for
each of the preprint types. Add the regexp patterns to the
dictionary of search patterns
(preprint_reference_search_regexp_patterns), keyed by the line
number of the institute in the KB, and the preprint category
search string. Also add the standardised preprint category string
to another dictionary, keyed by the line number of its position
in the KB and its non-standardised version.
@param preprint_classifications: (list) of tuples whereby each tuple
contains a preprint category search string and the line number of
the name of institute to which it belongs in the KB.
E.g.: (45, 'ASTRO PH').
@param preprint_numeration_ptns: (list) of preprint reference
numeration search patterns (strings)
@param preprint_reference_search_regexp_patterns: (dictionary) of
regexp patterns used to search in document lines.
@param standardised_preprint_reference_categories: (dictionary)
containing the standardised strings for preprint reference
categories. (E.g. 'astro-ph'.)
@param kb_line_num: (integer) - the line number int the KB at
which a given institute name was found.
@return: None
"""
if preprint_classifications and preprint_numeration_ptns:
# the previous institute had both numeration styles and categories
# for preprint references.
# build regexps and add them for this institute:
# First, order the numeration styles by line-length, and build a
# grouped regexp for recognising numeration:
ordered_patterns = \
order_reportnum_patterns_bylen(preprint_numeration_ptns)
# create a grouped regexp for numeration part of
# preprint reference:
numeration_regexp = \
create_institute_numeration_group_regexp_pattern(
ordered_patterns)
# for each "classification" part of preprint references, create a
# complete regex:
# will be in the style "(categ)-(numatn1|numatn2|numatn3|...)"
for classification in preprint_classifications:
search_pattern_str = ur'(?:^|[^a-zA-Z0-9\/\.\-])([\[\(]?(?P<categ>' \
+ classification[0].strip() + u')' \
+ numeration_regexp + ur'[\]\)]?)'
re_search_pattern = re.compile(search_pattern_str,
re.UNICODE)
preprint_reference_search_regexp_patterns[(kb_line_num,
classification[0])] =\
re_search_pattern
standardised_preprint_reference_categories[(kb_line_num,
classification[0])] =\
classification[1]
preprint_reference_search_regexp_patterns = {} # a dictionary of patterns
# used to recognise
# categories of preprints
# as used by various
# institutes
standardised_preprint_reference_categories = {} # dictionary of
# standardised category
# strings for preprint cats
current_institute_preprint_classifications = [] # list of tuples containing
# preprint categories in
# their raw & standardised
# forms, as read from KB
current_institute_numerations = [] # list of preprint
# numeration patterns, as
# read from the KB
# pattern to recognise an institute name line in the KB
re_institute_name = re.compile(ur'^\*{5}\s*(.+)\s*\*{5}$', re.UNICODE)
# pattern to recognise an institute preprint categ line in the KB
re_preprint_classification = \
re.compile(ur'^\s*(\w.*)\s*---\s*(\w.*)\s*$', re.UNICODE)
# pattern to recognise a preprint numeration-style line in KB
re_numeration_pattern = re.compile(ur'^\<(.+)\>$', re.UNICODE)
kb_line_num = 0 # when making the dictionary of patterns, which is
# keyed by the category search string, this counter
# will ensure that patterns in the dictionary are not
# overwritten if 2 institutes have the same category
# styles.
with file_resolving(fpath) as fh:
for rawline in fh:
if rawline.startswith('#'):
continue
kb_line_num += 1
m_institute_name = re_institute_name.search(rawline)
if m_institute_name:
# This KB line is the name of an institute
# append the last institute's pattern list to the list of
# institutes:
_add_institute_preprint_patterns(current_institute_preprint_classifications,
current_institute_numerations,
preprint_reference_search_regexp_patterns,
standardised_preprint_reference_categories,
kb_line_num)
# Now start a new dictionary to contain the search patterns
# for this institute:
current_institute_preprint_classifications = []
current_institute_numerations = []
# move on to the next line
continue
m_preprint_classification = \
re_preprint_classification.search(rawline)
if m_preprint_classification:
# This KB line contains a preprint classification for
# the current institute
try:
current_institute_preprint_classifications.append((m_preprint_classification.group(1),
m_preprint_classification.group(2)))
except (AttributeError, NameError):
# didn't match this line correctly - skip it
pass
# move on to the next line
continue
m_numeration_pattern = re_numeration_pattern.search(rawline)
if m_numeration_pattern:
# This KB line contains a preprint item numeration pattern
# for the current institute
try:
current_institute_numerations.append(
m_numeration_pattern.group(1))
except (AttributeError, NameError):
# didn't match the numeration pattern correctly - skip it
pass
continue
_add_institute_preprint_patterns(current_institute_preprint_classifications,
current_institute_numerations,
preprint_reference_search_regexp_patterns,
standardised_preprint_reference_categories,
kb_line_num)
# return the preprint reference patterns and the replacement strings
# for non-standard categ-strings:
return (preprint_reference_search_regexp_patterns,
standardised_preprint_reference_categories) | Given the path to a knowledge base file containing the details
of institutes and the patterns that their preprint report
numbering schemes take, create a dictionary of regexp search
patterns to recognise these preprint references in reference
lines, and a dictionary of replacements for non-standard preprint
categories in these references.
The knowledge base file should consist only of lines that take one
of the following 3 formats:
#####Institute Name####
(the name of the institute to which the preprint reference patterns
belong, e.g. '#####LANL#####', surrounded by 5 # on either side.)
<pattern>
(numeration patterns for an institute's preprints, surrounded by
< and >.)
seek-term --- replace-term
(i.e. a seek phrase on the left hand side, a replace phrase on the
right hand side, with the two phrases being separated by 3 hyphens.)
E.g.:
ASTRO PH ---astro-ph
The left-hand side term is a non-standard version of the preprint
reference category; the right-hand side term is the standard version.
If the KB file cannot be read from, or an unexpected line is
encountered in the KB, an error message is output to standard error
and execution is halted with an error-code 0.
@param fpath: (string) the path to the knowledge base file.
@return: (tuple) containing 2 dictionaries. The first contains regexp
search patterns used to identify preprint references in a line. This
dictionary is keyed by a tuple containing the line number of the
pattern in the KB and the non-standard category string.
E.g.: (3, 'ASTRO PH').
The second dictionary contains the standardised category string,
and is keyed by the non-standard category string. E.g.: 'astro-ph'. |
def _setup_regex(self):
"""Sets up the constant regex strings etc. that can be used to
parse the strings for determining context."""
self.RE_COMMENTS = cache.RE_COMMENTS
self.RE_MODULE = cache.RE_MODULE
self.RE_TYPE = cache.RE_TYPE
self.RE_EXEC = cache.RE_EXEC
self.RE_MEMBERS = cache.RE_MEMBERS
self.RE_DEPEND = cache.RE_DEPEND | Sets up the constant regex strings etc. that can be used to
parse the strings for determining context. |
def _PopulateQuantilesHistogram(self, hist, nums):
"""Fills in the histogram with quantile information from the provided array.
Args:
hist: A Histogram proto message to fill in.
nums: A list of numbers to create a quantiles histogram from.
"""
if not nums:
return
num_quantile_buckets = 10
quantiles_to_get = [
x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)
]
quantiles = np.percentile(nums, quantiles_to_get)
hist.type = self.histogram_proto.QUANTILES
quantiles_sample_count = float(len(nums)) / num_quantile_buckets
for low, high in zip(quantiles, quantiles[1:]):
hist.buckets.add(
low_value=low, high_value=high, sample_count=quantiles_sample_count) | Fills in the histogram with quantile information from the provided array.
Args:
hist: A Histogram proto message to fill in.
nums: A list of numbers to create a quantiles histogram from. |
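A proto-free numpy sketch of the same bucket computation: eleven percentile cut points give ten buckets, each credited with len(nums)/10 samples:

import numpy as np

nums = list(range(1, 101))
cuts = np.percentile(nums, [x * 10 for x in range(11)])
sample_count = len(nums) / 10
buckets = [(float(low), float(high), sample_count) for low, high in zip(cuts, cuts[1:])]
print(buckets[0], buckets[-1])   # roughly (1.0, 10.9, 10.0) and (90.1, 100.0, 10.0)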
def is_watching(self, username):
"""Check if user is being watched by the given user
:param username: Check if username is watching you
"""
        if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/friends/watching/{}'.format(username))
return response['watching'] | Check if user is being watched by the given user
:param username: Check if username is watching you |
def with_connection(func):
"""Decorate a function to open a new datafind connection if required
This method will inspect the ``connection`` keyword, and if `None`
(or missing), will use the ``host`` and ``port`` keywords to open
a new connection and pass it as ``connection=<new>`` to ``func``.
"""
@wraps(func)
def wrapped(*args, **kwargs):
if kwargs.get('connection') is None:
kwargs['connection'] = _choose_connection(host=kwargs.get('host'),
port=kwargs.get('port'))
try:
return func(*args, **kwargs)
except HTTPException:
kwargs['connection'] = reconnect(kwargs['connection'])
return func(*args, **kwargs)
return wrapped | Decorate a function to open a new datafind connection if required
This method will inspect the ``connection`` keyword, and if `None`
(or missing), will use the ``host`` and ``port`` keywords to open
a new connection and pass it as ``connection=<new>`` to ``func``. |
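The same inject-a-default-dependency-and-retry shape as a generic, self-contained sketch; `connect` and `reconnect` are placeholders (not the gwdatafind helpers the decorator above relies on), and a builtin ConnectionError stands in for the HTTPException handled there:

from functools import wraps

def connect(host=None, port=None):
    return ('connection', host, port)        # placeholder

def reconnect(connection):
    return connection                        # placeholder

def with_connection(func):
    @wraps(func)
    def wrapped(*args, **kwargs):
        if kwargs.get('connection') is None:
            kwargs['connection'] = connect(kwargs.get('host'), kwargs.get('port'))
        try:
            return func(*args, **kwargs)
        except ConnectionError:
            kwargs['connection'] = reconnect(kwargs['connection'])
            return func(*args, **kwargs)
    return wrapped

@with_connection
def find_frames(pattern, connection=None, host=None, port=None):
    return pattern, connection

print(find_frames('H1_R', host='datafind.example.org'))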
def sha256sum(filename):
"""Return SHA256 hash of file."""
sha256 = hashlib.sha256()
mem_view = memoryview(bytearray(128*1024))
with open(filename, 'rb', buffering=0) as stream:
for i in iter(lambda: stream.readinto(mem_view), 0):
sha256.update(mem_view[:i])
return sha256.hexdigest() | Return SHA256 hash of file. |
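Quick usage check (assuming the sha256sum function above is in scope): hashing a temporary file should match hashing the same bytes directly with hashlib.

import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world\n')
    path = tmp.name
assert sha256sum(path) == hashlib.sha256(b'hello world\n').hexdigest()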