code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def _getrsyncoptions(self):
"""Get options to be passed for rsync."""
ignores = list(self.DEFAULT_IGNORES)
ignores += self.config.option.rsyncignore
ignores += self.config.getini("rsyncignore")
return {"ignores": ignores, "verbose": self.config.option.verbose} | Get options to be passed for rsync. | Below is the the instruction that describes the task:
### Input:
Get options to be passed for rsync.
### Response:
def _getrsyncoptions(self):
"""Get options to be passed for rsync."""
ignores = list(self.DEFAULT_IGNORES)
ignores += self.config.option.rsyncignore
ignores += self.config.getini("rsyncignore")
return {"ignores": ignores, "verbose": self.config.option.verbose} |
def available():
"""Returns True if a deep water model can be built, or False otherwise."""
builder_json = h2o.api("GET /3/ModelBuilders", data={"algo": "deepwater"})
visibility = builder_json["model_builders"]["deepwater"]["visibility"]
if visibility == "Experimental":
print("Cannot build a Deep Water model - no backend found.")
return False
else:
return True | Returns True if a deep water model can be built, or False otherwise. | Below is the instruction that describes the task:
### Input:
Returns True if a deep water model can be built, or False otherwise.
### Response:
def available():
"""Returns True if a deep water model can be built, or False otherwise."""
builder_json = h2o.api("GET /3/ModelBuilders", data={"algo": "deepwater"})
visibility = builder_json["model_builders"]["deepwater"]["visibility"]
if visibility == "Experimental":
print("Cannot build a Deep Water model - no backend found.")
return False
else:
return True |
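A minimal usage sketch for the row above; it assumes a reachable H2O cluster, and the surrounding setup is illustrative rather than taken from the source:

```python
import h2o

h2o.init()  # connect to (or start) a local H2O cluster; assumed to be available
if available():
    print("Deep Water backend found - a Deep Water model can be built")
else:
    print("Deep Water backend missing - skipping Deep Water models")
```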
def describe(self):
'''Provide a dictionary with information describing itself.'''
description = {
'description': self._description,
'type': self.name,
}
description.update(self.extra_params)
return description | Provide a dictionary with information describing itself. | Below is the instruction that describes the task:
### Input:
Provide a dictionary with information describing itself.
### Response:
def describe(self):
'''Provide a dictionary with information describing itself.'''
description = {
'description': self._description,
'type': self.name,
}
description.update(self.extra_params)
return description |
def read_locked(*args, **kwargs):
"""Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to.
"""
def decorator(f):
attr_name = kwargs.get('lock', '_lock')
@six.wraps(f)
def wrapper(self, *args, **kwargs):
rw_lock = getattr(self, attr_name)
with rw_lock.read_lock():
return f(self, *args, **kwargs)
return wrapper
# This is needed to handle when the decorator has args or the decorator
# doesn't have args, python is rather weird here...
if kwargs or not args:
return decorator
else:
if len(args) == 1:
return decorator(args[0])
else:
return decorator | Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to. | Below is the instruction that describes the task:
### Input:
Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to.
### Response:
def read_locked(*args, **kwargs):
"""Acquires & releases a read lock around call into decorated method.
NOTE(harlowja): if no attribute name is provided then by default the
attribute named '_lock' is looked for (this attribute is expected to be
a :py:class:`.ReaderWriterLock`) in the instance object this decorator
is attached to.
"""
def decorator(f):
attr_name = kwargs.get('lock', '_lock')
@six.wraps(f)
def wrapper(self, *args, **kwargs):
rw_lock = getattr(self, attr_name)
with rw_lock.read_lock():
return f(self, *args, **kwargs)
return wrapper
# This is needed to handle when the decorator has args or the decorator
# doesn't have args, python is rather weird here...
if kwargs or not args:
return decorator
else:
if len(args) == 1:
return decorator(args[0])
else:
return decorator |
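A usage sketch for the decorator above, assuming the `fasteners` package provides the `ReaderWriterLock`; the class and method names here are illustrative only:

```python
import fasteners

class Inventory(object):
    def __init__(self):
        self._lock = fasteners.ReaderWriterLock()  # default attribute the decorator looks for
        self._items = {}

    @read_locked
    def get(self, key):
        # body runs while the read lock is held
        return self._items.get(key)

    @read_locked(lock='_lock')  # attribute name passed explicitly
    def count(self):
        return len(self._items)

print(Inventory().count())  # 0
```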
def autosave_all(self):
"""Autosave all opened files."""
for index in range(self.stack.get_stack_count()):
self.autosave(index) | Autosave all opened files. | Below is the instruction that describes the task:
### Input:
Autosave all opened files.
### Response:
def autosave_all(self):
"""Autosave all opened files."""
for index in range(self.stack.get_stack_count()):
self.autosave(index) |
def access_required(config=None):
"""
Authenticates an HTTP method handler based on a custom set of arguments
"""
def _access_required(http_method_handler):
def secure_http_method_handler(self, *args, **kwargs):
# authentication context must be set
if not self.__provider_config__.authentication:
_message = "Service available to authenticated users only, no auth context provider set in handler"
authentication_error = prestans.exception.AuthenticationError(_message)
authentication_error.request = self.request
raise authentication_error
# check for access by calling is_authorized_user
if not self.__provider_config__.authentication.is_authorized_user(config):
_message = "Service available to authorized users only"
authorization_error = prestans.exception.AuthorizationError(_message)
authorization_error.request = self.request
raise authorization_error
http_method_handler(self, *args, **kwargs)
return wraps(http_method_handler)(secure_http_method_handler)
return _access_required | Authenticates an HTTP method handler based on a custom set of arguments | Below is the instruction that describes the task:
### Input:
Authenticates an HTTP method handler based on a custom set of arguments
### Response:
def access_required(config=None):
"""
Authenticates an HTTP method handler based on a custom set of arguments
"""
def _access_required(http_method_handler):
def secure_http_method_handler(self, *args, **kwargs):
# authentication context must be set
if not self.__provider_config__.authentication:
_message = "Service available to authenticated users only, no auth context provider set in handler"
authentication_error = prestans.exception.AuthenticationError(_message)
authentication_error.request = self.request
raise authentication_error
# check for access by calling is_authorized_user
if not self.__provider_config__.authentication.is_authorized_user(config):
_message = "Service available to authorized users only"
authorization_error = prestans.exception.AuthorizationError(_message)
authorization_error.request = self.request
raise authorization_error
http_method_handler(self, *args, **kwargs)
return wraps(http_method_handler)(secure_http_method_handler)
return _access_required |
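A runnable sketch of the decorator's control flow using stand-in objects; the stub classes below are illustrative assumptions, not part of prestans:

```python
class _StubAuthProvider(object):
    def is_authorized_user(self, config):
        return config == 'admin'          # pretend only 'admin' passes

class _StubProviderConfig(object):
    authentication = _StubAuthProvider()

class Handler(object):
    __provider_config__ = _StubProviderConfig()
    request = None                        # attached to the exception on failure

    @access_required(config='admin')
    def get(self):
        print("handler body reached")

Handler().get()                           # prints: handler body reached
```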
def connection_id_to_public_key(self, connection_id):
"""
Get stored public key for a connection.
"""
with self._connections_lock:
try:
connection_info = self._connections[connection_id]
return connection_info.public_key
except KeyError:
return None | Get stored public key for a connection. | Below is the instruction that describes the task:
### Input:
Get stored public key for a connection.
### Response:
def connection_id_to_public_key(self, connection_id):
"""
Get stored public key for a connection.
"""
with self._connections_lock:
try:
connection_info = self._connections[connection_id]
return connection_info.public_key
except KeyError:
return None |
def get_pages(url):
"""
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
"""
while True:
yield url
doc = html.parse(url).find("body")
links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")]
if not links:
break
url = urljoin(url, links[0].get('href')) | Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat | Below is the instruction that describes the task:
### Input:
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
### Response:
def get_pages(url):
"""
Return the 'pages' from the starting url
Technically, look for the 'next 50' link, yield and download it, repeat
"""
while True:
yield url
doc = html.parse(url).find("body")
links = [a for a in doc.findall(".//a") if a.text and a.text.startswith("next ")]
if not links:
break
url = urljoin(url, links[0].get('href')) |
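A usage sketch for the generator above; the starting URL is a placeholder, and a real page needs MediaWiki-style 'next 50' links for the loop to run more than once:

```python
start_url = "https://example.org/wiki/Special:AllPages"   # placeholder URL
for page_url in get_pages(start_url):
    print("scraping", page_url)
    # ... parse the page here; iteration stops when no 'next ...' link is found
```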
def debug(self, i: int=None) -> str:
"""
Returns a debug message
"""
head = "[" + colors.yellow("debug") + "]"
if i is not None:
head = str(i) + " " + head
return head | Returns a debug message | Below is the instruction that describes the task:
### Input:
Returns a debug message
### Response:
def debug(self, i: int=None) -> str:
"""
Returns a debug message
"""
head = "[" + colors.yellow("debug") + "]"
if i is not None:
head = str(i) + " " + head
return head |
def _insert_additionals(self, fmtos, seen=None):
"""
Insert additional formatoptions into `fmtos`.
This method inserts those formatoptions into `fmtos` that are required
because one of the following criteria is fulfilled:
1. The :attr:`replot` attribute is True
2. Any formatoption with START priority is in `fmtos`
3. A dependency of one formatoption is in `fmtos`
Parameters
----------
fmtos: list
The list of formatoptions that shall be updated
seen: set
The formatoption keys that shall not be included. If None, all
formatoptions in `fmtos` are used
Returns
-------
fmtos
The initial `fmtos` plus further formatoptions
Notes
-----
`fmtos` and `seen` are modified in place (except that any formatoption
in the initial `fmtos` has :attr:`~Formatoption.requires_clearing`
attribute set to True)"""
def get_dependencies(fmto):
if fmto is None:
return []
return fmto.dependencies + list(chain(*map(
lambda key: get_dependencies(getattr(self, key, None)),
fmto.dependencies)))
seen = seen or {fmto.key for fmto in fmtos}
keys = {fmto.key for fmto in fmtos}
self.replot = self.replot or any(
fmto.requires_replot for fmto in fmtos)
if self.replot or any(fmto.priority >= START for fmto in fmtos):
self.replot = True
self.plot_data = self.data
new_fmtos = dict((f.key, f) for f in self._fmtos
if ((f not in fmtos and is_data_dependent(
f, self.data))))
seen.update(new_fmtos)
keys.update(new_fmtos)
fmtos += list(new_fmtos.values())
# insert the formatoptions that have to be updated if the plot is
# changed
if any(fmto.priority >= BEFOREPLOTTING for fmto in fmtos):
new_fmtos = dict((f.key, f) for f in self._fmtos
if ((f not in fmtos and f.update_after_plot)))
fmtos += list(new_fmtos.values())
for fmto in set(self._fmtos).difference(fmtos):
all_dependencies = get_dependencies(fmto)
if keys.intersection(all_dependencies):
fmtos.append(fmto)
if any(fmto.requires_clearing for fmto in fmtos):
self.cleared = True
return list(self._fmtos)
return fmtos | Insert additional formatoptions into `fmtos`.
This method inserts those formatoptions into `fmtos` that are required
because one of the following criteria is fulfilled:
1. The :attr:`replot` attribute is True
2. Any formatoption with START priority is in `fmtos`
3. A dependency of one formatoption is in `fmtos`
Parameters
----------
fmtos: list
The list of formatoptions that shall be updated
seen: set
The formatoption keys that shall not be included. If None, all
formatoptions in `fmtos` are used
Returns
-------
fmtos
The initial `fmtos` plus further formatoptions
Notes
-----
`fmtos` and `seen` are modified in place (except that any formatoption
in the initial `fmtos` has :attr:`~Formatoption.requires_clearing`
attribute set to True) | Below is the instruction that describes the task:
### Input:
Insert additional formatoptions into `fmtos`.
This method inserts those formatoptions into `fmtos` that are required
because one of the following criteria is fulfilled:
1. The :attr:`replot` attribute is True
2. Any formatoption with START priority is in `fmtos`
3. A dependency of one formatoption is in `fmtos`
Parameters
----------
fmtos: list
The list of formatoptions that shall be updated
seen: set
The formatoption keys that shall not be included. If None, all
formatoptions in `fmtos` are used
Returns
-------
fmtos
The initial `fmtos` plus further formatoptions
Notes
-----
`fmtos` and `seen` are modified in place (except that any formatoption
in the initial `fmtos` has :attr:`~Formatoption.requires_clearing`
attribute set to True)
### Response:
def _insert_additionals(self, fmtos, seen=None):
"""
Insert additional formatoptions into `fmtos`.
This method inserts those formatoptions into `fmtos` that are required
because one of the following criteria is fulfilled:
1. The :attr:`replot` attribute is True
2. Any formatoption with START priority is in `fmtos`
3. A dependency of one formatoption is in `fmtos`
Parameters
----------
fmtos: list
The list of formatoptions that shall be updated
seen: set
The formatoption keys that shall not be included. If None, all
formatoptions in `fmtos` are used
Returns
-------
fmtos
The initial `fmtos` plus further formatoptions
Notes
-----
`fmtos` and `seen` are modified in place (except that any formatoption
in the initial `fmtos` has :attr:`~Formatoption.requires_clearing`
attribute set to True)"""
def get_dependencies(fmto):
if fmto is None:
return []
return fmto.dependencies + list(chain(*map(
lambda key: get_dependencies(getattr(self, key, None)),
fmto.dependencies)))
seen = seen or {fmto.key for fmto in fmtos}
keys = {fmto.key for fmto in fmtos}
self.replot = self.replot or any(
fmto.requires_replot for fmto in fmtos)
if self.replot or any(fmto.priority >= START for fmto in fmtos):
self.replot = True
self.plot_data = self.data
new_fmtos = dict((f.key, f) for f in self._fmtos
if ((f not in fmtos and is_data_dependent(
f, self.data))))
seen.update(new_fmtos)
keys.update(new_fmtos)
fmtos += list(new_fmtos.values())
# insert the formatoptions that have to be updated if the plot is
# changed
if any(fmto.priority >= BEFOREPLOTTING for fmto in fmtos):
new_fmtos = dict((f.key, f) for f in self._fmtos
if ((f not in fmtos and f.update_after_plot)))
fmtos += list(new_fmtos.values())
for fmto in set(self._fmtos).difference(fmtos):
all_dependencies = get_dependencies(fmto)
if keys.intersection(all_dependencies):
fmtos.append(fmto)
if any(fmto.requires_clearing for fmto in fmtos):
self.cleared = True
return list(self._fmtos)
return fmtos |
def _add_graph_level(graph, level, parent_ids, names, scores, normalized_scores,
include_pad):
"""Adds a level to the passed graph"""
for i, parent_id in enumerate(parent_ids):
if not include_pad and names[i] == PAD_TOKEN:
continue
new_node = (level, i)
parent_node = (level - 1, parent_id)
raw_score = '%.3f' % float(scores[i]) if scores[i] is not None else '-inf'
norm_score = '%.3f' % float(normalized_scores[i]) if normalized_scores[i] is not None else '-inf'
graph.add_node(new_node)
graph.node[new_node]["name"] = names[i]
graph.node[new_node]["score"] = "[RAW] {}".format(raw_score)
graph.node[new_node]["norm_score"] = "[NORM] {}".format(norm_score)
graph.node[new_node]["size"] = 100
# Add an edge to the parent
graph.add_edge(parent_node, new_node) | Adds a level to the passed graph | Below is the instruction that describes the task:
### Input:
Adds a level to the passed graph
### Response:
def _add_graph_level(graph, level, parent_ids, names, scores, normalized_scores,
include_pad):
"""Adds a level to the passed graph"""
for i, parent_id in enumerate(parent_ids):
if not include_pad and names[i] == PAD_TOKEN:
continue
new_node = (level, i)
parent_node = (level - 1, parent_id)
raw_score = '%.3f' % float(scores[i]) if scores[i] is not None else '-inf'
norm_score = '%.3f' % float(normalized_scores[i]) if normalized_scores[i] is not None else '-inf'
graph.add_node(new_node)
graph.node[new_node]["name"] = names[i]
graph.node[new_node]["score"] = "[RAW] {}".format(raw_score)
graph.node[new_node]["norm_score"] = "[NORM] {}".format(norm_score)
graph.node[new_node]["size"] = 100
# Add an edge to the parent
graph.add_edge(parent_node, new_node) |
def set_property(self, key, value):
'''Set a new (or updating existing) key value pair.
Args:
key: A string containing the key namespace
value: A str, int, or bool value
Raises:
NotImplementedError: an unsupported value-type was provided
'''
value_type = type(value)
if value_type not in [str, int, bool]:
raise NotImplementedError(
'Only string, integer, and boolean properties are implemented')
key_object = self.properties.findChild(name='key', text=key)
# Key (and value, if it's a valid property list) don't exist
if key_object is None:
key_object = self.soup.new_tag('key')
key_object.string = key
self.properties.append(key_object)
value_object = self.soup.new_tag(
{str: 'string', int: 'integer', bool: str(value).lower()}[
value_type])
if value_type is not bool:
value_object.string = str(value)
self.properties.append(value_object)
return
# Key (and value, if it's a valid property list) exist
# Eh, just remove the key+value tags from the tree and re-add them
# (with the new value)
value_object = key_object.find_next_sibling()
key_object.decompose()
value_object.decompose()
self.set_property(key, value) | Set a new (or updating existing) key value pair.
Args:
key: A string containing the key namespace
value: A str, int, or bool value
Raises:
NotImplementedError: an unsupported value-type was provided | Below is the instruction that describes the task:
### Input:
Set a new (or updating existing) key value pair.
Args:
key: A string containing the key namespace
value: A str, int, or bool value
Raises:
NotImplementedError: an unsupported value-type was provided
### Response:
def set_property(self, key, value):
'''Set a new (or updating existing) key value pair.
Args:
key: A string containing the key namespace
value: A str, int, or bool value
Raises:
NotImplementedError: an unsupported value-type was provided
'''
value_type = type(value)
if value_type not in [str, int, bool]:
raise NotImplementedError(
'Only string, integer, and boolean properties are implemented')
key_object = self.properties.findChild(name='key', text=key)
# Key (and value, if it's a valid property list) don't exist
if key_object is None:
key_object = self.soup.new_tag('key')
key_object.string = key
self.properties.append(key_object)
value_object = self.soup.new_tag(
{str: 'string', int: 'integer', bool: str(value).lower()}[
value_type])
if value_type is not bool:
value_object.string = str(value)
self.properties.append(value_object)
return
# Key (and value, if it's a valid property list) exist
# Eh, just remove the key+value tags from the tree and re-add them
# (with the new value)
value_object = key_object.find_next_sibling()
key_object.decompose()
value_object.decompose()
self.set_property(key, value) |
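An illustrative call sequence for the method above, assuming the enclosing object (named `plist` here, a made-up name) wraps a plist-style XML document with BeautifulSoup as `self.soup`/`self.properties`:

```python
plist.set_property('Label', 'com.example.agent')   # adds <key>Label</key><string>...</string>
plist.set_property('ThrottleInterval', 30)         # adds <integer>30</integer>
plist.set_property('RunAtLoad', True)              # adds <true/>
plist.set_property('ThrottleInterval', 60)         # existing key/value pair is removed and re-added
plist.set_property('Nice', 1.5)                    # raises NotImplementedError (float unsupported)
```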
def columnOptions( self, tableType ):
"""
Returns the column options for the input table type.
:param tableType | <subclass of orb.Table>
:return [<str>, ..]
"""
if ( not tableType ):
return []
schema = tableType.schema()
return map(lambda x: x.name(), schema.columns()) | Returns the column options for the input table type.
:param tableType | <subclass of orb.Table>
:return [<str>, ..] | Below is the instruction that describes the task:
### Input:
Returns the column options for the input table type.
:param tableType | <subclass of orb.Table>
:return [<str>, ..]
### Response:
def columnOptions( self, tableType ):
"""
Returns the column options for the input table type.
:param tableType | <subclass of orb.Table>
:return [<str>, ..]
"""
if ( not tableType ):
return []
schema = tableType.schema()
return map(lambda x: x.name(), schema.columns()) |
def _convert_priority(p_priority):
"""
Converts todo.txt priority to an iCalendar priority (RFC 2445).
Priority A gets priority 1, priority B gets priority 5 and priority C-F get
priorities 6-9. This scheme makes sure that clients that use "high",
"medium" and "low" show the correct priority.
"""
result = 0
prio_map = {
'A': 1,
'B': 5,
'C': 6,
'D': 7,
'E': 8,
'F': 9,
}
try:
result = prio_map[p_priority]
except KeyError:
if p_priority:
# todos with no priority have priority None, and result of this
# function will be 0. For all other letters, return 9 (lowest
# priority in RFC 2445).
result = 9
return result | Converts todo.txt priority to an iCalendar priority (RFC 2445).
Priority A gets priority 1, priority B gets priority 5 and priority C-F get
priorities 6-9. This scheme makes sure that clients that use "high",
"medium" and "low" show the correct priority. | Below is the the instruction that describes the task:
### Input:
Converts todo.txt priority to an iCalendar priority (RFC 2445).
Priority A gets priority 1, priority B gets priority 5 and priority C-F get
priorities 6-9. This scheme makes sure that clients that use "high",
"medium" and "low" show the correct priority.
### Response:
def _convert_priority(p_priority):
"""
Converts todo.txt priority to an iCalendar priority (RFC 2445).
Priority A gets priority 1, priority B gets priority 5 and priority C-F get
priorities 6-9. This scheme makes sure that clients that use "high",
"medium" and "low" show the correct priority.
"""
result = 0
prio_map = {
'A': 1,
'B': 5,
'C': 6,
'D': 7,
'E': 8,
'F': 9,
}
try:
result = prio_map[p_priority]
except KeyError:
if p_priority:
# todos with no priority have priority None, and result of this
# function will be 0. For all other letters, return 9 (lowest
# priority in RFC 2445).
result = 9
return result |
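Spot checks of the mapping described above (illustrative, matching the docstring):

```python
assert _convert_priority('A') == 1    # highest
assert _convert_priority('B') == 5
assert _convert_priority('D') == 7    # C-F map to 6-9
assert _convert_priority('Z') == 9    # any other letter: lowest RFC 2445 priority
assert _convert_priority(None) == 0   # todo without a priority
```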
def getOffsetFromRva(self, rva):
"""
Converts an RVA to an offset.
@type rva: int
@param rva: The RVA to be converted.
@rtype: int
@return: An integer value representing an offset in the PE file.
"""
offset = -1
s = self.getSectionByRva(rva)
if s != offset:
offset = (rva - self.sectionHeaders[s].virtualAddress.value) + self.sectionHeaders[s].pointerToRawData.value
else:
offset = rva
return offset | Converts an RVA to an offset.
@type rva: int
@param rva: The RVA to be converted.
@rtype: int
@return: An integer value representing an offset in the PE file. | Below is the instruction that describes the task:
### Input:
Converts an RVA to an offset.
@type rva: int
@param rva: The RVA to be converted.
@rtype: int
@return: An integer value representing an offset in the PE file.
### Response:
def getOffsetFromRva(self, rva):
"""
Converts an offset to an RVA.
@type rva: int
@param rva: The RVA to be converted.
@rtype: int
@return: An integer value representing an offset in the PE file.
"""
offset = -1
s = self.getSectionByRva(rva)
if s != offset:
offset = (rva - self.sectionHeaders[s].virtualAddress.value) + self.sectionHeaders[s].pointerToRawData.value
else:
offset = rva
return offset |
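A worked example of the translation above, using made-up section values:

```python
# Suppose getSectionByRva(rva) finds a section with:
#   virtualAddress.value   = 0x1000
#   pointerToRawData.value = 0x400
rva = 0x1234
offset = (rva - 0x1000) + 0x400
assert offset == 0x634          # the file offset returned for that RVA
```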
def extract_data(self, page):
"""Extract the AppNexus object or list of objects from the response"""
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
if isinstance(element, dict):
return [self.representation(self.client, self.service_name,
element)]
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x)
for x in element] | Extract the AppNexus object or list of objects from the response | Below is the instruction that describes the task:
### Input:
Extract the AppNexus object or list of objects from the response
### Response:
def extract_data(self, page):
"""Extract the AppNexus object or list of objects from the response"""
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
if isinstance(element, dict):
return [self.representation(self.client, self.service_name,
element)]
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x)
for x in element] |
def dumps(post, handler=None, **kwargs):
"""
Serialize a :py:class:`post <frontmatter.Post>` to a string and return text.
This always returns unicode text, which can then be encoded.
Passing ``handler`` will change how metadata is turned into text. A handler
passed as an argument will override ``post.handler``, with
:py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>` used as
a default.
::
>>> print(frontmatter.dumps(post))
---
excerpt: tl;dr
layout: post
title: Hello, world!
---
Well, hello there, world.
"""
if handler is None:
handler = getattr(post, 'handler', None) or YAMLHandler()
start_delimiter = kwargs.pop('start_delimiter', handler.START_DELIMITER)
end_delimiter = kwargs.pop('end_delimiter', handler.END_DELIMITER)
metadata = handler.export(post.metadata, **kwargs)
return POST_TEMPLATE.format(
metadata=metadata, content=post.content,
start_delimiter=start_delimiter,
end_delimiter=end_delimiter).strip() | Serialize a :py:class:`post <frontmatter.Post>` to a string and return text.
This always returns unicode text, which can then be encoded.
Passing ``handler`` will change how metadata is turned into text. A handler
passed as an argument will override ``post.handler``, with
:py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>` used as
a default.
::
>>> print(frontmatter.dumps(post))
---
excerpt: tl;dr
layout: post
title: Hello, world!
---
Well, hello there, world. | Below is the instruction that describes the task:
### Input:
Serialize a :py:class:`post <frontmatter.Post>` to a string and return text.
This always returns unicode text, which can then be encoded.
Passing ``handler`` will change how metadata is turned into text. A handler
passed as an argument will override ``post.handler``, with
:py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>` used as
a default.
::
>>> print(frontmatter.dumps(post))
---
excerpt: tl;dr
layout: post
title: Hello, world!
---
Well, hello there, world.
### Response:
def dumps(post, handler=None, **kwargs):
"""
Serialize a :py:class:`post <frontmatter.Post>` to a string and return text.
This always returns unicode text, which can then be encoded.
Passing ``handler`` will change how metadata is turned into text. A handler
passed as an argument will override ``post.handler``, with
:py:class:`YAMLHandler <frontmatter.default_handlers.YAMLHandler>` used as
a default.
::
>>> print(frontmatter.dumps(post))
---
excerpt: tl;dr
layout: post
title: Hello, world!
---
Well, hello there, world.
"""
if handler is None:
handler = getattr(post, 'handler', None) or YAMLHandler()
start_delimiter = kwargs.pop('start_delimiter', handler.START_DELIMITER)
end_delimiter = kwargs.pop('end_delimiter', handler.END_DELIMITER)
metadata = handler.export(post.metadata, **kwargs)
return POST_TEMPLATE.format(
metadata=metadata, content=post.content,
start_delimiter=start_delimiter,
end_delimiter=end_delimiter).strip() |
def get_document_length(self, document):
"""
Returns the number of terms found within the specified document.
"""
if document in self._documents:
return self._documents[document]
else:
raise IndexError(DOCUMENT_DOES_NOT_EXIST) | Returns the number of terms found within the specified document. | Below is the the instruction that describes the task:
### Input:
Returns the number of terms found within the specified document.
### Response:
def get_document_length(self, document):
"""
Returns the number of terms found within the specified document.
"""
if document in self._documents:
return self._documents[document]
else:
raise IndexError(DOCUMENT_DOES_NOT_EXIST) |
def hicexplorer_basic_statistics(self):
"""Create the general statistics for HiCExplorer."""
data = {}
for file in self.mod_data:
max_distance_key = 'Max rest. site distance'
total_pairs = self.mod_data[file]['Pairs considered'][0]
try:
self.mod_data[file][max_distance_key][0]
except KeyError:
max_distance_key = 'Max library insert size'
data_ = {
'Pairs considered': self.mod_data[file]['Pairs considered'][0],
'Pairs used': self.mod_data[file]['Pairs used'][0] / total_pairs,
'Mapped': self.mod_data[file]['One mate unmapped'][0] / total_pairs,
'Min rest. site distance': self.mod_data[file]['Min rest. site distance'][0],
max_distance_key: self.mod_data[file][max_distance_key][0],
}
data[self.mod_data[file]['File'][0]] = data_
headers = OrderedDict()
headers['Pairs considered'] = {
'title': '{} Pairs'.format(config.read_count_prefix),
'description': 'Total number of read pairs ({})'.format(config.read_count_desc),
'shared_key': 'read_count'
}
headers['Pairs used'] = {
'title': '% Used pairs',
'max': 100,
'min': 0,
'modify': lambda x: x * 100,
'suffix': '%'
}
headers['Mapped'] = {
'title': '% Mapped',
'max': 100,
'min': 0,
'modify': lambda x: (1 - x) * 100,
'scale': 'RdYlGn',
'suffix': '%'
}
headers['Min rest. site distance'] = {
'title': 'Min RE dist',
'description': 'Minimum restriction site distance (bp)',
'format': '{:.0f}',
'suffix': ' bp'
}
headers[max_distance_key] = {
'title': 'Max RE dist',
'description': max_distance_key + ' (bp)',
'format': '{:.0f}',
'suffix': ' bp'
}
self.general_stats_addcols(data, headers) | Create the general statistics for HiCExplorer. | Below is the instruction that describes the task:
### Input:
Create the general statistics for HiCExplorer.
### Response:
def hicexplorer_basic_statistics(self):
"""Create the general statistics for HiCExplorer."""
data = {}
for file in self.mod_data:
max_distance_key = 'Max rest. site distance'
total_pairs = self.mod_data[file]['Pairs considered'][0]
try:
self.mod_data[file][max_distance_key][0]
except KeyError:
max_distance_key = 'Max library insert size'
data_ = {
'Pairs considered': self.mod_data[file]['Pairs considered'][0],
'Pairs used': self.mod_data[file]['Pairs used'][0] / total_pairs,
'Mapped': self.mod_data[file]['One mate unmapped'][0] / total_pairs,
'Min rest. site distance': self.mod_data[file]['Min rest. site distance'][0],
max_distance_key: self.mod_data[file][max_distance_key][0],
}
data[self.mod_data[file]['File'][0]] = data_
headers = OrderedDict()
headers['Pairs considered'] = {
'title': '{} Pairs'.format(config.read_count_prefix),
'description': 'Total number of read pairs ({})'.format(config.read_count_desc),
'shared_key': 'read_count'
}
headers['Pairs used'] = {
'title': '% Used pairs',
'max': 100,
'min': 0,
'modify': lambda x: x * 100,
'suffix': '%'
}
headers['Mapped'] = {
'title': '% Mapped',
'max': 100,
'min': 0,
'modify': lambda x: (1 - x) * 100,
'scale': 'RdYlGn',
'suffix': '%'
}
headers['Min rest. site distance'] = {
'title': 'Min RE dist',
'description': 'Minimum restriction site distance (bp)',
'format': '{:.0f}',
'suffix': ' bp'
}
headers[max_distance_key] = {
'title': 'Max RE dist',
'description': max_distance_key + ' (bp)',
'format': '{:.0f}',
'suffix': ' bp'
}
self.general_stats_addcols(data, headers) |
def pad_length(x, d):
"""Return a vector appropriate to a dimensional space, using an input vector
as a prompt depending on its type:
- If the input is a vector, return that vector.
- If the input is a scalar, return a vector filled with that value.
Useful when a function expects an array specifying values along each axis,
but wants to also accept a scalar value in case the length is the same in
all directions.
Parameters
----------
x: float or array-like
The input parameter that may need padding.
d: int
The dimensional space to make `x` appropriate for.
Returns
-------
x_pad: array-like, shape (d,)
The padded parameter.
"""
try:
x[0]
except TypeError:
x = d * [x]
return np.array(x) | Return a vector appropriate to a dimensional space, using an input vector
as a prompt depending on its type:
- If the input is a vector, return that vector.
- If the input is a scalar, return a vector filled with that value.
Useful when a function expects an array specifying values along each axis,
but wants to also accept a scalar value in case the length is the same in
all directions.
Parameters
----------
x: float or array-like
The input parameter that may need padding.
d: int
The dimensional space to make `x` appropriate for.
Returns
-------
x_pad: array-like, shape (d,)
The padded parameter. | Below is the instruction that describes the task:
### Input:
Return a vector appropriate to a dimensional space, using an input vector
as a prompt depending on its type:
- If the input is a vector, return that vector.
- If the input is a scalar, return a vector filled with that value.
Useful when a function expects an array specifying values along each axis,
but wants to also accept a scalar value in case the length is the same in
all directions.
Parameters
----------
x: float or array-like
The input parameter that may need padding.
d: int
The dimensional space to make `x` appropriate for.
Returns
-------
x_pad: array-like, shape (d,)
The padded parameter.
### Response:
def pad_length(x, d):
"""Return a vector appropriate to a dimensional space, using an input vector
as a prompt depending on its type:
- If the input is a vector, return that vector.
- If the input is a scalar, return a vector filled with that value.
Useful when a function expects an array specifying values along each axis,
but wants to also accept a scalar value in case the length is the same in
all directions.
Parameters
----------
x: float or array-like
The input parameter that may need padding.
d: int
The dimensional space to make `x` appropriate for.
Returns
-------
x_pad: array-like, shape (d,)
The padded parameter.
"""
try:
x[0]
except TypeError:
x = d * [x]
return np.array(x) |
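Two illustrative calls for the helper above (it relies on `numpy` being imported as `np`, as the function body assumes):

```python
import numpy as np   # the helper above relies on np being in scope

print(pad_length(2.5, 3))         # [2.5 2.5 2.5]  -> scalar repeated along 3 axes
print(pad_length([1.0, 2.0], 2))  # [1. 2.]        -> vector passed through unchanged
```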
def doLog(self, level, where, format, *args, **kwargs):
"""
Log a message at the given level, with the possibility of going
higher up in the stack.
@param level: log level
@type level: int
@param where: how many frames to go back from the last log frame;
or a function (to log for a future call)
@type where: int (negative), or function
@param kwargs: a dict of pre-calculated values from a previous
doLog call
@return: a dict of calculated variables, to be reused in a
call to doLog that should show the same location
@rtype: dict
"""
if _canShortcutLogging(self.logCategory, level):
return {}
args = self.logFunction(*args)
return doLog(level, self.logObjectName(), self.logCategory,
format, args, where=where, **kwargs) | Log a message at the given level, with the possibility of going
higher up in the stack.
@param level: log level
@type level: int
@param where: how many frames to go back from the last log frame;
or a function (to log for a future call)
@type where: int (negative), or function
@param kwargs: a dict of pre-calculated values from a previous
doLog call
@return: a dict of calculated variables, to be reused in a
call to doLog that should show the same location
@rtype: dict | Below is the instruction that describes the task:
### Input:
Log a message at the given level, with the possibility of going
higher up in the stack.
@param level: log level
@type level: int
@param where: how many frames to go back from the last log frame;
or a function (to log for a future call)
@type where: int (negative), or function
@param kwargs: a dict of pre-calculated values from a previous
doLog call
@return: a dict of calculated variables, to be reused in a
call to doLog that should show the same location
@rtype: dict
### Response:
def doLog(self, level, where, format, *args, **kwargs):
"""
Log a message at the given level, with the possibility of going
higher up in the stack.
@param level: log level
@type level: int
@param where: how many frames to go back from the last log frame;
or a function (to log for a future call)
@type where: int (negative), or function
@param kwargs: a dict of pre-calculated values from a previous
doLog call
@return: a dict of calculated variables, to be reused in a
call to doLog that should show the same location
@rtype: dict
"""
if _canShortcutLogging(self.logCategory, level):
return {}
args = self.logFunction(*args)
return doLog(level, self.logObjectName(), self.logCategory,
format, args, where=where, **kwargs) |
def get_all(cls, include_disabled=True):
"""Returns a list of all accounts of a given type
Args:
include_disabled (`bool`): Include disabled accounts. Default: `True`
Returns:
list of account objects
"""
if cls == BaseAccount:
raise InquisitorError('get_all on BaseAccount is not supported')
account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id
qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)
if not include_disabled:
qry = qry.filter(Account.enabled == 1)
accounts = qry.find(Account.account_type_id == account_type_id)
return {res.account_id: cls(res) for res in accounts} | Returns a list of all accounts of a given type
Args:
include_disabled (`bool`): Include disabled accounts. Default: `True`
Returns:
list of account objects | Below is the instruction that describes the task:
### Input:
Returns a list of all accounts of a given type
Args:
include_disabled (`bool`): Include disabled accounts. Default: `True`
Returns:
list of account objects
### Response:
def get_all(cls, include_disabled=True):
"""Returns a list of all accounts of a given type
Args:
include_disabled (`bool`): Include disabled accounts. Default: `True`
Returns:
list of account objects
"""
if cls == BaseAccount:
raise InquisitorError('get_all on BaseAccount is not supported')
account_type_id = db.AccountType.find_one(account_type=cls.account_type).account_type_id
qry = db.Account.order_by(desc(Account.enabled), Account.account_type_id, Account.account_name)
if not include_disabled:
qry = qry.filter(Account.enabled == 1)
accounts = qry.find(Account.account_type_id == account_type_id)
return {res.account_id: cls(res) for res in accounts} |
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, asynchronous=None,
_return_http_data_only=None, collection_formats=None, _preload_content=True,
_request_timeout=None):
"""
Makes the HTTP request (synchronous) and return the deserialized data.
To make an async request, set the asynchronous parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param asynchronous bool: execute request asynchronously
:param _return_http_data_only: response data without head status code and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return:
If asynchronous parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter asynchronous is False or missing,
then the method will return the response directly.
"""
if not asynchronous:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats, _preload_content, _request_timeout)
else:
thread = self.pool.apply_async(self.__call_api, (resource_path, method,
path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
_return_http_data_only,
collection_formats, _preload_content, _request_timeout))
return thread | Makes the HTTP request (synchronous) and return the deserialized data.
To make an async request, set the asynchronous parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param asynchronous bool: execute request asynchronously
:param _return_http_data_only: response data without head status code and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return:
If asynchronous parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter asynchronous is False or missing,
then the method will return the response directly. | Below is the instruction that describes the task:
### Input:
Makes the HTTP request (synchronous) and return the deserialized data.
To make an async request, set the asynchronous parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param asynchronous bool: execute request asynchronously
:param _return_http_data_only: response data without head status code and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return:
If asynchronous parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter asynchronous is False or missing,
then the method will return the response directly.
### Response:
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, asynchronous=None,
_return_http_data_only=None, collection_formats=None, _preload_content=True,
_request_timeout=None):
"""
Makes the HTTP request (synchronous) and return the deserialized data.
To make an async request, set the asynchronous parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param asynchronous bool: execute request asynchronously
:param _return_http_data_only: response data without head status code and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request
timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return:
If asynchronous parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter asynchronous is False or missing,
then the method will return the response directly.
"""
if not asynchronous:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats, _preload_content, _request_timeout)
else:
thread = self.pool.apply_async(self.__call_api, (resource_path, method,
path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
_return_http_data_only,
collection_formats, _preload_content, _request_timeout))
return thread |
def _empathy_status(status, message):
""" Updates status and message for Empathy IM application.
`status`
Status type.
`message`
Status message.
"""
ACCT_IFACE = 'org.freedesktop.Telepathy.Account'
DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
ACCT_MAN_IFACE = 'org.freedesktop.Telepathy.AccountManager'
ACCT_MAN_PATH = '/org/freedesktop/Telepathy/AccountManager'
SP_IFACE = ('org.freedesktop.Telepathy.'
'Connection.Interface.SimplePresence')
# fetch main account manager interface
am_iface = _dbus_get_interface(ACCT_MAN_IFACE, ACCT_MAN_PATH,
DBUS_PROP_IFACE)
if am_iface:
account_paths = am_iface.Get(ACCT_MAN_IFACE, 'ValidAccounts')
for account_path in account_paths:
try:
# fetch account interface
account = _dbus_get_object(ACCT_MAN_IFACE, account_path)
# skip disconnected, disabled, etc.
if account.Get(ACCT_IFACE, 'ConnectionStatus') != 0:
continue
# fetch simple presence interface for account connection
conn_path = account.Get(ACCT_IFACE, 'Connection')
conn_iface = conn_path.replace("/", ".")[1:]
sp_iface = _dbus_get_interface(conn_iface, conn_path,
SP_IFACE)
except dbus.exceptions.DBusException:
continue
# set status and message
for code in EMPATHY_CODE_MAP[status]:
try:
sp_iface.SetPresence(code, message)
except dbus.exceptions.DBusException:
pass
else:
break | Updates status and message for Empathy IM application.
`status`
Status type.
`message`
Status message. | Below is the instruction that describes the task:
### Input:
Updates status and message for Empathy IM application.
`status`
Status type.
`message`
Status message.
### Response:
def _empathy_status(status, message):
""" Updates status and message for Empathy IM application.
`status`
Status type.
`message`
Status message.
"""
ACCT_IFACE = 'org.freedesktop.Telepathy.Account'
DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
ACCT_MAN_IFACE = 'org.freedesktop.Telepathy.AccountManager'
ACCT_MAN_PATH = '/org/freedesktop/Telepathy/AccountManager'
SP_IFACE = ('org.freedesktop.Telepathy.'
'Connection.Interface.SimplePresence')
# fetch main account manager interface
am_iface = _dbus_get_interface(ACCT_MAN_IFACE, ACCT_MAN_PATH,
DBUS_PROP_IFACE)
if am_iface:
account_paths = am_iface.Get(ACCT_MAN_IFACE, 'ValidAccounts')
for account_path in account_paths:
try:
# fetch account interface
account = _dbus_get_object(ACCT_MAN_IFACE, account_path)
# skip disconnected, disabled, etc.
if account.Get(ACCT_IFACE, 'ConnectionStatus') != 0:
continue
# fetch simple presence interface for account connection
conn_path = account.Get(ACCT_IFACE, 'Connection')
conn_iface = conn_path.replace("/", ".")[1:]
sp_iface = _dbus_get_interface(conn_iface, conn_path,
SP_IFACE)
except dbus.exceptions.DBusException:
continue
# set status and message
for code in EMPATHY_CODE_MAP[status]:
try:
sp_iface.SetPresence(code, message)
except dbus.exceptions.DBusException:
pass
else:
break |
def get_api_date(self):
'''
Figure out the date to use for API requests. Assumes yesterday's date
if between midnight and 10am Eastern time. Override this function in a
subclass to change how the API date is calculated.
'''
# NOTE: If you are writing your own function to get the date, make sure
# to include the first if block below to allow for the ``date``
# parameter to hard-code a date.
api_date = None
if self.date is not None and not isinstance(self.date, datetime):
try:
api_date = datetime.strptime(self.date, '%Y-%m-%d')
except (TypeError, ValueError):
self.logger.warning('Invalid date \'%s\'', self.date)
if api_date is None:
utc_time = pytz.utc.localize(datetime.utcnow())
eastern = pytz.timezone('US/Eastern')
api_date = eastern.normalize(utc_time.astimezone(eastern))
if api_date.hour < 10:
# The scores on NHL.com change at 10am Eastern, if it's before
# that time of day then we will use yesterday's date.
api_date -= timedelta(days=1)
self.date = api_date | Figure out the date to use for API requests. Assumes yesterday's date
if between midnight and 10am Eastern time. Override this function in a
subclass to change how the API date is calculated. | Below is the instruction that describes the task:
### Input:
Figure out the date to use for API requests. Assumes yesterday's date
if between midnight and 10am Eastern time. Override this function in a
subclass to change how the API date is calculated.
### Response:
def get_api_date(self):
'''
Figure out the date to use for API requests. Assumes yesterday's date
if between midnight and 10am Eastern time. Override this function in a
subclass to change how the API date is calculated.
'''
# NOTE: If you are writing your own function to get the date, make sure
# to include the first if block below to allow for the ``date``
# parameter to hard-code a date.
api_date = None
if self.date is not None and not isinstance(self.date, datetime):
try:
api_date = datetime.strptime(self.date, '%Y-%m-%d')
except (TypeError, ValueError):
self.logger.warning('Invalid date \'%s\'', self.date)
if api_date is None:
utc_time = pytz.utc.localize(datetime.utcnow())
eastern = pytz.timezone('US/Eastern')
api_date = eastern.normalize(utc_time.astimezone(eastern))
if api_date.hour < 10:
# The scores on NHL.com change at 10am Eastern, if it's before
# that time of day then we will use yesterday's date.
api_date -= timedelta(days=1)
self.date = api_date |
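A standalone sketch of the 10am-Eastern cutoff used above; the dates are made up, and the snippet mirrors the method's logic rather than calling the class itself:

```python
import pytz
from datetime import datetime, timedelta

utc_time = pytz.utc.localize(datetime(2023, 1, 15, 13, 30))  # 08:30 Eastern (EST)
eastern = pytz.timezone('US/Eastern')
api_date = eastern.normalize(utc_time.astimezone(eastern))
if api_date.hour < 10:
    api_date -= timedelta(days=1)       # before 10am Eastern: fall back a day
print(api_date.date())                  # 2023-01-14
```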
def _item_list(profile=None):
'''
Template for writing list functions
Return a list of available items (glance items-list)
CLI Example:
.. code-block:: bash
salt '*' glance.item_list
'''
g_client = _auth(profile)
ret = []
for item in g_client.items.list():
ret.append(item.__dict__)
#ret[item.name] = {
# 'name': item.name,
# }
return ret | Template for writing list functions
Return a list of available items (glance items-list)
CLI Example:
.. code-block:: bash
salt '*' glance.item_list | Below is the instruction that describes the task:
### Input:
Template for writing list functions
Return a list of available items (glance items-list)
CLI Example:
.. code-block:: bash
salt '*' glance.item_list
### Response:
def _item_list(profile=None):
'''
Template for writing list functions
Return a list of available items (glance items-list)
CLI Example:
.. code-block:: bash
salt '*' glance.item_list
'''
g_client = _auth(profile)
ret = []
for item in g_client.items.list():
ret.append(item.__dict__)
#ret[item.name] = {
# 'name': item.name,
# }
return ret |
def installed(name, default=False, user=None, opts=None, env=None):
'''
Verify that the specified ruby is installed with RVM. RVM is
installed when necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rvm as.
env: None
A list of environment variables to set (ie, RUBY_CONFIGURE_OPTS)
opts: None
A list of option flags to pass to RVM (ie -C, --patch)
.. versionadded:: 0.17.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if __opts__['test']:
ret['comment'] = 'Ruby {0} is set to be installed'.format(name)
return ret
ret = _check_rvm(ret, user)
if ret['result'] is False:
if not __salt__['rvm.install'](runas=user):
ret['comment'] = 'RVM failed to install.'
return ret
else:
return _check_and_install_ruby(ret, name, default, user=user, opts=opts, env=env)
else:
return _check_and_install_ruby(ret, name, default, user=user, opts=opts, env=env) | Verify that the specified ruby is installed with RVM. RVM is
installed when necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rvm as.
env: None
A list of environment variables to set (ie, RUBY_CONFIGURE_OPTS)
opts: None
A list of option flags to pass to RVM (ie -C, --patch)
.. versionadded:: 0.17.0 | Below is the instruction that describes the task:
### Input:
Verify that the specified ruby is installed with RVM. RVM is
installed when necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rvm as.
env: None
A list of environment variables to set (ie, RUBY_CONFIGURE_OPTS)
opts: None
A list of option flags to pass to RVM (ie -C, --patch)
.. versionadded:: 0.17.0
### Response:
def installed(name, default=False, user=None, opts=None, env=None):
'''
Verify that the specified ruby is installed with RVM. RVM is
installed when necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rvm as.
env: None
A list of environment variables to set (ie, RUBY_CONFIGURE_OPTS)
opts: None
A list of option flags to pass to RVM (ie -C, --patch)
.. versionadded:: 0.17.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if __opts__['test']:
ret['comment'] = 'Ruby {0} is set to be installed'.format(name)
return ret
ret = _check_rvm(ret, user)
if ret['result'] is False:
if not __salt__['rvm.install'](runas=user):
ret['comment'] = 'RVM failed to install.'
return ret
else:
return _check_and_install_ruby(ret, name, default, user=user, opts=opts, env=env)
else:
return _check_and_install_ruby(ret, name, default, user=user, opts=opts, env=env) |
def deleteSettings(self, groupName=None):
""" Deletes registry items from the persistent store.
"""
groupName = groupName if groupName else self.settingsGroupName
settings = QtCore.QSettings()
logger.info("Deleting {} from: {}".format(groupName, settings.fileName()))
removeSettingsGroup(groupName) | Deletes registry items from the persistent store. | Below is the the instruction that describes the task:
### Input:
Deletes registry items from the persistent store.
### Response:
def deleteSettings(self, groupName=None):
""" Deletes registry items from the persistent store.
"""
groupName = groupName if groupName else self.settingsGroupName
settings = QtCore.QSettings()
logger.info("Deleting {} from: {}".format(groupName, settings.fileName()))
removeSettingsGroup(groupName) |
def assoc(_d, key, value):
"""Associate a key with a value in a dictionary
:param _d: a dictionary
:param key: a key in the dictionary
:param value: a value for the key
:returns: a new dictionary
>>> data = {}
>>> new_data = assoc(data, 'name', 'Holy Grail')
>>> new_data
{'name': 'Holy Grail'}
>>> data
{}
.. note:: the original dictionary is not modified
"""
d = deepcopy(_d)
d[key] = value
return d | Associate a key with a value in a dictionary
:param _d: a dictionary
:param key: a key in the dictionary
:param value: a value for the key
:returns: a new dictionary
>>> data = {}
>>> new_data = assoc(data, 'name', 'Holy Grail')
>>> new_data
{'name': 'Holy Grail'}
>>> data
{}
.. note:: the original dictionary is not modified | Below is the instruction that describes the task:
### Input:
Associate a key with a value in a dictionary
:param _d: a dictionary
:param key: a key in the dictionary
:param value: a value for the key
:returns: a new dictionary
>>> data = {}
>>> new_data = assoc(data, 'name', 'Holy Grail')
>>> new_data
{'name': 'Holy Grail'}
>>> data
{}
.. note:: the original dictionary is not modified
### Response:
def assoc(_d, key, value):
"""Associate a key with a value in a dictionary
:param _d: a dictionary
:param key: a key in the dictionary
:param value: a value for the key
:returns: a new dictionary
>>> data = {}
>>> new_data = assoc(data, 'name', 'Holy Grail')
>>> new_data
{'name': 'Holy Grail'}
>>> data
{}
.. note:: the original dictionary is not modified
"""
d = deepcopy(_d)
d[key] = value
return d |
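A minimal usage sketch, assuming the assoc helper above is in scope; the dictionaries are illustrative only. Because assoc deep-copies its input, even nested values in the original are shielded when the returned copy is mutated:

# Assumes the assoc helper defined above is in scope.
original = {'knights': {'leader': 'Arthur'}}
updated = assoc(original, 'quest', 'Holy Grail')
updated['knights']['leader'] = 'Robin'   # mutate the returned copy's nested dict
print(original['knights']['leader'])     # -> 'Arthur'  (deepcopy protects nested data)
print('quest' in original)               # -> False     (original left unmodified)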
def get_supply_voltage(self, dest_addr_long=None):
"""
Fetches the value of %V and returns it as volts.
"""
value = self._get_parameter(b"%V", dest_addr_long=dest_addr_long)
return (hex_to_int(value) * (1200/1024.0)) / 1000 | Fetches the value of %V and returns it as volts. | Below is the the instruction that describes the task:
### Input:
Fetches the value of %V and returns it as volts.
### Response:
def get_supply_voltage(self, dest_addr_long=None):
"""
Fetches the value of %V and returns it as volts.
"""
value = self._get_parameter(b"%V", dest_addr_long=dest_addr_long)
return (hex_to_int(value) * (1200/1024.0)) / 1000 |
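A worked example of the %V conversion above, with an assumed raw ADC count: each count is worth 1200/1024 millivolts, and the final division by 1000 yields volts.

# Assumed raw reading purely for illustration (0x0B00 == 2816 counts).
raw_counts = 2816
volts = (raw_counts * (1200 / 1024.0)) / 1000
print(volts)  # -> 3.3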
def cell_arrays(self):
""" Returns the all cell arrays """
cdata = self.GetCellData()
narr = cdata.GetNumberOfArrays()
# Update data if necessary
if hasattr(self, '_cell_arrays'):
keys = list(self._cell_arrays.keys())
if narr == len(keys):
if keys:
if self._cell_arrays[keys[0]].size == self.n_cells:
return self._cell_arrays
else:
return self._cell_arrays
# dictionary with callbacks
self._cell_arrays = CellScalarsDict(self)
for i in range(narr):
name = cdata.GetArrayName(i)
self._cell_arrays[name] = self._cell_scalar(name)
self._cell_arrays.enable_callback()
return self._cell_arrays | Returns the all cell arrays | Below is the the instruction that describes the task:
### Input:
Returns the all cell arrays
### Response:
def cell_arrays(self):
""" Returns the all cell arrays """
cdata = self.GetCellData()
narr = cdata.GetNumberOfArrays()
# Update data if necessary
if hasattr(self, '_cell_arrays'):
keys = list(self._cell_arrays.keys())
if narr == len(keys):
if keys:
if self._cell_arrays[keys[0]].size == self.n_cells:
return self._cell_arrays
else:
return self._cell_arrays
# dictionary with callbacks
self._cell_arrays = CellScalarsDict(self)
for i in range(narr):
name = cdata.GetArrayName(i)
self._cell_arrays[name] = self._cell_scalar(name)
self._cell_arrays.enable_callback()
return self._cell_arrays |
def init(image, root=None):
'''
Mount the named image via qemu-nbd and return the mounted roots
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.init /srv/image.qcow2
'''
nbd = connect(image)
if not nbd:
return ''
return mount(nbd, root) | Mount the named image via qemu-nbd and return the mounted roots
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.init /srv/image.qcow2 | Below is the the instruction that describes the task:
### Input:
Mount the named image via qemu-nbd and return the mounted roots
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.init /srv/image.qcow2
### Response:
def init(image, root=None):
'''
Mount the named image via qemu-nbd and return the mounted roots
CLI Example:
.. code-block:: bash
salt '*' qemu_nbd.init /srv/image.qcow2
'''
nbd = connect(image)
if not nbd:
return ''
return mount(nbd, root) |
def can_create_catalog_with_record_types(self, catalog_record_types):
"""Tests if this user can create a single ``Catalog`` using the desired record types.
While ``CatalogingManager.getCatalogRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Catalog``.
Providing an empty array tests if a ``Catalog`` can be created
with no records.
arg: catalog_record_types (osid.type.Type[]): array of
catalog record types
return: (boolean) - ``true`` if ``Catalog`` creation using the
specified record ``Types`` is supported, ``false``
otherwise
raise: NullArgument - ``catalog_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_create_bin_with_record_types
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=catalog_record_types)
return True | Tests if this user can create a single ``Catalog`` using the desired record types.
While ``CatalogingManager.getCatalogRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Catalog``.
Providing an empty array tests if a ``Catalog`` can be created
with no records.
arg: catalog_record_types (osid.type.Type[]): array of
catalog record types
return: (boolean) - ``true`` if ``Catalog`` creation using the
specified record ``Types`` is supported, ``false``
otherwise
raise: NullArgument - ``catalog_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Tests if this user can create a single ``Catalog`` using the desired record types.
While ``CatalogingManager.getCatalogRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Catalog``.
Providing an empty array tests if a ``Catalog`` can be created
with no records.
arg: catalog_record_types (osid.type.Type[]): array of
catalog record types
return: (boolean) - ``true`` if ``Catalog`` creation using the
specified record ``Types`` is supported, ``false``
otherwise
raise: NullArgument - ``catalog_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def can_create_catalog_with_record_types(self, catalog_record_types):
"""Tests if this user can create a single ``Catalog`` using the desired record types.
While ``CatalogingManager.getCatalogRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Catalog``.
Providing an empty array tests if a ``Catalog`` can be created
with no records.
arg: catalog_record_types (osid.type.Type[]): array of
catalog record types
return: (boolean) - ``true`` if ``Catalog`` creation using the
specified record ``Types`` is supported, ``false``
otherwise
raise: NullArgument - ``catalog_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.can_create_bin_with_record_types
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
if self._catalog_session is not None:
return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=catalog_record_types)
return True |
def is_point_layer(layer):
"""Check if a QGIS layer is vector and its geometries are points.
:param layer: A vector layer.
:type layer: QgsVectorLayer, QgsMapLayer
:returns: True if the layer contains points, otherwise False.
:rtype: bool
"""
try:
return (layer.type() == QgsMapLayer.VectorLayer) and (
layer.geometryType() == QgsWkbTypes.PointGeometry)
except AttributeError:
return False | Check if a QGIS layer is vector and its geometries are points.
:param layer: A vector layer.
:type layer: QgsVectorLayer, QgsMapLayer
:returns: True if the layer contains points, otherwise False.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Check if a QGIS layer is vector and its geometries are points.
:param layer: A vector layer.
:type layer: QgsVectorLayer, QgsMapLayer
:returns: True if the layer contains points, otherwise False.
:rtype: bool
### Response:
def is_point_layer(layer):
"""Check if a QGIS layer is vector and its geometries are points.
:param layer: A vector layer.
:type layer: QgsVectorLayer, QgsMapLayer
:returns: True if the layer contains points, otherwise False.
:rtype: bool
"""
try:
return (layer.type() == QgsMapLayer.VectorLayer) and (
layer.geometryType() == QgsWkbTypes.PointGeometry)
except AttributeError:
return False |
def normalize_ext_rename(filepath):
""" normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file
>>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt')
>>> pth == normalize_ext_rename(pth)
True
"""
logger.debug('normalize_ext.filepath=' + str(filepath))
new_file_path = normalize_ext(filepath)
logger.debug('download_unzip.new_filepaths=' + str(new_file_path))
# FIXME: fails when name is a url filename
filepath = rename_file(filepath, new_file_path)
logger.debug('download_unzip.filepath=' + str(filepath))
return filepath | normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file
>>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt')
>>> pth == normalize_ext_rename(pth)
True | Below is the the instruction that describes the task:
### Input:
normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file
>>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt')
>>> pth == normalize_ext_rename(pth)
True
### Response:
def normalize_ext_rename(filepath):
""" normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file
>>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt')
>>> pth == normalize_ext_rename(pth)
True
"""
logger.debug('normalize_ext.filepath=' + str(filepath))
new_file_path = normalize_ext(filepath)
logger.debug('download_unzip.new_filepaths=' + str(new_file_path))
# FIXME: fails when name is a url filename
filepath = rename_file(filepath, new_file_path)
logger.debug('download_unzip.filepath=' + str(filepath))
return filepath |
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True | Connect to Mill. | Below is the the instruction that describes the task:
### Input:
Connect to Mill.
### Response:
async def connect(self, retry=2):
"""Connect to Mill."""
# pylint: disable=too-many-return-statements
url = API_ENDPOINT_1 + 'login'
headers = {
"Content-Type": "application/x-zc-object",
"Connection": "Keep-Alive",
"X-Zc-Major-Domain": "seanywell",
"X-Zc-Msg-Name": "millService",
"X-Zc-Sub-Domain": "milltype",
"X-Zc-Seq-Id": "1",
"X-Zc-Version": "1",
}
payload = {"account": self._username,
"password": self._password}
try:
with async_timeout.timeout(self._timeout):
resp = await self.websession.post(url,
data=json.dumps(payload),
headers=headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
if retry < 1:
_LOGGER.error("Error connecting to Mill", exc_info=True)
return False
return await self.connect(retry - 1)
result = await resp.text()
if '"errorCode":3504' in result:
_LOGGER.error('Wrong password')
return False
if '"errorCode":3501' in result:
_LOGGER.error('Account does not exist')
return False
data = json.loads(result)
token = data.get('token')
if token is None:
_LOGGER.error('No token')
return False
user_id = data.get('userId')
if user_id is None:
_LOGGER.error('No user id')
return False
self._token = token
self._user_id = user_id
return True |
def tabulate(lol, headers, eol='\n'):
"""Use the pypi tabulate package instead!"""
yield '| %s |' % ' | '.join(headers) + eol
yield '| %s:|' % ':| '.join(['-' * len(w) for w in headers]) + eol
for row in lol:
yield '| %s |' % ' | '.join(str(c) for c in row) + eol | Use the pypi tabulate package instead! | Below is the the instruction that describes the task:
### Input:
Use the pypi tabulate package instead!
### Response:
def tabulate(lol, headers, eol='\n'):
"""Use the pypi tabulate package instead!"""
yield '| %s |' % ' | '.join(headers) + eol
yield '| %s:|' % ':| '.join(['-' * len(w) for w in headers]) + eol
for row in lol:
yield '| %s |' % ' | '.join(str(c) for c in row) + eol |
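A minimal usage sketch, assuming the tabulate generator above is in scope; joining the yielded strings produces a Markdown-style table:

# Assumes the tabulate generator defined above is in scope.
headers = ['film', 'year']
rows = [['Holy Grail', 1975], ['Life of Brian', 1979]]
print(''.join(tabulate(rows, headers)))
# | film | year |
# | ----:| ----:|
# | Holy Grail | 1975 |
# | Life of Brian | 1979 |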
def get_range(self, ignore_blank_lines=True):
"""
Gets the fold region range (start and end line).
        .. note:: Start line does not encompass the trigger line.
:param ignore_blank_lines: True to ignore blank lines at the end of the
scope (the method will rewind to find that last meaningful block
that is part of the fold scope).
:returns: tuple(int, int)
"""
ref_lvl = self.trigger_level
first_line = self._trigger.blockNumber()
block = self._trigger.next()
last_line = block.blockNumber()
lvl = self.scope_level
if ref_lvl == lvl: # for zone set programmatically such as imports
# in pyqode.python
ref_lvl -= 1
while (block.isValid() and
TextBlockHelper.get_fold_lvl(block) > ref_lvl):
last_line = block.blockNumber()
block = block.next()
if ignore_blank_lines and last_line:
block = block.document().findBlockByNumber(last_line)
while block.blockNumber() and block.text().strip() == '':
block = block.previous()
last_line = block.blockNumber()
return first_line, last_line | Gets the fold region range (start and end line).
        .. note:: Start line does not encompass the trigger line.
:param ignore_blank_lines: True to ignore blank lines at the end of the
scope (the method will rewind to find that last meaningful block
that is part of the fold scope).
:returns: tuple(int, int) | Below is the the instruction that describes the task:
### Input:
Gets the fold region range (start and end line).
        .. note:: Start line does not encompass the trigger line.

:param ignore_blank_lines: True to ignore blank lines at the end of the
scope (the method will rewind to find that last meaningful block
that is part of the fold scope).
:returns: tuple(int, int)
### Response:
def get_range(self, ignore_blank_lines=True):
"""
Gets the fold region range (start and end line).
        .. note:: Start line does not encompass the trigger line.
:param ignore_blank_lines: True to ignore blank lines at the end of the
scope (the method will rewind to find that last meaningful block
that is part of the fold scope).
:returns: tuple(int, int)
"""
ref_lvl = self.trigger_level
first_line = self._trigger.blockNumber()
block = self._trigger.next()
last_line = block.blockNumber()
lvl = self.scope_level
if ref_lvl == lvl: # for zone set programmatically such as imports
# in pyqode.python
ref_lvl -= 1
while (block.isValid() and
TextBlockHelper.get_fold_lvl(block) > ref_lvl):
last_line = block.blockNumber()
block = block.next()
if ignore_blank_lines and last_line:
block = block.document().findBlockByNumber(last_line)
while block.blockNumber() and block.text().strip() == '':
block = block.previous()
last_line = block.blockNumber()
return first_line, last_line |
def get_body(self, msg):
""" Extracts and returns the decoded body from an EmailMessage object"""
body = ""
charset = ""
if msg.is_multipart():
for part in msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get('Content-Disposition'))
# skip any text/plain (txt) attachments
if ctype == 'text/plain' and 'attachment' not in cdispo:
body = part.get_payload(decode=True) # decode
charset = part.get_content_charset()
break
# not multipart - i.e. plain text, no attachments, keeping fingers crossed
else:
body = msg.get_payload(decode=True)
charset = msg.get_content_charset()
return body.decode(charset) | Extracts and returns the decoded body from an EmailMessage object | Below is the the instruction that describes the task:
### Input:
Extracts and returns the decoded body from an EmailMessage object
### Response:
def get_body(self, msg):
""" Extracts and returns the decoded body from an EmailMessage object"""
body = ""
charset = ""
if msg.is_multipart():
for part in msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get('Content-Disposition'))
# skip any text/plain (txt) attachments
if ctype == 'text/plain' and 'attachment' not in cdispo:
body = part.get_payload(decode=True) # decode
charset = part.get_content_charset()
break
# not multipart - i.e. plain text, no attachments, keeping fingers crossed
else:
body = msg.get_payload(decode=True)
charset = msg.get_content_charset()
return body.decode(charset) |
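A usage sketch built with the standard library's email package; mail_client is a hypothetical object exposing the get_body method above:

import email
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

# Build a small multipart message to parse; mail_client is hypothetical.
raw = MIMEMultipart()
raw.attach(MIMEText('Hello, world', 'plain', 'utf-8'))
msg = email.message_from_bytes(raw.as_bytes())
print(mail_client.get_body(msg))  # -> 'Hello, world'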
def shutdown(self):
"""
Disconnect all cached connections.
@returns: a deferred that fires once all connection are disconnected.
@rtype: L{Deferred}
"""
self._shuttingDown = {key: Deferred()
for key in self.cachedConnections.keys()}
return DeferredList(
[maybeDeferred(p.transport.loseConnection)
for p in self.cachedConnections.values()]
+ self._shuttingDown.values()) | Disconnect all cached connections.
@returns: a deferred that fires once all connection are disconnected.
@rtype: L{Deferred} | Below is the the instruction that describes the task:
### Input:
Disconnect all cached connections.
@returns: a deferred that fires once all connection are disconnected.
@rtype: L{Deferred}
### Response:
def shutdown(self):
"""
Disconnect all cached connections.
@returns: a deferred that fires once all connection are disconnected.
@rtype: L{Deferred}
"""
self._shuttingDown = {key: Deferred()
for key in self.cachedConnections.keys()}
return DeferredList(
[maybeDeferred(p.transport.loseConnection)
for p in self.cachedConnections.values()]
+ self._shuttingDown.values()) |
def select_larva(self):
"""Select all larva."""
action = sc_pb.Action()
action.action_ui.select_larva.SetInParent() # Adds the empty proto field.
return action | Select all larva. | Below is the the instruction that describes the task:
### Input:
Select all larva.
### Response:
def select_larva(self):
"""Select all larva."""
action = sc_pb.Action()
action.action_ui.select_larva.SetInParent() # Adds the empty proto field.
return action |
def backoff(
max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,
delay=constants.BACKOFF_DEFAULT_DELAY,
factor=constants.BACKOFF_DEFAULT_FACTOR,
exceptions=None):
"""Implements an exponential backoff decorator which will retry decorated
function upon given exceptions. This implementation is based on
`Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from
the *Python Decorator Library*.
:param int max_tries: Number of tries before give up. Defaults to
:const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`.
:param int delay: Delay between retries (in seconds). Defaults to
:const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`.
:param int factor: Multiply factor in which delay will be increased for the
next retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`.
:param exceptions: Tuple of exception types to catch that triggers retry.
Any exception not listed will break the decorator and retry routines
will not run.
:type exceptions: tuple[Exception]
"""
if max_tries <= 0:
raise ValueError('Max tries must be greater than 0; got {!r}'.format(max_tries))
if delay <= 0:
raise ValueError('Delay must be greater than 0; got {!r}'.format(delay))
if factor <= 1:
raise ValueError('Backoff factor must be greater than 1; got {!r}'.format(factor))
def outter(f):
def inner(*args, **kwargs):
m_max_tries, m_delay = max_tries, delay # make mutable
while m_max_tries > 0:
try:
retval = f(*args, **kwargs)
except exceptions:
logger.exception('backoff retry for: %r (max_tries=%r, delay=%r, '
'factor=%r, exceptions=%r)', f, max_tries, delay, factor, exceptions)
m_max_tries -= 1 # consume an attempt
if m_max_tries <= 0:
raise # run out of tries
time.sleep(m_delay) # wait...
m_delay *= factor # make future wait longer
else:
# we're done without errors
return retval
return inner
return outter | Implements an exponential backoff decorator which will retry decorated
function upon given exceptions. This implementation is based on
`Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from
the *Python Decorator Library*.
:param int max_tries: Number of tries before give up. Defaults to
:const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`.
:param int delay: Delay between retries (in seconds). Defaults to
:const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`.
:param int factor: Multiply factor in which delay will be increased for the
next retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`.
:param exceptions: Tuple of exception types to catch that triggers retry.
Any exception not listed will break the decorator and retry routines
will not run.
:type exceptions: tuple[Exception] | Below is the the instruction that describes the task:
### Input:
Implements an exponential backoff decorator which will retry decorated
function upon given exceptions. This implementation is based on
`Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from
the *Python Decorator Library*.
:param int max_tries: Number of tries before give up. Defaults to
:const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`.
:param int delay: Delay between retries (in seconds). Defaults to
:const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`.
:param int factor: Multiply factor in which delay will be increased for the
next retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`.
:param exceptions: Tuple of exception types to catch that triggers retry.
Any exception not listed will break the decorator and retry routines
will not run.
:type exceptions: tuple[Exception]
### Response:
def backoff(
max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,
delay=constants.BACKOFF_DEFAULT_DELAY,
factor=constants.BACKOFF_DEFAULT_FACTOR,
exceptions=None):
"""Implements an exponential backoff decorator which will retry decorated
function upon given exceptions. This implementation is based on
`Retry <https://wiki.python.org/moin/PythonDecoratorLibrary#Retry>`_ from
the *Python Decorator Library*.
:param int max_tries: Number of tries before give up. Defaults to
:const:`~escpos.constants.BACKOFF_DEFAULT_MAXTRIES`.
:param int delay: Delay between retries (in seconds). Defaults to
:const:`~escpos.constants.BACKOFF_DEFAULT_DELAY`.
:param int factor: Multiply factor in which delay will be increased for the
next retry. Defaults to :const:`~escpos.constants.BACKOFF_DEFAULT_FACTOR`.
:param exceptions: Tuple of exception types to catch that triggers retry.
Any exception not listed will break the decorator and retry routines
will not run.
:type exceptions: tuple[Exception]
"""
if max_tries <= 0:
raise ValueError('Max tries must be greater than 0; got {!r}'.format(max_tries))
if delay <= 0:
raise ValueError('Delay must be greater than 0; got {!r}'.format(delay))
if factor <= 1:
raise ValueError('Backoff factor must be greater than 1; got {!r}'.format(factor))
def outter(f):
def inner(*args, **kwargs):
m_max_tries, m_delay = max_tries, delay # make mutable
while m_max_tries > 0:
try:
retval = f(*args, **kwargs)
except exceptions:
logger.exception('backoff retry for: %r (max_tries=%r, delay=%r, '
'factor=%r, exceptions=%r)', f, max_tries, delay, factor, exceptions)
m_max_tries -= 1 # consume an attempt
if m_max_tries <= 0:
raise # run out of tries
time.sleep(m_delay) # wait...
m_delay *= factor # make future wait longer
else:
# we're done without errors
return retval
return inner
return outter |
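A usage sketch, assuming the backoff decorator above (and the module-level logger and time it relies on) is importable; the flaky function and exception type are illustrative only:

import random

@backoff(max_tries=3, delay=1, factor=2, exceptions=(ConnectionError,))
def flaky_ping():
    # Simulated transient failure: fails roughly half the time.
    if random.random() < 0.5:
        raise ConnectionError('printer did not answer')
    return 'pong'

print(flaky_ping())  # retried up to 3 times, sleeping 1s then 2s between attempts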
def list(self, request, *args, **kwargs):
"""
To get a list of projects, run **GET** against */api/projects/* as authenticated user.
Here you can also check actual value for project quotas and project usage
Note that a user can only see connected projects:
- projects that the user owns as a customer
- projects where user has any role
Supported logic filters:
- ?can_manage - return a list of projects where current user is manager or a customer owner;
- ?can_admin - return a list of projects where current user is admin;
"""
return super(ProjectViewSet, self).list(request, *args, **kwargs) | To get a list of projects, run **GET** against */api/projects/* as authenticated user.
Here you can also check actual value for project quotas and project usage
Note that a user can only see connected projects:
- projects that the user owns as a customer
- projects where user has any role
Supported logic filters:
- ?can_manage - return a list of projects where current user is manager or a customer owner;
- ?can_admin - return a list of projects where current user is admin; | Below is the the instruction that describes the task:
### Input:
To get a list of projects, run **GET** against */api/projects/* as authenticated user.
Here you can also check actual value for project quotas and project usage
Note that a user can only see connected projects:
- projects that the user owns as a customer
- projects where user has any role
Supported logic filters:
- ?can_manage - return a list of projects where current user is manager or a customer owner;
- ?can_admin - return a list of projects where current user is admin;
### Response:
def list(self, request, *args, **kwargs):
"""
To get a list of projects, run **GET** against */api/projects/* as authenticated user.
Here you can also check actual value for project quotas and project usage
Note that a user can only see connected projects:
- projects that the user owns as a customer
- projects where user has any role
Supported logic filters:
- ?can_manage - return a list of projects where current user is manager or a customer owner;
- ?can_admin - return a list of projects where current user is admin;
"""
return super(ProjectViewSet, self).list(request, *args, **kwargs) |
def by_current_session(cls):
""" Returns current user session """
session = Session.current_session()
if session is None:
return None
return cls.where_id(session.user_id) | Returns current user session | Below is the the instruction that describes the task:
### Input:
Returns current user session
### Response:
def by_current_session(cls):
""" Returns current user session """
session = Session.current_session()
if session is None:
return None
return cls.where_id(session.user_id) |
def generate_raml_resource_types(module):
"""Compile a Pale module's resource documentation into RAML format.
RAML calls Pale resources 'resourceTypes'. This function converts Pale
resources into the RAML resourceType format.
The returned string should be appended to the RAML documentation string
before it is returned.
"""
from pale import extract_endpoints, extract_resources, is_pale_module
if not is_pale_module(module):
raise ValueError(
"""The passed in `module` (%s) is not a pale module. `paledoc`
only works on modules with a `_module_type` set to equal
`pale.ImplementationModule`.""")
module_resource_types = extract_resources(module)
raml_resource_types_unsorted = {}
for resource in module_resource_types:
resource_name = resource.__name__
raml_resource_types_unsorted[resource_name] = document_resource(resource)
if hasattr(resource, "_description"):
modified_description = clean_description(resource._description)
raml_resource_types_unsorted[resource_name]["description"] = modified_description
raml_resource_types_doc = OrderedDict(sorted(raml_resource_types_unsorted.items(), key=lambda t: t[0]))
output = StringIO()
indent = " " # 2
# blacklist of resources to ignore
ignored_resources = []
for resource_type in raml_resource_types_doc:
this_resource_type = raml_resource_types_doc[resource_type]
# add the name, ignoring the blacklist
if resource_type not in ignored_resources:
output.write(indent + resource_type + ":\n")
indent += " " # 4
# add the description
if this_resource_type.get("description") != None:
modified_description = clean_description(this_resource_type["description"])
output.write(indent + "description: " + modified_description + "\n")
# if there are no fields, set type directly:
if len(this_resource_type["fields"]) == 0:
this_type = "object"
if this_resource_type.get("_underlying_model") != None:
if this_resource_type["_underlying_model"] != object:
if hasattr(this_resource_type._underlying_model, "_value_type") \
and this_resource_type["_underlying_model"]._value_type not in ignored_resources:
this_type = this_resource_type["_underlying_model"]._value_type
output.write(indent + "type: " + this_type + "\n")
indent = indent[:-2] # 2
# if there are fields, use them as the properties, which implies type = object
else:
output.write(indent + "properties:\n")
indent += " " # 6
sorted_fields = OrderedDict(sorted(this_resource_type["fields"].items(), key=lambda t: t[0]))
# add the field name, a.k.a. RAML type name
for field in sorted_fields:
output.write(indent + field + ":\n")
# add the query parameters, a.k.a. RAML properties
properties = sorted_fields[field]
indent += " " # 8
# if this type is a list of other types, set it to type 'array' and note the item types
# if not, add the type from the Pale type
if "_underlying_model" in this_resource_type and this_resource_type["_underlying_model"] == object:
output.write(indent + "type: base\n")
elif "item_type" in properties:
output.write(indent + "type: array\n")
output.write(indent + "items: " + properties["item_type"] + "\n")
elif "type" in properties:
output.write(indent + "type: " + properties["type"].replace(" ", "_") + "\n")
# if extended description exists, strip newlines and whitespace and add as description
if properties.get("extended_description") != None:
modified_description = clean_description(properties["extended_description"])
output.write(indent + "description: " + modified_description + "\n")
# otherwise, use description
elif properties.get("description") != None:
modified_description = clean_description(properties["description"])
output.write(indent + "description: " + modified_description + "\n")
if properties.get("default_fields") != None:
output.write(indent + "properties:\n")
indent += " " # 10
for field_name in sorted(properties["default_fields"]):
# @TODO check if every default field is actually a string type
output.write(indent + field_name + ": string\n")
indent = indent[:-2] # 8
indent = indent[:-2] # 6
indent = indent[:-4] # 2
raml_resource_types = output.getvalue()
output.close()
return raml_resource_types | Compile a Pale module's resource documentation into RAML format.
RAML calls Pale resources 'resourceTypes'. This function converts Pale
resources into the RAML resourceType format.
The returned string should be appended to the RAML documentation string
before it is returned. | Below is the the instruction that describes the task:
### Input:
Compile a Pale module's resource documentation into RAML format.
RAML calls Pale resources 'resourceTypes'. This function converts Pale
resources into the RAML resourceType format.
The returned string should be appended to the RAML documentation string
before it is returned.
### Response:
def generate_raml_resource_types(module):
"""Compile a Pale module's resource documentation into RAML format.
RAML calls Pale resources 'resourceTypes'. This function converts Pale
resources into the RAML resourceType format.
The returned string should be appended to the RAML documentation string
before it is returned.
"""
from pale import extract_endpoints, extract_resources, is_pale_module
if not is_pale_module(module):
raise ValueError(
"""The passed in `module` (%s) is not a pale module. `paledoc`
only works on modules with a `_module_type` set to equal
`pale.ImplementationModule`.""")
module_resource_types = extract_resources(module)
raml_resource_types_unsorted = {}
for resource in module_resource_types:
resource_name = resource.__name__
raml_resource_types_unsorted[resource_name] = document_resource(resource)
if hasattr(resource, "_description"):
modified_description = clean_description(resource._description)
raml_resource_types_unsorted[resource_name]["description"] = modified_description
raml_resource_types_doc = OrderedDict(sorted(raml_resource_types_unsorted.items(), key=lambda t: t[0]))
output = StringIO()
indent = " " # 2
# blacklist of resources to ignore
ignored_resources = []
for resource_type in raml_resource_types_doc:
this_resource_type = raml_resource_types_doc[resource_type]
# add the name, ignoring the blacklist
if resource_type not in ignored_resources:
output.write(indent + resource_type + ":\n")
indent += " " # 4
# add the description
if this_resource_type.get("description") != None:
modified_description = clean_description(this_resource_type["description"])
output.write(indent + "description: " + modified_description + "\n")
# if there are no fields, set type directly:
if len(this_resource_type["fields"]) == 0:
this_type = "object"
if this_resource_type.get("_underlying_model") != None:
if this_resource_type["_underlying_model"] != object:
if hasattr(this_resource_type._underlying_model, "_value_type") \
and this_resource_type["_underlying_model"]._value_type not in ignored_resources:
this_type = this_resource_type["_underlying_model"]._value_type
output.write(indent + "type: " + this_type + "\n")
indent = indent[:-2] # 2
# if there are fields, use them as the properties, which implies type = object
else:
output.write(indent + "properties:\n")
indent += " " # 6
sorted_fields = OrderedDict(sorted(this_resource_type["fields"].items(), key=lambda t: t[0]))
# add the field name, a.k.a. RAML type name
for field in sorted_fields:
output.write(indent + field + ":\n")
# add the query parameters, a.k.a. RAML properties
properties = sorted_fields[field]
indent += " " # 8
# if this type is a list of other types, set it to type 'array' and note the item types
# if not, add the type from the Pale type
if "_underlying_model" in this_resource_type and this_resource_type["_underlying_model"] == object:
output.write(indent + "type: base\n")
elif "item_type" in properties:
output.write(indent + "type: array\n")
output.write(indent + "items: " + properties["item_type"] + "\n")
elif "type" in properties:
output.write(indent + "type: " + properties["type"].replace(" ", "_") + "\n")
# if extended description exists, strip newlines and whitespace and add as description
if properties.get("extended_description") != None:
modified_description = clean_description(properties["extended_description"])
output.write(indent + "description: " + modified_description + "\n")
# otherwise, use description
elif properties.get("description") != None:
modified_description = clean_description(properties["description"])
output.write(indent + "description: " + modified_description + "\n")
if properties.get("default_fields") != None:
output.write(indent + "properties:\n")
indent += " " # 10
for field_name in sorted(properties["default_fields"]):
# @TODO check if every default field is actually a string type
output.write(indent + field_name + ": string\n")
indent = indent[:-2] # 8
indent = indent[:-2] # 6
indent = indent[:-4] # 2
raml_resource_types = output.getvalue()
output.close()
return raml_resource_types |
def request_stop(self, message='', exit_code=0):
"""Stop the Arbiter daemon
:return: None
"""
# Only a master arbiter can stop the daemons
if self.is_master:
# Stop the daemons
self.daemons_stop(timeout=self.conf.daemons_stop_timeout)
# Request the daemon stop
super(Arbiter, self).request_stop(message, exit_code) | Stop the Arbiter daemon
:return: None | Below is the the instruction that describes the task:
### Input:
Stop the Arbiter daemon
:return: None
### Response:
def request_stop(self, message='', exit_code=0):
"""Stop the Arbiter daemon
:return: None
"""
# Only a master arbiter can stop the daemons
if self.is_master:
# Stop the daemons
self.daemons_stop(timeout=self.conf.daemons_stop_timeout)
# Request the daemon stop
super(Arbiter, self).request_stop(message, exit_code) |
def qemu_rebase(target, backing_file, safe=True, fail_on_error=True):
"""
changes the backing file of 'source' to 'backing_file'
If backing_file is specified as "" (the empty string),
then the image is rebased onto no backing file
(i.e. it will exist independently of any backing file).
(Taken from qemu-img man page)
Args:
target(str): Path to the source disk
backing_file(str): path to the base disk
safe(bool): if false, allow unsafe rebase
(check qemu-img docs for more info)
"""
cmd = ['qemu-img', 'rebase', '-b', backing_file, target]
if not safe:
cmd.insert(2, '-u')
return run_command_with_validation(
cmd,
fail_on_error,
msg='Failed to rebase {target} onto {backing_file}'.format(
target=target, backing_file=backing_file
)
) | changes the backing file of 'source' to 'backing_file'
If backing_file is specified as "" (the empty string),
then the image is rebased onto no backing file
(i.e. it will exist independently of any backing file).
(Taken from qemu-img man page)
Args:
target(str): Path to the source disk
backing_file(str): path to the base disk
safe(bool): if false, allow unsafe rebase
(check qemu-img docs for more info) | Below is the the instruction that describes the task:
### Input:
changes the backing file of 'source' to 'backing_file'
If backing_file is specified as "" (the empty string),
then the image is rebased onto no backing file
(i.e. it will exist independently of any backing file).
(Taken from qemu-img man page)
Args:
target(str): Path to the source disk
backing_file(str): path to the base disk
safe(bool): if false, allow unsafe rebase
(check qemu-img docs for more info)
### Response:
def qemu_rebase(target, backing_file, safe=True, fail_on_error=True):
"""
changes the backing file of 'source' to 'backing_file'
If backing_file is specified as "" (the empty string),
then the image is rebased onto no backing file
(i.e. it will exist independently of any backing file).
(Taken from qemu-img man page)
Args:
target(str): Path to the source disk
backing_file(str): path to the base disk
safe(bool): if false, allow unsafe rebase
(check qemu-img docs for more info)
"""
cmd = ['qemu-img', 'rebase', '-b', backing_file, target]
if not safe:
cmd.insert(2, '-u')
return run_command_with_validation(
cmd,
fail_on_error,
msg='Failed to rebase {target} onto {backing_file}'.format(
target=target, backing_file=backing_file
)
) |
def to_canstrat(self, key, log, lith_field, filename=None, as_text=False):
"""
Make a Canstrat DAT (aka ASCII) file.
TODO:
The data part should probably belong to striplog, and only the
header should be written by the well.
Args:
filename (str)
key (str)
log (str): the log name, should be 6 characters.
lith_field (str) the name of the lithology field in the striplog's
Primary component. Must match the Canstrat definitions.
filename (str)
as_text (bool): if you don't want to write a file.
"""
if (filename is None):
if (not as_text):
m = "You must provide a filename or set as_text to True."
raise WellError(m)
strip = self.data[key]
strip = strip.fill() # Default is to fill with 'null' intervals.
record = {1: [well_to_card_1(self)],
2: [well_to_card_2(self, key)],
8: [],
7: [interval_to_card_7(iv, lith_field) for iv in strip]
}
result = ''
for c in [1, 2, 8, 7]:
for d in record[c]:
result += write_row(d, card=c, log=log)
if as_text:
return result
else:
with open(filename, 'w') as f:
f.write(result)
return None | Make a Canstrat DAT (aka ASCII) file.
TODO:
The data part should probably belong to striplog, and only the
header should be written by the well.
Args:
filename (str)
key (str)
log (str): the log name, should be 6 characters.
lith_field (str) the name of the lithology field in the striplog's
Primary component. Must match the Canstrat definitions.
filename (str)
as_text (bool): if you don't want to write a file. | Below is the the instruction that describes the task:
### Input:
Make a Canstrat DAT (aka ASCII) file.
TODO:
The data part should probably belong to striplog, and only the
header should be written by the well.
Args:
filename (str)
key (str)
log (str): the log name, should be 6 characters.
lith_field (str) the name of the lithology field in the striplog's
Primary component. Must match the Canstrat definitions.
filename (str)
as_text (bool): if you don't want to write a file.
### Response:
def to_canstrat(self, key, log, lith_field, filename=None, as_text=False):
"""
Make a Canstrat DAT (aka ASCII) file.
TODO:
The data part should probably belong to striplog, and only the
header should be written by the well.
Args:
filename (str)
key (str)
log (str): the log name, should be 6 characters.
lith_field (str) the name of the lithology field in the striplog's
Primary component. Must match the Canstrat definitions.
filename (str)
as_text (bool): if you don't want to write a file.
"""
if (filename is None):
if (not as_text):
m = "You must provide a filename or set as_text to True."
raise WellError(m)
strip = self.data[key]
strip = strip.fill() # Default is to fill with 'null' intervals.
record = {1: [well_to_card_1(self)],
2: [well_to_card_2(self, key)],
8: [],
7: [interval_to_card_7(iv, lith_field) for iv in strip]
}
result = ''
for c in [1, 2, 8, 7]:
for d in record[c]:
result += write_row(d, card=c, log=log)
if as_text:
return result
else:
with open(filename, 'w') as f:
f.write(result)
return None |
def tau_reduction(ms, rate, n_per_decade):
"""Reduce the number of taus to maximum of n per decade (Helper function)
takes in a tau list and reduces the number of taus to a maximum amount per
decade. This is only useful if more than the "decade" and "octave" but
less than the "all" taus are wanted. E.g. to show certain features of
the data one might want 100 points per decade.
NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and
will also remove some points in this range, which is usually fine.
Typical use would be something like:
(data,m,taus)=tau_generator(data,rate,taus="all")
(m,taus)=tau_reduction(m,rate,n_per_decade)
Parameters
----------
ms: array of integers
List of m values (assumed to be an "all" list) to remove points from.
rate: float
Sample rate of data in Hz. Time interval between measurements
is 1/rate seconds. Used to convert to taus.
n_per_decade: int
Number of ms/taus to keep per decade.
Returns
-------
m: np.array
Reduced list of m values
taus: np.array
Reduced list of tau values
"""
ms = np.int64(ms)
keep = np.bool8(np.rint(n_per_decade*np.log10(ms[1:])) -
np.rint(n_per_decade*np.log10(ms[:-1])))
# Adjust ms size to fit above-defined mask
ms = ms[:-1]
assert len(ms) == len(keep)
ms = ms[keep]
taus = ms/float(rate)
return ms, taus | Reduce the number of taus to maximum of n per decade (Helper function)
takes in a tau list and reduces the number of taus to a maximum amount per
decade. This is only useful if more than the "decade" and "octave" but
less than the "all" taus are wanted. E.g. to show certain features of
the data one might want 100 points per decade.
NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and
will also remove some points in this range, which is usually fine.
Typical use would be something like:
(data,m,taus)=tau_generator(data,rate,taus="all")
(m,taus)=tau_reduction(m,rate,n_per_decade)
Parameters
----------
ms: array of integers
List of m values (assumed to be an "all" list) to remove points from.
rate: float
Sample rate of data in Hz. Time interval between measurements
is 1/rate seconds. Used to convert to taus.
n_per_decade: int
Number of ms/taus to keep per decade.
Returns
-------
m: np.array
Reduced list of m values
taus: np.array
Reduced list of tau values | Below is the the instruction that describes the task:
### Input:
Reduce the number of taus to maximum of n per decade (Helper function)
takes in a tau list and reduces the number of taus to a maximum amount per
decade. This is only useful if more than the "decade" and "octave" but
less than the "all" taus are wanted. E.g. to show certain features of
the data one might want 100 points per decade.
NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and
will also remove some points in this range, which is usually fine.
Typical use would be something like:
(data,m,taus)=tau_generator(data,rate,taus="all")
(m,taus)=tau_reduction(m,rate,n_per_decade)
Parameters
----------
ms: array of integers
List of m values (assumed to be an "all" list) to remove points from.
rate: float
Sample rate of data in Hz. Time interval between measurements
is 1/rate seconds. Used to convert to taus.
n_per_decade: int
Number of ms/taus to keep per decade.
Returns
-------
m: np.array
Reduced list of m values
taus: np.array
Reduced list of tau values
### Response:
def tau_reduction(ms, rate, n_per_decade):
"""Reduce the number of taus to maximum of n per decade (Helper function)
takes in a tau list and reduces the number of taus to a maximum amount per
decade. This is only useful if more than the "decade" and "octave" but
less than the "all" taus are wanted. E.g. to show certain features of
the data one might want 100 points per decade.
NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and
will also remove some points in this range, which is usually fine.
Typical use would be something like:
(data,m,taus)=tau_generator(data,rate,taus="all")
(m,taus)=tau_reduction(m,rate,n_per_decade)
Parameters
----------
ms: array of integers
List of m values (assumed to be an "all" list) to remove points from.
rate: float
Sample rate of data in Hz. Time interval between measurements
is 1/rate seconds. Used to convert to taus.
n_per_decade: int
Number of ms/taus to keep per decade.
Returns
-------
m: np.array
Reduced list of m values
taus: np.array
Reduced list of tau values
"""
ms = np.int64(ms)
keep = np.bool8(np.rint(n_per_decade*np.log10(ms[1:])) -
np.rint(n_per_decade*np.log10(ms[:-1])))
# Adjust ms size to fit above-defined mask
ms = ms[:-1]
assert len(ms) == len(keep)
ms = ms[keep]
taus = ms/float(rate)
return ms, taus |
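A small numeric sketch, assuming tau_reduction above is in scope; it thins an "all"-style m list down to roughly n_per_decade points per decade:

import numpy as np

ms_all = np.arange(1, 1001)  # every averaging factor from 1 to 1000
ms, taus = tau_reduction(ms_all, rate=1.0, n_per_decade=10)
print(len(ms_all), '->', len(ms))  # roughly 10 surviving points per decade
print(taus[:3])                    # taus are simply ms / rate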
def clean_build(self):
"""Delete the build directory and all ingested files """
import shutil
if self.build_fs.exists:
try:
shutil.rmtree(self.build_fs.getsyspath('/'))
except NoSysPathError:
pass | Delete the build directory and all ingested files | Below is the the instruction that describes the task:
### Input:
Delete the build directory and all ingested files
### Response:
def clean_build(self):
"""Delete the build directory and all ingested files """
import shutil
if self.build_fs.exists:
try:
shutil.rmtree(self.build_fs.getsyspath('/'))
except NoSysPathError:
pass |
async def main():
"""Run."""
async with ClientSession() as websession:
try:
# Create a client:
client = Client('<EMAIL>', '<PASSWORD>', websession)
await client.async_init()
print('Showing active Tiles:')
print(await client.tiles.all())
print('Showing all Tiles:')
print(await client.tiles.all(show_inactive=True))
except TileError as err:
print(err) | Run. | Below is the the instruction that describes the task:
### Input:
Run.
### Response:
async def main():
"""Run."""
async with ClientSession() as websession:
try:
# Create a client:
client = Client('<EMAIL>', '<PASSWORD>', websession)
await client.async_init()
print('Showing active Tiles:')
print(await client.tiles.all())
print('Showing all Tiles:')
print(await client.tiles.all(show_inactive=True))
except TileError as err:
print(err) |
def plan_results(self, project_key, plan_key, expand=None, favourite=False, clover_enabled=False, label=None,
issue_key=None, start_index=0, max_results=25):
"""
Get Plan results
:param project_key:
:param plan_key:
:param expand:
:param favourite:
:param clover_enabled:
:param label:
:param issue_key:
:param start_index:
:param max_results:
:return:
"""
return self.results(project_key, plan_key, expand=expand, favourite=favourite, clover_enabled=clover_enabled,
label=label, issue_key=issue_key, start_index=start_index, max_results=max_results) | Get Plan results
:param project_key:
:param plan_key:
:param expand:
:param favourite:
:param clover_enabled:
:param label:
:param issue_key:
:param start_index:
:param max_results:
:return: | Below is the the instruction that describes the task:
### Input:
Get Plan results
:param project_key:
:param plan_key:
:param expand:
:param favourite:
:param clover_enabled:
:param label:
:param issue_key:
:param start_index:
:param max_results:
:return:
### Response:
def plan_results(self, project_key, plan_key, expand=None, favourite=False, clover_enabled=False, label=None,
issue_key=None, start_index=0, max_results=25):
"""
Get Plan results
:param project_key:
:param plan_key:
:param expand:
:param favourite:
:param clover_enabled:
:param label:
:param issue_key:
:param start_index:
:param max_results:
:return:
"""
return self.results(project_key, plan_key, expand=expand, favourite=favourite, clover_enabled=clover_enabled,
label=label, issue_key=issue_key, start_index=start_index, max_results=max_results) |
def port_tag_details(cls, tags):
# type: (Sequence[str]) -> Union[Tuple[bool, Port, str], None]
"""Search tags for port info, returning it
Args:
tags: A list of tags to check
Returns:
None or (is_source, port, connected_value|disconnected_value)
where port is one of the Enum entries of Port
"""
for tag in tags:
match = port_tag_re.match(tag)
if match:
source_sink, port, extra = match.groups()
return source_sink == "source", cls(port), extra | Search tags for port info, returning it
Args:
tags: A list of tags to check
Returns:
None or (is_source, port, connected_value|disconnected_value)
where port is one of the Enum entries of Port | Below is the the instruction that describes the task:
### Input:
Search tags for port info, returning it
Args:
tags: A list of tags to check
Returns:
None or (is_source, port, connected_value|disconnected_value)
where port is one of the Enum entries of Port
### Response:
def port_tag_details(cls, tags):
# type: (Sequence[str]) -> Union[Tuple[bool, Port, str], None]
"""Search tags for port info, returning it
Args:
tags: A list of tags to check
Returns:
None or (is_source, port, connected_value|disconnected_value)
where port is one of the Enum entries of Port
"""
for tag in tags:
match = port_tag_re.match(tag)
if match:
source_sink, port, extra = match.groups()
return source_sink == "source", cls(port), extra |
def _configure(cls, **defaults):
"""Updates class-level defaults for :class:`_Options` container."""
for attr in defaults:
setattr(cls, attr, defaults[attr]) | Updates class-level defaults for :class:`_Options` container. | Below is the the instruction that describes the task:
### Input:
Updates class-level defaults for :class:`_Options` container.
### Response:
def _configure(cls, **defaults):
"""Updates class-level defaults for :class:`_Options` container."""
for attr in defaults:
setattr(cls, attr, defaults[attr]) |
def moduli_to_velocities(rho, K_s, G):
"""
convert moduli to velocities
mainly to support Burnman operations
:param rho: density in kg/m^3
    :param K_s: adiabatic bulk modulus in Pa
    :param G: shear modulus in Pa
:return: bulk sound speed and shear velocity
"""
return np.sqrt(K_s / rho), np.sqrt(G / rho) | convert moduli to velocities
mainly to support Burnman operations
:param rho: density in kg/m^3
    :param K_s: adiabatic bulk modulus in Pa
    :param G: shear modulus in Pa
:return: bulk sound speed and shear velocity | Below is the the instruction that describes the task:
### Input:
convert moduli to velocities
mainly to support Burnman operations
:param rho: density in kg/m^3
    :param K_s: adiabatic bulk modulus in Pa
    :param G: shear modulus in Pa
:return: bulk sound speed and shear velocity
### Response:
def moduli_to_velocities(rho, K_s, G):
"""
convert moduli to velocities
mainly to support Burnman operations
:param rho: density in kg/m^3
    :param K_s: adiabatic bulk modulus in Pa
    :param G: shear modulus in Pa
:return: bulk sound speed and shear velocity
"""
return np.sqrt(K_s / rho), np.sqrt(G / rho) |
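A worked example with assumed mantle-like values, assuming the helper above (and its numpy import) is in scope:

rho = 3300.0   # kg/m^3, assumed density
K_s = 130e9    # Pa, assumed adiabatic bulk modulus
G = 80e9       # Pa, assumed shear modulus
v_phi, v_s = moduli_to_velocities(rho, K_s, G)
print('{:.0f} {:.0f}'.format(v_phi, v_s))  # -> 6276 4924 (m/s)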
def segment_content_handler():
"""Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables
"""
from ligo.lw.lsctables import (SegmentTable, SegmentDefTable,
SegmentSumTable)
from ligo.lw.ligolw import PartialLIGOLWContentHandler
def _filter(name, attrs):
return reduce(
operator.or_,
[table_.CheckProperties(name, attrs) for
table_ in (SegmentTable, SegmentDefTable, SegmentSumTable)])
return build_content_handler(PartialLIGOLWContentHandler, _filter) | Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables | Below is the the instruction that describes the task:
### Input:
Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables
### Response:
def segment_content_handler():
"""Build a `~xml.sax.handlers.ContentHandler` to read segment XML tables
"""
from ligo.lw.lsctables import (SegmentTable, SegmentDefTable,
SegmentSumTable)
from ligo.lw.ligolw import PartialLIGOLWContentHandler
def _filter(name, attrs):
return reduce(
operator.or_,
[table_.CheckProperties(name, attrs) for
table_ in (SegmentTable, SegmentDefTable, SegmentSumTable)])
return build_content_handler(PartialLIGOLWContentHandler, _filter) |
def group_join(
self,
inner_enumerable,
outer_key=lambda x: x,
inner_key=lambda x: x,
result_func=lambda x: x
):
"""
Return enumerable of group join between two enumerables
:param inner_enumerable: inner enumerable to join to self
:param outer_key: key selector of outer enumerable as lambda expression
:param inner_key: key selector of inner enumerable as lambda expression
:param result_func: lambda expression to transform the result of group
join
:return: new Enumerable object
"""
if not isinstance(inner_enumerable, Enumerable):
raise TypeError(
u"inner enumerable parameter must be an instance of Enumerable"
)
return Enumerable(
itertools.product(
self,
inner_enumerable.default_if_empty()
)
).group_by(
key_names=['id'],
key=lambda x: outer_key(x[0]),
result_func=lambda g: (
g.first()[0],
g.where(
lambda x: inner_key(x[1]) == g.key.id).select(
lambda x: x[1]
)
)
).select(result_func) | Return enumerable of group join between two enumerables
:param inner_enumerable: inner enumerable to join to self
:param outer_key: key selector of outer enumerable as lambda expression
:param inner_key: key selector of inner enumerable as lambda expression
:param result_func: lambda expression to transform the result of group
join
:return: new Enumerable object | Below is the the instruction that describes the task:
### Input:
Return enumerable of group join between two enumerables
:param inner_enumerable: inner enumerable to join to self
:param outer_key: key selector of outer enumerable as lambda expression
:param inner_key: key selector of inner enumerable as lambda expression
:param result_func: lambda expression to transform the result of group
join
:return: new Enumerable object
### Response:
def group_join(
self,
inner_enumerable,
outer_key=lambda x: x,
inner_key=lambda x: x,
result_func=lambda x: x
):
"""
Return enumerable of group join between two enumerables
:param inner_enumerable: inner enumerable to join to self
:param outer_key: key selector of outer enumerable as lambda expression
:param inner_key: key selector of inner enumerable as lambda expression
:param result_func: lambda expression to transform the result of group
join
:return: new Enumerable object
"""
if not isinstance(inner_enumerable, Enumerable):
raise TypeError(
u"inner enumerable parameter must be an instance of Enumerable"
)
return Enumerable(
itertools.product(
self,
inner_enumerable.default_if_empty()
)
).group_by(
key_names=['id'],
key=lambda x: outer_key(x[0]),
result_func=lambda g: (
g.first()[0],
g.where(
lambda x: inner_key(x[1]) == g.key.id).select(
lambda x: x[1]
)
)
).select(result_func) |
def _unicode(self):
'''This returns a printable representation of the screen as a unicode
string (which, under Python 3.x, is the same as 'str'). The end of each
screen line is terminated by a newline.'''
return u'\n'.join ([ u''.join(c) for c in self.w ]) | This returns a printable representation of the screen as a unicode
string (which, under Python 3.x, is the same as 'str'). The end of each
screen line is terminated by a newline. | Below is the the instruction that describes the task:
### Input:
This returns a printable representation of the screen as a unicode
string (which, under Python 3.x, is the same as 'str'). The end of each
screen line is terminated by a newline.
### Response:
def _unicode(self):
'''This returns a printable representation of the screen as a unicode
string (which, under Python 3.x, is the same as 'str'). The end of each
screen line is terminated by a newline.'''
return u'\n'.join ([ u''.join(c) for c in self.w ]) |
def term_with_coeff(term, coeff):
"""
Change the coefficient of a PauliTerm.
:param PauliTerm term: A PauliTerm object
:param Number coeff: The coefficient to set on the PauliTerm
:returns: A new PauliTerm that duplicates term but sets coeff
:rtype: PauliTerm
"""
if not isinstance(coeff, Number):
raise ValueError("coeff must be a Number")
new_pauli = term.copy()
# We cast to a complex number to ensure that internally the coefficients remain compatible.
new_pauli.coefficient = complex(coeff)
return new_pauli | Change the coefficient of a PauliTerm.
:param PauliTerm term: A PauliTerm object
:param Number coeff: The coefficient to set on the PauliTerm
:returns: A new PauliTerm that duplicates term but sets coeff
:rtype: PauliTerm | Below is the instruction that describes the task:
### Input:
Change the coefficient of a PauliTerm.
:param PauliTerm term: A PauliTerm object
:param Number coeff: The coefficient to set on the PauliTerm
:returns: A new PauliTerm that duplicates term but sets coeff
:rtype: PauliTerm
### Response:
def term_with_coeff(term, coeff):
"""
Change the coefficient of a PauliTerm.
:param PauliTerm term: A PauliTerm object
:param Number coeff: The coefficient to set on the PauliTerm
:returns: A new PauliTerm that duplicates term but sets coeff
:rtype: PauliTerm
"""
if not isinstance(coeff, Number):
raise ValueError("coeff must be a Number")
new_pauli = term.copy()
# We cast to a complex number to ensure that internally the coefficients remain compatible.
new_pauli.coefficient = complex(coeff)
return new_pauli |
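As a quick illustration, a minimal usage sketch of the helper above might look like this (assuming pyquil's PauliTerm API; the qubit index and coefficient are arbitrary):

from pyquil.paulis import PauliTerm, term_with_coeff

z0 = PauliTerm("Z", 0)                 # Pauli Z on qubit 0, coefficient 1.0
half_z0 = term_with_coeff(z0, 0.5)     # same operator, coefficient forced to 0.5
print(half_z0.coefficient)             # (0.5+0j) -- stored internally as a complex number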
def _assert_explicit_vr(dicom_input):
"""
Assert that explicit vr is used
"""
if settings.validate_multiframe_implicit:
header = dicom_input[0]
if header.file_meta[0x0002, 0x0010].value == '1.2.840.10008.1.2':
raise ConversionError('IMPLICIT_VR_ENHANCED_DICOM') | Assert that explicit vr is used | Below is the instruction that describes the task:
### Input:
Assert that explicit vr is used
### Response:
def _assert_explicit_vr(dicom_input):
"""
Assert that explicit vr is used
"""
if settings.validate_multiframe_implicit:
header = dicom_input[0]
if header.file_meta[0x0002, 0x0010].value == '1.2.840.10008.1.2':
raise ConversionError('IMPLICIT_VR_ENHANCED_DICOM') |
def is_equivalent(self, other):
"""
Return ``True`` if the IPA character is equivalent to the ``other`` object.
The ``other`` object can be:
1. a Unicode string, containing the representation of the IPA character,
2. a Unicode string, containing a space-separated list of descriptors,
3. a list of Unicode strings, containing descriptors, and
4. another IPAChar.
:rtype: bool
"""
if (self.unicode_repr is not None) and (is_unicode_string(other)) and (self.unicode_repr == other):
return True
if isinstance(other, IPAChar):
return self.canonical_representation == other.canonical_representation
try:
return self.canonical_representation == IPAChar(name=None, descriptors=other).canonical_representation
except:
return False | Return ``True`` if the IPA character is equivalent to the ``other`` object.
The ``other`` object can be:
1. a Unicode string, containing the representation of the IPA character,
2. a Unicode string, containing a space-separated list of descriptors,
3. a list of Unicode strings, containing descriptors, and
4. another IPAChar.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Return ``True`` if the IPA character is equivalent to the ``other`` object.
The ``other`` object can be:
1. a Unicode string, containing the representation of the IPA character,
2. a Unicode string, containing a space-separated list of descriptors,
3. a list of Unicode strings, containing descriptors, and
4. another IPAChar.
:rtype: bool
### Response:
def is_equivalent(self, other):
"""
Return ``True`` if the IPA character is equivalent to the ``other`` object.
The ``other`` object can be:
1. a Unicode string, containing the representation of the IPA character,
2. a Unicode string, containing a space-separated list of descriptors,
3. a list of Unicode strings, containing descriptors, and
4. another IPAChar.
:rtype: bool
"""
if (self.unicode_repr is not None) and (is_unicode_string(other)) and (self.unicode_repr == other):
return True
if isinstance(other, IPAChar):
return self.canonical_representation == other.canonical_representation
try:
return self.canonical_representation == IPAChar(name=None, descriptors=other).canonical_representation
except:
return False |
def _find_spelling_errors_in_chunks(chunks,
contents,
valid_words_dictionary=None,
technical_words_dictionary=None,
user_dictionary_words=None):
"""For each chunk and a set of valid and technical words, find errors."""
for chunk in chunks:
for error in spellcheck_region(chunk.data,
valid_words_dictionary,
technical_words_dictionary,
user_dictionary_words):
col_offset = _determine_character_offset(error.line_offset,
error.column_offset,
chunk.column)
msg = _SPELLCHECK_MESSAGES[error.error_type].format(error.word)
yield _populate_spelling_error(error.word,
error.suggestions,
contents,
error.line_offset +
chunk.line,
col_offset,
msg) | For each chunk and a set of valid and technical words, find errors. | Below is the instruction that describes the task:
### Input:
For each chunk and a set of valid and technical words, find errors.
### Response:
def _find_spelling_errors_in_chunks(chunks,
contents,
valid_words_dictionary=None,
technical_words_dictionary=None,
user_dictionary_words=None):
"""For each chunk and a set of valid and technical words, find errors."""
for chunk in chunks:
for error in spellcheck_region(chunk.data,
valid_words_dictionary,
technical_words_dictionary,
user_dictionary_words):
col_offset = _determine_character_offset(error.line_offset,
error.column_offset,
chunk.column)
msg = _SPELLCHECK_MESSAGES[error.error_type].format(error.word)
yield _populate_spelling_error(error.word,
error.suggestions,
contents,
error.line_offset +
chunk.line,
col_offset,
msg) |
def set_as_error(self, color=Qt.red):
"""
Highlights text as a syntax error.
:param color: Underline color
:type color: QtGui.QColor
"""
self.format.setUnderlineStyle(
QTextCharFormat.WaveUnderline)
self.format.setUnderlineColor(color) | Highlights text as a syntax error.
:param color: Underline color
:type color: QtGui.QColor | Below is the instruction that describes the task:
### Input:
Highlights text as a syntax error.
:param color: Underline color
:type color: QtGui.QColor
### Response:
def set_as_error(self, color=Qt.red):
"""
Highlights text as a syntax error.
:param color: Underline color
:type color: QtGui.QColor
"""
self.format.setUnderlineStyle(
QTextCharFormat.WaveUnderline)
self.format.setUnderlineColor(color) |
def choose_parent_view(self, request):
"""
Instantiates a class-based view to provide a view that allows a parent
page to be chosen for a new object, where the assigned model extends
Wagtail's Page model, and there is more than one potential parent for
new instances. The view class used can be overridden by changing the
'choose_parent_view_class' attribute.
"""
kwargs = {'model_admin': self}
view_class = self.choose_parent_view_class
return view_class.as_view(**kwargs)(request) | Instantiates a class-based view to provide a view that allows a parent
page to be chosen for a new object, where the assigned model extends
Wagtail's Page model, and there is more than one potential parent for
new instances. The view class used can be overridden by changing the
'choose_parent_view_class' attribute. | Below is the instruction that describes the task:
### Input:
Instantiates a class-based view to provide a view that allows a parent
page to be chosen for a new object, where the assigned model extends
Wagtail's Page model, and there is more than one potential parent for
new instances. The view class used can be overridden by changing the
'choose_parent_view_class' attribute.
### Response:
def choose_parent_view(self, request):
"""
Instantiates a class-based view to provide a view that allows a parent
page to be chosen for a new object, where the assigned model extends
Wagtail's Page model, and there is more than one potential parent for
new instances. The view class used can be overridden by changing the
'choose_parent_view_class' attribute.
"""
kwargs = {'model_admin': self}
view_class = self.choose_parent_view_class
return view_class.as_view(**kwargs)(request) |
def order_by(self, order_attribute):
''' Return the list of items in a certain order '''
to_return = []
for f in sorted(self.items, key=lambda i: getattr(i, order_attribute)):
to_return.append(f)
return to_return | Return the list of items in a certain order | Below is the instruction that describes the task:
### Input:
Return the list of items in a certain order
### Response:
def order_by(self, order_attribute):
''' Return the list of items in a certain order '''
to_return = []
for f in sorted(self.items, key=lambda i: getattr(i, order_attribute)):
to_return.append(f)
return to_return |
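The ordering logic is just attribute-based sorting; a self-contained sketch with a hypothetical Item type shows the equivalent plain-Python behaviour:

from collections import namedtuple

Item = namedtuple("Item", ["name", "size"])
items = [Item("b", 3), Item("a", 1), Item("c", 2)]
# Equivalent to order_by("size"): sort by the attribute named at call time.
ordered = sorted(items, key=lambda i: getattr(i, "size"))
print([i.name for i in ordered])  # ['a', 'c', 'b']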
def MakeSuiteFromHist(hist, name=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
name: string name
Returns:
Suite object
"""
if name is None:
name = hist.name
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, name) | Makes a normalized suite from a Hist object.
Args:
hist: Hist object
name: string name
Returns:
Suite object | Below is the instruction that describes the task:
### Input:
Makes a normalized suite from a Hist object.
Args:
hist: Hist object
name: string name
Returns:
Suite object
### Response:
def MakeSuiteFromHist(hist, name=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
name: string name
Returns:
Suite object
"""
if name is None:
name = hist.name
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, name) |
def clean(args):
"""
%prog clean
Removes all symlinks from current folder
"""
p = OptionParser(clean.__doc__)
opts, args = p.parse_args(args)
for link_name in os.listdir(os.getcwd()):
if not op.islink(link_name):
continue
logging.debug("remove symlink `{0}`".format(link_name))
os.unlink(link_name) | %prog clean
Removes all symlinks from current folder | Below is the instruction that describes the task:
### Input:
%prog clean
Removes all symlinks from current folder
### Response:
def clean(args):
"""
%prog clean
Removes all symlinks from current folder
"""
p = OptionParser(clean.__doc__)
opts, args = p.parse_args(args)
for link_name in os.listdir(os.getcwd()):
if not op.islink(link_name):
continue
logging.debug("remove symlink `{0}`".format(link_name))
os.unlink(link_name) |
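Stripped of the OptionParser plumbing, the core of this command is a couple of stdlib calls; a minimal standalone sketch:

import os
import os.path as op

def remove_symlinks(folder="."):
    # Delete every symbolic link found directly inside `folder`.
    for name in os.listdir(folder):
        path = op.join(folder, name)
        if op.islink(path):
            os.unlink(path)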
def prefix_items(self, prefix, strip_prefix=False):
"""Get all (key, value) pairs with keys that begin with ``prefix``.
:param prefix: Lexical prefix for keys to search.
:type prefix: bytes
:param strip_prefix: True to strip the prefix from yielded items.
:type strip_prefix: bool
:yields: All (key, value) pairs in the store where the keys
begin with the ``prefix``.
"""
items = self.items(key_from=prefix)
start = 0
if strip_prefix:
start = len(prefix)
for key, value in items:
if not key.startswith(prefix):
break
yield key[start:], value | Get all (key, value) pairs with keys that begin with ``prefix``.
:param prefix: Lexical prefix for keys to search.
:type prefix: bytes
:param strip_prefix: True to strip the prefix from yielded items.
:type strip_prefix: bool
:yields: All (key, value) pairs in the store where the keys
begin with the ``prefix``. | Below is the instruction that describes the task:
### Input:
Get all (key, value) pairs with keys that begin with ``prefix``.
:param prefix: Lexical prefix for keys to search.
:type prefix: bytes
:param strip_prefix: True to strip the prefix from yielded items.
:type strip_prefix: bool
:yields: All (key, value) pairs in the store where the keys
begin with the ``prefix``.
### Response:
def prefix_items(self, prefix, strip_prefix=False):
"""Get all (key, value) pairs with keys that begin with ``prefix``.
:param prefix: Lexical prefix for keys to search.
:type prefix: bytes
:param strip_prefix: True to strip the prefix from yielded items.
:type strip_prefix: bool
:yields: All (key, value) pairs in the store where the keys
begin with the ``prefix``.
"""
items = self.items(key_from=prefix)
start = 0
if strip_prefix:
start = len(prefix)
for key, value in items:
if not key.startswith(prefix):
break
yield key[start:], value |
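The method relies on the store iterating keys in sorted order, so the scan can stop at the first key that no longer matches the prefix; a rough standalone sketch of the same idea over a sorted list of (key, value) pairs:

def prefix_items_demo(pairs, prefix, strip_prefix=False):
    # `pairs` is assumed to be sorted by key, mimicking items(key_from=prefix).
    start = len(prefix) if strip_prefix else 0
    for key, value in pairs:
        if key < prefix:
            continue
        if not key.startswith(prefix):
            break
        yield key[start:], value

data = sorted({b"app": 1, b"apple": 2, b"banana": 3}.items())
print(list(prefix_items_demo(data, b"app", strip_prefix=True)))  # [(b'', 1), (b'le', 2)]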
def action(args):
"""Roll back commands on a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and n (giving the number of
operations to roll back).
"""
log.info('loading reference package')
r = refpkg.Refpkg(args.refpkg, create=False)
# First check if we can do n rollbacks
q = r.contents
for i in range(args.n):
if q['rollback'] is None:
log.error('Cannot rollback {} changes; '
'refpkg only records {} changes.'.format(args.n, i))
return 1
else:
q = q['rollback']
for i in range(args.n):
r.rollback()
return 0 | Roll back commands on a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and n (giving the number of
operations to roll back). | Below is the instruction that describes the task:
### Input:
Roll back commands on a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and n (giving the number of
operations to roll back).
### Response:
def action(args):
"""Roll back commands on a refpkg.
*args* should be an argparse object with fields refpkg (giving the
path to the refpkg to operate on) and n (giving the number of
operations to roll back).
"""
log.info('loading reference package')
r = refpkg.Refpkg(args.refpkg, create=False)
# First check if we can do n rollbacks
q = r.contents
for i in range(args.n):
if q['rollback'] is None:
log.error('Cannot rollback {} changes; '
'refpkg only records {} changes.'.format(args.n, i))
return 1
else:
q = q['rollback']
for i in range(args.n):
r.rollback()
return 0 |
def on_created(self, event, dry_run=False, remove_uploaded=True):
'Called when a file (or directory) is created. '
super(ArchiveEventHandler, self).on_created(event)
log.info("created: %s", event) | Called when a file (or directory) is created. | Below is the the instruction that describes the task:
### Input:
Called when a file (or directory) is created.
### Response:
def on_created(self, event, dry_run=False, remove_uploaded=True):
'Called when a file (or directory) is created. '
super(ArchiveEventHandler, self).on_created(event)
log.info("created: %s", event) |
def mouseUp(x=None, y=None, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs releasing a mouse button up (but not down beforehand).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse up happens. None by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): The y position on the screen where the
mouse up happens. None by default.
button (str, int, optional): The mouse button released. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s" % button)
_failSafeCheck()
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
x, y = platformModule._position()
if button == 1 or str(button).lower() == 'left':
platformModule._mouseUp(x, y, 'left')
elif button == 2 or str(button).lower() == 'middle':
platformModule._mouseUp(x, y, 'middle')
elif button == 3 or str(button).lower() == 'right':
platformModule._mouseUp(x, y, 'right')
_autoPause(pause, _pause) | Performs releasing a mouse button up (but not down beforehand).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse up happens. None by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): The y position on the screen where the
mouse up happens. None by default.
button (str, int, optional): The mouse button released. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3 | Below is the instruction that describes the task:
### Input:
Performs releasing a mouse button up (but not down beforehand).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse up happens. None by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): The y position on the screen where the
mouse up happens. None by default.
button (str, int, optional): The mouse button released. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
### Response:
def mouseUp(x=None, y=None, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs releasing a mouse button up (but not down beforehand).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
mouse up happens. None by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): The y position on the screen where the
mouse up happens. None by default.
button (str, int, optional): The mouse button released. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, or 3
"""
if button not in ('left', 'middle', 'right', 1, 2, 3):
raise ValueError("button argument must be one of ('left', 'middle', 'right', 1, 2, 3), not %s" % button)
_failSafeCheck()
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
x, y = platformModule._position()
if button == 1 or str(button).lower() == 'left':
platformModule._mouseUp(x, y, 'left')
elif button == 2 or str(button).lower() == 'middle':
platformModule._mouseUp(x, y, 'middle')
elif button == 3 or str(button).lower() == 'right':
platformModule._mouseUp(x, y, 'right')
_autoPause(pause, _pause) |
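In practice mouseUp is usually paired with mouseDown to build a custom press-drag-release; a short usage sketch assuming the public pyautogui API:

import pyautogui

pyautogui.mouseDown(button='left')        # press and hold at the current position
pyautogui.moveRel(100, 0, duration=0.5)   # drag 100 px to the right while held
pyautogui.mouseUp(button='left')          # release, completing the drag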
def value(self):
"""returns object as dictionary"""
return {
"type" : "simple",
"symbol" : self.symbol.value,
"label" : self.label,
"description" : self.description,
"rotationType": self.rotationType,
"rotationExpression": self.rotationExpression
} | returns object as dictionary | Below is the instruction that describes the task:
### Input:
returns object as dictionary
### Response:
def value(self):
"""returns object as dictionary"""
return {
"type" : "simple",
"symbol" : self.symbol.value,
"label" : self.label,
"description" : self.description,
"rotationType": self.rotationType,
"rotationExpression": self.rotationExpression
} |
async def get_box_ids_json(self) -> str:
"""
Return json object on lists of all unique box identifiers for credentials in wallet:
schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
"9cHbp54C8n...:2:business:2.0",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:0",
"9cHbp54C8n...:3:CL:37:0",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2",
...
]
}
:return: tuple of sets for schema ids, cred def ids, rev reg ids
"""
LOGGER.debug('HolderProver.get_box_ids_json >>>')
s_ids = set()
cd_ids = set()
rr_ids = set()
for cred in json.loads(await self.get_creds_display_coarse()):
s_ids.add(cred['schema_id'])
cd_ids.add(cred['cred_def_id'])
if cred['rev_reg_id']:
rr_ids.add(cred['rev_reg_id'])
rv = json.dumps({
'schema_id': list(s_ids),
'cred_def_id': list(cd_ids),
'rev_reg_id': list(rr_ids)
})
LOGGER.debug('HolderProver.get_box_ids_json <<< %s', rv)
return rv | Return json object on lists of all unique box identifiers for credentials in wallet:
schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
"9cHbp54C8n...:2:business:2.0",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:0",
"9cHbp54C8n...:3:CL:37:0",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2",
...
]
}
:return: tuple of sets for schema ids, cred def ids, rev reg ids | Below is the instruction that describes the task:
### Input:
Return json object on lists of all unique box identifiers for credentials in wallet:
schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
"9cHbp54C8n...:2:business:2.0",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:0",
"9cHbp54C8n...:3:CL:37:0",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2",
...
]
}
:return: tuple of sets for schema ids, cred def ids, rev reg ids
### Response:
async def get_box_ids_json(self) -> str:
"""
Return json object on lists of all unique box identifiers for credentials in wallet:
schema identifiers, credential definition identifiers, and revocation registry identifiers; e.g.,
::
{
"schema_id": [
"R17v42T4pk...:2:tombstone:1.2",
"9cHbp54C8n...:2:business:2.0",
...
],
"cred_def_id": [
"R17v42T4pk...:3:CL:19:0",
"9cHbp54C8n...:3:CL:37:0",
...
]
"rev_reg_id": [
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:0",
"R17v42T4pk...:4:R17v42T4pk...:3:CL:19:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:0",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:1",
"9cHbp54C8n...:4:9cHbp54C8n...:3:CL:37:0:CL_ACCUM:2",
...
]
}
:return: tuple of sets for schema ids, cred def ids, rev reg ids
"""
LOGGER.debug('HolderProver.get_box_ids_json >>>')
s_ids = set()
cd_ids = set()
rr_ids = set()
for cred in json.loads(await self.get_creds_display_coarse()):
s_ids.add(cred['schema_id'])
cd_ids.add(cred['cred_def_id'])
if cred['rev_reg_id']:
rr_ids.add(cred['rev_reg_id'])
rv = json.dumps({
'schema_id': list(s_ids),
'cred_def_id': list(cd_ids),
'rev_reg_id': list(rr_ids)
})
LOGGER.debug('HolderProver.get_box_ids_json <<< %s', rv)
return rv |
def move(src, dest, user=None):
"""
Move or rename src to dest.
"""
src_host, src_port, src_path = path.split(src, user)
dest_host, dest_port, dest_path = path.split(dest, user)
src_fs = hdfs(src_host, src_port, user)
dest_fs = hdfs(dest_host, dest_port, user)
try:
retval = src_fs.move(src_path, dest_fs, dest_path)
return retval
finally:
src_fs.close()
dest_fs.close() | Move or rename src to dest. | Below is the instruction that describes the task:
### Input:
Move or rename src to dest.
### Response:
def move(src, dest, user=None):
"""
Move or rename src to dest.
"""
src_host, src_port, src_path = path.split(src, user)
dest_host, dest_port, dest_path = path.split(dest, user)
src_fs = hdfs(src_host, src_port, user)
dest_fs = hdfs(dest_host, dest_port, user)
try:
retval = src_fs.move(src_path, dest_fs, dest_path)
return retval
finally:
src_fs.close()
dest_fs.close() |
def do_flipper(parser, token):
"""The flipper tag takes two arguments: the user to look up and the feature
to compare against.
"""
nodelist = parser.parse(('endflipper',))
tag_name, user_key, feature = token.split_contents()
parser.delete_first_token()
return FlipperNode(nodelist, user_key, feature) | The flipper tag takes two arguments: the user to look up and the feature
to compare against. | Below is the instruction that describes the task:
### Input:
The flipper tag takes two arguments: the user to look up and the feature
to compare against.
### Response:
def do_flipper(parser, token):
"""The flipper tag takes two arguments: the user to look up and the feature
to compare against.
"""
nodelist = parser.parse(('endflipper',))
tag_name, user_key, feature = token.split_contents()
parser.delete_first_token()
return FlipperNode(nodelist, user_key, feature) |
def validate_values(self, definition):
"""This function checks that the fields have the correct values.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field contains an unexpected value.
"""
if not self._strict_type_checks:
return
# Validate the scalar kind.
scalar_kind = definition.get('kind')
if scalar_kind not in SCALAR_TYPES_MAP.keys():
raise ParserError(self._name + ' - unknown scalar kind: ' + scalar_kind +
'.\nSee: {}'.format(BASE_DOC_URL))
# Validate the collection policy.
collection_policy = definition.get('release_channel_collection', None)
if collection_policy and collection_policy not in ['opt-in', 'opt-out']:
raise ParserError(self._name + ' - unknown collection policy: ' + collection_policy +
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
# Validate the cpp_guard.
cpp_guard = definition.get('cpp_guard')
if cpp_guard and re.match(r'\W', cpp_guard):
raise ParserError(self._name + ' - invalid cpp_guard: ' + cpp_guard +
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
# Validate record_in_processes.
record_in_processes = definition.get('record_in_processes', [])
for proc in record_in_processes:
if not utils.is_valid_process_name(proc):
raise ParserError(self._name + ' - unknown value in record_in_processes: ' + proc +
'.\nSee: {}'.format(BASE_DOC_URL))
# Validate the expiration version.
# Historical versions of Scalars.json may contain expiration versions
# using the deprecated format 'N.Na1'. Those scripts set
# self._strict_type_checks to false.
expires = definition.get('expires')
if not utils.validate_expiration_version(expires) and self._strict_type_checks:
raise ParserError('{} - invalid expires: {}.\nSee: {}#required-fields'
.format(self._name, expires, BASE_DOC_URL)) | This function checks that the fields have the correct values.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field contains an unexpected value. | Below is the instruction that describes the task:
### Input:
This function checks that the fields have the correct values.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field contains an unexpected value.
### Response:
def validate_values(self, definition):
"""This function checks that the fields have the correct values.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field contains an unexpected value.
"""
if not self._strict_type_checks:
return
# Validate the scalar kind.
scalar_kind = definition.get('kind')
if scalar_kind not in SCALAR_TYPES_MAP.keys():
raise ParserError(self._name + ' - unknown scalar kind: ' + scalar_kind +
'.\nSee: {}'.format(BASE_DOC_URL))
# Validate the collection policy.
collection_policy = definition.get('release_channel_collection', None)
if collection_policy and collection_policy not in ['opt-in', 'opt-out']:
raise ParserError(self._name + ' - unknown collection policy: ' + collection_policy +
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
# Validate the cpp_guard.
cpp_guard = definition.get('cpp_guard')
if cpp_guard and re.match(r'\W', cpp_guard):
raise ParserError(self._name + ' - invalid cpp_guard: ' + cpp_guard +
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
# Validate record_in_processes.
record_in_processes = definition.get('record_in_processes', [])
for proc in record_in_processes:
if not utils.is_valid_process_name(proc):
raise ParserError(self._name + ' - unknown value in record_in_processes: ' + proc +
'.\nSee: {}'.format(BASE_DOC_URL))
# Validate the expiration version.
# Historical versions of Scalars.json may contain expiration versions
# using the deprecated format 'N.Na1'. Those scripts set
# self._strict_type_checks to false.
expires = definition.get('expires')
if not utils.validate_expiration_version(expires) and self._strict_type_checks:
raise ParserError('{} - invalid expires: {}.\nSee: {}#required-fields'
.format(self._name, expires, BASE_DOC_URL)) |
def draw_noisy_time_series(self, SNR=1.0, red_noise_ratio=0.25, outlier_ratio=0.0):
"""
A function to draw a noisy time series based on the clean model
such that
y_noisy = y + yw + yr,
where yw is white noise, yr is red noise and y will be rescaled
so that y_noisy complies with the specified signal-to-noise
ratio (SNR).
Parameters
---------
SNR: float
Signal-to-noise ratio of the resulting contaminated signal in
decibels [dB]. SNR is defined as
SNR = 10*log(var_signal/var_noise), hence
SNR var_signal/var_noise
10 10
7 5
3 2
0 1
-3 0.5
-7 0.2
-10 0.1
red_noise_variance: float in [0, 1]
The variance of the red noise component is set according to
Var(yw)*red_noise_ratio. Set this to zero to obtain uncertainties
that explain the noise perfectly
outlier_ratio: float in [0, 1]
Percentage of outlier data points
Returns
-------
t: ndarray
Vector containing the time instants
y_noisy: ndarray
Vector containing the contaminated signal
s: ndarray
Vector containing the uncertainties associated to the white
noise component
"""
if outlier_ratio < 0.0 or outlier_ratio > 1.0:
raise ValueError("Outlier ratio must be in [0, 1]")
if red_noise_ratio < 0.0:
raise ValueError("Red noise ratio must be positive")
np.random.seed(self.rseed)
t = self.t
y_clean = self.y_clean
N = len(t)
# First we generate s
s, mean_s_squared = generate_uncertainties(N, rseed=self.rseed)
#print(mean_s_squared)
#print(np.mean(s**2))
# Draw a heteroscedastic white noise vector
white_noise = np.random.multivariate_normal(np.zeros(N,), np.diag(s**2))
# Now we generate a colored noise vector which is unaccounted by s
red_noise_variance = mean_s_squared*red_noise_ratio
# First order markovian process to generate
red_noise = first_order_markov_process(t, red_noise_variance, 1.0, rseed=self.rseed)
# The following is not ok for irregularly sampled time series because
# it assumes constant dt=1
#phi=0.5
#red_noise = np.random.randn(N)*np.sqrt(red_noise_variance)
#for i in range(1, N):
# red_noise[i] = phi*red_noise[i-1] + np.sqrt(1 - phi**2)*red_noise[i]
# The final noise vector
#print("%f %f" % (np.var(white_noise)*red_noise_ratio, np.var(red_noise)))
noise = white_noise + red_noise
var_noise = mean_s_squared + red_noise_variance
SNR_unitless = 10.0**(SNR/10.0)
self.A = np.sqrt(SNR_unitless*var_noise)
y = self.A*y_clean
y_noisy = y + noise
# Add outliers with a certain percentage
rperm = np.where(np.random.uniform(size=N) < outlier_ratio)[0]
outlier = np.random.uniform(5.0*np.std(y), 10.0*np.std(y), size=len(rperm))
y_noisy[rperm] += outlier
return t, y_noisy, s | A function to draw a noisy time series based on the clean model
such that
y_noisy = y + yw + yr,
where yw is white noise, yr is red noise and y will be rescaled
so that y_noisy complies with the specified signal-to-noise
ratio (SNR).
Parameters
---------
SNR: float
Signal-to-noise ratio of the resulting contaminated signal in
decibels [dB]. SNR is defined as
SNR = 10*log(var_signal/var_noise), hence
SNR var_signal/var_noise
10 10
7 5
3 2
0 1
-3 0.5
-7 0.2
-10 0.1
red_noise_variance: float in [0, 1]
The variance of the red noise component is set according to
Var(yw)*red_noise_ratio. Set this to zero to obtain uncertainties
that explain the noise perfectly
outlier_ratio: float in [0, 1]
Percentage of outlier data points
Returns
-------
t: ndarray
Vector containing the time instants
y_noisy: ndarray
Vector containing the contaminated signal
s: ndarray
Vector containing the uncertainties associated to the white
noise component | Below is the instruction that describes the task:
### Input:
A function to draw a noisy time series based on the clean model
such that
y_noisy = y + yw + yr,
where yw is white noise, yr is red noise and y will be rescaled
so that y_noisy complies with the specified signal-to-noise
ratio (SNR).
Parameters
---------
SNR: float
Signal-to-noise ratio of the resulting contaminated signal in
decibels [dB]. SNR is defined as
SNR = 10*log(var_signal/var_noise), hence
SNR var_signal/var_noise
10 10
7 5
3 2
0 1
-3 0.5
-7 0.2
-10 0.1
red_noise_variance: float in [0, 1]
The variance of the red noise component is set according to
Var(yw)*red_noise_ratio. Set this to zero to obtain uncertainties
that explain the noise perfectly
outlier_ratio: float in [0, 1]
Percentage of outlier data points
Returns
-------
t: ndarray
Vector containing the time instants
y_noisy: ndarray
Vector containing the contaminated signal
s: ndarray
Vector containing the uncertainties associated to the white
noise component
### Response:
def draw_noisy_time_series(self, SNR=1.0, red_noise_ratio=0.25, outlier_ratio=0.0):
"""
A function to draw a noisy time series based on the clean model
such that
y_noisy = y + yw + yr,
where yw is white noise, yr is red noise and y will be rescaled
so that y_noisy complies with the specified signal-to-noise
ratio (SNR).
Parameters
---------
SNR: float
Signal-to-noise ratio of the resulting contaminated signal in
decibels [dB]. SNR is defined as
SNR = 10*log(var_signal/var_noise), hence
SNR var_signal/var_noise
10 10
7 5
3 2
0 1
-3 0.5
-7 0.2
-10 0.1
red_noise_variance: float in [0, 1]
The variance of the red noise component is set according to
Var(yw)*red_noise_ratio. Set this to zero to obtain uncertainties
that explain the noise perfectly
outlier_ratio: float in [0, 1]
Percentage of outlier data points
Returns
-------
t: ndarray
Vector containing the time instants
y_noisy: ndarray
Vector containing the contaminated signal
s: ndarray
Vector containing the uncertainties associated to the white
noise component
"""
if outlier_ratio < 0.0 or outlier_ratio > 1.0:
raise ValueError("Outlier ratio must be in [0, 1]")
if red_noise_ratio < 0.0:
raise ValueError("Red noise ratio must be positive")
np.random.seed(self.rseed)
t = self.t
y_clean = self.y_clean
N = len(t)
# First we generate s
s, mean_s_squared = generate_uncertainties(N, rseed=self.rseed)
#print(mean_s_squared)
#print(np.mean(s**2))
# Draw a heteroscedastic white noise vector
white_noise = np.random.multivariate_normal(np.zeros(N,), np.diag(s**2))
# Now we generate a colored noise vector which is unaccounted by s
red_noise_variance = mean_s_squared*red_noise_ratio
# First order markovian process to generate
red_noise = first_order_markov_process(t, red_noise_variance, 1.0, rseed=self.rseed)
# The following is not ok for irregularly sampled time series because
# it assumes constant dt=1
#phi=0.5
#red_noise = np.random.randn(N)*np.sqrt(red_noise_variance)
#for i in range(1, N):
# red_noise[i] = phi*red_noise[i-1] + np.sqrt(1 - phi**2)*red_noise[i]
# The final noise vector
#print("%f %f" % (np.var(white_noise)*red_noise_ratio, np.var(red_noise)))
noise = white_noise + red_noise
var_noise = mean_s_squared + red_noise_variance
SNR_unitless = 10.0**(SNR/10.0)
self.A = np.sqrt(SNR_unitless*var_noise)
y = self.A*y_clean
y_noisy = y + noise
# Add outliers with a certain percentage
rperm = np.where(np.random.uniform(size=N) < outlier_ratio)[0]
outlier = np.random.uniform(5.0*np.std(y), 10.0*np.std(y), size=len(rperm))
y_noisy[rperm] += outlier
return t, y_noisy, s |
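The amplitude rescaling follows directly from the dB definition in the docstring: the clean signal is scaled so that its variance is SNR_unitless times the total noise variance. A worked example with illustrative numbers:

import numpy as np

SNR_dB = 3.0                             # requested signal-to-noise ratio in dB
var_noise = 0.04                         # mean_s_squared + red_noise_variance
SNR_unitless = 10.0 ** (SNR_dB / 10.0)   # ~1.995, i.e. signal variance roughly twice the noise variance
A = np.sqrt(SNR_unitless * var_noise)    # amplitude applied to the clean signal
print(round(SNR_unitless, 3), round(A, 3))  # 1.995 0.283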
def remove_task_db(self, fid, force=False):
'''Remove the task from the database'''
self.remove_slice_db(fid)
sql = 'DELETE FROM upload WHERE fid=?'
self.cursor.execute(sql, [fid, ])
self.check_commit(force=force) | Remove the task from the database | Below is the instruction that describes the task:
### Input:
Remove the task from the database
### Response:
def remove_task_db(self, fid, force=False):
'''Remove the task from the database'''
self.remove_slice_db(fid)
sql = 'DELETE FROM upload WHERE fid=?'
self.cursor.execute(sql, [fid, ])
self.check_commit(force=force) |
def migrate_abci_chain(self):
"""Generate and record a new ABCI chain ID. New blocks are not
accepted until we receive an InitChain ABCI request with
the matching chain ID and validator set.
Chain ID is generated based on the current chain and height.
`chain-X` => `chain-X-migrated-at-height-5`.
`chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`.
If there is no known chain (we are at genesis), the function returns.
"""
latest_chain = self.get_latest_abci_chain()
if latest_chain is None:
return
block = self.get_latest_block()
suffix = '-migrated-at-height-'
chain_id = latest_chain['chain_id']
block_height_str = str(block['height'])
new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str
self.store_abci_chain(block['height'] + 1, new_chain_id, False) | Generate and record a new ABCI chain ID. New blocks are not
accepted until we receive an InitChain ABCI request with
the matching chain ID and validator set.
Chain ID is generated based on the current chain and height.
`chain-X` => `chain-X-migrated-at-height-5`.
`chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`.
If there is no known chain (we are at genesis), the function returns. | Below is the instruction that describes the task:
### Input:
Generate and record a new ABCI chain ID. New blocks are not
accepted until we receive an InitChain ABCI request with
the matching chain ID and validator set.
Chain ID is generated based on the current chain and height.
`chain-X` => `chain-X-migrated-at-height-5`.
`chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`.
If there is no known chain (we are at genesis), the function returns.
### Response:
def migrate_abci_chain(self):
"""Generate and record a new ABCI chain ID. New blocks are not
accepted until we receive an InitChain ABCI request with
the matching chain ID and validator set.
Chain ID is generated based on the current chain and height.
`chain-X` => `chain-X-migrated-at-height-5`.
`chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`.
If there is no known chain (we are at genesis), the function returns.
"""
latest_chain = self.get_latest_abci_chain()
if latest_chain is None:
return
block = self.get_latest_block()
suffix = '-migrated-at-height-'
chain_id = latest_chain['chain_id']
block_height_str = str(block['height'])
new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str
self.store_abci_chain(block['height'] + 1, new_chain_id, False) |
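The chain-ID rewriting itself is plain string manipulation on the suffix; a small standalone sketch of the naming rule described in the docstring:

def migrated_chain_id(chain_id, height, suffix='-migrated-at-height-'):
    # Keep only the base chain name, then append the new migration height.
    return chain_id.split(suffix)[0] + suffix + str(height)

print(migrated_chain_id('chain-X', 5))                        # chain-X-migrated-at-height-5
print(migrated_chain_id('chain-X-migrated-at-height-5', 21))  # chain-X-migrated-at-height-21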
def dict_to_source(dict):
'''
Transform a dict with key 'citation' into a :class:`Source`.
If the argument passed is already a :class:`Source`, this method just
returns the argument.
'''
if isinstance(dict, Source):
return dict
return Source(
dict['citation'],
dict.get('markup')
) | Transform a dict with key 'citation' into a :class:`Source`.
If the argument passed is already a :class:`Source`, this method just
returns the argument. | Below is the instruction that describes the task:
### Input:
Transform a dict with key 'citation' into a :class:`Source`.
If the argument passed is already a :class:`Source`, this method just
returns the argument.
### Response:
def dict_to_source(dict):
'''
Transform a dict with key 'citation' into a :class:`Source`.
If the argument passed is already a :class:`Source`, this method just
returns the argument.
'''
if isinstance(dict, Source):
return dict
return Source(
dict['citation'],
dict.get('markup')
) |
def GetFileSystems():
"""Make syscalls to get the mounted filesystems.
Returns:
A list of Struct objects.
Based on the information for getfsstat
http://developer.apple.com/library/mac/#documentation/Darwin/
Reference/ManPages/man2/getfsstat.2.html
"""
version = OSXVersion()
major, minor = version.VersionAsMajorMinor()
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
if major <= 10 and minor <= 5:
use_64 = False
fs_struct = StatFSStruct
else:
use_64 = True
fs_struct = StatFS64Struct
# Get max 20 file systems.
struct_size = fs_struct.GetSize()
buf_size = struct_size * 20
cbuf = ctypes.create_string_buffer(buf_size)
if use_64:
# MNT_NOWAIT = 2 - don't ask the filesystems, just return cache.
ret = libc.getfsstat64(ctypes.byref(cbuf), buf_size, 2)
else:
ret = libc.getfsstat(ctypes.byref(cbuf), buf_size, 2)
if ret == 0:
logging.debug("getfsstat failed err: %s", ret)
return []
return ParseFileSystemsStruct(fs_struct, ret, cbuf) | Make syscalls to get the mounted filesystems.
Returns:
A list of Struct objects.
Based on the information for getfsstat
http://developer.apple.com/library/mac/#documentation/Darwin/
Reference/ManPages/man2/getfsstat.2.html | Below is the instruction that describes the task:
### Input:
Make syscalls to get the mounted filesystems.
Returns:
A list of Struct objects.
Based on the information for getfsstat
http://developer.apple.com/library/mac/#documentation/Darwin/
Reference/ManPages/man2/getfsstat.2.html
### Response:
def GetFileSystems():
"""Make syscalls to get the mounted filesystems.
Returns:
A list of Struct objects.
Based on the information for getfsstat
http://developer.apple.com/library/mac/#documentation/Darwin/
Reference/ManPages/man2/getfsstat.2.html
"""
version = OSXVersion()
major, minor = version.VersionAsMajorMinor()
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
if major <= 10 and minor <= 5:
use_64 = False
fs_struct = StatFSStruct
else:
use_64 = True
fs_struct = StatFS64Struct
# Get max 20 file systems.
struct_size = fs_struct.GetSize()
buf_size = struct_size * 20
cbuf = ctypes.create_string_buffer(buf_size)
if use_64:
# MNT_NOWAIT = 2 - don't ask the filesystems, just return cache.
ret = libc.getfsstat64(ctypes.byref(cbuf), buf_size, 2)
else:
ret = libc.getfsstat(ctypes.byref(cbuf), buf_size, 2)
if ret == 0:
logging.debug("getfsstat failed err: %s", ret)
return []
return ParseFileSystemsStruct(fs_struct, ret, cbuf) |
def get_parent_until(path):
"""
Given a file path, determine the full module path.
e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields
'numpy.core'
"""
dirname = osp.dirname(path)
try:
mod = osp.basename(path)
mod = osp.splitext(mod)[0]
imp.find_module(mod, [dirname])
except ImportError:
return
items = [mod]
while 1:
items.append(osp.basename(dirname))
try:
dirname = osp.dirname(dirname)
imp.find_module('__init__', [dirname + os.sep])
except ImportError:
break
return '.'.join(reversed(items)) | Given a file path, determine the full module path.
e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields
'numpy.core' | Below is the instruction that describes the task:
### Input:
Given a file path, determine the full module path.
e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields
'numpy.core'
### Response:
def get_parent_until(path):
"""
Given a file path, determine the full module path.
e.g. '/usr/lib/python2.7/dist-packages/numpy/core/__init__.pyc' yields
'numpy.core'
"""
dirname = osp.dirname(path)
try:
mod = osp.basename(path)
mod = osp.splitext(mod)[0]
imp.find_module(mod, [dirname])
except ImportError:
return
items = [mod]
while 1:
items.append(osp.basename(dirname))
try:
dirname = osp.dirname(dirname)
imp.find_module('__init__', [dirname + os.sep])
except ImportError:
break
return '.'.join(reversed(items)) |
def report_idle_after(seconds):
"""Report_idle_after after certain number of seconds."""
def decorator(func):
def wrapper(*args, **kwargs):
def _handle_timeout(signum, frame):
config = get_config()
if not config.ready:
config.load()
message = {
"subject": "Idle Experiment.",
"body": idle_template.format(
app_id=config.get("id"), minutes_so_far=round(seconds / 60)
),
}
log("Reporting problem with idle experiment...")
get_messenger(config).send(message)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator | Report_idle_after after certain number of seconds. | Below is the instruction that describes the task:
### Input:
Report_idle_after after certain number of seconds.
### Response:
def report_idle_after(seconds):
"""Report_idle_after after certain number of seconds."""
def decorator(func):
def wrapper(*args, **kwargs):
def _handle_timeout(signum, frame):
config = get_config()
if not config.ready:
config.load()
message = {
"subject": "Idle Experiment.",
"body": idle_template.format(
app_id=config.get("id"), minutes_so_far=round(seconds / 60)
),
}
log("Reporting problem with idle experiment...")
get_messenger(config).send(message)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator |
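Like any parameterized decorator it is applied with the timeout in seconds; a usage sketch (the decorated function below is hypothetical):

@report_idle_after(30 * 60)   # report if the wrapped call is still running after 30 minutes
def run_experiment():
    pass                      # long-running work; SIGALRM triggers the idle report

run_experiment()              # the alarm is cleared in the finally block once this returns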
def union(self, other, left_name="LEFT", right_name="RIGHT"):
"""
*Wrapper of* ``UNION``
The UNION operation is used to integrate homogeneous or heterogeneous samples of two
datasets within a single dataset; for each sample of either one of the input datasets, a
sample is created in the result as follows:
* its metadata are the same as in the original sample;
* its schema is the schema of the first (left) input dataset; new
identifiers are assigned to each output sample;
* its regions are the same (in coordinates and attribute values) as in the original
sample. Region attributes which are missing in an input dataset sample
(w.r.t. the merged schema) are set to null.
:param other: a GMQLDataset
:param left_name: name that you want to assign to the left dataset
:param right_name: name that you want to assign to the right dataset
:return: a new GMQLDataset
Example of usage::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
d2 = gl.get_example_dataset("Example_Dataset_2")
result = d1.union(other=d2, left_name="D1", right_name="D2")
"""
if not isinstance(left_name, str) or \
not isinstance(right_name, str):
raise TypeError("left_name and right_name must be strings. "
"{} - {} was provided".format(type(left_name), type(right_name)))
if isinstance(other, GMQLDataset):
other_idx = other.__index
else:
raise TypeError("other must be a GMQLDataset. "
"{} was provided".format(type(other)))
if len(left_name) == 0 or len(right_name) == 0:
raise ValueError("left_name and right_name must not be empty")
new_index = self.opmng.union(self.__index, other_idx, left_name, right_name)
new_local_sources, new_remote_sources = self.__combine_sources(self, other)
new_location = self.__combine_locations(self, other)
return GMQLDataset(index=new_index, location=new_location,
local_sources=new_local_sources,
remote_sources=new_remote_sources,
meta_profile=self.meta_profile) | *Wrapper of* ``UNION``
The UNION operation is used to integrate homogeneous or heterogeneous samples of two
datasets within a single dataset; for each sample of either one of the input datasets, a
sample is created in the result as follows:
* its metadata are the same as in the original sample;
* its schema is the schema of the first (left) input dataset; new
identifiers are assigned to each output sample;
* its regions are the same (in coordinates and attribute values) as in the original
sample. Region attributes which are missing in an input dataset sample
(w.r.t. the merged schema) are set to null.
:param other: a GMQLDataset
:param left_name: name that you want to assign to the left dataset
:param right_name: name that you want to assign to the right dataset
:return: a new GMQLDataset
Example of usage::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
d2 = gl.get_example_dataset("Example_Dataset_2")
result = d1.union(other=d2, left_name="D1", right_name="D2") | Below is the instruction that describes the task:
### Input:
*Wrapper of* ``UNION``
The UNION operation is used to integrate homogeneous or heterogeneous samples of two
datasets within a single dataset; for each sample of either one of the input datasets, a
sample is created in the result as follows:
* its metadata are the same as in the original sample;
* its schema is the schema of the first (left) input dataset; new
identifiers are assigned to each output sample;
* its regions are the same (in coordinates and attribute values) as in the original
sample. Region attributes which are missing in an input dataset sample
(w.r.t. the merged schema) are set to null.
:param other: a GMQLDataset
:param left_name: name that you want to assign to the left dataset
:param right_name: name tha t you want to assign to the right dataset
:return: a new GMQLDataset
Example of usage::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
d2 = gl.get_example_dataset("Example_Dataset_2")
result = d1.union(other=d2, left_name="D1", right_name="D2")
### Response:
def union(self, other, left_name="LEFT", right_name="RIGHT"):
"""
*Wrapper of* ``UNION``
The UNION operation is used to integrate homogeneous or heterogeneous samples of two
datasets within a single dataset; for each sample of either one of the input datasets, a
sample is created in the result as follows:
* its metadata are the same as in the original sample;
* its schema is the schema of the first (left) input dataset; new
identifiers are assigned to each output sample;
* its regions are the same (in coordinates and attribute values) as in the original
sample. Region attributes which are missing in an input dataset sample
(w.r.t. the merged schema) are set to null.
:param other: a GMQLDataset
:param left_name: name that you want to assign to the left dataset
:param right_name: name that you want to assign to the right dataset
:return: a new GMQLDataset
Example of usage::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
d2 = gl.get_example_dataset("Example_Dataset_2")
result = d1.union(other=d2, left_name="D1", right_name="D2")
"""
if not isinstance(left_name, str) or \
not isinstance(right_name, str):
raise TypeError("left_name and right_name must be strings. "
"{} - {} was provided".format(type(left_name), type(right_name)))
if isinstance(other, GMQLDataset):
other_idx = other.__index
else:
raise TypeError("other must be a GMQLDataset. "
"{} was provided".format(type(other)))
if len(left_name) == 0 or len(right_name) == 0:
raise ValueError("left_name and right_name must not be empty")
new_index = self.opmng.union(self.__index, other_idx, left_name, right_name)
new_local_sources, new_remote_sources = self.__combine_sources(self, other)
new_location = self.__combine_locations(self, other)
return GMQLDataset(index=new_index, location=new_location,
local_sources=new_local_sources,
remote_sources=new_remote_sources,
meta_profile=self.meta_profile) |
def get_uncompleted_tasks(self):
"""Return a list of all uncompleted tasks in this project.
.. warning:: Requires Todoist premium.
:return: A list of all uncompleted tasks in this project.
:rtype: list of :class:`pytodoist.todoist.Task`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('PyTodoist')
>>> project.add_task('Install PyTodoist')
>>> uncompleted_tasks = project.get_uncompleted_tasks()
>>> for task in uncompleted_tasks:
... task.complete()
"""
all_tasks = self.get_tasks()
completed_tasks = self.get_completed_tasks()
return [t for t in all_tasks if t not in completed_tasks] | Return a list of all uncompleted tasks in this project.
.. warning:: Requires Todoist premium.
:return: A list of all uncompleted tasks in this project.
:rtype: list of :class:`pytodoist.todoist.Task`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('PyTodoist')
>>> project.add_task('Install PyTodoist')
>>> uncompleted_tasks = project.get_uncompleted_tasks()
>>> for task in uncompleted_tasks:
... task.complete() | Below is the instruction that describes the task:
### Input:
Return a list of all uncompleted tasks in this project.
.. warning:: Requires Todoist premium.
:return: A list of all uncompleted tasks in this project.
:rtype: list of :class:`pytodoist.todoist.Task`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('PyTodoist')
>>> project.add_task('Install PyTodoist')
>>> uncompleted_tasks = project.get_uncompleted_tasks()
>>> for task in uncompleted_tasks:
... task.complete()
### Response:
def get_uncompleted_tasks(self):
"""Return a list of all uncompleted tasks in this project.
.. warning:: Requires Todoist premium.
:return: A list of all uncompleted tasks in this project.
:rtype: list of :class:`pytodoist.todoist.Task`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('PyTodoist')
>>> project.add_task('Install PyTodoist')
>>> uncompleted_tasks = project.get_uncompleted_tasks()
>>> for task in uncompleted_tasks:
... task.complete()
"""
all_tasks = self.get_tasks()
completed_tasks = self.get_completed_tasks()
return [t for t in all_tasks if t not in completed_tasks] |
def concretize_load_idx(self, idx, strategies=None):
"""
Concretizes a load index.
:param idx: An expression for the index.
:param strategies: A list of concretization strategies (to override the default).
:param min_idx: Minimum value for a concretized index (inclusive).
:param max_idx: Maximum value for a concretized index (exclusive).
:returns: A list of concrete indexes.
"""
if isinstance(idx, int):
return [idx]
elif not self.state.solver.symbolic(idx):
return [self.state.solver.eval(idx)]
strategies = self.load_strategies if strategies is None else strategies
return self._apply_concretization_strategies(idx, strategies, 'load') | Concretizes a load index.
:param idx: An expression for the index.
:param strategies: A list of concretization strategies (to override the default).
:param min_idx: Minimum value for a concretized index (inclusive).
:param max_idx: Maximum value for a concretized index (exclusive).
:returns: A list of concrete indexes. | Below is the instruction that describes the task:
### Input:
Concretizes a load index.
:param idx: An expression for the index.
:param strategies: A list of concretization strategies (to override the default).
:param min_idx: Minimum value for a concretized index (inclusive).
:param max_idx: Maximum value for a concretized index (exclusive).
:returns: A list of concrete indexes.
### Response:
def concretize_load_idx(self, idx, strategies=None):
"""
Concretizes a load index.
:param idx: An expression for the index.
:param strategies: A list of concretization strategies (to override the default).
:param min_idx: Minimum value for a concretized index (inclusive).
:param max_idx: Maximum value for a concretized index (exclusive).
:returns: A list of concrete indexes.
"""
if isinstance(idx, int):
return [idx]
elif not self.state.solver.symbolic(idx):
return [self.state.solver.eval(idx)]
strategies = self.load_strategies if strategies is None else strategies
return self._apply_concretization_strategies(idx, strategies, 'load') |
def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta):
"""
Parses in data_df from hdf5, subsetting if specified.
Input:
-data_dset (h5py dset): HDF5 dataset from which to read data_df
-ridx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-cidx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-row_meta (pandas DataFrame): the parsed in row metadata
-col_meta (pandas DataFrame): the parsed in col metadata
"""
if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index): # no subset
data_array = np.empty(data_dset.shape, dtype=np.float32)
data_dset.read_direct(data_array)
data_array = data_array.transpose()
elif len(ridx) <= len(cidx):
first_subset = data_dset[:, ridx].astype(np.float32)
data_array = first_subset[cidx, :].transpose()
elif len(cidx) < len(ridx):
first_subset = data_dset[cidx, :].astype(np.float32)
data_array = first_subset[:, ridx].transpose()
# make DataFrame instance
data_df = pd.DataFrame(data_array, index=row_meta.index[ridx], columns=col_meta.index[cidx])
return data_df | Parses in data_df from hdf5, subsetting if specified.
Input:
-data_dset (h5py dset): HDF5 dataset from which to read data_df
-ridx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-cidx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-row_meta (pandas DataFrame): the parsed in row metadata
    -col_meta (pandas DataFrame): the parsed in col metadata | Below is the instruction that describes the task:
### Input:
Parses in data_df from hdf5, subsetting if specified.
Input:
-data_dset (h5py dset): HDF5 dataset from which to read data_df
-ridx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-cidx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-row_meta (pandas DataFrame): the parsed in row metadata
-col_meta (pandas DataFrame): the parsed in col metadata
### Response:
def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta):
"""
Parses in data_df from hdf5, subsetting if specified.
Input:
-data_dset (h5py dset): HDF5 dataset from which to read data_df
-ridx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-cidx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-row_meta (pandas DataFrame): the parsed in row metadata
-col_meta (pandas DataFrame): the parsed in col metadata
"""
if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index): # no subset
data_array = np.empty(data_dset.shape, dtype=np.float32)
data_dset.read_direct(data_array)
data_array = data_array.transpose()
elif len(ridx) <= len(cidx):
first_subset = data_dset[:, ridx].astype(np.float32)
data_array = first_subset[cidx, :].transpose()
elif len(cidx) < len(ridx):
first_subset = data_dset[cidx, :].astype(np.float32)
data_array = first_subset[:, ridx].transpose()
# make DataFrame instance
data_df = pd.DataFrame(data_array, index=row_meta.index[ridx], columns=col_meta.index[cidx])
return data_df |
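A minimal sketch of the two-step fancy indexing used above (the array shape and index lists below are hypothetical, and the on-disk dataset is assumed to be stored columns-by-rows, which is why the final transpose is needed):

import numpy as np

dset = np.arange(20).reshape(4, 5)    # stand-in for the HDF5 dataset: 4 columns x 5 rows on disk
ridx, cidx = [0, 2], [1, 2, 3]        # requested rows and columns; len(ridx) <= len(cidx)
first_subset = dset[:, ridx]          # take the shorter index list first, as in the branch above
data_array = first_subset[cidx, :].transpose()
print(data_array.shape)               # (2, 3) == (len(ridx), len(cidx))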
def ms_panset(self, viewer, event, data_x, data_y,
msg=True):
"""An interactive way to set the pan position. The location
(data_x, data_y) will be centered in the window.
"""
if self.canpan and (event.state == 'down'):
self._panset(viewer, data_x, data_y, msg=msg)
return True | An interactive way to set the pan position. The location
        (data_x, data_y) will be centered in the window. | Below is the instruction that describes the task:
### Input:
An interactive way to set the pan position. The location
(data_x, data_y) will be centered in the window.
### Response:
def ms_panset(self, viewer, event, data_x, data_y,
msg=True):
"""An interactive way to set the pan position. The location
(data_x, data_y) will be centered in the window.
"""
if self.canpan and (event.state == 'down'):
self._panset(viewer, data_x, data_y, msg=msg)
return True |
def capture_output_from_running_process(context: RunContext) -> None:
"""
Parses output from a running sub-process
Decodes and filters the process output line by line, buffering it
If "mute" is False, sends the output back in real time
:param context: run context
:type context: _RunContext
"""
# Get the raw output one line at a time
_output = context.capture.readline(block=False)
if _output:
line = decode_and_filter(_output, context)
if line:
if not context.mute:
# Print in real time
_LOGGER_PROCESS.debug(line)
# Buffer the line
context.process_output_chunks.append(line)
# Get additional output if any
return capture_output_from_running_process(context)
return None | Parses output from a running sub-process
Decodes and filters the process output line by line, buffering it
If "mute" is False, sends the output back in real time
:param context: run context
    :type context: _RunContext | Below is the instruction that describes the task:
### Input:
Parses output from a running sub-process
Decodes and filters the process output line by line, buffering it
If "mute" is False, sends the output back in real time
:param context: run context
:type context: _RunContext
### Response:
def capture_output_from_running_process(context: RunContext) -> None:
"""
Parses output from a running sub-process
Decodes and filters the process output line by line, buffering it
If "mute" is False, sends the output back in real time
:param context: run context
:type context: _RunContext
"""
# Get the raw output one line at a time
_output = context.capture.readline(block=False)
if _output:
line = decode_and_filter(_output, context)
if line:
if not context.mute:
# Print in real time
_LOGGER_PROCESS.debug(line)
# Buffer the line
context.process_output_chunks.append(line)
# Get additional output if any
return capture_output_from_running_process(context)
return None |
def compare_last_two_snapshots(obj, raw=False):
"""Helper to compare the last two snapshots directly
"""
if get_snapshot_count(obj) < 2:
return {}
version = get_version(obj)
snap1 = get_snapshot_by_version(obj, version - 1)
snap2 = get_snapshot_by_version(obj, version)
    return compare_snapshots(snap1, snap2, raw=raw) | Helper to compare the last two snapshots directly | Below is the instruction that describes the task:
### Input:
Helper to compare the last two snapshots directly
### Response:
def compare_last_two_snapshots(obj, raw=False):
"""Helper to compare the last two snapshots directly
"""
if get_snapshot_count(obj) < 2:
return {}
version = get_version(obj)
snap1 = get_snapshot_by_version(obj, version - 1)
snap2 = get_snapshot_by_version(obj, version)
return compare_snapshots(snap1, snap2, raw=raw) |
def get_scalingip(context, id, fields=None):
"""Retrieve a scaling IP.
:param context: neutron api request context.
:param id: The UUID of the scaling IP.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: Dictionary containing details for the scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('get_scalingip %s for tenant %s' % (id, context.tenant_id))
filters = {'address_type': ip_types.SCALING, '_deallocated': False}
scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
**filters)
if not scaling_ip:
raise q_exc.ScalingIpNotFound(id=id)
return v._make_scaling_ip_dict(scaling_ip) | Retrieve a scaling IP.
:param context: neutron api request context.
:param id: The UUID of the scaling IP.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: Dictionary containing details for the scaling IP. If values
are declared in the fields parameter, then only those keys will be
        present. | Below is the instruction that describes the task:
### Input:
Retrieve a scaling IP.
:param context: neutron api request context.
:param id: The UUID of the scaling IP.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: Dictionary containing details for the scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
### Response:
def get_scalingip(context, id, fields=None):
"""Retrieve a scaling IP.
:param context: neutron api request context.
:param id: The UUID of the scaling IP.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: Dictionary containing details for the scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('get_scalingip %s for tenant %s' % (id, context.tenant_id))
filters = {'address_type': ip_types.SCALING, '_deallocated': False}
scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
**filters)
if not scaling_ip:
raise q_exc.ScalingIpNotFound(id=id)
return v._make_scaling_ip_dict(scaling_ip) |
def conservtion_profile_pid(region, genome_alignment,
mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS,
species=None):
"""
build a conservation profile for the given region using the genome alignment.
The scores in the profile will be the percent of bases identical to the
reference sequence.
  :param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:return: a list of the same length as the region where each entry is the
PID at the corresponding locus.
"""
res = []
s = region.start if region.isPositiveStrand() else region.end - 1
e = region.end if region.isPositiveStrand() else region.start - 1
step = 1 if region.isPositiveStrand() else -1
for i in range(s, e, step):
try:
col = genome_alignment.get_column(region.chrom, i, mi_seqs, species)
res.append(pid(col))
except NoSuchAlignmentColumnError:
res.append(None)
except NoUniqueColumnError:
res.append(None)
return res | build a conservation profile for the given region using the genome alignment.
The scores in the profile will be the percent of bases identical to the
reference sequence.
  :param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:return: a list of the same length as the region where each entry is the
           PID at the corresponding locus. | Below is the instruction that describes the task:
### Input:
build a conservation profile for the given region using the genome alignment.
The scores in the profile will be the percent of bases identical to the
reference sequence.
    :param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:return: a list of the same length as the region where each entry is the
PID at the corresponding locus.
### Response:
def conservtion_profile_pid(region, genome_alignment,
mi_seqs=MissingSequenceHandler.TREAT_AS_ALL_GAPS,
species=None):
"""
build a conservation profile for the given region using the genome alignment.
The scores in the profile will be the percent of bases identical to the
reference sequence.
  :param mi_seqs: how to treat sequences with no actual sequence data for
the column.
:return: a list of the same length as the region where each entry is the
PID at the corresponding locus.
"""
res = []
s = region.start if region.isPositiveStrand() else region.end - 1
e = region.end if region.isPositiveStrand() else region.start - 1
step = 1 if region.isPositiveStrand() else -1
for i in range(s, e, step):
try:
col = genome_alignment.get_column(region.chrom, i, mi_seqs, species)
res.append(pid(col))
except NoSuchAlignmentColumnError:
res.append(None)
except NoUniqueColumnError:
res.append(None)
return res |
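A small, hypothetical illustration of the strand-aware loop bounds above: on the negative strand the loci are visited in reverse, so the profile is ordered 5' to 3' with respect to the region itself.

start, end = 10, 14                              # a hypothetical region spanning [10, 14)
positive = list(range(start, end, 1))            # [10, 11, 12, 13]
negative = list(range(end - 1, start - 1, -1))   # [13, 12, 11, 10]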
def parse_attr_signature(sig):
""" Parse an attribute signature """
match = ATTR_SIG_RE.match(sig.strip())
if not match:
raise RuntimeError('Attribute signature invalid, got ' + sig)
name, _, params = match.groups()
if params is not None and params.strip() != '':
params = split_sig(params)
params = [parse_param_signature(x) for x in params]
else:
params = []
    return (name, params) | Parse an attribute signature | Below is the instruction that describes the task:
### Input:
Parse an attribute signature
### Response:
def parse_attr_signature(sig):
""" Parse an attribute signature """
match = ATTR_SIG_RE.match(sig.strip())
if not match:
raise RuntimeError('Attribute signature invalid, got ' + sig)
name, _, params = match.groups()
if params is not None and params.strip() != '':
params = split_sig(params)
params = [parse_param_signature(x) for x in params]
else:
params = []
return (name, params) |
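ATTR_SIG_RE, split_sig and parse_param_signature are defined elsewhere in the module and are not shown above, so the following is only an illustrative stand-in for how the three-group match and unpacking behave, using a deliberately simplified regex:

import re

ATTR_SIG_RE = re.compile(r'^(\w+)(\s*\((.*)\))?$')      # simplified stand-in, not the real pattern
match = ATTR_SIG_RE.match('MyAttribute(true, "name")'.strip())
name, _, params = match.groups()
print(name)     # MyAttribute
print(params)   # true, "name"  (the real code would split and parse these further)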
def TriToBin(self, x, y, z):
'''
Turn an x-y-z triangular coord to an a-b coord.
if z is negative, calc with its abs then return (a, -b).
:param x,y,z: the three numbers of the triangular coord
:type x,y,z: float or double are both OK, just numbers
:return: the corresponding a-b coord
        :rtype: a tuple consisting of a and b
'''
if (z >= 0):
if (x + y + z == 0):
return (0, 0)
else:
Sum = x + y + z
X = 100.0 * x / Sum
Y = 100.0 * y / Sum
Z = 100.0 * z / Sum
if (X + Y != 0):
a = Z / 2.0 + (100.0 - Z) * Y / (Y + X)
else:
a = Z / 2.0
b = Z / 2.0 * (np.sqrt(3))
return (a, b)
else:
z = abs(z)
if (x + y + z == 0):
return (0, 0)
else:
Sum = x + y + z
X = 100.0 * x / Sum
Y = 100.0 * y / Sum
Z = 100.0 * z / Sum
if (X + Y != 0):
a = Z / 2.0 + (100.0 - Z) * Y / (Y + X)
else:
a = Z / 2.0
b = Z / 2.0 * (np.sqrt(3))
return (a, -b) | Turn an x-y-z triangular coord to an a-b coord.
if z is negative, calc with its abs then return (a, -b).
:param x,y,z: the three numbers of the triangular coord
:type x,y,z: float or double are both OK, just numbers
:return: the corresponding a-b coord
        :rtype: a tuple consisting of a and b | Below is the instruction that describes the task:
### Input:
Turn an x-y-z triangular coord to an a-b coord.
if z is negative, calc with its abs then return (a, -b).
:param x,y,z: the three numbers of the triangular coord
:type x,y,z: float or double are both OK, just numbers
:return: the corresponding a-b coord
    :rtype: a tuple consisting of a and b
### Response:
def TriToBin(self, x, y, z):
'''
Turn an x-y-z triangular coord to an a-b coord.
if z is negative, calc with its abs then return (a, -b).
:param x,y,z: the three numbers of the triangular coord
:type x,y,z: float or double are both OK, just numbers
:return: the corresponding a-b coord
        :rtype: a tuple consisting of a and b
'''
if (z >= 0):
if (x + y + z == 0):
return (0, 0)
else:
Sum = x + y + z
X = 100.0 * x / Sum
Y = 100.0 * y / Sum
Z = 100.0 * z / Sum
if (X + Y != 0):
a = Z / 2.0 + (100.0 - Z) * Y / (Y + X)
else:
a = Z / 2.0
b = Z / 2.0 * (np.sqrt(3))
return (a, b)
else:
z = abs(z)
if (x + y + z == 0):
return (0, 0)
else:
Sum = x + y + z
X = 100.0 * x / Sum
Y = 100.0 * y / Sum
Z = 100.0 * z / Sum
if (X + Y != 0):
a = Z / 2.0 + (100.0 - Z) * Y / (Y + X)
else:
a = Z / 2.0
b = Z / 2.0 * (np.sqrt(3))
return (a, -b) |
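As a worked example mirroring the arithmetic above (illustrative values only): the point x = y = z maps to the centroid of the 100-unit triangle, and a negative z only flips the sign of b.

import numpy as np

x, y, z = 1.0, 1.0, 1.0
Sum = x + y + z
X, Y, Z = 100.0 * x / Sum, 100.0 * y / Sum, 100.0 * z / Sum   # each 33.33
a = Z / 2.0 + (100.0 - Z) * Y / (Y + X)                       # 16.67 + 66.67 * 0.5 = 50.0
b = Z / 2.0 * np.sqrt(3)                                      # about 28.87
print(a, b)   # TriToBin(1, 1, 1) -> (50.0, 28.87); TriToBin(1, 1, -1) -> (50.0, -28.87)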
def validate_oath_hotp(self, params):
"""
Validate OATH-HOTP code using YubiHSM HMAC-SHA1 hashing with token keys
secured in AEAD's that we have stored in an SQLite3 database.
"""
from_key = params["hotp"][0]
if not re.match(hotp_valid_input, from_key):
self.log_error("IN: %s, Invalid OATH-HOTP OTP" % (params))
return "ERR Invalid OATH-HOTP OTP"
uid, otp, = get_oath_hotp_bits(params)
if not uid or not otp:
self.log_error("IN: %s, could not get UID/OTP ('%s'/'%s')" % (params, uid, otp))
return "ERR Invalid OATH-HOTP input"
if args.debug:
print "OATH-HOTP uid %s, OTP %s" % (uid, otp)
# Fetch counter value for `uid' from database
try:
db = ValOathDb(args.db_file)
entry = db.get(uid)
except Exception, e:
self.log_error("IN: %s, database error : '%s'" % (params, e))
return "ERR Internal error"
# Check for correct OATH-HOTP OTP
nonce = entry.data["nonce"].decode('hex')
aead = entry.data["aead"].decode('hex')
new_counter = pyhsm.oath_hotp.search_for_oath_code(hsm, entry.data["key_handle"], nonce, aead, \
entry.data["oath_c"], otp, args.look_ahead)
if args.debug:
print "OATH-HOTP %i..%i -> new C == %s" \
% (entry.data["oath_c"], entry.data["oath_c"] + args.look_ahead, new_counter)
if type(new_counter) != int:
# XXX increase 'throttling parameter' to make brute forcing harder/impossible
return "ERR Could not validate OATH-HOTP OTP"
try:
# Must successfully store new_counter before we return OK
if db.update_oath_hotp_c(entry, new_counter):
return "OK counter=%04x" % (new_counter)
else:
return "ERR replayed OATH-HOTP"
except Exception, e:
self.log_error("IN: %s, database error updating counter : %s" % (params, e))
return "ERR Internal error" | Validate OATH-HOTP code using YubiHSM HMAC-SHA1 hashing with token keys
    secured in AEAD's that we have stored in an SQLite3 database. | Below is the instruction that describes the task:
### Input:
Validate OATH-HOTP code using YubiHSM HMAC-SHA1 hashing with token keys
secured in AEAD's that we have stored in an SQLite3 database.
### Response:
def validate_oath_hotp(self, params):
"""
Validate OATH-HOTP code using YubiHSM HMAC-SHA1 hashing with token keys
secured in AEAD's that we have stored in an SQLite3 database.
"""
from_key = params["hotp"][0]
if not re.match(hotp_valid_input, from_key):
self.log_error("IN: %s, Invalid OATH-HOTP OTP" % (params))
return "ERR Invalid OATH-HOTP OTP"
uid, otp, = get_oath_hotp_bits(params)
if not uid or not otp:
self.log_error("IN: %s, could not get UID/OTP ('%s'/'%s')" % (params, uid, otp))
return "ERR Invalid OATH-HOTP input"
if args.debug:
print "OATH-HOTP uid %s, OTP %s" % (uid, otp)
# Fetch counter value for `uid' from database
try:
db = ValOathDb(args.db_file)
entry = db.get(uid)
except Exception, e:
self.log_error("IN: %s, database error : '%s'" % (params, e))
return "ERR Internal error"
# Check for correct OATH-HOTP OTP
nonce = entry.data["nonce"].decode('hex')
aead = entry.data["aead"].decode('hex')
new_counter = pyhsm.oath_hotp.search_for_oath_code(hsm, entry.data["key_handle"], nonce, aead, \
entry.data["oath_c"], otp, args.look_ahead)
if args.debug:
print "OATH-HOTP %i..%i -> new C == %s" \
% (entry.data["oath_c"], entry.data["oath_c"] + args.look_ahead, new_counter)
if type(new_counter) != int:
# XXX increase 'throttling parameter' to make brute forcing harder/impossible
return "ERR Could not validate OATH-HOTP OTP"
try:
# Must successfully store new_counter before we return OK
if db.update_oath_hotp_c(entry, new_counter):
return "OK counter=%04x" % (new_counter)
else:
return "ERR replayed OATH-HOTP"
except Exception, e:
self.log_error("IN: %s, database error updating counter : %s" % (params, e))
return "ERR Internal error" |
def smooth_n_point(scalar_grid, n=5, passes=1):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
Some 2D scalar grid to be smoothed.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults
to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered 2D scalar grid.
Notes
-----
This function is a close replication of the GEMPAK function SM5S
and SM9S depending on the choice of the number of points to use
for smoothing. This function can be applied multiple times to
create a more smoothed field and will only smooth the interior
points, leaving the end points with their original values. If a
masked value or NaN values exists in the array, it will propagate
to any point that uses that particular grid point in the smoothing
calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain.
"""
if n == 9:
p = 0.25
q = 0.125
r = 0.0625
elif n == 5:
p = 0.5
q = 0.125
r = 0.0
else:
raise ValueError('The number of points to use in the smoothing '
'calculation must be either 5 or 9.')
smooth_grid = scalar_grid[:].copy()
for _i in range(passes):
smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1]
+ q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
+ smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
                                   + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
+ smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
return smooth_grid | Filter with normal distribution of weights.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
Some 2D scalar grid to be smoothed.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults
to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered 2D scalar grid.
Notes
-----
This function is a close replication of the GEMPAK function SM5S
and SM9S depending on the choice of the number of points to use
for smoothing. This function can be applied multiple times to
create a more smoothed field and will only smooth the interior
points, leaving the end points with their original values. If a
masked value or NaN values exists in the array, it will propagate
to any point that uses that particular grid point in the smoothing
calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain. | Below is the instruction that describes the task:
### Input:
Filter with normal distribution of weights.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
Some 2D scalar grid to be smoothed.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults
to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered 2D scalar grid.
Notes
-----
This function is a close replication of the GEMPAK function SM5S
and SM9S depending on the choice of the number of points to use
for smoothing. This function can be applied multiple times to
create a more smoothed field and will only smooth the interior
points, leaving the end points with their original values. If a
masked value or NaN values exists in the array, it will propagate
to any point that uses that particular grid point in the smoothing
calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain.
### Response:
def smooth_n_point(scalar_grid, n=5, passes=1):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
Some 2D scalar grid to be smoothed.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults
to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered 2D scalar grid.
Notes
-----
This function is a close replication of the GEMPAK function SM5S
and SM9S depending on the choice of the number of points to use
for smoothing. This function can be applied multiple times to
create a more smoothed field and will only smooth the interior
points, leaving the end points with their original values. If a
masked value or NaN values exists in the array, it will propagate
to any point that uses that particular grid point in the smoothing
calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain.
"""
if n == 9:
p = 0.25
q = 0.125
r = 0.0625
elif n == 5:
p = 0.5
q = 0.125
r = 0.0
else:
raise ValueError('The number of points to use in the smoothing '
'calculation must be either 5 or 9.')
smooth_grid = scalar_grid[:].copy()
for _i in range(passes):
smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1]
+ q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
+ smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
                                   + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
+ smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
return smooth_grid |
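A minimal usage sketch (a plain NumPy array with hypothetical values and no units attached) showing how the 9-point weights act on a single interior spike:

import numpy as np

grid = np.zeros((5, 5))
grid[2, 2] = 1.0
smoothed = smooth_n_point(grid, n=9, passes=1)
print(smoothed[2, 2])   # 0.25   -- the centre weight p for the 9-point filter
print(smoothed[1, 1])   # 0.0625 -- the corner weight r
print(smoothed[0, 0])   # 0.0    -- border points keep their original values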
def credits(self, **kwargs):
"""
Get the TV episode credits by combination of season and episode number.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get the TV episode credits by combination of season and episode number.
Returns:
            A dict representation of the JSON returned from the API. | Below is the instruction that describes the task:
### Input:
Get the TV episode credits by combination of season and episode number.
Returns:
        A dict representation of the JSON returned from the API.
### Response:
def credits(self, **kwargs):
"""
Get the TV episode credits by combination of season and episode number.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_series_id_season_number_episode_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response |
def _totals(self, query):
""" General method for returning total counts
"""
self.add_parameters(limit=1)
query = self._build_query(query)
self._retrieve_data(query)
self.url_params = None
# extract the 'total items' figure
        return int(self.request.headers["Total-Results"]) | General method for returning total counts | Below is the instruction that describes the task:
### Input:
General method for returning total counts
### Response:
def _totals(self, query):
""" General method for returning total counts
"""
self.add_parameters(limit=1)
query = self._build_query(query)
self._retrieve_data(query)
self.url_params = None
# extract the 'total items' figure
return int(self.request.headers["Total-Results"]) |
def manual_dir(self):
"""Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
raise AssertionError(
'Manual directory {} does not exist. Create it and download/extract '
'dataset artifacts in there.'.format(self._manual_dir))
        return self._manual_dir | Returns the directory containing the manually extracted data. | Below is the instruction that describes the task:
### Input:
Returns the directory containing the manually extracted data.
### Response:
def manual_dir(self):
"""Returns the directory containing the manually extracted data."""
if not tf.io.gfile.exists(self._manual_dir):
raise AssertionError(
'Manual directory {} does not exist. Create it and download/extract '
'dataset artifacts in there.'.format(self._manual_dir))
return self._manual_dir |
def is_mutating(status):
"""Determines if the statement is mutating based on the status."""
if not status:
return False
mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop',
'replace', 'truncate', 'load'])
    return status.split(None, 1)[0].lower() in mutating | Determines if the statement is mutating based on the status. | Below is the instruction that describes the task:
### Input:
Determines if the statement is mutating based on the status.
### Response:
def is_mutating(status):
"""Determines if the statement is mutating based on the status."""
if not status:
return False
mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop',
'replace', 'truncate', 'load'])
return status.split(None, 1)[0].lower() in mutating |
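A few illustrative calls (the status strings are hypothetical) showing the intended behaviour:

print(is_mutating("INSERT INTO t VALUES (1)"))      # True  -- first keyword is 'insert'
print(is_mutating("select * from t where id = 1"))  # False -- plain reads are not mutating
print(is_mutating(""))                              # False -- an empty status short-circuits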