Dataset columns (name, dtype, observed lengths/values):

| column | dtype | lengths / values |
|---|---|---|
| Unnamed: 0 | int64 | 0 to 10k |
| repository_name | string | lengths 7 to 54 |
| func_path_in_repository | string | lengths 5 to 223 |
| func_name | string | lengths 1 to 134 |
| whole_func_string | string | lengths 100 to 30.3k |
| language | string | 1 class (single value) |
| func_code_string | string | lengths 100 to 30.3k |
| func_code_tokens | string | lengths 138 to 33.2k |
| func_documentation_string | string | lengths 1 to 15k |
| func_documentation_tokens | string | lengths 5 to 5.14k |
| split_name | string | 1 class (single value) |
| func_code_url | string | lengths 91 to 315 |
1,100 | dpkp/kafka-python | kafka/consumer/group.py | KafkaConsumer.commit_async | python | train | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/group.py#L462-L494

def commit_async(self, offsets=None, callback=None):
    """Commit offsets to kafka asynchronously, optionally firing callback.

    This commits offsets only to Kafka. The offsets committed using this API
    will be used on the first fetch after every rebalance and also on
    startup. As such, if you need to store offsets in anything other than
    Kafka, this API should not be used. To avoid re-processing the last
    message read if a consumer is restarted, the committed offset should be
    the next message your application should consume, i.e.: last_offset + 1.

    This is an asynchronous call and will not block. Any errors encountered
    are either passed to the callback (if provided) or discarded.

    Arguments:
        offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
            to commit with the configured group_id. Defaults to currently
            consumed offsets for all subscribed partitions.
        callback (callable, optional): Called as callback(offsets, response)
            with response as either an Exception or an OffsetCommitResponse
            struct. This callback can be used to trigger custom actions when
            a commit request completes.

    Returns:
        kafka.future.Future
    """
    assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
    assert self.config['group_id'] is not None, 'Requires group_id'
    if offsets is None:
        offsets = self._subscription.all_consumed_offsets()
    log.debug("Committing offsets: %s", offsets)
    future = self._coordinator.commit_offsets_async(
        offsets, callback=callback)
    return future
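A possible usage sketch for the call above; the broker address, topic, and group id are placeholder values, and `process_message` stands in for hypothetical application logic:

```python
from kafka import KafkaConsumer
from kafka.structs import TopicPartition, OffsetAndMetadata

consumer = KafkaConsumer('my-topic', group_id='my-group',
                         bootstrap_servers='localhost:9092',
                         enable_auto_commit=False)

def on_commit(offsets, response):
    # response is an OffsetCommitResponse struct or an Exception
    print('commit finished:', offsets, response)

for message in consumer:
    process_message(message)  # hypothetical handler
    tp = TopicPartition(message.topic, message.partition)
    # commit the *next* offset to avoid re-processing after a restart
    consumer.commit_async({tp: OffsetAndMetadata(message.offset + 1, '')},
                          callback=on_commit)
```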
"""Commit offsets to kafka asynchronously, optionally firing callback.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
This is an asynchronous call and will not block. Any errors encountered
are either passed to the callback (if provided) or discarded.
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
callback (callable, optional): Called as callback(offsets, response)
with response as either an Exception or an OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future
"""
assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
assert self.config['group_id'] is not None, 'Requires group_id'
if offsets is None:
offsets = self._subscription.all_consumed_offsets()
log.debug("Committing offsets: %s", offsets)
future = self._coordinator.commit_offsets_async(
offsets, callback=callback)
return future | ['def', 'commit_async', '(', 'self', ',', 'offsets', '=', 'None', ',', 'callback', '=', 'None', ')', ':', 'assert', 'self', '.', 'config', '[', "'api_version'", ']', '>=', '(', '0', ',', '8', ',', '1', ')', ',', "'Requires >= Kafka 0.8.1'", 'assert', 'self', '.', 'config', '[', "'group_id'", ']', 'is', 'not', 'None', ',', "'Requires group_id'", 'if', 'offsets', 'is', 'None', ':', 'offsets', '=', 'self', '.', '_subscription', '.', 'all_consumed_offsets', '(', ')', 'log', '.', 'debug', '(', '"Committing offsets: %s"', ',', 'offsets', ')', 'future', '=', 'self', '.', '_coordinator', '.', 'commit_offsets_async', '(', 'offsets', ',', 'callback', '=', 'callback', ')', 'return', 'future'] | Commit offsets to kafka asynchronously, optionally firing callback.
This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.
This is an asynchronous call and will not block. Any errors encountered
are either passed to the callback (if provided) or discarded.
Arguments:
offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
to commit with the configured group_id. Defaults to currently
consumed offsets for all subscribed partitions.
callback (callable, optional): Called as callback(offsets, response)
with response as either an Exception or an OffsetCommitResponse
struct. This callback can be used to trigger custom actions when
a commit request completes.
Returns:
kafka.future.Future | ['Commit', 'offsets', 'to', 'kafka', 'asynchronously', 'optionally', 'firing', 'callback', '.'] | train | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/group.py#L462-L494 |
1,101 | fboender/ansible-cmdb | lib/mako/_ast_util.py | copy_location | python | train | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/_ast_util.py#L125-L134

def copy_location(new_node, old_node):
    """
    Copy the source location hint (`lineno` and `col_offset`) from the
    old to the new node if possible and return the new one.
    """
    for attr in 'lineno', 'col_offset':
        if attr in old_node._attributes and attr in new_node._attributes \
                and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
"""
Copy the source location hint (`lineno` and `col_offset`) from the
old to the new node if possible and return the new one.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node | ['def', 'copy_location', '(', 'new_node', ',', 'old_node', ')', ':', 'for', 'attr', 'in', "'lineno'", ',', "'col_offset'", ':', 'if', 'attr', 'in', 'old_node', '.', '_attributes', 'and', 'attr', 'in', 'new_node', '.', '_attributes', 'and', 'hasattr', '(', 'old_node', ',', 'attr', ')', ':', 'setattr', '(', 'new_node', ',', 'attr', ',', 'getattr', '(', 'old_node', ',', 'attr', ')', ')', 'return', 'new_node'] | Copy the source location hint (`lineno` and `col_offset`) from the
old to the new node if possible and return the new one. | ['Copy', 'the', 'source', 'location', 'hint', '(', 'lineno', 'and', 'col_offset', ')', 'from', 'the', 'old', 'to', 'the', 'new', 'node', 'if', 'possible', 'and', 'return', 'the', 'new', 'one', '.'] | train | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/_ast_util.py#L125-L134 |
1,102 | watson-developer-cloud/python-sdk | ibm_watson/compare_comply_v1.py | ColumnHeaderTextsNormalized._to_dict | python | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L1740-L1746

def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self,
               'text_normalized') and self.text_normalized is not None:
        _dict['text_normalized'] = self.text_normalized
    return _dict
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'text_normalized') and self.text_normalized is not None:
_dict['text_normalized'] = self.text_normalized
return _dict | ['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'text_normalized'", ')', 'and', 'self', '.', 'text_normalized', 'is', 'not', 'None', ':', '_dict', '[', "'text_normalized'", ']', '=', 'self', '.', 'text_normalized', 'return', '_dict'] | Return a json dictionary representing this model. | ['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L1740-L1746 |
1,103 | cloudmesh/cloudmesh-common | cloudmesh/common/FlatDict.py | FlatDict2.object_to_dict | python | train | https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/FlatDict.py#L156-L183

def object_to_dict(cls, obj):
    """
    This function converts Objects into Dictionary
    """
    dict_obj = dict()
    if obj is not None:
        if type(obj) == list:
            dict_list = []
            for inst in obj:
                dict_list.append(cls.object_to_dict(inst))
            dict_obj["list"] = dict_list
        elif not cls.is_primitive(obj):
            for key in obj.__dict__:
                # is an object
                if type(obj.__dict__[key]) == list:
                    dict_list = []
                    for inst in obj.__dict__[key]:
                        dict_list.append(cls.object_to_dict(inst))
                    dict_obj[key] = dict_list
                elif not cls.is_primitive(obj.__dict__[key]):
                    temp_dict = cls.object_to_dict(obj.__dict__[key])
                    dict_obj[key] = temp_dict
                else:
                    dict_obj[key] = obj.__dict__[key]
        elif cls.is_primitive(obj):
            return obj
    return dict_obj
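A small illustration of the conversion, assuming `FlatDict2` is importable from the path shown above and that `object_to_dict` is exposed as a classmethod (the `cls` parameter suggests it); `Point` is a made-up class used only for the example:

```python
from cloudmesh.common.FlatDict import FlatDict2

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

FlatDict2.object_to_dict(Point(1, 2))    # {'x': 1, 'y': 2}
FlatDict2.object_to_dict([Point(1, 2)])  # {'list': [{'x': 1, 'y': 2}]}
FlatDict2.object_to_dict(42)             # 42 (primitives pass through)
```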
"""
This function converts Objects into Dictionary
"""
dict_obj = dict()
if obj is not None:
if type(obj) == list:
dict_list = []
for inst in obj:
dict_list.append(cls.object_to_dict(inst))
dict_obj["list"] = dict_list
elif not cls.is_primitive(obj):
for key in obj.__dict__:
# is an object
if type(obj.__dict__[key]) == list:
dict_list = []
for inst in obj.__dict__[key]:
dict_list.append(cls.object_to_dict(inst))
dict_obj[key] = dict_list
elif not cls.is_primitive(obj.__dict__[key]):
temp_dict = cls.object_to_dict(obj.__dict__[key])
dict_obj[key] = temp_dict
else:
dict_obj[key] = obj.__dict__[key]
elif cls.is_primitive(obj):
return obj
return dict_obj | ['def', 'object_to_dict', '(', 'cls', ',', 'obj', ')', ':', 'dict_obj', '=', 'dict', '(', ')', 'if', 'obj', 'is', 'not', 'None', ':', 'if', 'type', '(', 'obj', ')', '==', 'list', ':', 'dict_list', '=', '[', ']', 'for', 'inst', 'in', 'obj', ':', 'dict_list', '.', 'append', '(', 'cls', '.', 'object_to_dict', '(', 'inst', ')', ')', 'dict_obj', '[', '"list"', ']', '=', 'dict_list', 'elif', 'not', 'cls', '.', 'is_primitive', '(', 'obj', ')', ':', 'for', 'key', 'in', 'obj', '.', '__dict__', ':', '# is an object', 'if', 'type', '(', 'obj', '.', '__dict__', '[', 'key', ']', ')', '==', 'list', ':', 'dict_list', '=', '[', ']', 'for', 'inst', 'in', 'obj', '.', '__dict__', '[', 'key', ']', ':', 'dict_list', '.', 'append', '(', 'cls', '.', 'object_to_dict', '(', 'inst', ')', ')', 'dict_obj', '[', 'key', ']', '=', 'dict_list', 'elif', 'not', 'cls', '.', 'is_primitive', '(', 'obj', '.', '__dict__', '[', 'key', ']', ')', ':', 'temp_dict', '=', 'cls', '.', 'object_to_dict', '(', 'obj', '.', '__dict__', '[', 'key', ']', ')', 'dict_obj', '[', 'key', ']', '=', 'temp_dict', 'else', ':', 'dict_obj', '[', 'key', ']', '=', 'obj', '.', '__dict__', '[', 'key', ']', 'elif', 'cls', '.', 'is_primitive', '(', 'obj', ')', ':', 'return', 'obj', 'return', 'dict_obj'] | This function converts Objects into Dictionary | ['This', 'function', 'converts', 'Objects', 'into', 'Dictionary'] | train | https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/FlatDict.py#L156-L183 |
1,104 | log2timeline/plaso | plaso/analysis/windows_services.py | WindowsServiceCollection.AddService | python | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/windows_services.py#L171-L186

def AddService(self, new_service):
    """Add a new service to the list of ones we know about.

    Args:
      new_service (WindowsService): the service to add.
    """
    for service in self._services:
        if new_service == service:
            # If this service is the same as one we already know about, we
            # just want to add where it came from.
            service.sources.append(new_service.sources[0])
            return
    # We only add a new object to our list if we don't have
    # an identical one already.
    self._services.append(new_service)
"""Add a new service to the list of ones we know about.
Args:
new_service (WindowsService): the service to add.
"""
for service in self._services:
if new_service == service:
# If this service is the same as one we already know about, we
# just want to add where it came from.
service.sources.append(new_service.sources[0])
return
# We only add a new object to our list if we don't have
# an identical one already.
self._services.append(new_service) | ['def', 'AddService', '(', 'self', ',', 'new_service', ')', ':', 'for', 'service', 'in', 'self', '.', '_services', ':', 'if', 'new_service', '==', 'service', ':', '# If this service is the same as one we already know about, we', '# just want to add where it came from.', 'service', '.', 'sources', '.', 'append', '(', 'new_service', '.', 'sources', '[', '0', ']', ')', 'return', "# We only add a new object to our list if we don't have", '# an identical one already.', 'self', '.', '_services', '.', 'append', '(', 'new_service', ')'] | Add a new service to the list of ones we know about.
Args:
new_service (WindowsService): the service to add. | ['Add', 'a', 'new', 'service', 'to', 'the', 'list', 'of', 'ones', 'we', 'know', 'about', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/windows_services.py#L171-L186 |
1,105 | inveniosoftware/invenio-access | invenio_access/cli.py | process_deny_action | python | train | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L171-L175

def process_deny_action(processors, action, argument):
    """Process deny action."""
    for processor in processors:
        processor(action, argument)
    db.session.commit()
"""Process deny action."""
for processor in processors:
processor(action, argument)
db.session.commit() | ['def', 'process_deny_action', '(', 'processors', ',', 'action', ',', 'argument', ')', ':', 'for', 'processor', 'in', 'processors', ':', 'processor', '(', 'action', ',', 'argument', ')', 'db', '.', 'session', '.', 'commit', '(', ')'] | Process deny action. | ['Process', 'deny', 'action', '.'] | train | https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L171-L175 |
1,106 | insightindustry/validator-collection | validator_collection/checkers.py | is_on_filesystem | python | train | https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L1068-L1088

def is_on_filesystem(value, **kwargs):
    """Indicate whether ``value`` is a file or directory that exists on the local
    filesystem.

    :param value: The value to evaluate.

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
      keyword parameters passed to the underlying validator
    """
    try:
        value = validators.path_exists(value, **kwargs)
    except SyntaxError as error:
        raise error
    except Exception:
        return False
    return True
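A quick sketch of the checker in use; the paths are placeholders and the results depend on the local machine:

```python
from validator_collection import checkers

checkers.is_on_filesystem('/etc/hosts')      # True on most Unix systems
checkers.is_on_filesystem('/no/such/path')   # False
```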
"""Indicate whether ``value`` is a file or directory that exists on the local
filesystem.
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.path_exists(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | ['def', 'is_on_filesystem', '(', 'value', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'value', '=', 'validators', '.', 'path_exists', '(', 'value', ',', '*', '*', 'kwargs', ')', 'except', 'SyntaxError', 'as', 'error', ':', 'raise', 'error', 'except', 'Exception', ':', 'return', 'False', 'return', 'True'] | Indicate whether ``value`` is a file or directory that exists on the local
filesystem.
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator | ['Indicate', 'whether', 'value', 'is', 'a', 'file', 'or', 'directory', 'that', 'exists', 'on', 'the', 'local', 'filesystem', '.'] | train | https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L1068-L1088 |
1,107 | Tanganelli/CoAPthon3 | coapthon/messages/message.py | Message.version | python | train | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/messages/message.py#L42-L51

def version(self, v):
    """
    Sets the CoAP version

    :param v: the version
    :raise AttributeError: if value is not 1
    """
    if not isinstance(v, int) or v != 1:
        raise AttributeError
    self._version = v
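A hedged sketch of the setter above; it assumes `Message()` can be constructed without arguments:

```python
from coapthon.messages.message import Message

m = Message()
m.version = 1       # only the integer 1 is accepted
try:
    m.version = 2   # anything else raises AttributeError
except AttributeError:
    print("rejected")
```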
"""
Sets the CoAP version
:param v: the version
:raise AttributeError: if value is not 1
"""
if not isinstance(v, int) or v != 1:
raise AttributeError
self._version = v | ['def', 'version', '(', 'self', ',', 'v', ')', ':', 'if', 'not', 'isinstance', '(', 'v', ',', 'int', ')', 'or', 'v', '!=', '1', ':', 'raise', 'AttributeError', 'self', '.', '_version', '=', 'v'] | Sets the CoAP version
:param v: the version
:raise AttributeError: if value is not 1 | ['Sets', 'the', 'CoAP', 'version'] | train | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/messages/message.py#L42-L51 |
1,108 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py | brocade_system_monitor.system_monitor_sfp_alert_state | python | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py#L194-L205

def system_monitor_sfp_alert_state(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
    sfp = ET.SubElement(system_monitor, "sfp")
    alert = ET.SubElement(sfp, "alert")
    state = ET.SubElement(alert, "state")
    state.text = kwargs.pop('state')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
"""Auto Generated Code
"""
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
sfp = ET.SubElement(system_monitor, "sfp")
alert = ET.SubElement(sfp, "alert")
state = ET.SubElement(alert, "state")
state.text = kwargs.pop('state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'system_monitor_sfp_alert_state', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'system_monitor', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"system-monitor"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-system-monitor"', ')', 'sfp', '=', 'ET', '.', 'SubElement', '(', 'system_monitor', ',', '"sfp"', ')', 'alert', '=', 'ET', '.', 'SubElement', '(', 'sfp', ',', '"alert"', ')', 'state', '=', 'ET', '.', 'SubElement', '(', 'alert', ',', '"state"', ')', 'state', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'state'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py#L194-L205 |
1,109 | Laufire/ec | ec/modules/core.py | setActiveModule | python | train | https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/modules/core.py#L90-L100

def setActiveModule(Module):
    r"""Helps with collecting the members of the imported modules.
    """
    module_name = Module.__name__
    if module_name not in ModuleMembers:
        ModuleMembers[module_name] = []
        ModulesQ.append(module_name)
        Group(Module, {})  # brand the module with __ec_member__
    state.ActiveModuleMemberQ = ModuleMembers[module_name]
r"""Helps with collecting the members of the imported modules.
"""
module_name = Module.__name__
if module_name not in ModuleMembers:
ModuleMembers[module_name] = []
ModulesQ.append(module_name)
Group(Module, {}) # brand the module with __ec_member__
state.ActiveModuleMemberQ = ModuleMembers[module_name] | ['def', 'setActiveModule', '(', 'Module', ')', ':', 'module_name', '=', 'Module', '.', '__name__', 'if', 'module_name', 'not', 'in', 'ModuleMembers', ':', 'ModuleMembers', '[', 'module_name', ']', '=', '[', ']', 'ModulesQ', '.', 'append', '(', 'module_name', ')', 'Group', '(', 'Module', ',', '{', '}', ')', '# brand the module with __ec_member__\r', 'state', '.', 'ActiveModuleMemberQ', '=', 'ModuleMembers', '[', 'module_name', ']'] | r"""Helps with collecting the members of the imported modules. | ['r', 'Helps', 'with', 'collecting', 'the', 'members', 'of', 'the', 'imported', 'modules', '.'] | train | https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/modules/core.py#L90-L100 |
1,110 | tornadoweb/tornado | tornado/locale.py | load_gettext_translations | python | train | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/locale.py#L178-L218

def load_gettext_translations(directory: str, domain: str) -> None:
    """Loads translations from `gettext`'s locale tree

    Locale tree is similar to system's ``/usr/share/locale``, like::

        {directory}/{lang}/LC_MESSAGES/{domain}.mo

    Three steps are required to have your app translated:

    1. Generate POT translation file::

        xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc

    2. Merge against existing POT file::

        msgmerge old.po mydomain.po > new.po

    3. Compile::

        msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
    """
    global _translations
    global _supported_locales
    global _use_gettext
    _translations = {}
    for lang in os.listdir(directory):
        if lang.startswith("."):
            continue  # skip .svn, etc
        if os.path.isfile(os.path.join(directory, lang)):
            continue
        try:
            os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
            _translations[lang] = gettext.translation(
                domain, directory, languages=[lang]
            )
        except Exception as e:
            gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
            continue
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    _use_gettext = True
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
"""Loads translations from `gettext`'s locale tree
Locale tree is similar to system's ``/usr/share/locale``, like::
{directory}/{lang}/LC_MESSAGES/{domain}.mo
Three steps are required to have your app translated:
1. Generate POT translation file::
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
2. Merge against existing POT file::
msgmerge old.po mydomain.po > new.po
3. Compile::
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
"""
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
if lang.startswith("."):
continue # skip .svn, etc
if os.path.isfile(os.path.join(directory, lang)):
continue
try:
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
_translations[lang] = gettext.translation(
domain, directory, languages=[lang]
)
except Exception as e:
gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
_use_gettext = True
gen_log.debug("Supported locales: %s", sorted(_supported_locales)) | ['def', 'load_gettext_translations', '(', 'directory', ':', 'str', ',', 'domain', ':', 'str', ')', '->', 'None', ':', 'global', '_translations', 'global', '_supported_locales', 'global', '_use_gettext', '_translations', '=', '{', '}', 'for', 'lang', 'in', 'os', '.', 'listdir', '(', 'directory', ')', ':', 'if', 'lang', '.', 'startswith', '(', '"."', ')', ':', 'continue', '# skip .svn, etc', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'os', '.', 'path', '.', 'join', '(', 'directory', ',', 'lang', ')', ')', ':', 'continue', 'try', ':', 'os', '.', 'stat', '(', 'os', '.', 'path', '.', 'join', '(', 'directory', ',', 'lang', ',', '"LC_MESSAGES"', ',', 'domain', '+', '".mo"', ')', ')', '_translations', '[', 'lang', ']', '=', 'gettext', '.', 'translation', '(', 'domain', ',', 'directory', ',', 'languages', '=', '[', 'lang', ']', ')', 'except', 'Exception', 'as', 'e', ':', 'gen_log', '.', 'error', '(', '"Cannot load translation for \'%s\': %s"', ',', 'lang', ',', 'str', '(', 'e', ')', ')', 'continue', '_supported_locales', '=', 'frozenset', '(', 'list', '(', '_translations', '.', 'keys', '(', ')', ')', '+', '[', '_default_locale', ']', ')', '_use_gettext', '=', 'True', 'gen_log', '.', 'debug', '(', '"Supported locales: %s"', ',', 'sorted', '(', '_supported_locales', ')', ')'] | Loads translations from `gettext`'s locale tree
Locale tree is similar to system's ``/usr/share/locale``, like::
{directory}/{lang}/LC_MESSAGES/{domain}.mo
Three steps are required to have your app translated:
1. Generate POT translation file::
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
2. Merge against existing POT file::
msgmerge old.po mydomain.po > new.po
3. Compile::
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo | ['Loads', 'translations', 'from', 'gettext', 's', 'locale', 'tree'] | train | https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/locale.py#L178-L218 |
1,111 | datamachine/twx.botapi | twx/botapi/botapi.py | get_chat | python | train | https://github.com/datamachine/twx.botapi/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twx/botapi/botapi.py#L3467-L3487

def get_chat(chat_id, **kwargs):
    """
    Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username
    of a user, group or channel, etc.).

    :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
    :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`

    :type chat_id: int or str

    :returns: Returns a Chat object on success.
    :rtype: Chat
    """
    # required args
    params = dict(
        chat_id=chat_id,
    )
    return TelegramBotRPCRequest('getChat', params=params, on_result=lambda result: Chat.from_result(result), **kwargs)
"""
Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username
of a user, group or channel, etc.).
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:returns: Returns a Chat object on success.
:rtype: Chat
"""
# required args
params = dict(
chat_id=chat_id,
)
return TelegramBotRPCRequest('getChat', params=params, on_result=lambda result: Chat.from_result(result), **kwargs) | ['def', 'get_chat', '(', 'chat_id', ',', '*', '*', 'kwargs', ')', ':', '# required args', 'params', '=', 'dict', '(', 'chat_id', '=', 'chat_id', ',', ')', 'return', 'TelegramBotRPCRequest', '(', "'getChat'", ',', 'params', '=', 'params', ',', 'on_result', '=', 'lambda', 'result', ':', 'Chat', '.', 'from_result', '(', 'result', ')', ',', '*', '*', 'kwargs', ')'] | Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username
of a user, group or channel, etc.).
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:returns: Returns a Chat object on success.
:rtype: Chat | ['Use', 'this', 'method', 'to', 'get', 'up', 'to', 'date', 'information', 'about', 'the', 'chat', '(', 'current', 'name', 'of', 'the', 'user', 'for', 'one', '-', 'on', '-', 'one', 'conversations', 'current', 'username', 'of', 'a', 'user', 'group', 'or', 'channel', 'etc', '.', ')', '.'] | train | https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L3467-L3487 |
1,112 | hyperledger/sawtooth-core | validator/sawtooth_validator/execution/processor_manager.py | ProcessorManager.get_next_of_type | python | train | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/processor_manager.py#L59-L78

def get_next_of_type(self, processor_type):
    """Get the next available processor of a particular type and increment
    its occupancy counter.

    Args:
        processor_type (ProcessorType): The processor type associated with
            a zmq identity.

    Returns:
        (Processor): Information about the transaction processor
    """
    with self._condition:
        if processor_type not in self:
            self.wait_for_registration(processor_type)
        try:
            processor = self[processor_type].next_processor()
        except NoProcessorVacancyError:
            processor = self.wait_for_vacancy(processor_type)
        processor.inc_occupancy()
        return processor
"""Get the next available processor of a particular type and increment
its occupancy counter.
Args:
processor_type (ProcessorType): The processor type associated with
a zmq identity.
Returns:
(Processor): Information about the transaction processor
"""
with self._condition:
if processor_type not in self:
self.wait_for_registration(processor_type)
try:
processor = self[processor_type].next_processor()
except NoProcessorVacancyError:
processor = self.wait_for_vacancy(processor_type)
processor.inc_occupancy()
return processor | ['def', 'get_next_of_type', '(', 'self', ',', 'processor_type', ')', ':', 'with', 'self', '.', '_condition', ':', 'if', 'processor_type', 'not', 'in', 'self', ':', 'self', '.', 'wait_for_registration', '(', 'processor_type', ')', 'try', ':', 'processor', '=', 'self', '[', 'processor_type', ']', '.', 'next_processor', '(', ')', 'except', 'NoProcessorVacancyError', ':', 'processor', '=', 'self', '.', 'wait_for_vacancy', '(', 'processor_type', ')', 'processor', '.', 'inc_occupancy', '(', ')', 'return', 'processor'] | Get the next available processor of a particular type and increment
its occupancy counter.
Args:
processor_type (ProcessorType): The processor type associated with
a zmq identity.
Returns:
(Processor): Information about the transaction processor | ['Get', 'the', 'next', 'available', 'processor', 'of', 'a', 'particular', 'type', 'and', 'increment', 'its', 'occupancy', 'counter', '.'] | train | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/processor_manager.py#L59-L78 |
1,113 | twilio/twilio-python | twilio/http/validation_client.py | ValidationClient._build_validation_payload | python | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/http/validation_client.py#L74-L91

def _build_validation_payload(self, request):
    """
    Extract relevant information from request to build a ClientValidationJWT

    :param PreparedRequest request: request we will extract information from.
    :return: ValidationPayload
    """
    parsed = urlparse(request.url)
    path = parsed.path
    query_string = parsed.query or ''
    return ValidationPayload(
        method=request.method,
        path=path,
        query_string=query_string,
        all_headers=request.headers,
        signed_headers=ValidationClient.__SIGNED_HEADERS,
        body=request.body or ''
    )
"""
Extract relevant information from request to build a ClientValidationJWT
:param PreparedRequest request: request we will extract information from.
:return: ValidationPayload
"""
parsed = urlparse(request.url)
path = parsed.path
query_string = parsed.query or ''
return ValidationPayload(
method=request.method,
path=path,
query_string=query_string,
all_headers=request.headers,
signed_headers=ValidationClient.__SIGNED_HEADERS,
body=request.body or ''
) | ['def', '_build_validation_payload', '(', 'self', ',', 'request', ')', ':', 'parsed', '=', 'urlparse', '(', 'request', '.', 'url', ')', 'path', '=', 'parsed', '.', 'path', 'query_string', '=', 'parsed', '.', 'query', 'or', "''", 'return', 'ValidationPayload', '(', 'method', '=', 'request', '.', 'method', ',', 'path', '=', 'path', ',', 'query_string', '=', 'query_string', ',', 'all_headers', '=', 'request', '.', 'headers', ',', 'signed_headers', '=', 'ValidationClient', '.', '__SIGNED_HEADERS', ',', 'body', '=', 'request', '.', 'body', 'or', "''", ')'] | Extract relevant information from request to build a ClientValidationJWT
:param PreparedRequest request: request we will extract information from.
:return: ValidationPayload | ['Extract', 'relevant', 'information', 'from', 'request', 'to', 'build', 'a', 'ClientValidationJWT', ':', 'param', 'PreparedRequest', 'request', ':', 'request', 'we', 'will', 'extract', 'information', 'from', '.', ':', 'return', ':', 'ValidationPayload'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/http/validation_client.py#L74-L91 |
1,114 | bitcraze/crazyflie-lib-python | cflib/crazyflie/__init__.py | Crazyflie._mems_updated_cb | python | train | https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/__init__.py#L173-L176

def _mems_updated_cb(self):
    """Called when the memories have been identified"""
    logger.info('Memories finished updating')
    self.param.refresh_toc(self._param_toc_updated_cb, self._toc_cache)
"""Called when the memories have been identified"""
logger.info('Memories finished updating')
self.param.refresh_toc(self._param_toc_updated_cb, self._toc_cache) | ['def', '_mems_updated_cb', '(', 'self', ')', ':', 'logger', '.', 'info', '(', "'Memories finished updating'", ')', 'self', '.', 'param', '.', 'refresh_toc', '(', 'self', '.', '_param_toc_updated_cb', ',', 'self', '.', '_toc_cache', ')'] | Called when the memories have been identified | ['Called', 'when', 'the', 'memories', 'have', 'been', 'identified'] | train | https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/__init__.py#L173-L176 |
1,115 | bcbio/bcbio-nextgen | bcbio/qc/viral.py | run | python | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/viral.py#L15-L57

def run(bam_file, data, out_dir):
    """Run viral QC analysis:
       1. Extract the unmapped reads
       2. BWA-MEM to the viral sequences from GDC database https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files
       3. Report viruses that are in more than 50% covered by at least 5x
    """
    source_link = 'https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files'
    viral_target = "gdc-viral"
    out = {}
    if vcfutils.get_paired_phenotype(data):
        viral_refs = [x for x in dd.get_viral_files(data) if os.path.basename(x) == "%s.fa" % viral_target]
        if viral_refs and utils.file_exists(viral_refs[0]):
            viral_ref = viral_refs[0]
            viral_bam = os.path.join(utils.safe_makedir(out_dir),
                                     "%s-%s.bam" % (dd.get_sample_name(data),
                                                    utils.splitext_plus(os.path.basename(viral_ref))[0]))
            out_file = "%s-completeness.txt" % utils.splitext_plus(viral_bam)[0]
            cores = dd.get_num_cores(data)
            if not utils.file_uptodate(out_file, bam_file):
                if not utils.file_uptodate(viral_bam, bam_file):
                    with file_transaction(data, viral_bam) as tx_out_file:
                        tmpfile = "%s-tmp" % utils.splitext_plus(tx_out_file)[0]
                        cmd = ("samtools view -u -f 4 {bam_file} | "
                               "bamtofastq collate=0 | "
                               "bwa mem -t {cores} {viral_ref} - | "
                               "bamsort tmpfile={tmpfile} inputthreads={cores} outputthreads={cores} "
                               "inputformat=sam index=1 indexfilename={tx_out_file}.bai O={tx_out_file}")
                        do.run(cmd.format(**locals()), "Align unmapped reads to viral genome")
                with file_transaction(data, out_file) as tx_out_file:
                    sample_name = dd.get_sample_name(data)
                    mosdepth_prefix = os.path.splitext(viral_bam)[0]
                    cmd = ("mosdepth -t {cores} {mosdepth_prefix} {viral_bam} -n --thresholds 1,5,25 --by "
                           "<(awk 'BEGIN {{FS=\"\\t\"}}; {{print $1 FS \"0\" FS $2}}' {viral_ref}.fai) && "
                           "echo '## Viral sequences (from {source_link}) found in unmapped reads' > {tx_out_file} &&"
                           "echo '## Sample: {sample_name}' >> {tx_out_file} && "
                           "echo '#virus\tsize\tdepth\t1x\t5x\t25x' >> {tx_out_file} && "
                           "paste <(zcat {mosdepth_prefix}.regions.bed.gz) <(zgrep -v ^# {mosdepth_prefix}.thresholds.bed.gz) | "
                           "awk 'BEGIN {{FS=\"\\t\"}} {{ print $1 FS $3 FS $4 FS $10/$3 FS $11/$3 FS $12/$3}}' | "
                           "sort -n -r -k 5,5 >> {tx_out_file}")
                    do.run(cmd.format(**locals()), "Analyse coverage of viral genomes")
            out["base"] = out_file
            out["secondary"] = []
    return out
"""Run viral QC analysis:
1. Extract the unmapped reads
2. BWA-MEM to the viral sequences from GDC database https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files
3. Report viruses that are in more than 50% covered by at least 5x
"""
source_link = 'https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files'
viral_target = "gdc-viral"
out = {}
if vcfutils.get_paired_phenotype(data):
viral_refs = [x for x in dd.get_viral_files(data) if os.path.basename(x) == "%s.fa" % viral_target]
if viral_refs and utils.file_exists(viral_refs[0]):
viral_ref = viral_refs[0]
viral_bam = os.path.join(utils.safe_makedir(out_dir),
"%s-%s.bam" % (dd.get_sample_name(data),
utils.splitext_plus(os.path.basename(viral_ref))[0]))
out_file = "%s-completeness.txt" % utils.splitext_plus(viral_bam)[0]
cores = dd.get_num_cores(data)
if not utils.file_uptodate(out_file, bam_file):
if not utils.file_uptodate(viral_bam, bam_file):
with file_transaction(data, viral_bam) as tx_out_file:
tmpfile = "%s-tmp" % utils.splitext_plus(tx_out_file)[0]
cmd = ("samtools view -u -f 4 {bam_file} | "
"bamtofastq collate=0 | "
"bwa mem -t {cores} {viral_ref} - | "
"bamsort tmpfile={tmpfile} inputthreads={cores} outputthreads={cores} "
"inputformat=sam index=1 indexfilename={tx_out_file}.bai O={tx_out_file}")
do.run(cmd.format(**locals()), "Align unmapped reads to viral genome")
with file_transaction(data, out_file) as tx_out_file:
sample_name = dd.get_sample_name(data)
mosdepth_prefix = os.path.splitext(viral_bam)[0]
cmd = ("mosdepth -t {cores} {mosdepth_prefix} {viral_bam} -n --thresholds 1,5,25 --by "
"<(awk 'BEGIN {{FS=\"\\t\"}}; {{print $1 FS \"0\" FS $2}}' {viral_ref}.fai) && "
"echo '## Viral sequences (from {source_link}) found in unmapped reads' > {tx_out_file} &&"
"echo '## Sample: {sample_name}' >> {tx_out_file} && "
"echo '#virus\tsize\tdepth\t1x\t5x\t25x' >> {tx_out_file} && "
"paste <(zcat {mosdepth_prefix}.regions.bed.gz) <(zgrep -v ^# {mosdepth_prefix}.thresholds.bed.gz) | "
"awk 'BEGIN {{FS=\"\\t\"}} {{ print $1 FS $3 FS $4 FS $10/$3 FS $11/$3 FS $12/$3}}' | "
"sort -n -r -k 5,5 >> {tx_out_file}")
do.run(cmd.format(**locals()), "Analyse coverage of viral genomes")
out["base"] = out_file
out["secondary"] = []
return out | ['def', 'run', '(', 'bam_file', ',', 'data', ',', 'out_dir', ')', ':', 'source_link', '=', "'https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files'", 'viral_target', '=', '"gdc-viral"', 'out', '=', '{', '}', 'if', 'vcfutils', '.', 'get_paired_phenotype', '(', 'data', ')', ':', 'viral_refs', '=', '[', 'x', 'for', 'x', 'in', 'dd', '.', 'get_viral_files', '(', 'data', ')', 'if', 'os', '.', 'path', '.', 'basename', '(', 'x', ')', '==', '"%s.fa"', '%', 'viral_target', ']', 'if', 'viral_refs', 'and', 'utils', '.', 'file_exists', '(', 'viral_refs', '[', '0', ']', ')', ':', 'viral_ref', '=', 'viral_refs', '[', '0', ']', 'viral_bam', '=', 'os', '.', 'path', '.', 'join', '(', 'utils', '.', 'safe_makedir', '(', 'out_dir', ')', ',', '"%s-%s.bam"', '%', '(', 'dd', '.', 'get_sample_name', '(', 'data', ')', ',', 'utils', '.', 'splitext_plus', '(', 'os', '.', 'path', '.', 'basename', '(', 'viral_ref', ')', ')', '[', '0', ']', ')', ')', 'out_file', '=', '"%s-completeness.txt"', '%', 'utils', '.', 'splitext_plus', '(', 'viral_bam', ')', '[', '0', ']', 'cores', '=', 'dd', '.', 'get_num_cores', '(', 'data', ')', 'if', 'not', 'utils', '.', 'file_uptodate', '(', 'out_file', ',', 'bam_file', ')', ':', 'if', 'not', 'utils', '.', 'file_uptodate', '(', 'viral_bam', ',', 'bam_file', ')', ':', 'with', 'file_transaction', '(', 'data', ',', 'viral_bam', ')', 'as', 'tx_out_file', ':', 'tmpfile', '=', '"%s-tmp"', '%', 'utils', '.', 'splitext_plus', '(', 'tx_out_file', ')', '[', '0', ']', 'cmd', '=', '(', '"samtools view -u -f 4 {bam_file} | "', '"bamtofastq collate=0 | "', '"bwa mem -t {cores} {viral_ref} - | "', '"bamsort tmpfile={tmpfile} inputthreads={cores} outputthreads={cores} "', '"inputformat=sam index=1 indexfilename={tx_out_file}.bai O={tx_out_file}"', ')', 'do', '.', 'run', '(', 'cmd', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', ',', '"Align unmapped reads to viral genome"', ')', 'with', 'file_transaction', '(', 'data', ',', 'out_file', ')', 'as', 'tx_out_file', ':', 'sample_name', '=', 'dd', '.', 'get_sample_name', '(', 'data', ')', 'mosdepth_prefix', '=', 'os', '.', 'path', '.', 'splitext', '(', 'viral_bam', ')', '[', '0', ']', 'cmd', '=', '(', '"mosdepth -t {cores} {mosdepth_prefix} {viral_bam} -n --thresholds 1,5,25 --by "', '"<(awk \'BEGIN {{FS=\\"\\\\t\\"}}; {{print $1 FS \\"0\\" FS $2}}\' {viral_ref}.fai) && "', '"echo \'## Viral sequences (from {source_link}) found in unmapped reads\' > {tx_out_file} &&"', '"echo \'## Sample: {sample_name}\' >> {tx_out_file} && "', '"echo \'#virus\\tsize\\tdepth\\t1x\\t5x\\t25x\' >> {tx_out_file} && "', '"paste <(zcat {mosdepth_prefix}.regions.bed.gz) <(zgrep -v ^# {mosdepth_prefix}.thresholds.bed.gz) | "', '"awk \'BEGIN {{FS=\\"\\\\t\\"}} {{ print $1 FS $3 FS $4 FS $10/$3 FS $11/$3 FS $12/$3}}\' | "', '"sort -n -r -k 5,5 >> {tx_out_file}"', ')', 'do', '.', 'run', '(', 'cmd', '.', 'format', '(', '*', '*', 'locals', '(', ')', ')', ',', '"Analyse coverage of viral genomes"', ')', 'out', '[', '"base"', ']', '=', 'out_file', 'out', '[', '"secondary"', ']', '=', '[', ']', 'return', 'out'] | Run viral QC analysis:
1. Extract the unmapped reads
2. BWA-MEM to the viral sequences from GDC database https://gdc.cancer.gov/about-data/data-harmonization-and-generation/gdc-reference-files
3. Report viruses that are in more than 50% covered by at least 5x | ['Run', 'viral', 'QC', 'analysis', ':', '1', '.', 'Extract', 'the', 'unmapped', 'reads', '2', '.', 'BWA', '-', 'MEM', 'to', 'the', 'viral', 'sequences', 'from', 'GDC', 'database', 'https', ':', '//', 'gdc', '.', 'cancer', '.', 'gov', '/', 'about', '-', 'data', '/', 'data', '-', 'harmonization', '-', 'and', '-', 'generation', '/', 'gdc', '-', 'reference', '-', 'files', '3', '.', 'Report', 'viruses', 'that', 'are', 'in', 'more', 'than', '50%', 'covered', 'by', 'at', 'least', '5x'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/viral.py#L15-L57 |
1,116 | totalgood/nlpia | src/nlpia/transcoders.py | clean_asciidoc | python | train | https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L121-L132

def clean_asciidoc(text):
    r""" Transform asciidoc text into ASCII text that NL parsers can handle

    TODO:
      Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc

    >>> clean_asciidoc('**Hello** _world_!')
    '"Hello" "world"!'
    """
    text = re.sub(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])', r'"\2', text)
    text = re.sub(r'([a-zA-Z0-9])[\]_*]{1,2}', r'\1"', text)
    return text
r""" Transform asciidoc text into ASCII text that NL parsers can handle
TODO:
Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc
>>> clean_asciidoc('**Hello** _world_!')
'"Hello" "world"!'
"""
text = re.sub(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])', r'"\2', text)
text = re.sub(r'([a-zA-Z0-9])[\]_*]{1,2}', r'\1"', text)
return text | ['def', 'clean_asciidoc', '(', 'text', ')', ':', 'text', '=', 're', '.', 'sub', '(', "r'(\\b|^)[\\[_*]{1,2}([a-zA-Z0-9])'", ',', 'r\'"\\2\'', ',', 'text', ')', 'text', '=', 're', '.', 'sub', '(', "r'([a-zA-Z0-9])[\\]_*]{1,2}'", ',', 'r\'\\1"\'', ',', 'text', ')', 'return', 'text'] | r""" Transform asciidoc text into ASCII text that NL parsers can handle
TODO:
Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc
>>> clean_asciidoc('**Hello** _world_!')
'"Hello" "world"!' | ['r', 'Transform', 'asciidoc', 'text', 'into', 'ASCII', 'text', 'that', 'NL', 'parsers', 'can', 'handle'] | train | https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L121-L132 |
1,117 | raymondEhlers/pachyderm | pachyderm/projectors.py | HistProjector.output_hist | python | train | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/projectors.py#L726-L746

def output_hist(self, output_hist: Hist, input_observable: Any, **kwargs: Dict[str, Any]) -> Union[Hist, Any]:
    """ Return an output object. It should store the ``output_hist``.

    Note:
        The output object could just be the raw histogram.

    Note:
        This function is just a basic placeholder which returns the given output object (a histogram)
        and likely should be overridden.

    Args:
        output_hist: The output histogram
        input_observable (object): The corresponding input object. It could be a histogram or something
            more complex.
        kwargs: Projection information dict combined with additional arguments passed to the
            projection function
    Return:
        The output object which should be stored in the output dict. By default, it returns the
        output hist.
    """
    return output_hist
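Since the method above is a placeholder meant to be overridden, a subclass might wrap the projection like this (sketch only, assuming `projectors` refers to `pachyderm.projectors`):

```python
from pachyderm import projectors

class PairedProjector(projectors.HistProjector):
    def output_hist(self, output_hist, input_observable, **kwargs):
        # Keep the projected histogram together with the object it came from.
        return {"hist": output_hist, "source": input_observable}
```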
""" Return an output object. It should store the ``output_hist``.
Note:
The output object could just be the raw histogram.
Note:
This function is just a basic placeholder which returns the given output object (a histogram)
and likely should be overridden.
Args:
output_hist: The output histogram
input_observable (object): The corresponding input object. It could be a histogram or something
more complex.
kwargs: Projection information dict combined with additional arguments passed to the
projection function
Return:
The output object which should be stored in the output dict. By default, it returns the
output hist.
"""
return output_hist | ['def', 'output_hist', '(', 'self', ',', 'output_hist', ':', 'Hist', ',', 'input_observable', ':', 'Any', ',', '*', '*', 'kwargs', ':', 'Dict', '[', 'str', ',', 'Any', ']', ')', '->', 'Union', '[', 'Hist', ',', 'Any', ']', ':', 'return', 'output_hist'] | Return an output object. It should store the ``output_hist``.
Note:
The output object could just be the raw histogram.
Note:
This function is just a basic placeholder which returns the given output object (a histogram)
and likely should be overridden.
Args:
output_hist: The output histogram
input_observable (object): The corresponding input object. It could be a histogram or something
more complex.
kwargs: Projection information dict combined with additional arguments passed to the
projection function
Return:
The output object which should be stored in the output dict. By default, it returns the
output hist. | ['Return', 'an', 'output', 'object', '.', 'It', 'should', 'store', 'the', 'output_hist', '.'] | train | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/projectors.py#L726-L746 |
1,118 | ethereum/web3.py | web3/_utils/validation.py | validate_address | python | train | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/validation.py#L142-L170

def validate_address(value):
    """
    Helper function for validating an address
    """
    if is_bytes(value):
        if not is_binary_address(value):
            raise InvalidAddress("Address must be 20 bytes when input type is bytes", value)
        return
    if not isinstance(value, str):
        raise TypeError('Address {} must be provided as a string'.format(value))
    if not is_hex_address(value):
        raise InvalidAddress("Address must be 20 bytes, as a hex string with a 0x prefix", value)
    if not is_checksum_address(value):
        if value == value.lower():
            raise InvalidAddress(
                "Web3.py only accepts checksum addresses. "
                "The software that gave you this non-checksum address should be considered unsafe, "
                "please file it as a bug on their platform. "
                "Try using an ENS name instead. Or, if you must accept lower safety, "
                "use Web3.toChecksumAddress(lower_case_address).",
                value,
            )
        else:
            raise InvalidAddress(
                "Address has an invalid EIP-55 checksum. "
                "After looking up the address from the original source, try again.",
                value,
            )
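A short sketch of the validator in use; the address is an example value only, and `Web3.toChecksumAddress` is used to produce the EIP-55 form:

```python
from web3 import Web3
from web3._utils.validation import validate_address

addr = '0x' + 'ab' * 20                          # example value, 20 bytes of hex
validate_address(Web3.toChecksumAddress(addr))   # passes silently

try:
    validate_address(addr)                       # all-lowercase form is rejected
except Exception as err:
    print(type(err).__name__)                    # InvalidAddress
```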
"""
Helper function for validating an address
"""
if is_bytes(value):
if not is_binary_address(value):
raise InvalidAddress("Address must be 20 bytes when input type is bytes", value)
return
if not isinstance(value, str):
raise TypeError('Address {} must be provided as a string'.format(value))
if not is_hex_address(value):
raise InvalidAddress("Address must be 20 bytes, as a hex string with a 0x prefix", value)
if not is_checksum_address(value):
if value == value.lower():
raise InvalidAddress(
"Web3.py only accepts checksum addresses. "
"The software that gave you this non-checksum address should be considered unsafe, "
"please file it as a bug on their platform. "
"Try using an ENS name instead. Or, if you must accept lower safety, "
"use Web3.toChecksumAddress(lower_case_address).",
value,
)
else:
raise InvalidAddress(
"Address has an invalid EIP-55 checksum. "
"After looking up the address from the original source, try again.",
value,
) | ['def', 'validate_address', '(', 'value', ')', ':', 'if', 'is_bytes', '(', 'value', ')', ':', 'if', 'not', 'is_binary_address', '(', 'value', ')', ':', 'raise', 'InvalidAddress', '(', '"Address must be 20 bytes when input type is bytes"', ',', 'value', ')', 'return', 'if', 'not', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'raise', 'TypeError', '(', "'Address {} must be provided as a string'", '.', 'format', '(', 'value', ')', ')', 'if', 'not', 'is_hex_address', '(', 'value', ')', ':', 'raise', 'InvalidAddress', '(', '"Address must be 20 bytes, as a hex string with a 0x prefix"', ',', 'value', ')', 'if', 'not', 'is_checksum_address', '(', 'value', ')', ':', 'if', 'value', '==', 'value', '.', 'lower', '(', ')', ':', 'raise', 'InvalidAddress', '(', '"Web3.py only accepts checksum addresses. "', '"The software that gave you this non-checksum address should be considered unsafe, "', '"please file it as a bug on their platform. "', '"Try using an ENS name instead. Or, if you must accept lower safety, "', '"use Web3.toChecksumAddress(lower_case_address)."', ',', 'value', ',', ')', 'else', ':', 'raise', 'InvalidAddress', '(', '"Address has an invalid EIP-55 checksum. "', '"After looking up the address from the original source, try again."', ',', 'value', ',', ')'] | Helper function for validating an address | ['Helper', 'function', 'for', 'validating', 'an', 'address'] | train | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/validation.py#L142-L170 |
1,119 | lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/pycallgraph.py | get_dot | python | train | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L327-L369

def get_dot(stop=True):
    """Returns a string containing a DOT file. Setting stop to True will cause
    the trace to stop.
    """
    defaults = []
    nodes = []
    edges = []

    # define default attributes
    for comp, comp_attr in graph_attributes.items():
        attr = ', '.join( '%s = "%s"' % (attr, val)
                          for attr, val in comp_attr.items() )
        defaults.append( '\t%(comp)s [ %(attr)s ];\n' % locals() )

    # define nodes
    for func, hits in func_count.items():
        calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
        col = settings['node_colour'](calls_frac, total_time_frac)
        attribs = ['%s="%s"' % a for a in settings['node_attributes'].items()]
        node_str = '"%s" [%s];' % (func, ', '.join(attribs))
        nodes.append( node_str % locals() )

    # define edges
    for fr_key, fr_val in call_dict.items():
        if not fr_key: continue
        for to_key, to_val in fr_val.items():
            calls_frac, total_time_frac, totla_time = \
                _frac_calculation(to_key, to_val)
            col = settings['edge_colour'](calls_frac, total_time_frac)
            edge = '[ color = "%s", label="%s" ]' % (col, to_val)
            edges.append('"%s"->"%s" %s;' % (fr_key, to_key, edge))

    defaults = '\n\t'.join( defaults )
    nodes = '\n\t'.join( nodes )
    edges = '\n\t'.join( edges )

    dot_fmt = ("digraph G {\n"
               "\t%(defaults)s\n\n"
               "\t%(nodes)s\n\n"
               "\t%(edges)s\n}\n"
               )
    return dot_fmt % locals()
"""Returns a string containing a DOT file. Setting stop to True will cause
the trace to stop.
"""
defaults = []
nodes = []
edges = []
# define default attributes
for comp, comp_attr in graph_attributes.items():
attr = ', '.join( '%s = "%s"' % (attr, val)
for attr, val in comp_attr.items() )
defaults.append( '\t%(comp)s [ %(attr)s ];\n' % locals() )
# define nodes
for func, hits in func_count.items():
calls_frac, total_time_frac, total_time = _frac_calculation(func, hits)
col = settings['node_colour'](calls_frac, total_time_frac)
attribs = ['%s="%s"' % a for a in settings['node_attributes'].items()]
node_str = '"%s" [%s];' % (func, ', '.join(attribs))
nodes.append( node_str % locals() )
# define edges
for fr_key, fr_val in call_dict.items():
if not fr_key: continue
for to_key, to_val in fr_val.items():
calls_frac, total_time_frac, totla_time = \
_frac_calculation(to_key, to_val)
col = settings['edge_colour'](calls_frac, total_time_frac)
edge = '[ color = "%s", label="%s" ]' % (col, to_val)
edges.append('"%s"->"%s" %s;' % (fr_key, to_key, edge))
defaults = '\n\t'.join( defaults )
nodes = '\n\t'.join( nodes )
edges = '\n\t'.join( edges )
dot_fmt = ("digraph G {\n"
" %(defaults)s\n\n"
" %(nodes)s\n\n"
" %(edges)s\n}\n"
)
return dot_fmt % locals() | ['def', 'get_dot', '(', 'stop', '=', 'True', ')', ':', 'defaults', '=', '[', ']', 'nodes', '=', '[', ']', 'edges', '=', '[', ']', '# define default attributes', 'for', 'comp', ',', 'comp_attr', 'in', 'graph_attributes', '.', 'items', '(', ')', ':', 'attr', '=', "', '", '.', 'join', '(', '\'%s = "%s"\'', '%', '(', 'attr', ',', 'val', ')', 'for', 'attr', ',', 'val', 'in', 'comp_attr', '.', 'items', '(', ')', ')', 'defaults', '.', 'append', '(', "'\\t%(comp)s [ %(attr)s ];\\n'", '%', 'locals', '(', ')', ')', '# define nodes', 'for', 'func', ',', 'hits', 'in', 'func_count', '.', 'items', '(', ')', ':', 'calls_frac', ',', 'total_time_frac', ',', 'total_time', '=', '_frac_calculation', '(', 'func', ',', 'hits', ')', 'col', '=', 'settings', '[', "'node_colour'", ']', '(', 'calls_frac', ',', 'total_time_frac', ')', 'attribs', '=', '[', '\'%s="%s"\'', '%', 'a', 'for', 'a', 'in', 'settings', '[', "'node_attributes'", ']', '.', 'items', '(', ')', ']', 'node_str', '=', '\'"%s" [%s];\'', '%', '(', 'func', ',', "', '", '.', 'join', '(', 'attribs', ')', ')', 'nodes', '.', 'append', '(', 'node_str', '%', 'locals', '(', ')', ')', '# define edges', 'for', 'fr_key', ',', 'fr_val', 'in', 'call_dict', '.', 'items', '(', ')', ':', 'if', 'not', 'fr_key', ':', 'continue', 'for', 'to_key', ',', 'to_val', 'in', 'fr_val', '.', 'items', '(', ')', ':', 'calls_frac', ',', 'total_time_frac', ',', 'totla_time', '=', '_frac_calculation', '(', 'to_key', ',', 'to_val', ')', 'col', '=', 'settings', '[', "'edge_colour'", ']', '(', 'calls_frac', ',', 'total_time_frac', ')', 'edge', '=', '\'[ color = "%s", label="%s" ]\'', '%', '(', 'col', ',', 'to_val', ')', 'edges', '.', 'append', '(', '\'"%s"->"%s" %s;\'', '%', '(', 'fr_key', ',', 'to_key', ',', 'edge', ')', ')', 'defaults', '=', "'\\n\\t'", '.', 'join', '(', 'defaults', ')', 'nodes', '=', "'\\n\\t'", '.', 'join', '(', 'nodes', ')', 'edges', '=', "'\\n\\t'", '.', 'join', '(', 'edges', ')', 'dot_fmt', '=', '(', '"digraph G {\\n"', '"\t%(defaults)s\\n\\n"', '"\t%(nodes)s\\n\\n"', '"\t%(edges)s\\n}\\n"', ')', 'return', 'dot_fmt', '%', 'locals', '(', ')'] | Returns a string containing a DOT file. Setting stop to True will cause
the trace to stop. | ['Returns', 'a', 'string', 'containing', 'a', 'DOT', 'file', '.', 'Setting', 'stop', 'to', 'True', 'will', 'cause', 'the', 'trace', 'to', 'stop', '.'] | train | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pycallgraph.py#L327-L369 |
1,120 | Shapeways/coyote_framework | coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py | WebElementWrapper.send_special_keys | python | train | https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py#L281-L294

def send_special_keys(self, value):
    """
    Send special keys such as <enter> or <delete>

    @rtype: WebElementWrapper
    @return: Self
    """
    def send_keys_element():
        """
        Wrapper to send keys
        """
        return self.element.send_keys(value)

    self.execute_and_handle_webelement_exceptions(send_keys_element, 'send keys')
    return self
"""
Send special keys such as <enter> or <delete>
@rtype: WebElementWrapper
@return: Self
"""
def send_keys_element():
"""
Wrapper to send keys
"""
return self.element.send_keys(value)
self.execute_and_handle_webelement_exceptions(send_keys_element, 'send keys')
return self | ['def', 'send_special_keys', '(', 'self', ',', 'value', ')', ':', 'def', 'send_keys_element', '(', ')', ':', '"""\n Wrapper to send keys\n """', 'return', 'self', '.', 'element', '.', 'send_keys', '(', 'value', ')', 'self', '.', 'execute_and_handle_webelement_exceptions', '(', 'send_keys_element', ',', "'send keys'", ')', 'return', 'self'] | Send special keys such as <enter> or <delete>
@rtype: WebElementWrapper
@return: Self | ['Send', 'special', 'keys', 'such', 'as', '<enter', '>', 'or', '<delete', '>'] | train | https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py#L281-L294 |
1,121 | geographika/mappyfile | docs/examples/geometry/geometry.py | erosion | python | train | https://github.com/geographika/mappyfile/blob/aecbc5e66ec06896bc4c5db41313503468829d00/docs/examples/geometry/geometry.py#L23-L45

def erosion(mapfile, dilated):
    """
    We will continue to work with the modified Mapfile
    If we wanted to start from scratch we could simply reread it
    """
    ll = mappyfile.find(mapfile["layers"], "name", "line")
    ll["status"] = "OFF"
    pl = mappyfile.find(mapfile["layers"], "name", "polygon")

    # make a deep copy of the polygon layer in the Map
    # so any modification are made to this layer only
    pl2 = deepcopy(pl)
    pl2["name"] = "newpolygon"
    mapfile["layers"].append(pl2)

    dilated = dilated.buffer(-0.3)
    pl2["features"][0]["wkt"] = dilated.wkt

    style = pl["classes"][0]["styles"][0]
    style["color"] = "#999999"
    style["outlinecolor"] = "#b2b2b2"
"""
We will continue to work with the modified Mapfile
If we wanted to start from scratch we could simply reread it
"""
ll = mappyfile.find(mapfile["layers"], "name", "line")
ll["status"] = "OFF"
pl = mappyfile.find(mapfile["layers"], "name", "polygon")
# make a deep copy of the polygon layer in the Map
# so any modification are made to this layer only
pl2 = deepcopy(pl)
pl2["name"] = "newpolygon"
mapfile["layers"].append(pl2)
dilated = dilated.buffer(-0.3)
pl2["features"][0]["wkt"] = dilated.wkt
style = pl["classes"][0]["styles"][0]
style["color"] = "#999999"
style["outlinecolor"] = "#b2b2b2" | ['def', 'erosion', '(', 'mapfile', ',', 'dilated', ')', ':', 'll', '=', 'mappyfile', '.', 'find', '(', 'mapfile', '[', '"layers"', ']', ',', '"name"', ',', '"line"', ')', 'll', '[', '"status"', ']', '=', '"OFF"', 'pl', '=', 'mappyfile', '.', 'find', '(', 'mapfile', '[', '"layers"', ']', ',', '"name"', ',', '"polygon"', ')', '# make a deep copy of the polygon layer in the Map', '# so any modification are made to this layer only', 'pl2', '=', 'deepcopy', '(', 'pl', ')', 'pl2', '[', '"name"', ']', '=', '"newpolygon"', 'mapfile', '[', '"layers"', ']', '.', 'append', '(', 'pl2', ')', 'dilated', '=', 'dilated', '.', 'buffer', '(', '-', '0.3', ')', 'pl2', '[', '"features"', ']', '[', '0', ']', '[', '"wkt"', ']', '=', 'dilated', '.', 'wkt', 'style', '=', 'pl', '[', '"classes"', ']', '[', '0', ']', '[', '"styles"', ']', '[', '0', ']', 'style', '[', '"color"', ']', '=', '"#999999"', 'style', '[', '"outlinecolor"', ']', '=', '"#b2b2b2"'] | We will continue to work with the modified Mapfile
If we wanted to start from scratch we could simply reread it | ['We', 'will', 'continue', 'to', 'work', 'with', 'the', 'modified', 'Mapfile', 'If', 'we', 'wanted', 'to', 'start', 'from', 'scratch', 'we', 'could', 'simply', 'reread', 'it'] | train | https://github.com/geographika/mappyfile/blob/aecbc5e66ec06896bc4c5db41313503468829d00/docs/examples/geometry/geometry.py#L23-L45 |
1,122 | saltstack/salt | salt/cloud/clouds/vmware.py | _get_si | def _get_si():
'''
Authenticate with vCenter server and return service instance object.
'''
url = config.get_cloud_config_value(
'url', get_configured_provider(), __opts__, search_global=False
)
username = config.get_cloud_config_value(
'user', get_configured_provider(), __opts__, search_global=False
)
password = config.get_cloud_config_value(
'password', get_configured_provider(), __opts__, search_global=False
)
protocol = config.get_cloud_config_value(
'protocol', get_configured_provider(), __opts__, search_global=False, default='https'
)
port = config.get_cloud_config_value(
'port', get_configured_provider(), __opts__, search_global=False, default=443
)
return salt.utils.vmware.get_service_instance(url,
username,
password,
protocol=protocol,
port=port) | python | def _get_si():
'''
Authenticate with vCenter server and return service instance object.
'''
url = config.get_cloud_config_value(
'url', get_configured_provider(), __opts__, search_global=False
)
username = config.get_cloud_config_value(
'user', get_configured_provider(), __opts__, search_global=False
)
password = config.get_cloud_config_value(
'password', get_configured_provider(), __opts__, search_global=False
)
protocol = config.get_cloud_config_value(
'protocol', get_configured_provider(), __opts__, search_global=False, default='https'
)
port = config.get_cloud_config_value(
'port', get_configured_provider(), __opts__, search_global=False, default=443
)
return salt.utils.vmware.get_service_instance(url,
username,
password,
protocol=protocol,
port=port) | ['def', '_get_si', '(', ')', ':', 'url', '=', 'config', '.', 'get_cloud_config_value', '(', "'url'", ',', 'get_configured_provider', '(', ')', ',', '__opts__', ',', 'search_global', '=', 'False', ')', 'username', '=', 'config', '.', 'get_cloud_config_value', '(', "'user'", ',', 'get_configured_provider', '(', ')', ',', '__opts__', ',', 'search_global', '=', 'False', ')', 'password', '=', 'config', '.', 'get_cloud_config_value', '(', "'password'", ',', 'get_configured_provider', '(', ')', ',', '__opts__', ',', 'search_global', '=', 'False', ')', 'protocol', '=', 'config', '.', 'get_cloud_config_value', '(', "'protocol'", ',', 'get_configured_provider', '(', ')', ',', '__opts__', ',', 'search_global', '=', 'False', ',', 'default', '=', "'https'", ')', 'port', '=', 'config', '.', 'get_cloud_config_value', '(', "'port'", ',', 'get_configured_provider', '(', ')', ',', '__opts__', ',', 'search_global', '=', 'False', ',', 'default', '=', '443', ')', 'return', 'salt', '.', 'utils', '.', 'vmware', '.', 'get_service_instance', '(', 'url', ',', 'username', ',', 'password', ',', 'protocol', '=', 'protocol', ',', 'port', '=', 'port', ')'] | Authenticate with vCenter server and return service instance object. | ['Authenticate', 'with', 'vCenter', 'server', 'and', 'return', 'service', 'instance', 'object', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L238-L263 |
1,123 | riga/law | law/parser.py | root_task_parser | def root_task_parser():
"""
Returns a new *ArgumentParser* instance that only contains parameter actions of the root task.
The returned instance is cached.
"""
global _root_task_parser
if _root_task_parser:
return _root_task_parser
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
root_task = luigi_parser.known_args.root_task
# get all root task parameter destinations
root_dests = []
for task_name, _, param_name, _ in luigi.task_register.Register.get_all_params():
if task_name == root_task:
root_dests.append(param_name)
# create a new parser and add all root actions
_root_task_parser = ArgumentParser(add_help=False)
for action in list(full_parser()._actions):
if not action.option_strings or action.dest in root_dests:
_root_task_parser._add_action(action)
logger.debug("build luigi argument parser for root task {}".format(root_task))
return _root_task_parser | python | def root_task_parser():
"""
Returns a new *ArgumentParser* instance that only contains parameter actions of the root task.
The returned instance is cached.
"""
global _root_task_parser
if _root_task_parser:
return _root_task_parser
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
root_task = luigi_parser.known_args.root_task
# get all root task parameter destinations
root_dests = []
for task_name, _, param_name, _ in luigi.task_register.Register.get_all_params():
if task_name == root_task:
root_dests.append(param_name)
# create a new parser and add all root actions
_root_task_parser = ArgumentParser(add_help=False)
for action in list(full_parser()._actions):
if not action.option_strings or action.dest in root_dests:
_root_task_parser._add_action(action)
logger.debug("build luigi argument parser for root task {}".format(root_task))
return _root_task_parser | ['def', 'root_task_parser', '(', ')', ':', 'global', '_root_task_parser', 'if', '_root_task_parser', ':', 'return', '_root_task_parser', 'luigi_parser', '=', 'luigi', '.', 'cmdline_parser', '.', 'CmdlineParser', '.', 'get_instance', '(', ')', 'if', 'not', 'luigi_parser', ':', 'return', 'None', 'root_task', '=', 'luigi_parser', '.', 'known_args', '.', 'root_task', '# get all root task parameter destinations', 'root_dests', '=', '[', ']', 'for', 'task_name', ',', '_', ',', 'param_name', ',', '_', 'in', 'luigi', '.', 'task_register', '.', 'Register', '.', 'get_all_params', '(', ')', ':', 'if', 'task_name', '==', 'root_task', ':', 'root_dests', '.', 'append', '(', 'param_name', ')', '# create a new parser and add all root actions', '_root_task_parser', '=', 'ArgumentParser', '(', 'add_help', '=', 'False', ')', 'for', 'action', 'in', 'list', '(', 'full_parser', '(', ')', '.', '_actions', ')', ':', 'if', 'not', 'action', '.', 'option_strings', 'or', 'action', '.', 'dest', 'in', 'root_dests', ':', '_root_task_parser', '.', '_add_action', '(', 'action', ')', 'logger', '.', 'debug', '(', '"build luigi argument parser for root task {}"', '.', 'format', '(', 'root_task', ')', ')', 'return', '_root_task_parser'] | Returns a new *ArgumentParser* instance that only contains paremeter actions of the root task.
The returned instance is cached. | ['Returns', 'a', 'new', '*', 'ArgumentParser', '*', 'instance', 'that', 'only', 'contains', 'paremeter', 'actions', 'of', 'the', 'root', 'task', '.', 'The', 'returned', 'instance', 'is', 'cached', '.'] | train | https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/parser.py#L50-L80 |
1,124 | bxlab/bx-python | lib/bx/bitset_builders.py | binned_bitsets_from_list | def binned_bitsets_from_list( list=[] ):
"""Read a list into a dictionary of bitsets"""
last_chrom = None
last_bitset = None
bitsets = dict()
for l in list:
chrom = l[0]
if chrom != last_chrom:
if chrom not in bitsets:
bitsets[chrom] = BinnedBitSet(MAX)
last_chrom = chrom
last_bitset = bitsets[chrom]
start, end = int( l[1] ), int( l[2] )
last_bitset.set_range( start, end - start )
return bitsets | python | def binned_bitsets_from_list( list=[] ):
"""Read a list into a dictionary of bitsets"""
last_chrom = None
last_bitset = None
bitsets = dict()
for l in list:
chrom = l[0]
if chrom != last_chrom:
if chrom not in bitsets:
bitsets[chrom] = BinnedBitSet(MAX)
last_chrom = chrom
last_bitset = bitsets[chrom]
start, end = int( l[1] ), int( l[2] )
last_bitset.set_range( start, end - start )
return bitsets | ['def', 'binned_bitsets_from_list', '(', 'list', '=', '[', ']', ')', ':', 'last_chrom', '=', 'None', 'last_bitset', '=', 'None', 'bitsets', '=', 'dict', '(', ')', 'for', 'l', 'in', 'list', ':', 'chrom', '=', 'l', '[', '0', ']', 'if', 'chrom', '!=', 'last_chrom', ':', 'if', 'chrom', 'not', 'in', 'bitsets', ':', 'bitsets', '[', 'chrom', ']', '=', 'BinnedBitSet', '(', 'MAX', ')', 'last_chrom', '=', 'chrom', 'last_bitset', '=', 'bitsets', '[', 'chrom', ']', 'start', ',', 'end', '=', 'int', '(', 'l', '[', '1', ']', ')', ',', 'int', '(', 'l', '[', '2', ']', ')', 'last_bitset', '.', 'set_range', '(', 'start', ',', 'end', '-', 'start', ')', 'return', 'bitsets'] | Read a list into a dictionary of bitsets | ['Read', 'a', 'list', 'into', 'a', 'dictionary', 'of', 'bitsets'] | train | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/bitset_builders.py#L130-L144 |
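A minimal usage sketch for binned_bitsets_from_list above, assuming bx-python is installed; the interval values are made up, and the count_range call on BinnedBitSet is an assumption:

from bx.bitset_builders import binned_bitsets_from_list

# each entry is (chrom, start, end); ranges on the same chromosome are
# OR-ed into that chromosome's BinnedBitSet
intervals = [
    ("chr1", 100, 200),
    ("chr1", 150, 300),
    ("chr2", 0, 50),
]
bitsets = binned_bitsets_from_list(intervals)
print(sorted(bitsets))                       # ['chr1', 'chr2']
print(bitsets["chr1"].count_range(0, 1000))  # 200 set bases (100..300); count_range assumed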
1,125 | fiesta/fiesta-python | fiesta/fiesta.py | FiestaAPISandbox.reset | def reset(self):
"""
Reset the state of the sandbox.
http://docs.fiesta.cc/sandbox.html#post--reset
"""
path = 'reset'
request_data = {} # Need to put data into the request to force urllib2 to make it a POST request
response_data = self.request(path, request_data)
        success = response_data['reset'] # True or False
return success | python | def reset(self):
"""
Reset the state of the sandbox.
http://docs.fiesta.cc/sandbox.html#post--reset
"""
path = 'reset'
request_data = {} # Need to put data into the request to force urllib2 to make it a POST request
response_data = self.request(path, request_data)
        success = response_data['reset'] # True or False
return success | ['def', 'reset', '(', 'self', ')', ':', 'path', '=', "'reset'", 'request_data', '=', '{', '}', '# Need to put data into the request to force urllib2 to make it a POST request', 'response_data', '=', 'self', '.', 'request', '(', 'path', ',', 'request_data', ')', 'success', '=', 'response_data', '[', "'reset'", ']', '# True of False', 'return', 'success'] | Reset the state of the sandbox.
http://docs.fiesta.cc/sandbox.html#post--reset | ['Reset', 'the', 'state', 'of', 'the', 'sandbox', '.', 'http', ':', '//', 'docs', '.', 'fiesta', '.', 'cc', '/', 'sandbox', '.', 'html#post', '--', 'reset'] | train | https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L364-L373 |
1,126 | biolink/biolink-model | metamodel/generators/markdowngen.py | MarkdownGenerator.bbin | def bbin(obj: Union[str, Element]) -> str:
""" Boldify built in types
@param obj: object name or id
@return:
"""
return obj.name if isinstance(obj, Element ) else f'**{obj}**' if obj in builtin_names else obj | python | def bbin(obj: Union[str, Element]) -> str:
""" Boldify built in types
@param obj: object name or id
@return:
"""
return obj.name if isinstance(obj, Element ) else f'**{obj}**' if obj in builtin_names else obj | ['def', 'bbin', '(', 'obj', ':', 'Union', '[', 'str', ',', 'Element', ']', ')', '->', 'str', ':', 'return', 'obj', '.', 'name', 'if', 'isinstance', '(', 'obj', ',', 'Element', ')', 'else', "f'**{obj}**'", 'if', 'obj', 'in', 'builtin_names', 'else', 'obj'] | Boldify built in types
@param obj: object name or id
@return: | ['Boldify', 'built', 'in', 'types'] | train | https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/markdowngen.py#L254-L260 |
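The one-liner above depends on the metamodel's Element class and builtin_names set; a standalone sketch of the same boldify-if-builtin behaviour, using a stand-in set instead of the real builtin_names:

builtin_names = {"string", "integer", "boolean"}  # stand-in, not the real metamodel set

def bbin_demo(obj):
    # plain strings naming a builtin type are wrapped in markdown bold,
    # anything else is passed through unchanged (Element handling omitted)
    return f'**{obj}**' if obj in builtin_names else obj

print(bbin_demo("string"))  # **string**
print(bbin_demo("Gene"))    # Gene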
1,127 | dlintott/gns3-converter | gns3converter/main.py | make_vbox_dirs | def make_vbox_dirs(max_vbox_id, output_dir, topology_name):
"""
Create VirtualBox working directories if required
:param int max_vbox_id: Number of directories to create
:param str output_dir: Output directory
:param str topology_name: Topology name
"""
if max_vbox_id is not None:
for i in range(1, max_vbox_id + 1):
vbox_dir = os.path.join(output_dir, topology_name + '-files',
'vbox', 'vm-%s' % i)
os.makedirs(vbox_dir) | python | def make_vbox_dirs(max_vbox_id, output_dir, topology_name):
"""
Create VirtualBox working directories if required
:param int max_vbox_id: Number of directories to create
:param str output_dir: Output directory
:param str topology_name: Topology name
"""
if max_vbox_id is not None:
for i in range(1, max_vbox_id + 1):
vbox_dir = os.path.join(output_dir, topology_name + '-files',
'vbox', 'vm-%s' % i)
os.makedirs(vbox_dir) | ['def', 'make_vbox_dirs', '(', 'max_vbox_id', ',', 'output_dir', ',', 'topology_name', ')', ':', 'if', 'max_vbox_id', 'is', 'not', 'None', ':', 'for', 'i', 'in', 'range', '(', '1', ',', 'max_vbox_id', '+', '1', ')', ':', 'vbox_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'output_dir', ',', 'topology_name', '+', "'-files'", ',', "'vbox'", ',', "'vm-%s'", '%', 'i', ')', 'os', '.', 'makedirs', '(', 'vbox_dir', ')'] | Create VirtualBox working directories if required
:param int max_vbox_id: Number of directories to create
:param str output_dir: Output directory
:param str topology_name: Topology name | ['Create', 'VirtualBox', 'working', 'directories', 'if', 'required'] | train | https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/main.py#L398-L410 |
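A short sketch of calling make_vbox_dirs above; the output directory is a temporary path and the topology name is arbitrary:

import os
import tempfile

from gns3converter.main import make_vbox_dirs

out = tempfile.mkdtemp()
make_vbox_dirs(max_vbox_id=2, output_dir=out, topology_name="lab")
# the helper builds <out>/lab-files/vbox/vm-1 and <out>/lab-files/vbox/vm-2
print(sorted(os.listdir(os.path.join(out, "lab-files", "vbox"))))  # ['vm-1', 'vm-2']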
1,128 | Brightmd/TxPx | txpx/process.py | background | def background(cl, proto=EchoProcess, **kw):
"""
Use the reactor to run a process in the background.
Keep the pid around.
``proto'' may be any callable which returns an instance of ProcessProtocol
"""
if isinstance(cl, basestring):
cl = shlex.split(cl)
if not cl[0].startswith('/'):
path = which(cl[0])
assert path, '%s not found' % cl[0]
cl[0] = path[0]
d = Deferred()
proc = reactor.spawnProcess(
proto(name=basename(cl[0]), deferred=d),
cl[0],
cl,
env=os.environ,
**kw)
daycare.add(proc.pid)
return d | python | def background(cl, proto=EchoProcess, **kw):
"""
Use the reactor to run a process in the background.
Keep the pid around.
``proto'' may be any callable which returns an instance of ProcessProtocol
"""
if isinstance(cl, basestring):
cl = shlex.split(cl)
if not cl[0].startswith('/'):
path = which(cl[0])
assert path, '%s not found' % cl[0]
cl[0] = path[0]
d = Deferred()
proc = reactor.spawnProcess(
proto(name=basename(cl[0]), deferred=d),
cl[0],
cl,
env=os.environ,
**kw)
daycare.add(proc.pid)
return d | ['def', 'background', '(', 'cl', ',', 'proto', '=', 'EchoProcess', ',', '*', '*', 'kw', ')', ':', 'if', 'isinstance', '(', 'cl', ',', 'basestring', ')', ':', 'cl', '=', 'shlex', '.', 'split', '(', 'cl', ')', 'if', 'not', 'cl', '[', '0', ']', '.', 'startswith', '(', "'/'", ')', ':', 'path', '=', 'which', '(', 'cl', '[', '0', ']', ')', 'assert', 'path', ',', "'%s not found'", '%', 'cl', '[', '0', ']', 'cl', '[', '0', ']', '=', 'path', '[', '0', ']', 'd', '=', 'Deferred', '(', ')', 'proc', '=', 'reactor', '.', 'spawnProcess', '(', 'proto', '(', 'name', '=', 'basename', '(', 'cl', '[', '0', ']', ')', ',', 'deferred', '=', 'd', ')', ',', 'cl', '[', '0', ']', ',', 'cl', ',', 'env', '=', 'os', '.', 'environ', ',', '*', '*', 'kw', ')', 'daycare', '.', 'add', '(', 'proc', '.', 'pid', ')', 'return', 'd'] | Use the reactor to run a process in the background.
Keep the pid around.
``proto'' may be any callable which returns an instance of ProcessProtocol | ['Use', 'the', 'reactor', 'to', 'run', 'a', 'process', 'in', 'the', 'background', '.'] | train | https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L128-L153 |
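A hedged usage sketch for background() above; it needs a running Twisted reactor and a POSIX echo binary, and the exact result passed to the callback depends on EchoProcess, so the callback below is illustrative only:

from twisted.internet import task

from txpx.process import background

def main(reactor):
    # spawn a short-lived child; the returned Deferred fires when it exits
    d = background("echo hello from txpx")
    d.addCallback(lambda result: print("child finished:", result))
    return d

task.react(main)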
1,129 | odlgroup/odl | odl/solvers/functional/default_functionals.py | NuclearNorm.convex_conj | def convex_conj(self):
"""Convex conjugate of the nuclear norm.
The convex conjugate is the indicator function on the unit ball of
the dual norm where the dual norm is obtained by taking the conjugate
exponent of both the outer and singular vector exponents.
"""
return IndicatorNuclearNormUnitBall(
self.domain,
conj_exponent(self.outernorm.exponent),
conj_exponent(self.pwisenorm.exponent)) | python | def convex_conj(self):
"""Convex conjugate of the nuclear norm.
The convex conjugate is the indicator function on the unit ball of
the dual norm where the dual norm is obtained by taking the conjugate
exponent of both the outer and singular vector exponents.
"""
return IndicatorNuclearNormUnitBall(
self.domain,
conj_exponent(self.outernorm.exponent),
conj_exponent(self.pwisenorm.exponent)) | ['def', 'convex_conj', '(', 'self', ')', ':', 'return', 'IndicatorNuclearNormUnitBall', '(', 'self', '.', 'domain', ',', 'conj_exponent', '(', 'self', '.', 'outernorm', '.', 'exponent', ')', ',', 'conj_exponent', '(', 'self', '.', 'pwisenorm', '.', 'exponent', ')', ')'] | Convex conjugate of the nuclear norm.
The convex conjugate is the indicator function on the unit ball of
the dual norm where the dual norm is obtained by taking the conjugate
exponent of both the outer and singular vector exponents. | ['Convex', 'conjugate', 'of', 'the', 'nuclear', 'norm', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/default_functionals.py#L2106-L2116 |
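For reference, conj_exponent here is the usual Hoelder conjugate: for an exponent p in [1, infinity], the conjugate p* satisfies 1/p + 1/p* = 1, with 1 and infinity conjugate to each other. So, following the code above, a nuclear norm with outer exponent 1 and singular-vector exponent 2 has as convex conjugate the indicator on the unit ball of the (infinity, 2) norm.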
1,130 | Azure/azure-cosmos-python | azure/cosmos/cosmos_client.py | CosmosClient.DeletePermission | def DeletePermission(self, permission_link, options=None):
"""Deletes a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The deleted Permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.DeleteResource(path,
'permissions',
permission_id,
None,
options) | python | def DeletePermission(self, permission_link, options=None):
"""Deletes a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The deleted Permission.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(permission_link)
permission_id = base.GetResourceIdOrFullNameFromLink(permission_link)
return self.DeleteResource(path,
'permissions',
permission_id,
None,
options) | ['def', 'DeletePermission', '(', 'self', ',', 'permission_link', ',', 'options', '=', 'None', ')', ':', 'if', 'options', 'is', 'None', ':', 'options', '=', '{', '}', 'path', '=', 'base', '.', 'GetPathFromLink', '(', 'permission_link', ')', 'permission_id', '=', 'base', '.', 'GetResourceIdOrFullNameFromLink', '(', 'permission_link', ')', 'return', 'self', '.', 'DeleteResource', '(', 'path', ',', "'permissions'", ',', 'permission_id', ',', 'None', ',', 'options', ')'] | Deletes a permission.
:param str permission_link:
The link to the permission.
:param dict options:
The request options for the request.
:return:
The deleted Permission.
:rtype:
dict | ['Deletes', 'a', 'permission', '.'] | train | https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L794-L817 |
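A usage sketch for DeletePermission above; the account endpoint, master key and permission link are placeholders, and the dbs/<db>/users/<user>/permissions/<permission> link format is assumed from the SDK's self-link convention:

import azure.cosmos.cosmos_client as cosmos_client

client = cosmos_client.CosmosClient(
    "https://myaccount.documents.azure.com:443/", {"masterKey": "<account-key>"})
# per the docstring above, returns the deleted Permission as a dict
client.DeletePermission("dbs/mydb/users/appuser/permissions/read-only")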
1,131 | odlgroup/odl | odl/space/weighting.py | ConstWeighting.repr_part | def repr_part(self):
"""String usable in a space's ``__repr__`` method."""
optargs = [('weighting', self.const, 1.0),
('exponent', self.exponent, 2.0)]
return signature_string([], optargs, mod=':.4') | python | def repr_part(self):
"""String usable in a space's ``__repr__`` method."""
optargs = [('weighting', self.const, 1.0),
('exponent', self.exponent, 2.0)]
return signature_string([], optargs, mod=':.4') | ['def', 'repr_part', '(', 'self', ')', ':', 'optargs', '=', '[', '(', "'weighting'", ',', 'self', '.', 'const', ',', '1.0', ')', ',', '(', "'exponent'", ',', 'self', '.', 'exponent', ',', '2.0', ')', ']', 'return', 'signature_string', '(', '[', ']', ',', 'optargs', ',', 'mod', '=', "':.4'", ')'] | String usable in a space's ``__repr__`` method. | ['String', 'usable', 'in', 'a', 'space', 's', '__repr__', 'method', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/weighting.py#L644-L648 |
1,132 | kennethreitz/legit | legit/scm.py | SCMRepo.undo | def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0 | python | def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0 | ['def', 'undo', '(', 'self', ',', 'hard', '=', 'False', ')', ':', 'if', 'not', 'self', '.', 'fake', ':', 'return', 'self', '.', 'repo', '.', 'git', '.', 'reset', '(', "'HEAD^'", ',', 'working_tree', '=', 'hard', ')', 'else', ':', 'click', '.', 'echo', '(', 'crayons', '.', 'red', '(', "'Faked! >>> git reset {}{}'", '.', 'format', '(', "'--hard '", 'if', 'hard', 'else', "''", ',', "'HEAD^'", ')', ')', ')', 'return', '0'] | Makes last commit not exist | ['Makes', 'last', 'commit', 'not', 'exist'] | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L220-L228 |
1,133 | mar10/pyftpsync | ftpsync/pyftpsync.py | run | def run():
"""CLI main entry point."""
# Use print() instead of logging when running in CLI mode:
set_pyftpsync_logger(None)
parser = argparse.ArgumentParser(
description="Synchronize folders over FTP.",
epilog="See also https://github.com/mar10/pyftpsync",
parents=[verbose_parser],
)
# Note: we want to allow --version to be combined with --verbose. However
# on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
if check_cli_verbose(3) > 3:
version_info = "pyftpsync/{} Python/{} {}".format(
__version__, PYTHON_VERSION, platform.platform()
)
else:
version_info = "{}".format(__version__)
parser.add_argument("-V", "--version", action="version", version=version_info)
subparsers = parser.add_subparsers(help="sub-command help")
# --- Create the parser for the "upload" command ---------------------------
sp = subparsers.add_parser(
"upload",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files to remote folder",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite remote files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["local", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove remote files if they don't exist locally",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove remote files if they don't exist locally "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="upload")
# --- Create the parser for the "download" command -------------------------
sp = subparsers.add_parser(
"download",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files from remote folder to local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite local files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove local files if they don't exist on remote target",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove local files if they don't exist on remote target "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="download")
# --- Create the parser for the "sync" command -----------------------------
sp = subparsers.add_parser(
"sync",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="synchronize new and modified files between remote folder and local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--resolve",
default="ask",
choices=["old", "new", "local", "remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.set_defaults(command="sync")
# --- Create the parser for the "run" command -----------------------------
add_run_parser(subparsers)
# --- Create the parser for the "scan" command -----------------------------
add_scan_parser(subparsers)
# --- Parse command line ---------------------------------------------------
args = parser.parse_args()
args.verbose -= args.quiet
del args.quiet
# print("verbose", args.verbose)
ftp_debug = 0
if args.verbose >= 6:
ftp_debug = 1
# Modify the `args` from the `pyftpsync.yaml` config:
if getattr(args, "command", None) == "run":
handle_run_command(parser, args)
if callable(getattr(args, "command", None)):
# scan_handler
try:
return args.command(parser, args)
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
elif not hasattr(args, "command"):
parser.error(
"missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
)
# Post-process and check arguments
if hasattr(args, "delete_unmatched") and args.delete_unmatched:
args.delete = True
args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})
if args.remote == ".":
parser.error("'.' is expected to be the local target (not remote)")
args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
if not isinstance(args.local_target, FsTarget) and isinstance(
args.remote_target, FsTarget
):
parser.error("a file system target is expected to be local")
# Let the command handler do its thing
opts = namespace_to_dict(args)
if args.command == "upload":
s = UploadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "download":
s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "sync":
s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
else:
parser.error("unknown command '{}'".format(args.command))
s.is_script = True
try:
s.run()
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
finally:
# Prevent sporadic exceptions in ftplib, when closing in __del__
s.local.close()
s.remote.close()
stats = s.get_stats()
if args.verbose >= 5:
pprint(stats)
elif args.verbose >= 1:
if args.dry_run:
print("(DRY-RUN) ", end="")
print(
"Wrote {}/{} files in {} directories, skipped: {}.".format(
stats["files_written"],
stats["local_files"],
stats["local_dirs"],
stats["conflict_files_skipped"],
),
end="",
)
if stats["interactive_ask"]:
print()
else:
print(" Elap: {}.".format(stats["elap_str"]))
return | python | def run():
"""CLI main entry point."""
# Use print() instead of logging when running in CLI mode:
set_pyftpsync_logger(None)
parser = argparse.ArgumentParser(
description="Synchronize folders over FTP.",
epilog="See also https://github.com/mar10/pyftpsync",
parents=[verbose_parser],
)
# Note: we want to allow --version to be combined with --verbose. However
# on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
if check_cli_verbose(3) > 3:
version_info = "pyftpsync/{} Python/{} {}".format(
__version__, PYTHON_VERSION, platform.platform()
)
else:
version_info = "{}".format(__version__)
parser.add_argument("-V", "--version", action="version", version=version_info)
subparsers = parser.add_subparsers(help="sub-command help")
# --- Create the parser for the "upload" command ---------------------------
sp = subparsers.add_parser(
"upload",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files to remote folder",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite remote files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["local", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove remote files if they don't exist locally",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove remote files if they don't exist locally "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="upload")
# --- Create the parser for the "download" command -------------------------
sp = subparsers.add_parser(
"download",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files from remote folder to local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite local files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove local files if they don't exist on remote target",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove local files if they don't exist on remote target "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="download")
# --- Create the parser for the "sync" command -----------------------------
sp = subparsers.add_parser(
"sync",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="synchronize new and modified files between remote folder and local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--resolve",
default="ask",
choices=["old", "new", "local", "remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.set_defaults(command="sync")
# --- Create the parser for the "run" command -----------------------------
add_run_parser(subparsers)
# --- Create the parser for the "scan" command -----------------------------
add_scan_parser(subparsers)
# --- Parse command line ---------------------------------------------------
args = parser.parse_args()
args.verbose -= args.quiet
del args.quiet
# print("verbose", args.verbose)
ftp_debug = 0
if args.verbose >= 6:
ftp_debug = 1
# Modify the `args` from the `pyftpsync.yaml` config:
if getattr(args, "command", None) == "run":
handle_run_command(parser, args)
if callable(getattr(args, "command", None)):
# scan_handler
try:
return args.command(parser, args)
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
elif not hasattr(args, "command"):
parser.error(
"missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
)
# Post-process and check arguments
if hasattr(args, "delete_unmatched") and args.delete_unmatched:
args.delete = True
args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})
if args.remote == ".":
parser.error("'.' is expected to be the local target (not remote)")
args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
if not isinstance(args.local_target, FsTarget) and isinstance(
args.remote_target, FsTarget
):
parser.error("a file system target is expected to be local")
# Let the command handler do its thing
opts = namespace_to_dict(args)
if args.command == "upload":
s = UploadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "download":
s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "sync":
s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
else:
parser.error("unknown command '{}'".format(args.command))
s.is_script = True
try:
s.run()
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
finally:
# Prevent sporadic exceptions in ftplib, when closing in __del__
s.local.close()
s.remote.close()
stats = s.get_stats()
if args.verbose >= 5:
pprint(stats)
elif args.verbose >= 1:
if args.dry_run:
print("(DRY-RUN) ", end="")
print(
"Wrote {}/{} files in {} directories, skipped: {}.".format(
stats["files_written"],
stats["local_files"],
stats["local_dirs"],
stats["conflict_files_skipped"],
),
end="",
)
if stats["interactive_ask"]:
print()
else:
print(" Elap: {}.".format(stats["elap_str"]))
return | ['def', 'run', '(', ')', ':', '# Use print() instead of logging when running in CLI mode:\r', 'set_pyftpsync_logger', '(', 'None', ')', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', '"Synchronize folders over FTP."', ',', 'epilog', '=', '"See also https://github.com/mar10/pyftpsync"', ',', 'parents', '=', '[', 'verbose_parser', ']', ',', ')', '# Note: we want to allow --version to be combined with --verbose. However\r', '# on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.\r', 'if', 'check_cli_verbose', '(', '3', ')', '>', '3', ':', 'version_info', '=', '"pyftpsync/{} Python/{} {}"', '.', 'format', '(', '__version__', ',', 'PYTHON_VERSION', ',', 'platform', '.', 'platform', '(', ')', ')', 'else', ':', 'version_info', '=', '"{}"', '.', 'format', '(', '__version__', ')', 'parser', '.', 'add_argument', '(', '"-V"', ',', '"--version"', ',', 'action', '=', '"version"', ',', 'version', '=', 'version_info', ')', 'subparsers', '=', 'parser', '.', 'add_subparsers', '(', 'help', '=', '"sub-command help"', ')', '# --- Create the parser for the "upload" command ---------------------------\r', 'sp', '=', 'subparsers', '.', 'add_parser', '(', '"upload"', ',', 'parents', '=', '[', 'verbose_parser', ',', 'common_parser', ',', 'matcher_parser', ',', 'creds_parser', ']', ',', 'help', '=', '"copy new and modified files to remote folder"', ',', ')', 'sp', '.', 'add_argument', '(', '"local"', ',', 'metavar', '=', '"LOCAL"', ',', 'default', '=', '"."', ',', 'help', '=', '"path to local folder (default: %(default)s)"', ',', ')', 'sp', '.', 'add_argument', '(', '"remote"', ',', 'metavar', '=', '"REMOTE"', ',', 'help', '=', '"path to remote folder"', ')', 'sp', '.', 'add_argument', '(', '"--force"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"overwrite remote files, even if the target is newer "', '"(but no conflict was detected)"', ',', ')', 'sp', '.', 'add_argument', '(', '"--resolve"', ',', 'default', '=', '"ask"', ',', 'choices', '=', '[', '"local"', ',', '"skip"', ',', '"ask"', ']', ',', 'help', '=', '"conflict resolving strategy (default: \'%(default)s\')"', ',', ')', 'sp', '.', 'add_argument', '(', '"--delete"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"remove remote files if they don\'t exist locally"', ',', ')', 'sp', '.', 'add_argument', '(', '"--delete-unmatched"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"remove remote files if they don\'t exist locally "', '"or don\'t match the current filter (implies \'--delete\' option)"', ',', ')', 'sp', '.', 'set_defaults', '(', 'command', '=', '"upload"', ')', '# --- Create the parser for the "download" command -------------------------\r', 'sp', '=', 'subparsers', '.', 'add_parser', '(', '"download"', ',', 'parents', '=', '[', 'verbose_parser', ',', 'common_parser', ',', 'matcher_parser', ',', 'creds_parser', ']', ',', 'help', '=', '"copy new and modified files from remote folder to local target"', ',', ')', 'sp', '.', 'add_argument', '(', '"local"', ',', 'metavar', '=', '"LOCAL"', ',', 'default', '=', '"."', ',', 'help', '=', '"path to local folder (default: %(default)s)"', ',', ')', 'sp', '.', 'add_argument', '(', '"remote"', ',', 'metavar', '=', '"REMOTE"', ',', 'help', '=', '"path to remote folder"', ')', 'sp', '.', 'add_argument', '(', '"--force"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"overwrite local files, even if the target is newer "', '"(but no conflict was detected)"', ',', ')', 'sp', '.', 'add_argument', '(', '"--resolve"', 
',', 'default', '=', '"ask"', ',', 'choices', '=', '[', '"remote"', ',', '"skip"', ',', '"ask"', ']', ',', 'help', '=', '"conflict resolving strategy (default: \'%(default)s\')"', ',', ')', 'sp', '.', 'add_argument', '(', '"--delete"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"remove local files if they don\'t exist on remote target"', ',', ')', 'sp', '.', 'add_argument', '(', '"--delete-unmatched"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"remove local files if they don\'t exist on remote target "', '"or don\'t match the current filter (implies \'--delete\' option)"', ',', ')', 'sp', '.', 'set_defaults', '(', 'command', '=', '"download"', ')', '# --- Create the parser for the "sync" command -----------------------------\r', 'sp', '=', 'subparsers', '.', 'add_parser', '(', '"sync"', ',', 'parents', '=', '[', 'verbose_parser', ',', 'common_parser', ',', 'matcher_parser', ',', 'creds_parser', ']', ',', 'help', '=', '"synchronize new and modified files between remote folder and local target"', ',', ')', 'sp', '.', 'add_argument', '(', '"local"', ',', 'metavar', '=', '"LOCAL"', ',', 'default', '=', '"."', ',', 'help', '=', '"path to local folder (default: %(default)s)"', ',', ')', 'sp', '.', 'add_argument', '(', '"remote"', ',', 'metavar', '=', '"REMOTE"', ',', 'help', '=', '"path to remote folder"', ')', 'sp', '.', 'add_argument', '(', '"--resolve"', ',', 'default', '=', '"ask"', ',', 'choices', '=', '[', '"old"', ',', '"new"', ',', '"local"', ',', '"remote"', ',', '"skip"', ',', '"ask"', ']', ',', 'help', '=', '"conflict resolving strategy (default: \'%(default)s\')"', ',', ')', 'sp', '.', 'set_defaults', '(', 'command', '=', '"sync"', ')', '# --- Create the parser for the "run" command -----------------------------\r', 'add_run_parser', '(', 'subparsers', ')', '# --- Create the parser for the "scan" command -----------------------------\r', 'add_scan_parser', '(', 'subparsers', ')', '# --- Parse command line ---------------------------------------------------\r', 'args', '=', 'parser', '.', 'parse_args', '(', ')', 'args', '.', 'verbose', '-=', 'args', '.', 'quiet', 'del', 'args', '.', 'quiet', '# print("verbose", args.verbose)\r', 'ftp_debug', '=', '0', 'if', 'args', '.', 'verbose', '>=', '6', ':', 'ftp_debug', '=', '1', '# Modify the `args` from the `pyftpsync.yaml` config:\r', 'if', 'getattr', '(', 'args', ',', '"command"', ',', 'None', ')', '==', '"run"', ':', 'handle_run_command', '(', 'parser', ',', 'args', ')', 'if', 'callable', '(', 'getattr', '(', 'args', ',', '"command"', ',', 'None', ')', ')', ':', '# scan_handler\r', 'try', ':', 'return', 'args', '.', 'command', '(', 'parser', ',', 'args', ')', 'except', 'KeyboardInterrupt', ':', 'print', '(', '"\\nAborted by user."', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'sys', '.', 'exit', '(', '3', ')', 'elif', 'not', 'hasattr', '(', 'args', ',', '"command"', ')', ':', 'parser', '.', 'error', '(', '"missing command (choose from \'upload\', \'download\', \'run\', \'sync\', \'scan\')"', ')', '# Post-process and check arguments\r', 'if', 'hasattr', '(', 'args', ',', '"delete_unmatched"', ')', 'and', 'args', '.', 'delete_unmatched', ':', 'args', '.', 'delete', '=', 'True', 'args', '.', 'local_target', '=', 'make_target', '(', 'args', '.', 'local', ',', '{', '"ftp_debug"', ':', 'ftp_debug', '}', ')', 'if', 'args', '.', 'remote', '==', '"."', ':', 'parser', '.', 'error', '(', '"\'.\' is expected to be the local target (not remote)"', ')', 'args', '.', 'remote_target', '=', 'make_target', '(', 'args', '.', 
'remote', ',', '{', '"ftp_debug"', ':', 'ftp_debug', '}', ')', 'if', 'not', 'isinstance', '(', 'args', '.', 'local_target', ',', 'FsTarget', ')', 'and', 'isinstance', '(', 'args', '.', 'remote_target', ',', 'FsTarget', ')', ':', 'parser', '.', 'error', '(', '"a file system target is expected to be local"', ')', '# Let the command handler do its thing\r', 'opts', '=', 'namespace_to_dict', '(', 'args', ')', 'if', 'args', '.', 'command', '==', '"upload"', ':', 's', '=', 'UploadSynchronizer', '(', 'args', '.', 'local_target', ',', 'args', '.', 'remote_target', ',', 'opts', ')', 'elif', 'args', '.', 'command', '==', '"download"', ':', 's', '=', 'DownloadSynchronizer', '(', 'args', '.', 'local_target', ',', 'args', '.', 'remote_target', ',', 'opts', ')', 'elif', 'args', '.', 'command', '==', '"sync"', ':', 's', '=', 'BiDirSynchronizer', '(', 'args', '.', 'local_target', ',', 'args', '.', 'remote_target', ',', 'opts', ')', 'else', ':', 'parser', '.', 'error', '(', '"unknown command \'{}\'"', '.', 'format', '(', 'args', '.', 'command', ')', ')', 's', '.', 'is_script', '=', 'True', 'try', ':', 's', '.', 'run', '(', ')', 'except', 'KeyboardInterrupt', ':', 'print', '(', '"\\nAborted by user."', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'sys', '.', 'exit', '(', '3', ')', 'finally', ':', '# Prevent sporadic exceptions in ftplib, when closing in __del__\r', 's', '.', 'local', '.', 'close', '(', ')', 's', '.', 'remote', '.', 'close', '(', ')', 'stats', '=', 's', '.', 'get_stats', '(', ')', 'if', 'args', '.', 'verbose', '>=', '5', ':', 'pprint', '(', 'stats', ')', 'elif', 'args', '.', 'verbose', '>=', '1', ':', 'if', 'args', '.', 'dry_run', ':', 'print', '(', '"(DRY-RUN) "', ',', 'end', '=', '""', ')', 'print', '(', '"Wrote {}/{} files in {} directories, skipped: {}."', '.', 'format', '(', 'stats', '[', '"files_written"', ']', ',', 'stats', '[', '"local_files"', ']', ',', 'stats', '[', '"local_dirs"', ']', ',', 'stats', '[', '"conflict_files_skipped"', ']', ',', ')', ',', 'end', '=', '""', ',', ')', 'if', 'stats', '[', '"interactive_ask"', ']', ':', 'print', '(', ')', 'else', ':', 'print', '(', '" Elap: {}."', '.', 'format', '(', 'stats', '[', '"elap_str"', ']', ')', ')', 'return'] | CLI main entry point. | ['CLI', 'main', 'entry', 'point', '.'] | train | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/pyftpsync.py#L45-L271 |
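For reference, the parser built above accepts invocations such as "pyftpsync upload ./site ftp://joe@example.com/htdocs --delete --resolve local", "pyftpsync download ./site ftp://joe@example.com/htdocs --force", or "pyftpsync sync ./site ftp://joe@example.com/htdocs --resolve ask"; the paths, host and the ftp:// remote-URL form are placeholders and assumptions, only the sub-commands and flags come from the argparse setup shown.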
1,134 | src-d/lookout-sdk | python/lookout/sdk/grpc/interceptors/logger.py | LogInterceptorMixin.add_request_log_fields | def add_request_log_fields(
self, log_fields: LogFields,
call_details: Union[grpc.HandlerCallDetails,
grpc.ClientCallDetails]
):
"""Add log fields related to a request to the provided log fields
:param log_fields: log fields instance to which to add the fields
:param call_details: some information regarding the call
"""
service, method = call_details.method[1:].split("/")
log_fields.add_fields({
"system": "grpc",
"span.kind": self.KIND,
"grpc.service": service,
"grpc.method": method,
}) | python | def add_request_log_fields(
self, log_fields: LogFields,
call_details: Union[grpc.HandlerCallDetails,
grpc.ClientCallDetails]
):
"""Add log fields related to a request to the provided log fields
:param log_fields: log fields instance to which to add the fields
:param call_details: some information regarding the call
"""
service, method = call_details.method[1:].split("/")
log_fields.add_fields({
"system": "grpc",
"span.kind": self.KIND,
"grpc.service": service,
"grpc.method": method,
}) | ['def', 'add_request_log_fields', '(', 'self', ',', 'log_fields', ':', 'LogFields', ',', 'call_details', ':', 'Union', '[', 'grpc', '.', 'HandlerCallDetails', ',', 'grpc', '.', 'ClientCallDetails', ']', ')', ':', 'service', ',', 'method', '=', 'call_details', '.', 'method', '[', '1', ':', ']', '.', 'split', '(', '"/"', ')', 'log_fields', '.', 'add_fields', '(', '{', '"system"', ':', '"grpc"', ',', '"span.kind"', ':', 'self', '.', 'KIND', ',', '"grpc.service"', ':', 'service', ',', '"grpc.method"', ':', 'method', ',', '}', ')'] | Add log fields related to a request to the provided log fields
:param log_fields: log fields instance to which to add the fields
:param call_details: some information regarding the call | ['Add', 'log', 'fields', 'related', 'to', 'a', 'request', 'to', 'the', 'provided', 'log', 'fields'] | train | https://github.com/src-d/lookout-sdk/blob/2ca64a77b022864fed3bb31d12997712c7e98e6e/python/lookout/sdk/grpc/interceptors/logger.py#L21-L38 |
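A tiny illustration of the field extraction above; gRPC call details carry a full method path of the form /package.Service/Method, and the values below are hypothetical:

# split "/pb.lookout.Analyzer/NotifyReviewEvent" the same way the interceptor does
method_path = "/pb.lookout.Analyzer/NotifyReviewEvent"
service, method = method_path[1:].split("/")
print(service)  # pb.lookout.Analyzer
print(method)   # NotifyReviewEvent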
1,135 | iclab/centinel | centinel/vpn/openvpn.py | OpenVPN.output_callback | def output_callback(self, line, kill_switch):
"""Set status of openvpn according to what we process"""
self.notifications += line + "\n"
if "Initialization Sequence Completed" in line:
self.started = True
if "ERROR:" in line or "Cannot resolve host address:" in line:
self.error = True
if "process exiting" in line:
self.stopped = True | python | def output_callback(self, line, kill_switch):
"""Set status of openvpn according to what we process"""
self.notifications += line + "\n"
if "Initialization Sequence Completed" in line:
self.started = True
if "ERROR:" in line or "Cannot resolve host address:" in line:
self.error = True
if "process exiting" in line:
self.stopped = True | ['def', 'output_callback', '(', 'self', ',', 'line', ',', 'kill_switch', ')', ':', 'self', '.', 'notifications', '+=', 'line', '+', '"\\n"', 'if', '"Initialization Sequence Completed"', 'in', 'line', ':', 'self', '.', 'started', '=', 'True', 'if', '"ERROR:"', 'in', 'line', 'or', '"Cannot resolve host address:"', 'in', 'line', ':', 'self', '.', 'error', '=', 'True', 'if', '"process exiting"', 'in', 'line', ':', 'self', '.', 'stopped', '=', 'True'] | Set status of openvpn according to what we process | ['Set', 'status', 'of', 'openvpn', 'according', 'to', 'what', 'we', 'process'] | train | https://github.com/iclab/centinel/blob/9a25dcf30c6a1db3c046f7ccb8ab8873e455c1a4/centinel/vpn/openvpn.py#L57-L66 |
1,136 | bitesofcode/projexui | projexui/widgets/xchart/xchartrenderer.py | XChartRenderer.drawAxis | def drawAxis(self, painter, rect, axis):
"""
Draws the axis for the given painter.
:param painter | <QPainter>
rect | <QRect>
"""
if not axis:
return
# draw the axis lines
painter.save()
pen = QPen(self.axisColor())
pen.setWidth(3)
painter.setPen(pen)
# draw the vertical line
if axis.orientation() == Qt.Vertical:
line = QLineF(rect.right(), rect.top(),
rect.right(), rect.bottom())
painter.drawLine(line)
painter.setFont(axis.labelFont())
for y, height, label in self._buildData.get('grid_h_labels', []):
painter.drawText(0, y - height / 2.0, rect.width() - 3, height,
Qt.AlignRight | Qt.AlignVCenter, label)
painter.translate(0, rect.center().y())
painter.rotate(-90)
painter.setFont(axis.titleFont())
painter.drawText(-rect.height()/2, 0, rect.height(), rect.width(),
Qt.AlignHCenter | Qt.AlignTop, axis.title())
# draw the horizontal line
else:
line = QLineF(rect.left(), rect.top(),
rect.right(), rect.top())
painter.setFont(axis.titleFont())
painter.drawText(rect,
Qt.AlignHCenter | Qt.AlignBottom,
axis.title())
painter.drawLine(line)
painter.setFont(axis.labelFont())
for x, width, label in self._buildData.get('grid_v_labels', []):
painter.drawText(x - width / 2.0, 3, width, rect.height() - 6,
Qt.AlignHCenter | Qt.AlignTop, label)
painter.restore() | python | def drawAxis(self, painter, rect, axis):
"""
Draws the axis for the given painter.
:param painter | <QPainter>
rect | <QRect>
"""
if not axis:
return
# draw the axis lines
painter.save()
pen = QPen(self.axisColor())
pen.setWidth(3)
painter.setPen(pen)
# draw the vertical line
if axis.orientation() == Qt.Vertical:
line = QLineF(rect.right(), rect.top(),
rect.right(), rect.bottom())
painter.drawLine(line)
painter.setFont(axis.labelFont())
for y, height, label in self._buildData.get('grid_h_labels', []):
painter.drawText(0, y - height / 2.0, rect.width() - 3, height,
Qt.AlignRight | Qt.AlignVCenter, label)
painter.translate(0, rect.center().y())
painter.rotate(-90)
painter.setFont(axis.titleFont())
painter.drawText(-rect.height()/2, 0, rect.height(), rect.width(),
Qt.AlignHCenter | Qt.AlignTop, axis.title())
# draw the horizontal line
else:
line = QLineF(rect.left(), rect.top(),
rect.right(), rect.top())
painter.setFont(axis.titleFont())
painter.drawText(rect,
Qt.AlignHCenter | Qt.AlignBottom,
axis.title())
painter.drawLine(line)
painter.setFont(axis.labelFont())
for x, width, label in self._buildData.get('grid_v_labels', []):
painter.drawText(x - width / 2.0, 3, width, rect.height() - 6,
Qt.AlignHCenter | Qt.AlignTop, label)
painter.restore() | ['def', 'drawAxis', '(', 'self', ',', 'painter', ',', 'rect', ',', 'axis', ')', ':', 'if', 'not', 'axis', ':', 'return', '# draw the axis lines\r', 'painter', '.', 'save', '(', ')', 'pen', '=', 'QPen', '(', 'self', '.', 'axisColor', '(', ')', ')', 'pen', '.', 'setWidth', '(', '3', ')', 'painter', '.', 'setPen', '(', 'pen', ')', '# draw the vertical line\r', 'if', 'axis', '.', 'orientation', '(', ')', '==', 'Qt', '.', 'Vertical', ':', 'line', '=', 'QLineF', '(', 'rect', '.', 'right', '(', ')', ',', 'rect', '.', 'top', '(', ')', ',', 'rect', '.', 'right', '(', ')', ',', 'rect', '.', 'bottom', '(', ')', ')', 'painter', '.', 'drawLine', '(', 'line', ')', 'painter', '.', 'setFont', '(', 'axis', '.', 'labelFont', '(', ')', ')', 'for', 'y', ',', 'height', ',', 'label', 'in', 'self', '.', '_buildData', '.', 'get', '(', "'grid_h_labels'", ',', '[', ']', ')', ':', 'painter', '.', 'drawText', '(', '0', ',', 'y', '-', 'height', '/', '2.0', ',', 'rect', '.', 'width', '(', ')', '-', '3', ',', 'height', ',', 'Qt', '.', 'AlignRight', '|', 'Qt', '.', 'AlignVCenter', ',', 'label', ')', 'painter', '.', 'translate', '(', '0', ',', 'rect', '.', 'center', '(', ')', '.', 'y', '(', ')', ')', 'painter', '.', 'rotate', '(', '-', '90', ')', 'painter', '.', 'setFont', '(', 'axis', '.', 'titleFont', '(', ')', ')', 'painter', '.', 'drawText', '(', '-', 'rect', '.', 'height', '(', ')', '/', '2', ',', '0', ',', 'rect', '.', 'height', '(', ')', ',', 'rect', '.', 'width', '(', ')', ',', 'Qt', '.', 'AlignHCenter', '|', 'Qt', '.', 'AlignTop', ',', 'axis', '.', 'title', '(', ')', ')', '# draw the horizontal line\r', 'else', ':', 'line', '=', 'QLineF', '(', 'rect', '.', 'left', '(', ')', ',', 'rect', '.', 'top', '(', ')', ',', 'rect', '.', 'right', '(', ')', ',', 'rect', '.', 'top', '(', ')', ')', 'painter', '.', 'setFont', '(', 'axis', '.', 'titleFont', '(', ')', ')', 'painter', '.', 'drawText', '(', 'rect', ',', 'Qt', '.', 'AlignHCenter', '|', 'Qt', '.', 'AlignBottom', ',', 'axis', '.', 'title', '(', ')', ')', 'painter', '.', 'drawLine', '(', 'line', ')', 'painter', '.', 'setFont', '(', 'axis', '.', 'labelFont', '(', ')', ')', 'for', 'x', ',', 'width', ',', 'label', 'in', 'self', '.', '_buildData', '.', 'get', '(', "'grid_v_labels'", ',', '[', ']', ')', ':', 'painter', '.', 'drawText', '(', 'x', '-', 'width', '/', '2.0', ',', '3', ',', 'width', ',', 'rect', '.', 'height', '(', ')', '-', '6', ',', 'Qt', '.', 'AlignHCenter', '|', 'Qt', '.', 'AlignTop', ',', 'label', ')', 'painter', '.', 'restore', '(', ')'] | Draws the axis for the given painter.
:param painter | <QPainter>
rect | <QRect> | ['Draws', 'the', 'axis', 'for', 'the', 'given', 'painter', '.', ':', 'param', 'painter', '|', '<QPainter', '>', 'rect', '|', '<QRect', '>'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchartrenderer.py#L320-L369 |
1,137 | j4321/tkColorPicker | tkcolorpicker/spinbox.py | Spinbox.focusout | def focusout(self, event):
"""Change style on focus out events."""
bc = self.style.lookup("TEntry", "bordercolor", ("!focus",))
dc = self.style.lookup("TEntry", "darkcolor", ("!focus",))
lc = self.style.lookup("TEntry", "lightcolor", ("!focus",))
self.style.configure("%s.spinbox.TFrame" % self.frame, bordercolor=bc,
darkcolor=dc, lightcolor=lc) | python | def focusout(self, event):
"""Change style on focus out events."""
bc = self.style.lookup("TEntry", "bordercolor", ("!focus",))
dc = self.style.lookup("TEntry", "darkcolor", ("!focus",))
lc = self.style.lookup("TEntry", "lightcolor", ("!focus",))
self.style.configure("%s.spinbox.TFrame" % self.frame, bordercolor=bc,
darkcolor=dc, lightcolor=lc) | ['def', 'focusout', '(', 'self', ',', 'event', ')', ':', 'bc', '=', 'self', '.', 'style', '.', 'lookup', '(', '"TEntry"', ',', '"bordercolor"', ',', '(', '"!focus"', ',', ')', ')', 'dc', '=', 'self', '.', 'style', '.', 'lookup', '(', '"TEntry"', ',', '"darkcolor"', ',', '(', '"!focus"', ',', ')', ')', 'lc', '=', 'self', '.', 'style', '.', 'lookup', '(', '"TEntry"', ',', '"lightcolor"', ',', '(', '"!focus"', ',', ')', ')', 'self', '.', 'style', '.', 'configure', '(', '"%s.spinbox.TFrame"', '%', 'self', '.', 'frame', ',', 'bordercolor', '=', 'bc', ',', 'darkcolor', '=', 'dc', ',', 'lightcolor', '=', 'lc', ')'] | Change style on focus out events. | ['Change', 'style', 'on', 'focus', 'out', 'events', '.'] | train | https://github.com/j4321/tkColorPicker/blob/ee2d583115e0c7ad7f29795763fc6b4ddc4e8c1d/tkcolorpicker/spinbox.py#L99-L105 |
1,138 | libtcod/python-tcod | tcod/libtcodpy.py | console_new | def console_new(w: int, h: int) -> tcod.console.Console:
"""Return an offscreen console of size: w,h.
.. deprecated:: 8.5
Create new consoles using :any:`tcod.console.Console` instead of this
function.
"""
return tcod.console.Console(w, h) | python | def console_new(w: int, h: int) -> tcod.console.Console:
"""Return an offscreen console of size: w,h.
.. deprecated:: 8.5
Create new consoles using :any:`tcod.console.Console` instead of this
function.
"""
return tcod.console.Console(w, h) | ['def', 'console_new', '(', 'w', ':', 'int', ',', 'h', ':', 'int', ')', '->', 'tcod', '.', 'console', '.', 'Console', ':', 'return', 'tcod', '.', 'console', '.', 'Console', '(', 'w', ',', 'h', ')'] | Return an offscreen console of size: w,h.
.. deprecated:: 8.5
Create new consoles using :any:`tcod.console.Console` instead of this
function. | ['Return', 'an', 'offscreen', 'console', 'of', 'size', ':', 'w', 'h', '.'] | train | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1675-L1682 |
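A minimal sketch of the deprecation note above, showing the old helper next to its suggested replacement; the console size is arbitrary:

import tcod
import tcod.console

con_old = tcod.console_new(80, 50)      # deprecated helper from the record above
con_new = tcod.console.Console(80, 50)  # replacement recommended since 8.5
print(con_new.width, con_new.height)    # 80 50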
1,139 | evhub/coconut | coconut/compiler/compiler.py | Compiler.repl_proc | def repl_proc(self, inputstring, log=True, **kwargs):
"""Process using replprocs."""
return self.apply_procs(self.replprocs, kwargs, inputstring, log=log) | python | def repl_proc(self, inputstring, log=True, **kwargs):
"""Process using replprocs."""
return self.apply_procs(self.replprocs, kwargs, inputstring, log=log) | ['def', 'repl_proc', '(', 'self', ',', 'inputstring', ',', 'log', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', 'apply_procs', '(', 'self', '.', 'replprocs', ',', 'kwargs', ',', 'inputstring', ',', 'log', '=', 'log', ')'] | Process using replprocs. | ['Process', 'using', 'replprocs', '.'] | train | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1084-L1086 |
1,140 | mikedh/trimesh | trimesh/path/entities.py | Line.discrete | def discrete(self, vertices, scale=1.0):
"""
Discretize into a world- space path.
Parameters
------------
vertices: (n, dimension) float
Points in space
scale : float
Size of overall scene for numerical comparisons
Returns
-------------
discrete: (m, dimension) float
Path in space composed of line segments
"""
discrete = self._orient(vertices[self.points])
return discrete | python | def discrete(self, vertices, scale=1.0):
"""
Discretize into a world- space path.
Parameters
------------
vertices: (n, dimension) float
Points in space
scale : float
Size of overall scene for numerical comparisons
Returns
-------------
discrete: (m, dimension) float
Path in space composed of line segments
"""
discrete = self._orient(vertices[self.points])
return discrete | ['def', 'discrete', '(', 'self', ',', 'vertices', ',', 'scale', '=', '1.0', ')', ':', 'discrete', '=', 'self', '.', '_orient', '(', 'vertices', '[', 'self', '.', 'points', ']', ')', 'return', 'discrete'] | Discretize into a world- space path.
Parameters
------------
vertices: (n, dimension) float
Points in space
scale : float
Size of overall scene for numerical comparisons
Returns
-------------
discrete: (m, dimension) float
Path in space composed of line segments | ['Discretize', 'into', 'a', 'world', '-', 'space', 'path', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/entities.py#L395-L412 |
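A small usage sketch for the method above. The entity construction (a Line referencing vertex indices) is an assumption about how trimesh path entities are built; the discrete() call itself follows the recorded signature.

import numpy as np
from trimesh.path.entities import Line

# Three 2D vertices; the entity stores indices into this array.
vertices = np.array([[0.0, 0.0], [1.0, 2.0], [3.0, 3.0]])
line = Line(points=[0, 2])           # assumed constructor usage
segment = line.discrete(vertices)    # returns vertices[[0, 2]] as a polyline
print(segment)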
1,141 | SiLab-Bonn/pyBAR | pybar/analysis/analysis.py | analyze_cluster_size_per_scan_parameter | def analyze_cluster_size_per_scan_parameter(input_file_hits, output_file_cluster_size, parameter='GDAC', max_chunk_size=10000000, overwrite_output_files=False, output_pdf=None):
''' This method takes multiple hit files and determines the cluster size for different scan parameter values of
Parameters
----------
input_files_hits: string
output_file_cluster_size: string
The data file with the results
parameter: string
The name of the parameter to separate the data into (e.g.: PlsrDAC)
max_chunk_size: int
the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
overwrite_output_files: bool
Set to true to overwrite the output file if it already exists
output_pdf: PdfPages
PdfPages file object, if none the plot is printed to screen, if False nothing is printed
'''
logging.info('Analyze the cluster sizes for different ' + parameter + ' settings for ' + input_file_hits)
if os.path.isfile(output_file_cluster_size) and not overwrite_output_files: # skip analysis if already done
logging.info('Analyzed cluster size file ' + output_file_cluster_size + ' already exists. Skip cluster size analysis.')
else:
with tb.open_file(output_file_cluster_size, mode="w") as out_file_h5: # file to write the data into
filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data
parameter_goup = out_file_h5.create_group(out_file_h5.root, parameter, title=parameter) # note to store the data
cluster_size_total = None # final array for the cluster size per GDAC
with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: # open the actual hit file
meta_data_array = in_hit_file_h5.root.meta_data[:]
scan_parameter = analysis_utils.get_scan_parameter(meta_data_array) # get the scan parameters
if scan_parameter: # if a GDAC scan parameter was used analyze the cluster size per GDAC setting
scan_parameter_values = scan_parameter[parameter] # scan parameter settings used
if len(scan_parameter_values) == 1: # only analyze per scan step if there are more than one scan step
logging.warning('The file ' + str(input_file_hits) + ' has no different ' + str(parameter) + ' parameter values. Omit analysis.')
else:
logging.info('Analyze ' + input_file_hits + ' per scan parameter ' + parameter + ' for ' + str(len(scan_parameter_values)) + ' values from ' + str(np.amin(scan_parameter_values)) + ' to ' + str(np.amax(scan_parameter_values)))
event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)['event_number'] # get the event numbers in meta_data where the scan parameter changes
parameter_ranges = np.column_stack((scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers)))
hit_table = in_hit_file_h5.root.Hits
analysis_utils.index_event_number(hit_table)
total_hits, total_hits_2, index = 0, 0, 0
chunk_size = max_chunk_size
# initialize the analysis and set settings
analyze_data = AnalyzeRawData()
analyze_data.create_cluster_size_hist = True
analyze_data.create_cluster_tot_hist = True
analyze_data.histogram.set_no_scan_parameter() # one has to tell histogram the # of scan parameters for correct occupancy hist allocation
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80)
progress_bar.start()
for parameter_index, parameter_range in enumerate(parameter_ranges): # loop over the selected events
analyze_data.reset() # resets the data of the last analysis
logging.debug('Analyze GDAC = ' + str(parameter_range[0]) + ' ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%')
start_event_number = parameter_range[1]
stop_event_number = parameter_range[2]
logging.debug('Data from events = [' + str(start_event_number) + ',' + str(stop_event_number) + '[')
actual_parameter_group = out_file_h5.create_group(parameter_goup, name=parameter + '_' + str(parameter_range[0]), title=parameter + '_' + str(parameter_range[0]))
# loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given
readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up
for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=chunk_size):
total_hits += hits.shape[0]
analyze_data.analyze_hits(hits) # analyze the selected hits in chunks
readout_hit_len += hits.shape[0]
progress_bar.update(index)
chunk_size = int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size # to increase the readout speed, estimated the number of hits for one read instruction
if chunk_size < 50: # limit the lower chunk size, there can always be a crazy event with more than 20 hits
chunk_size = 50
# get occupancy hist
occupancy = analyze_data.histogram.get_occupancy() # just check here if histogram is consistent
# store and plot cluster size hist
cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist()
cluster_size_hist_table = out_file_h5.create_carray(actual_parameter_group, name='HistClusterSize', title='Cluster Size Histogram', atom=tb.Atom.from_dtype(cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table)
cluster_size_hist_table[:] = cluster_size_hist
if output_pdf is not False:
plotting.plot_cluster_size(hist=cluster_size_hist, title='Cluster size (' + str(np.sum(cluster_size_hist)) + ' entries) for ' + parameter + ' = ' + str(scan_parameter_values[parameter_index]), filename=output_pdf)
if cluster_size_total is None: # true if no data was appended to the array yet
cluster_size_total = cluster_size_hist
else:
cluster_size_total = np.vstack([cluster_size_total, cluster_size_hist])
total_hits_2 += np.sum(occupancy)
progress_bar.finish()
if total_hits != total_hits_2:
logging.warning('Analysis shows inconsistent number of hits. Check needed!')
logging.info('Analyzed %d hits!', total_hits)
cluster_size_total_out = out_file_h5.create_carray(out_file_h5.root, name='AllHistClusterSize', title='All Cluster Size Histograms', atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table)
cluster_size_total_out[:] = cluster_size_total | python | def analyze_cluster_size_per_scan_parameter(input_file_hits, output_file_cluster_size, parameter='GDAC', max_chunk_size=10000000, overwrite_output_files=False, output_pdf=None):
''' This method takes multiple hit files and determines the cluster size for different scan parameter values of
Parameters
----------
input_files_hits: string
output_file_cluster_size: string
The data file with the results
parameter: string
The name of the parameter to separate the data into (e.g.: PlsrDAC)
max_chunk_size: int
the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
overwrite_output_files: bool
Set to true to overwrite the output file if it already exists
output_pdf: PdfPages
PdfPages file object, if none the plot is printed to screen, if False nothing is printed
'''
logging.info('Analyze the cluster sizes for different ' + parameter + ' settings for ' + input_file_hits)
if os.path.isfile(output_file_cluster_size) and not overwrite_output_files: # skip analysis if already done
logging.info('Analyzed cluster size file ' + output_file_cluster_size + ' already exists. Skip cluster size analysis.')
else:
with tb.open_file(output_file_cluster_size, mode="w") as out_file_h5: # file to write the data into
filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data
parameter_goup = out_file_h5.create_group(out_file_h5.root, parameter, title=parameter) # note to store the data
cluster_size_total = None # final array for the cluster size per GDAC
with tb.open_file(input_file_hits, mode="r+") as in_hit_file_h5: # open the actual hit file
meta_data_array = in_hit_file_h5.root.meta_data[:]
scan_parameter = analysis_utils.get_scan_parameter(meta_data_array) # get the scan parameters
if scan_parameter: # if a GDAC scan parameter was used analyze the cluster size per GDAC setting
scan_parameter_values = scan_parameter[parameter] # scan parameter settings used
if len(scan_parameter_values) == 1: # only analyze per scan step if there are more than one scan step
logging.warning('The file ' + str(input_file_hits) + ' has no different ' + str(parameter) + ' parameter values. Omit analysis.')
else:
logging.info('Analyze ' + input_file_hits + ' per scan parameter ' + parameter + ' for ' + str(len(scan_parameter_values)) + ' values from ' + str(np.amin(scan_parameter_values)) + ' to ' + str(np.amax(scan_parameter_values)))
event_numbers = analysis_utils.get_meta_data_at_scan_parameter(meta_data_array, parameter)['event_number'] # get the event numbers in meta_data where the scan parameter changes
parameter_ranges = np.column_stack((scan_parameter_values, analysis_utils.get_ranges_from_array(event_numbers)))
hit_table = in_hit_file_h5.root.Hits
analysis_utils.index_event_number(hit_table)
total_hits, total_hits_2, index = 0, 0, 0
chunk_size = max_chunk_size
# initialize the analysis and set settings
analyze_data = AnalyzeRawData()
analyze_data.create_cluster_size_hist = True
analyze_data.create_cluster_tot_hist = True
analyze_data.histogram.set_no_scan_parameter() # one has to tell histogram the # of scan parameters for correct occupancy hist allocation
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80)
progress_bar.start()
for parameter_index, parameter_range in enumerate(parameter_ranges): # loop over the selected events
analyze_data.reset() # resets the data of the last analysis
logging.debug('Analyze GDAC = ' + str(parameter_range[0]) + ' ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%')
start_event_number = parameter_range[1]
stop_event_number = parameter_range[2]
logging.debug('Data from events = [' + str(start_event_number) + ',' + str(stop_event_number) + '[')
actual_parameter_group = out_file_h5.create_group(parameter_goup, name=parameter + '_' + str(parameter_range[0]), title=parameter + '_' + str(parameter_range[0]))
# loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given
readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up
for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=start_event_number, stop_event_number=stop_event_number, start_index=index, chunk_size=chunk_size):
total_hits += hits.shape[0]
analyze_data.analyze_hits(hits) # analyze the selected hits in chunks
readout_hit_len += hits.shape[0]
progress_bar.update(index)
chunk_size = int(1.05 * readout_hit_len) if int(1.05 * readout_hit_len) < max_chunk_size else max_chunk_size # to increase the readout speed, estimated the number of hits for one read instruction
if chunk_size < 50: # limit the lower chunk size, there can always be a crazy event with more than 20 hits
chunk_size = 50
# get occupancy hist
occupancy = analyze_data.histogram.get_occupancy() # just check here if histogram is consistent
# store and plot cluster size hist
cluster_size_hist = analyze_data.clusterizer.get_cluster_size_hist()
cluster_size_hist_table = out_file_h5.create_carray(actual_parameter_group, name='HistClusterSize', title='Cluster Size Histogram', atom=tb.Atom.from_dtype(cluster_size_hist.dtype), shape=cluster_size_hist.shape, filters=filter_table)
cluster_size_hist_table[:] = cluster_size_hist
if output_pdf is not False:
plotting.plot_cluster_size(hist=cluster_size_hist, title='Cluster size (' + str(np.sum(cluster_size_hist)) + ' entries) for ' + parameter + ' = ' + str(scan_parameter_values[parameter_index]), filename=output_pdf)
if cluster_size_total is None: # true if no data was appended to the array yet
cluster_size_total = cluster_size_hist
else:
cluster_size_total = np.vstack([cluster_size_total, cluster_size_hist])
total_hits_2 += np.sum(occupancy)
progress_bar.finish()
if total_hits != total_hits_2:
logging.warning('Analysis shows inconsistent number of hits. Check needed!')
logging.info('Analyzed %d hits!', total_hits)
cluster_size_total_out = out_file_h5.create_carray(out_file_h5.root, name='AllHistClusterSize', title='All Cluster Size Histograms', atom=tb.Atom.from_dtype(cluster_size_total.dtype), shape=cluster_size_total.shape, filters=filter_table)
cluster_size_total_out[:] = cluster_size_total | ['def', 'analyze_cluster_size_per_scan_parameter', '(', 'input_file_hits', ',', 'output_file_cluster_size', ',', 'parameter', '=', "'GDAC'", ',', 'max_chunk_size', '=', '10000000', ',', 'overwrite_output_files', '=', 'False', ',', 'output_pdf', '=', 'None', ')', ':', 'logging', '.', 'info', '(', "'Analyze the cluster sizes for different '", '+', 'parameter', '+', "' settings for '", '+', 'input_file_hits', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'output_file_cluster_size', ')', 'and', 'not', 'overwrite_output_files', ':', '# skip analysis if already done', 'logging', '.', 'info', '(', "'Analyzed cluster size file '", '+', 'output_file_cluster_size', '+', "' already exists. Skip cluster size analysis.'", ')', 'else', ':', 'with', 'tb', '.', 'open_file', '(', 'output_file_cluster_size', ',', 'mode', '=', '"w"', ')', 'as', 'out_file_h5', ':', '# file to write the data into', 'filter_table', '=', 'tb', '.', 'Filters', '(', 'complib', '=', "'blosc'", ',', 'complevel', '=', '5', ',', 'fletcher32', '=', 'False', ')', '# compression of the written data', 'parameter_goup', '=', 'out_file_h5', '.', 'create_group', '(', 'out_file_h5', '.', 'root', ',', 'parameter', ',', 'title', '=', 'parameter', ')', '# note to store the data', 'cluster_size_total', '=', 'None', '# final array for the cluster size per GDAC', 'with', 'tb', '.', 'open_file', '(', 'input_file_hits', ',', 'mode', '=', '"r+"', ')', 'as', 'in_hit_file_h5', ':', '# open the actual hit file', 'meta_data_array', '=', 'in_hit_file_h5', '.', 'root', '.', 'meta_data', '[', ':', ']', 'scan_parameter', '=', 'analysis_utils', '.', 'get_scan_parameter', '(', 'meta_data_array', ')', '# get the scan parameters', 'if', 'scan_parameter', ':', '# if a GDAC scan parameter was used analyze the cluster size per GDAC setting', 'scan_parameter_values', '=', 'scan_parameter', '[', 'parameter', ']', '# scan parameter settings used', 'if', 'len', '(', 'scan_parameter_values', ')', '==', '1', ':', '# only analyze per scan step if there are more than one scan step', 'logging', '.', 'warning', '(', "'The file '", '+', 'str', '(', 'input_file_hits', ')', '+', "' has no different '", '+', 'str', '(', 'parameter', ')', '+', "' parameter values. 
Omit analysis.'", ')', 'else', ':', 'logging', '.', 'info', '(', "'Analyze '", '+', 'input_file_hits', '+', "' per scan parameter '", '+', 'parameter', '+', "' for '", '+', 'str', '(', 'len', '(', 'scan_parameter_values', ')', ')', '+', "' values from '", '+', 'str', '(', 'np', '.', 'amin', '(', 'scan_parameter_values', ')', ')', '+', "' to '", '+', 'str', '(', 'np', '.', 'amax', '(', 'scan_parameter_values', ')', ')', ')', 'event_numbers', '=', 'analysis_utils', '.', 'get_meta_data_at_scan_parameter', '(', 'meta_data_array', ',', 'parameter', ')', '[', "'event_number'", ']', '# get the event numbers in meta_data where the scan parameter changes', 'parameter_ranges', '=', 'np', '.', 'column_stack', '(', '(', 'scan_parameter_values', ',', 'analysis_utils', '.', 'get_ranges_from_array', '(', 'event_numbers', ')', ')', ')', 'hit_table', '=', 'in_hit_file_h5', '.', 'root', '.', 'Hits', 'analysis_utils', '.', 'index_event_number', '(', 'hit_table', ')', 'total_hits', ',', 'total_hits_2', ',', 'index', '=', '0', ',', '0', ',', '0', 'chunk_size', '=', 'max_chunk_size', '# initialize the analysis and set settings', 'analyze_data', '=', 'AnalyzeRawData', '(', ')', 'analyze_data', '.', 'create_cluster_size_hist', '=', 'True', 'analyze_data', '.', 'create_cluster_tot_hist', '=', 'True', 'analyze_data', '.', 'histogram', '.', 'set_no_scan_parameter', '(', ')', '# one has to tell histogram the # of scan parameters for correct occupancy hist allocation', 'progress_bar', '=', 'progressbar', '.', 'ProgressBar', '(', 'widgets', '=', '[', "''", ',', 'progressbar', '.', 'Percentage', '(', ')', ',', "' '", ',', 'progressbar', '.', 'Bar', '(', 'marker', '=', "'*'", ',', 'left', '=', "'|'", ',', 'right', '=', "'|'", ')', ',', "' '", ',', 'progressbar', '.', 'AdaptiveETA', '(', ')', ']', ',', 'maxval', '=', 'hit_table', '.', 'shape', '[', '0', ']', ',', 'term_width', '=', '80', ')', 'progress_bar', '.', 'start', '(', ')', 'for', 'parameter_index', ',', 'parameter_range', 'in', 'enumerate', '(', 'parameter_ranges', ')', ':', '# loop over the selected events', 'analyze_data', '.', 'reset', '(', ')', '# resets the data of the last analysis', 'logging', '.', 'debug', '(', "'Analyze GDAC = '", '+', 'str', '(', 'parameter_range', '[', '0', ']', ')', '+', "' '", '+', 'str', '(', 'int', '(', 'float', '(', 'float', '(', 'parameter_index', ')', '/', 'float', '(', 'len', '(', 'parameter_ranges', ')', ')', '*', '100.0', ')', ')', ')', '+', "'%'", ')', 'start_event_number', '=', 'parameter_range', '[', '1', ']', 'stop_event_number', '=', 'parameter_range', '[', '2', ']', 'logging', '.', 'debug', '(', "'Data from events = ['", '+', 'str', '(', 'start_event_number', ')', '+', "','", '+', 'str', '(', 'stop_event_number', ')', '+', "'['", ')', 'actual_parameter_group', '=', 'out_file_h5', '.', 'create_group', '(', 'parameter_goup', ',', 'name', '=', 'parameter', '+', "'_'", '+', 'str', '(', 'parameter_range', '[', '0', ']', ')', ',', 'title', '=', 'parameter', '+', "'_'", '+', 'str', '(', 'parameter_range', '[', '0', ']', ')', ')', '# loop over the hits in the actual selected events with optimizations: variable chunk size, start word index given', 'readout_hit_len', '=', '0', '# variable to calculate a optimal chunk size value from the number of hits for speed up', 'for', 'hits', ',', 'index', 'in', 'analysis_utils', '.', 'data_aligned_at_events', '(', 'hit_table', ',', 'start_event_number', '=', 'start_event_number', ',', 'stop_event_number', '=', 'stop_event_number', ',', 'start_index', '=', 'index', ',', 'chunk_size', '=', 
'chunk_size', ')', ':', 'total_hits', '+=', 'hits', '.', 'shape', '[', '0', ']', 'analyze_data', '.', 'analyze_hits', '(', 'hits', ')', '# analyze the selected hits in chunks', 'readout_hit_len', '+=', 'hits', '.', 'shape', '[', '0', ']', 'progress_bar', '.', 'update', '(', 'index', ')', 'chunk_size', '=', 'int', '(', '1.05', '*', 'readout_hit_len', ')', 'if', 'int', '(', '1.05', '*', 'readout_hit_len', ')', '<', 'max_chunk_size', 'else', 'max_chunk_size', '# to increase the readout speed, estimated the number of hits for one read instruction', 'if', 'chunk_size', '<', '50', ':', '# limit the lower chunk size, there can always be a crazy event with more than 20 hits', 'chunk_size', '=', '50', '# get occupancy hist', 'occupancy', '=', 'analyze_data', '.', 'histogram', '.', 'get_occupancy', '(', ')', '# just check here if histogram is consistent', '# store and plot cluster size hist', 'cluster_size_hist', '=', 'analyze_data', '.', 'clusterizer', '.', 'get_cluster_size_hist', '(', ')', 'cluster_size_hist_table', '=', 'out_file_h5', '.', 'create_carray', '(', 'actual_parameter_group', ',', 'name', '=', "'HistClusterSize'", ',', 'title', '=', "'Cluster Size Histogram'", ',', 'atom', '=', 'tb', '.', 'Atom', '.', 'from_dtype', '(', 'cluster_size_hist', '.', 'dtype', ')', ',', 'shape', '=', 'cluster_size_hist', '.', 'shape', ',', 'filters', '=', 'filter_table', ')', 'cluster_size_hist_table', '[', ':', ']', '=', 'cluster_size_hist', 'if', 'output_pdf', 'is', 'not', 'False', ':', 'plotting', '.', 'plot_cluster_size', '(', 'hist', '=', 'cluster_size_hist', ',', 'title', '=', "'Cluster size ('", '+', 'str', '(', 'np', '.', 'sum', '(', 'cluster_size_hist', ')', ')', '+', "' entries) for '", '+', 'parameter', '+', "' = '", '+', 'str', '(', 'scan_parameter_values', '[', 'parameter_index', ']', ')', ',', 'filename', '=', 'output_pdf', ')', 'if', 'cluster_size_total', 'is', 'None', ':', '# true if no data was appended to the array yet', 'cluster_size_total', '=', 'cluster_size_hist', 'else', ':', 'cluster_size_total', '=', 'np', '.', 'vstack', '(', '[', 'cluster_size_total', ',', 'cluster_size_hist', ']', ')', 'total_hits_2', '+=', 'np', '.', 'sum', '(', 'occupancy', ')', 'progress_bar', '.', 'finish', '(', ')', 'if', 'total_hits', '!=', 'total_hits_2', ':', 'logging', '.', 'warning', '(', "'Analysis shows inconsistent number of hits. Check needed!'", ')', 'logging', '.', 'info', '(', "'Analyzed %d hits!'", ',', 'total_hits', ')', 'cluster_size_total_out', '=', 'out_file_h5', '.', 'create_carray', '(', 'out_file_h5', '.', 'root', ',', 'name', '=', "'AllHistClusterSize'", ',', 'title', '=', "'All Cluster Size Histograms'", ',', 'atom', '=', 'tb', '.', 'Atom', '.', 'from_dtype', '(', 'cluster_size_total', '.', 'dtype', ')', ',', 'shape', '=', 'cluster_size_total', '.', 'shape', ',', 'filters', '=', 'filter_table', ')', 'cluster_size_total_out', '[', ':', ']', '=', 'cluster_size_total'] | This method takes multiple hit files and determines the cluster size for different scan parameter values of
Parameters
----------
input_files_hits: string
output_file_cluster_size: string
The data file with the results
parameter: string
The name of the parameter to separate the data into (e.g.: PlsrDAC)
max_chunk_size: int
the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
overwrite_output_files: bool
Set to true to overwrite the output file if it already exists
output_pdf: PdfPages
PdfPages file object, if none the plot is printed to screen, if False nothing is printed | ['This', 'method', 'takes', 'multiple', 'hit', 'files', 'and', 'determines', 'the', 'cluster', 'size', 'for', 'different', 'scan', 'parameter', 'values', 'of'] | train | https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L341-L425 |
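A hedged call sketch for the analysis function above; both HDF5 paths are made-up placeholders and the parameter mirrors the default shown in the signature.

# Hypothetical invocation -- the file names do not refer to any real run.
analyze_cluster_size_per_scan_parameter(
    input_file_hits='run_42_interpreted.h5',
    output_file_cluster_size='run_42_cluster_size.h5',
    parameter='GDAC',
    overwrite_output_files=True,
    output_pdf=False)   # False suppresses plotting, per the docstring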
1,142 | indico/indico-plugins | livesync/indico_livesync/util.py | get_excluded_categories | def get_excluded_categories():
"""Get excluded category IDs."""
from indico_livesync.plugin import LiveSyncPlugin
return {int(x['id']) for x in LiveSyncPlugin.settings.get('excluded_categories')} | python | def get_excluded_categories():
"""Get excluded category IDs."""
from indico_livesync.plugin import LiveSyncPlugin
return {int(x['id']) for x in LiveSyncPlugin.settings.get('excluded_categories')} | ['def', 'get_excluded_categories', '(', ')', ':', 'from', 'indico_livesync', '.', 'plugin', 'import', 'LiveSyncPlugin', 'return', '{', 'int', '(', 'x', '[', "'id'", ']', ')', 'for', 'x', 'in', 'LiveSyncPlugin', '.', 'settings', '.', 'get', '(', "'excluded_categories'", ')', '}'] | Get excluded category IDs. | ['Get', 'excluded', 'category', 'IDs', '.'] | train | https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/livesync/indico_livesync/util.py#L82-L85 |
1,143 | Chilipp/model-organization | model_organization/__init__.py | ModelOrganizer.del_value | def del_value(self, keys, complete=False, on_projects=False,
on_globals=False, projectname=None, base='', dtype=None,
**kwargs):
"""
Delete a value in the configuration
Parameters
----------
keys: list of str
A list of keys to be deleted. %(get_value_note)s
%(ModelOrganizer.info.common_params)s
base: str
A base string that shall be put in front of each key in `values` to
avoid typing it all the time
"""
config = self.info(complete=complete, on_projects=on_projects,
on_globals=on_globals, projectname=projectname,
return_dict=True, insert_id=False, **kwargs)
for key in keys:
if base:
key = base + key
key, sub_config = utils.go_through_dict(key, config)
del sub_config[key] | python | def del_value(self, keys, complete=False, on_projects=False,
on_globals=False, projectname=None, base='', dtype=None,
**kwargs):
"""
Delete a value in the configuration
Parameters
----------
keys: list of str
A list of keys to be deleted. %(get_value_note)s
%(ModelOrganizer.info.common_params)s
base: str
A base string that shall be put in front of each key in `values` to
avoid typing it all the time
"""
config = self.info(complete=complete, on_projects=on_projects,
on_globals=on_globals, projectname=projectname,
return_dict=True, insert_id=False, **kwargs)
for key in keys:
if base:
key = base + key
key, sub_config = utils.go_through_dict(key, config)
del sub_config[key] | ['def', 'del_value', '(', 'self', ',', 'keys', ',', 'complete', '=', 'False', ',', 'on_projects', '=', 'False', ',', 'on_globals', '=', 'False', ',', 'projectname', '=', 'None', ',', 'base', '=', "''", ',', 'dtype', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'self', '.', 'info', '(', 'complete', '=', 'complete', ',', 'on_projects', '=', 'on_projects', ',', 'on_globals', '=', 'on_globals', ',', 'projectname', '=', 'projectname', ',', 'return_dict', '=', 'True', ',', 'insert_id', '=', 'False', ',', '*', '*', 'kwargs', ')', 'for', 'key', 'in', 'keys', ':', 'if', 'base', ':', 'key', '=', 'base', '+', 'key', 'key', ',', 'sub_config', '=', 'utils', '.', 'go_through_dict', '(', 'key', ',', 'config', ')', 'del', 'sub_config', '[', 'key', ']'] | Delete a value in the configuration
Parameters
----------
keys: list of str
A list of keys to be deleted. %(get_value_note)s
%(ModelOrganizer.info.common_params)s
base: str
A base string that shall be put in front of each key in `values` to
avoid typing it all the time | ['Delete', 'a', 'value', 'in', 'the', 'configuration'] | train | https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/__init__.py#L1219-L1241 |
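The method above walks a nested configuration with utils.go_through_dict and deletes the leaf key. A self-contained sketch of that idea with stand-in names; the dotted-key convention used here is illustrative, not necessarily the package's separator.

def delete_nested(config, dotted_key):
    """Walk a nested dict along a dotted key and delete the leaf entry."""
    *parents, leaf = dotted_key.split('.')
    node = config
    for part in parents:
        node = node[part]
    del node[leaf]

cfg = {'projects': {'demo': {'root': '/tmp/demo', 'timestamp': '2020'}}}
delete_nested(cfg, 'projects.demo.root')
print(cfg)   # {'projects': {'demo': {'timestamp': '2020'}}}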
1,144 | nefarioustim/parker | parker/mediafile.py | get_instance | def get_instance(uri):
"""Return an instance of MediaFile."""
global _instances
try:
instance = _instances[uri]
except KeyError:
instance = MediaFile(
uri,
client.get_instance()
)
_instances[uri] = instance
return instance | python | def get_instance(uri):
"""Return an instance of MediaFile."""
global _instances
try:
instance = _instances[uri]
except KeyError:
instance = MediaFile(
uri,
client.get_instance()
)
_instances[uri] = instance
return instance | ['def', 'get_instance', '(', 'uri', ')', ':', 'global', '_instances', 'try', ':', 'instance', '=', '_instances', '[', 'uri', ']', 'except', 'KeyError', ':', 'instance', '=', 'MediaFile', '(', 'uri', ',', 'client', '.', 'get_instance', '(', ')', ')', '_instances', '[', 'uri', ']', '=', 'instance', 'return', 'instance'] | Return an instance of MediaFile. | ['Return', 'an', 'instance', 'of', 'MediaFile', '.'] | train | https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/mediafile.py#L14-L26 |
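The function above is a per-URI instance cache. A generic sketch of the same memoization pattern, independent of the parker client plumbing (the class here is a stand-in; the real one also takes a client).

_instances = {}

class MediaFile:
    """Stand-in for the cached object."""
    def __init__(self, uri):
        self.uri = uri

def get_instance(uri):
    try:
        return _instances[uri]
    except KeyError:
        _instances[uri] = MediaFile(uri)
        return _instances[uri]

assert get_instance('a.jpg') is get_instance('a.jpg')   # same object both times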
1,145 | libtcod/python-tcod | tcod/image.py | Image.get_pixel | def get_pixel(self, x: int, y: int) -> Tuple[int, int, int]:
"""Get the color of a pixel in this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
Returns:
Tuple[int, int, int]:
An (r, g, b) tuple containing the pixels color value.
Values are in a 0 to 255 range.
"""
color = lib.TCOD_image_get_pixel(self.image_c, x, y)
return color.r, color.g, color.b | python | def get_pixel(self, x: int, y: int) -> Tuple[int, int, int]:
"""Get the color of a pixel in this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
Returns:
Tuple[int, int, int]:
An (r, g, b) tuple containing the pixels color value.
Values are in a 0 to 255 range.
"""
color = lib.TCOD_image_get_pixel(self.image_c, x, y)
return color.r, color.g, color.b | ['def', 'get_pixel', '(', 'self', ',', 'x', ':', 'int', ',', 'y', ':', 'int', ')', '->', 'Tuple', '[', 'int', ',', 'int', ',', 'int', ']', ':', 'color', '=', 'lib', '.', 'TCOD_image_get_pixel', '(', 'self', '.', 'image_c', ',', 'x', ',', 'y', ')', 'return', 'color', '.', 'r', ',', 'color', '.', 'g', ',', 'color', '.', 'b'] | Get the color of a pixel in this Image.
Args:
x (int): X pixel of the Image. Starting from the left at 0.
y (int): Y pixel of the Image. Starting from the top at 0.
Returns:
Tuple[int, int, int]:
An (r, g, b) tuple containing the pixels color value.
Values are in a 0 to 255 range. | ['Get', 'the', 'color', 'of', 'a', 'pixel', 'in', 'this', 'Image', '.'] | train | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/image.py#L145-L158 |
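A short sketch of reading a pixel, assuming the Image class can be constructed directly with a width and height; the size and coordinates are arbitrary.

import tcod.image

img = tcod.image.Image(4, 4)       # assumed direct construction
r, g, b = img.get_pixel(0, 0)      # each channel is in the 0-255 range, per the docstring
print(r, g, b)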
1,146 | boriel/zxbasic | arch/zx48k/optimizer.py | MemCell.replace_label | def replace_label(self, oldLabel, newLabel):
""" Replaces old label with a new one
"""
if oldLabel == newLabel:
return
tmp = re.compile(r'\b' + oldLabel + r'\b')
last = 0
l = len(newLabel)
while True:
match = tmp.search(self.asm[last:])
if not match:
break
txt = self.asm
self.asm = txt[:last + match.start()] + newLabel + txt[last + match.end():]
last += match.start() + l | python | def replace_label(self, oldLabel, newLabel):
""" Replaces old label with a new one
"""
if oldLabel == newLabel:
return
tmp = re.compile(r'\b' + oldLabel + r'\b')
last = 0
l = len(newLabel)
while True:
match = tmp.search(self.asm[last:])
if not match:
break
txt = self.asm
self.asm = txt[:last + match.start()] + newLabel + txt[last + match.end():]
last += match.start() + l | ['def', 'replace_label', '(', 'self', ',', 'oldLabel', ',', 'newLabel', ')', ':', 'if', 'oldLabel', '==', 'newLabel', ':', 'return', 'tmp', '=', 're', '.', 'compile', '(', "r'\\b'", '+', 'oldLabel', '+', "r'\\b'", ')', 'last', '=', '0', 'l', '=', 'len', '(', 'newLabel', ')', 'while', 'True', ':', 'match', '=', 'tmp', '.', 'search', '(', 'self', '.', 'asm', '[', 'last', ':', ']', ')', 'if', 'not', 'match', ':', 'break', 'txt', '=', 'self', '.', 'asm', 'self', '.', 'asm', '=', 'txt', '[', ':', 'last', '+', 'match', '.', 'start', '(', ')', ']', '+', 'newLabel', '+', 'txt', '[', 'last', '+', 'match', '.', 'end', '(', ')', ':', ']', 'last', '+=', 'match', '.', 'start', '(', ')', '+', 'l'] | Replaces old label with a new one | ['Replaces', 'old', 'label', 'with', 'a', 'new', 'one'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L1218-L1234 |
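The method above replaces whole-word label occurrences using a \b-bounded regex. A standalone illustration of that boundary behaviour (this sketch adds re.escape for safety; the record's code uses the label text directly):

import re

asm = "jp start ; restart_label must stay untouched"
pattern = re.compile(r'\b' + re.escape('start') + r'\b')
print(pattern.sub('main', asm))   # -> "jp main ; restart_label must stay untouched"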
1,147 | bcbio/bcbio-nextgen | bcbio/bam/__init__.py | _get_maxcov_downsample | def _get_maxcov_downsample(data):
"""Calculate maximum coverage downsampling for whole genome samples.
Returns None if we're not doing downsampling.
"""
from bcbio.bam import ref
from bcbio.ngsalign import alignprep, bwa
from bcbio.variation import coverage
fastq_file = data["files"][0]
params = alignprep.get_downsample_params(data)
if params:
num_reads = alignprep.total_reads_from_grabix(fastq_file)
if num_reads:
vrs = dd.get_variant_regions_merged(data)
total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
if vrs:
callable_size = pybedtools.BedTool(vrs).total_coverage()
genome_cov_pct = callable_size / float(total_size)
else:
callable_size = total_size
genome_cov_pct = 1.0
if (genome_cov_pct > coverage.GENOME_COV_THRESH
and dd.get_coverage_interval(data) in ["genome", None, False]):
total_counts, total_sizes = 0, 0
for count, size in bwa.fastq_size_output(fastq_file, 5000):
total_counts += int(count)
total_sizes += (int(size) * int(count))
read_size = float(total_sizes) / float(total_counts)
avg_cov = float(num_reads * read_size) / callable_size
if avg_cov >= params["min_coverage_for_downsampling"]:
return int(avg_cov * params["maxcov_downsample_multiplier"])
return None | python | def _get_maxcov_downsample(data):
"""Calculate maximum coverage downsampling for whole genome samples.
Returns None if we're not doing downsampling.
"""
from bcbio.bam import ref
from bcbio.ngsalign import alignprep, bwa
from bcbio.variation import coverage
fastq_file = data["files"][0]
params = alignprep.get_downsample_params(data)
if params:
num_reads = alignprep.total_reads_from_grabix(fastq_file)
if num_reads:
vrs = dd.get_variant_regions_merged(data)
total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
if vrs:
callable_size = pybedtools.BedTool(vrs).total_coverage()
genome_cov_pct = callable_size / float(total_size)
else:
callable_size = total_size
genome_cov_pct = 1.0
if (genome_cov_pct > coverage.GENOME_COV_THRESH
and dd.get_coverage_interval(data) in ["genome", None, False]):
total_counts, total_sizes = 0, 0
for count, size in bwa.fastq_size_output(fastq_file, 5000):
total_counts += int(count)
total_sizes += (int(size) * int(count))
read_size = float(total_sizes) / float(total_counts)
avg_cov = float(num_reads * read_size) / callable_size
if avg_cov >= params["min_coverage_for_downsampling"]:
return int(avg_cov * params["maxcov_downsample_multiplier"])
return None | ['def', '_get_maxcov_downsample', '(', 'data', ')', ':', 'from', 'bcbio', '.', 'bam', 'import', 'ref', 'from', 'bcbio', '.', 'ngsalign', 'import', 'alignprep', ',', 'bwa', 'from', 'bcbio', '.', 'variation', 'import', 'coverage', 'fastq_file', '=', 'data', '[', '"files"', ']', '[', '0', ']', 'params', '=', 'alignprep', '.', 'get_downsample_params', '(', 'data', ')', 'if', 'params', ':', 'num_reads', '=', 'alignprep', '.', 'total_reads_from_grabix', '(', 'fastq_file', ')', 'if', 'num_reads', ':', 'vrs', '=', 'dd', '.', 'get_variant_regions_merged', '(', 'data', ')', 'total_size', '=', 'sum', '(', '[', 'c', '.', 'size', 'for', 'c', 'in', 'ref', '.', 'file_contigs', '(', 'dd', '.', 'get_ref_file', '(', 'data', ')', ',', 'data', '[', '"config"', ']', ')', ']', ')', 'if', 'vrs', ':', 'callable_size', '=', 'pybedtools', '.', 'BedTool', '(', 'vrs', ')', '.', 'total_coverage', '(', ')', 'genome_cov_pct', '=', 'callable_size', '/', 'float', '(', 'total_size', ')', 'else', ':', 'callable_size', '=', 'total_size', 'genome_cov_pct', '=', '1.0', 'if', '(', 'genome_cov_pct', '>', 'coverage', '.', 'GENOME_COV_THRESH', 'and', 'dd', '.', 'get_coverage_interval', '(', 'data', ')', 'in', '[', '"genome"', ',', 'None', ',', 'False', ']', ')', ':', 'total_counts', ',', 'total_sizes', '=', '0', ',', '0', 'for', 'count', ',', 'size', 'in', 'bwa', '.', 'fastq_size_output', '(', 'fastq_file', ',', '5000', ')', ':', 'total_counts', '+=', 'int', '(', 'count', ')', 'total_sizes', '+=', '(', 'int', '(', 'size', ')', '*', 'int', '(', 'count', ')', ')', 'read_size', '=', 'float', '(', 'total_sizes', ')', '/', 'float', '(', 'total_counts', ')', 'avg_cov', '=', 'float', '(', 'num_reads', '*', 'read_size', ')', '/', 'callable_size', 'if', 'avg_cov', '>=', 'params', '[', '"min_coverage_for_downsampling"', ']', ':', 'return', 'int', '(', 'avg_cov', '*', 'params', '[', '"maxcov_downsample_multiplier"', ']', ')', 'return', 'None'] | Calculate maximum coverage downsampling for whole genome samples.
Returns None if we're not doing downsampling. | ['Calculate', 'maximum', 'coverage', 'downsampling', 'for', 'whole', 'genome', 'samples', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/__init__.py#L219-L250 |
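The decision at the end of the function above reduces to a small piece of arithmetic. A worked sketch with made-up numbers (not taken from any real run):

# Hypothetical inputs: 600M reads of ~150 bp over a 3 Gb callable region.
num_reads = 600_000_000
read_size = 150.0
callable_size = 3_000_000_000
params = {"min_coverage_for_downsampling": 20,
          "maxcov_downsample_multiplier": 10}

avg_cov = num_reads * read_size / callable_size                        # 30.0x average coverage
if avg_cov >= params["min_coverage_for_downsampling"]:
    max_cov = int(avg_cov * params["maxcov_downsample_multiplier"])    # 300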
1,148 | amadeus4dev/amadeus-python | amadeus/shopping/_hotel_offer.py | HotelOffer.get | def get(self, **params):
'''
Returns details for a specific offer.
.. code-block:: python
amadeus.shopping.hotel_offer('XXX').get
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v2/shopping/hotel-offers/{0}'
.format(self.offer_id), **params) | python | def get(self, **params):
'''
Returns details for a specific offer.
.. code-block:: python
amadeus.shopping.hotel_offer('XXX').get
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v2/shopping/hotel-offers/{0}'
.format(self.offer_id), **params) | ['def', 'get', '(', 'self', ',', '*', '*', 'params', ')', ':', 'return', 'self', '.', 'client', '.', 'get', '(', "'/v2/shopping/hotel-offers/{0}'", '.', 'format', '(', 'self', '.', 'offer_id', ')', ',', '*', '*', 'params', ')'] | Returns details for a specific offer.
.. code-block:: python
amadeus.shopping.hotel_offer('XXX').get
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed | ['Returns', 'details', 'for', 'a', 'specific', 'offer', '.'] | train | https://github.com/amadeus4dev/amadeus-python/blob/afb93667d2cd486ddc7f4a7f29f222f04453a44a/amadeus/shopping/_hotel_offer.py#L9-L21 |
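The docstring above shows the accessor without any client set up. A slightly fuller sketch, assuming the usual amadeus Client construction; the credentials and offer id are placeholders.

from amadeus import Client, ResponseError

amadeus = Client(client_id='YOUR_KEY', client_secret='YOUR_SECRET')
try:
    response = amadeus.shopping.hotel_offer('XXX').get()
    print(response.data)
except ResponseError as error:
    print(error)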
1,149 | micha030201/aionationstates | aionationstates/utils.py | datetime_to_ns | def datetime_to_ns(then):
"""Transform a :any:`datetime.datetime` into a NationStates-style
string.
For example "6 days ago", "105 minutes ago", etc.
"""
if then == datetime(1970, 1, 1, 0, 0):
return 'Antiquity'
now = datetime.utcnow()
delta = now - then
seconds = delta.total_seconds()
# There's gotta be a better way to do this...
years, seconds = divmod(seconds, 60*60*24*365)
days, seconds = divmod(seconds, 60*60*24)
hours, seconds = divmod(seconds, 60*60)
minutes, seconds = divmod(seconds, 60)
years = int(years)
days = int(days)
hours = int(hours)
minutes = int(minutes)
seconds = round(seconds)
if years > 1:
if days > 1:
return f'{years} years {days} days ago'
elif days == 1:
return '{years} years 1 day ago'
return '{years} years ago'
if years == 1:
if days > 1:
return f'1 year {days} days ago'
elif days == 1:
return '1 year 1 day ago'
return '1 year ago'
if days > 3:
return f'{days} days ago'
if days > 1:
if hours > 1:
return f'{days} days {hours} hours ago'
elif hours == 1:
return f'{days} days 1 hour ago'
return f'{days} days ago'
if days == 1:
if hours > 1:
return f'1 day {hours} hours ago'
elif hours == 1:
return '1 day 1 hour ago'
return '1 day ago'
if hours > 1:
return f'{hours} hours ago'
if hours == 1:
return f'{minutes + 60} minutes ago'
if minutes > 1:
return f'{minutes} minutes ago'
if minutes == 1:
return '1 minute ago'
return 'Seconds ago' | python | def datetime_to_ns(then):
"""Transform a :any:`datetime.datetime` into a NationStates-style
string.
For example "6 days ago", "105 minutes ago", etc.
"""
if then == datetime(1970, 1, 1, 0, 0):
return 'Antiquity'
now = datetime.utcnow()
delta = now - then
seconds = delta.total_seconds()
# There's gotta be a better way to do this...
years, seconds = divmod(seconds, 60*60*24*365)
days, seconds = divmod(seconds, 60*60*24)
hours, seconds = divmod(seconds, 60*60)
minutes, seconds = divmod(seconds, 60)
years = int(years)
days = int(days)
hours = int(hours)
minutes = int(minutes)
seconds = round(seconds)
if years > 1:
if days > 1:
return f'{years} years {days} days ago'
elif days == 1:
return '{years} years 1 day ago'
return '{years} years ago'
if years == 1:
if days > 1:
return f'1 year {days} days ago'
elif days == 1:
return '1 year 1 day ago'
return '1 year ago'
if days > 3:
return f'{days} days ago'
if days > 1:
if hours > 1:
return f'{days} days {hours} hours ago'
elif hours == 1:
return f'{days} days 1 hour ago'
return f'{days} days ago'
if days == 1:
if hours > 1:
return f'1 day {hours} hours ago'
elif hours == 1:
return '1 day 1 hour ago'
return '1 day ago'
if hours > 1:
return f'{hours} hours ago'
if hours == 1:
return f'{minutes + 60} minutes ago'
if minutes > 1:
return f'{minutes} minutes ago'
if minutes == 1:
return '1 minute ago'
return 'Seconds ago' | ['def', 'datetime_to_ns', '(', 'then', ')', ':', 'if', 'then', '==', 'datetime', '(', '1970', ',', '1', ',', '1', ',', '0', ',', '0', ')', ':', 'return', "'Antiquity'", 'now', '=', 'datetime', '.', 'utcnow', '(', ')', 'delta', '=', 'now', '-', 'then', 'seconds', '=', 'delta', '.', 'total_seconds', '(', ')', "# There's gotta be a better way to do this...", 'years', ',', 'seconds', '=', 'divmod', '(', 'seconds', ',', '60', '*', '60', '*', '24', '*', '365', ')', 'days', ',', 'seconds', '=', 'divmod', '(', 'seconds', ',', '60', '*', '60', '*', '24', ')', 'hours', ',', 'seconds', '=', 'divmod', '(', 'seconds', ',', '60', '*', '60', ')', 'minutes', ',', 'seconds', '=', 'divmod', '(', 'seconds', ',', '60', ')', 'years', '=', 'int', '(', 'years', ')', 'days', '=', 'int', '(', 'days', ')', 'hours', '=', 'int', '(', 'hours', ')', 'minutes', '=', 'int', '(', 'minutes', ')', 'seconds', '=', 'round', '(', 'seconds', ')', 'if', 'years', '>', '1', ':', 'if', 'days', '>', '1', ':', 'return', "f'{years} years {days} days ago'", 'elif', 'days', '==', '1', ':', 'return', "'{years} years 1 day ago'", 'return', "'{years} years ago'", 'if', 'years', '==', '1', ':', 'if', 'days', '>', '1', ':', 'return', "f'1 year {days} days ago'", 'elif', 'days', '==', '1', ':', 'return', "'1 year 1 day ago'", 'return', "'1 year ago'", 'if', 'days', '>', '3', ':', 'return', "f'{days} days ago'", 'if', 'days', '>', '1', ':', 'if', 'hours', '>', '1', ':', 'return', "f'{days} days {hours} hours ago'", 'elif', 'hours', '==', '1', ':', 'return', "f'{days} days 1 hour ago'", 'return', "f'{days} days ago'", 'if', 'days', '==', '1', ':', 'if', 'hours', '>', '1', ':', 'return', "f'1 day {hours} hours ago'", 'elif', 'hours', '==', '1', ':', 'return', "'1 day 1 hour ago'", 'return', "'1 day ago'", 'if', 'hours', '>', '1', ':', 'return', "f'{hours} hours ago'", 'if', 'hours', '==', '1', ':', 'return', "f'{minutes + 60} minutes ago'", 'if', 'minutes', '>', '1', ':', 'return', "f'{minutes} minutes ago'", 'if', 'minutes', '==', '1', ':', 'return', "'1 minute ago'", 'return', "'Seconds ago'"] | Transform a :any:`datetime.datetime` into a NationStates-style
string.
For example "6 days ago", "105 minutes ago", etc. | ['Transform', 'a', ':', 'any', ':', 'datetime', '.', 'datetime', 'into', 'a', 'NationStates', '-', 'style', 'string', '.'] | train | https://github.com/micha030201/aionationstates/blob/dc86b86d994cbab830b69ab8023601c73e778b3a/aionationstates/utils.py#L142-L204 |
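A usage sketch for the formatter above; the import path is assumed from the recorded file location. As an aside grounded in the record itself, the two year-only returns ('{years} years ...') are plain strings rather than f-strings, so those branches would emit the literal placeholder; the sketch below exercises a day/hour branch instead.

from datetime import datetime, timedelta
from aionationstates.utils import datetime_to_ns   # assumed import path

then = datetime.utcnow() - timedelta(days=2, hours=3)
print(datetime_to_ns(then))   # "2 days 3 hours ago"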
1,150 | theislab/anndata | anndata/base.py | BoundRecArr.to_df | def to_df(self) -> pd.DataFrame:
"""Convert to pandas dataframe."""
df = pd.DataFrame(index=RangeIndex(0, self.shape[0], name=None))
for key in self.keys():
value = self[key]
for icolumn, column in enumerate(value.T):
df['{}{}'.format(key, icolumn+1)] = column
return df | python | def to_df(self) -> pd.DataFrame:
"""Convert to pandas dataframe."""
df = pd.DataFrame(index=RangeIndex(0, self.shape[0], name=None))
for key in self.keys():
value = self[key]
for icolumn, column in enumerate(value.T):
df['{}{}'.format(key, icolumn+1)] = column
return df | ['def', 'to_df', '(', 'self', ')', '->', 'pd', '.', 'DataFrame', ':', 'df', '=', 'pd', '.', 'DataFrame', '(', 'index', '=', 'RangeIndex', '(', '0', ',', 'self', '.', 'shape', '[', '0', ']', ',', 'name', '=', 'None', ')', ')', 'for', 'key', 'in', 'self', '.', 'keys', '(', ')', ':', 'value', '=', 'self', '[', 'key', ']', 'for', 'icolumn', ',', 'column', 'in', 'enumerate', '(', 'value', '.', 'T', ')', ':', 'df', '[', "'{}{}'", '.', 'format', '(', 'key', ',', 'icolumn', '+', '1', ')', ']', '=', 'column', 'return', 'df'] | Convert to pandas dataframe. | ['Convert', 'to', 'pandas', 'dataframe', '.'] | train | https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/base.py#L166-L173 |
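A stand-in sketch of the flattening idea above using a plain structured array: each multi-column field becomes numbered DataFrame columns (key1, key2, ...).

import numpy as np
import pandas as pd

arr = np.zeros(3, dtype=[('X_pca', 'f8', (2,))])       # one field with two columns
df = pd.DataFrame(index=pd.RangeIndex(0, arr.shape[0]))
for key in arr.dtype.names:
    for icolumn, column in enumerate(arr[key].T):
        df['{}{}'.format(key, icolumn + 1)] = column
print(df.columns.tolist())   # ['X_pca1', 'X_pca2']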
1,151 | guma44/GEOparse | GEOparse/GEOTypes.py | GSE._get_object_as_soft | def _get_object_as_soft(self):
"""Get object as SOFT formatted string."""
soft = []
if self.database is not None:
soft.append(self.database._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string()]
for gsm in itervalues(self.gsms):
soft.append(gsm._get_object_as_soft())
for gpl in itervalues(self.gpls):
soft.append(gpl._get_object_as_soft())
return "\n".join(soft) | python | def _get_object_as_soft(self):
"""Get object as SOFT formatted string."""
soft = []
if self.database is not None:
soft.append(self.database._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string()]
for gsm in itervalues(self.gsms):
soft.append(gsm._get_object_as_soft())
for gpl in itervalues(self.gpls):
soft.append(gpl._get_object_as_soft())
return "\n".join(soft) | ['def', '_get_object_as_soft', '(', 'self', ')', ':', 'soft', '=', '[', ']', 'if', 'self', '.', 'database', 'is', 'not', 'None', ':', 'soft', '.', 'append', '(', 'self', '.', 'database', '.', '_get_object_as_soft', '(', ')', ')', 'soft', '+=', '[', '"^%s = %s"', '%', '(', 'self', '.', 'geotype', ',', 'self', '.', 'name', ')', ',', 'self', '.', '_get_metadata_as_string', '(', ')', ']', 'for', 'gsm', 'in', 'itervalues', '(', 'self', '.', 'gsms', ')', ':', 'soft', '.', 'append', '(', 'gsm', '.', '_get_object_as_soft', '(', ')', ')', 'for', 'gpl', 'in', 'itervalues', '(', 'self', '.', 'gpls', ')', ':', 'soft', '.', 'append', '(', 'gpl', '.', '_get_object_as_soft', '(', ')', ')', 'return', '"\\n"', '.', 'join', '(', 'soft', ')'] | Get object as SOFT formatted string. | ['Get', 'object', 'as', 'SOFT', 'formatted', 'string', '.'] | train | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L966-L978 |
1,152 | seleniumbase/SeleniumBase | seleniumbase/fixtures/base_case.py | BaseCase.assert_no_js_errors | def assert_no_js_errors(self):
""" Asserts that there are no JavaScript "SEVERE"-level page errors.
Works ONLY for Chrome (non-headless) and Chrome-based browsers.
Does NOT work on Firefox, Edge, IE, and some other browsers:
* See https://github.com/SeleniumHQ/selenium/issues/1161
Based on the following Stack Overflow solution:
* https://stackoverflow.com/a/41150512/7058266 """
try:
browser_logs = self.driver.get_log('browser')
except (ValueError, WebDriverException):
# If unable to get browser logs, skip the assert and return.
return
messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
errors = []
for entry in browser_logs:
if entry['level'] == 'SEVERE':
if messenger_library not in entry['message']:
# Add errors if not caused by SeleniumBase dependencies
errors.append(entry)
if len(errors) > 0:
current_url = self.get_current_url()
raise Exception(
"JavaScript errors found on %s => %s" % (current_url, errors)) | python | def assert_no_js_errors(self):
""" Asserts that there are no JavaScript "SEVERE"-level page errors.
Works ONLY for Chrome (non-headless) and Chrome-based browsers.
Does NOT work on Firefox, Edge, IE, and some other browsers:
* See https://github.com/SeleniumHQ/selenium/issues/1161
Based on the following Stack Overflow solution:
* https://stackoverflow.com/a/41150512/7058266 """
try:
browser_logs = self.driver.get_log('browser')
except (ValueError, WebDriverException):
# If unable to get browser logs, skip the assert and return.
return
messenger_library = "//cdnjs.cloudflare.com/ajax/libs/messenger"
errors = []
for entry in browser_logs:
if entry['level'] == 'SEVERE':
if messenger_library not in entry['message']:
# Add errors if not caused by SeleniumBase dependencies
errors.append(entry)
if len(errors) > 0:
current_url = self.get_current_url()
raise Exception(
"JavaScript errors found on %s => %s" % (current_url, errors)) | ['def', 'assert_no_js_errors', '(', 'self', ')', ':', 'try', ':', 'browser_logs', '=', 'self', '.', 'driver', '.', 'get_log', '(', "'browser'", ')', 'except', '(', 'ValueError', ',', 'WebDriverException', ')', ':', '# If unable to get browser logs, skip the assert and return.', 'return', 'messenger_library', '=', '"//cdnjs.cloudflare.com/ajax/libs/messenger"', 'errors', '=', '[', ']', 'for', 'entry', 'in', 'browser_logs', ':', 'if', 'entry', '[', "'level'", ']', '==', "'SEVERE'", ':', 'if', 'messenger_library', 'not', 'in', 'entry', '[', "'message'", ']', ':', '# Add errors if not caused by SeleniumBase dependencies', 'errors', '.', 'append', '(', 'entry', ')', 'if', 'len', '(', 'errors', ')', '>', '0', ':', 'current_url', '=', 'self', '.', 'get_current_url', '(', ')', 'raise', 'Exception', '(', '"JavaScript errors found on %s => %s"', '%', '(', 'current_url', ',', 'errors', ')', ')'] | Asserts that there are no JavaScript "SEVERE"-level page errors.
Works ONLY for Chrome (non-headless) and Chrome-based browsers.
Does NOT work on Firefox, Edge, IE, and some other browsers:
* See https://github.com/SeleniumHQ/selenium/issues/1161
Based on the following Stack Overflow solution:
* https://stackoverflow.com/a/41150512/7058266 | ['Asserts', 'that', 'there', 'are', 'no', 'JavaScript', 'SEVERE', '-', 'level', 'page', 'errors', '.', 'Works', 'ONLY', 'for', 'Chrome', '(', 'non', '-', 'headless', ')', 'and', 'Chrome', '-', 'based', 'browsers', '.', 'Does', 'NOT', 'work', 'on', 'Firefox', 'Edge', 'IE', 'and', 'some', 'other', 'browsers', ':', '*', 'See', 'https', ':', '//', 'github', '.', 'com', '/', 'SeleniumHQ', '/', 'selenium', '/', 'issues', '/', '1161', 'Based', 'on', 'the', 'following', 'Stack', 'Overflow', 'solution', ':', '*', 'https', ':', '//', 'stackoverflow', '.', 'com', '/', 'a', '/', '41150512', '/', '7058266'] | train | https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L1769-L1792 |
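The same check can be sketched with plain Selenium outside SeleniumBase; as the record notes it only works on Chrome-based drivers, and this assumes a chromedriver is available on PATH.

from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com")
severe = [entry for entry in driver.get_log("browser")
          if entry["level"] == "SEVERE"]
driver.quit()
assert not severe, "JavaScript errors found: %s" % severe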
1,153 | yeraydiazdiaz/lunr.py | lunr/languages/__init__.py | register_languages | def register_languages():
"""Register all supported languages to ensure compatibility."""
for language in set(SUPPORTED_LANGUAGES) - {"en"}:
language_stemmer = partial(nltk_stemmer, get_language_stemmer(language))
Pipeline.register_function(language_stemmer, "stemmer-{}".format(language)) | python | def register_languages():
"""Register all supported languages to ensure compatibility."""
for language in set(SUPPORTED_LANGUAGES) - {"en"}:
language_stemmer = partial(nltk_stemmer, get_language_stemmer(language))
Pipeline.register_function(language_stemmer, "stemmer-{}".format(language)) | ['def', 'register_languages', '(', ')', ':', 'for', 'language', 'in', 'set', '(', 'SUPPORTED_LANGUAGES', ')', '-', '{', '"en"', '}', ':', 'language_stemmer', '=', 'partial', '(', 'nltk_stemmer', ',', 'get_language_stemmer', '(', 'language', ')', ')', 'Pipeline', '.', 'register_function', '(', 'language_stemmer', ',', '"stemmer-{}"', '.', 'format', '(', 'language', ')', ')'] | Register all supported languages to ensure compatibility. | ['Register', 'all', 'supported', 'languages', 'to', 'ensure', 'compatibility', '.'] | train | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/languages/__init__.py#L92-L96 |
1,154 | bunq/sdk_python | bunq/sdk/model/generated/endpoint.py | WhitelistSdd.update | def update(cls, whitelist_sdd_id, monetary_account_paying_id=None,
maximum_amount_per_month=None, custom_headers=None):
"""
:type user_id: int
:type whitelist_sdd_id: int
:param monetary_account_paying_id: ID of the monetary account of which
you want to pay from.
:type monetary_account_paying_id: int
:param maximum_amount_per_month: The maximum amount of money that is
allowed to be deducted based on the whitelist.
:type maximum_amount_per_month: object_.Amount
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseInt
"""
if custom_headers is None:
custom_headers = {}
api_client = client.ApiClient(cls._get_api_context())
request_map = {
cls.FIELD_MONETARY_ACCOUNT_PAYING_ID: monetary_account_paying_id,
cls.FIELD_MAXIMUM_AMOUNT_PER_MONTH: maximum_amount_per_month
}
request_map_string = converter.class_to_json(request_map)
request_map_string = cls._remove_field_for_request(request_map_string)
request_bytes = request_map_string.encode()
endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id(),
whitelist_sdd_id)
response_raw = api_client.put(endpoint_url, request_bytes,
custom_headers)
return BunqResponseInt.cast_from_bunq_response(
cls._process_for_id(response_raw)
) | python | def update(cls, whitelist_sdd_id, monetary_account_paying_id=None,
maximum_amount_per_month=None, custom_headers=None):
"""
:type user_id: int
:type whitelist_sdd_id: int
:param monetary_account_paying_id: ID of the monetary account of which
you want to pay from.
:type monetary_account_paying_id: int
:param maximum_amount_per_month: The maximum amount of money that is
allowed to be deducted based on the whitelist.
:type maximum_amount_per_month: object_.Amount
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseInt
"""
if custom_headers is None:
custom_headers = {}
api_client = client.ApiClient(cls._get_api_context())
request_map = {
cls.FIELD_MONETARY_ACCOUNT_PAYING_ID: monetary_account_paying_id,
cls.FIELD_MAXIMUM_AMOUNT_PER_MONTH: maximum_amount_per_month
}
request_map_string = converter.class_to_json(request_map)
request_map_string = cls._remove_field_for_request(request_map_string)
request_bytes = request_map_string.encode()
endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id(),
whitelist_sdd_id)
response_raw = api_client.put(endpoint_url, request_bytes,
custom_headers)
return BunqResponseInt.cast_from_bunq_response(
cls._process_for_id(response_raw)
) | ['def', 'update', '(', 'cls', ',', 'whitelist_sdd_id', ',', 'monetary_account_paying_id', '=', 'None', ',', 'maximum_amount_per_month', '=', 'None', ',', 'custom_headers', '=', 'None', ')', ':', 'if', 'custom_headers', 'is', 'None', ':', 'custom_headers', '=', '{', '}', 'api_client', '=', 'client', '.', 'ApiClient', '(', 'cls', '.', '_get_api_context', '(', ')', ')', 'request_map', '=', '{', 'cls', '.', 'FIELD_MONETARY_ACCOUNT_PAYING_ID', ':', 'monetary_account_paying_id', ',', 'cls', '.', 'FIELD_MAXIMUM_AMOUNT_PER_MONTH', ':', 'maximum_amount_per_month', '}', 'request_map_string', '=', 'converter', '.', 'class_to_json', '(', 'request_map', ')', 'request_map_string', '=', 'cls', '.', '_remove_field_for_request', '(', 'request_map_string', ')', 'request_bytes', '=', 'request_map_string', '.', 'encode', '(', ')', 'endpoint_url', '=', 'cls', '.', '_ENDPOINT_URL_UPDATE', '.', 'format', '(', 'cls', '.', '_determine_user_id', '(', ')', ',', 'whitelist_sdd_id', ')', 'response_raw', '=', 'api_client', '.', 'put', '(', 'endpoint_url', ',', 'request_bytes', ',', 'custom_headers', ')', 'return', 'BunqResponseInt', '.', 'cast_from_bunq_response', '(', 'cls', '.', '_process_for_id', '(', 'response_raw', ')', ')'] | :type user_id: int
:type whitelist_sdd_id: int
:param monetary_account_paying_id: ID of the monetary account of which
you want to pay from.
:type monetary_account_paying_id: int
:param maximum_amount_per_month: The maximum amount of money that is
allowed to be deducted based on the whitelist.
:type maximum_amount_per_month: object_.Amount
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseInt | [':', 'type', 'user_id', ':', 'int', ':', 'type', 'whitelist_sdd_id', ':', 'int', ':', 'param', 'monetary_account_paying_id', ':', 'ID', 'of', 'the', 'monetary', 'account', 'of', 'which', 'you', 'want', 'to', 'pay', 'from', '.', ':', 'type', 'monetary_account_paying_id', ':', 'int', ':', 'param', 'maximum_amount_per_month', ':', 'The', 'maximum', 'amount', 'of', 'money', 'that', 'is', 'allowed', 'to', 'be', 'deducted', 'based', 'on', 'the', 'whitelist', '.', ':', 'type', 'maximum_amount_per_month', ':', 'object_', '.', 'Amount', ':', 'type', 'custom_headers', ':', 'dict', '[', 'str', 'str', ']', '|None'] | train | https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L33891-L33927 |
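A hedged call sketch for the endpoint above; it assumes an API context has already been loaded into the SDK, and the id and amount are placeholders.

from bunq.sdk.model.generated import endpoint, object_

# Assumes BunqContext / ApiContext setup has been done elsewhere.
endpoint.WhitelistSdd.update(
    whitelist_sdd_id=123,
    maximum_amount_per_month=object_.Amount('10.00', 'EUR'))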
1,155 | iotile/coretools | transport_plugins/awsiot/iotile_transport_awsiot/gateway_agent.py | AWSIOTGatewayAgent.stop | def stop(self):
"""Stop this gateway agent."""
if self._disconnector:
self._disconnector.stop()
self.client.disconnect() | python | def stop(self):
"""Stop this gateway agent."""
if self._disconnector:
self._disconnector.stop()
self.client.disconnect() | ['def', 'stop', '(', 'self', ')', ':', 'if', 'self', '.', '_disconnector', ':', 'self', '.', '_disconnector', '.', 'stop', '(', ')', 'self', '.', 'client', '.', 'disconnect', '(', ')'] | Stop this gateway agent. | ['Stop', 'this', 'gateway', 'agent', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/gateway_agent.py#L94-L100 |
1,156 | juicer/juicer | juicer/utils/__init__.py | upload_rpm | def upload_rpm(rpm_path, repoid, connector, callback=None):
"""upload an rpm into pulp
rpm_path: path to an rpm
connector: the connector to use for interacting with pulp
callback: Optional callback to call after an RPM is
uploaded. Callback should accept one argument, the name of the RPM
which was uploaded
"""
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
info = rpm_info(rpm_path)
pkg_name = info['name']
nvrea = info['nvrea']
cksum = info['cksum']
size = info['size']
package_basename = info['package_basename']
juicer.utils.Log.log_notice("Expected amount to seek: %s (package size by os.path.getsize)" % size)
# initiate upload
upload = juicer.utils.Upload.Upload(package_basename, cksum, size, repoid, connector)
#create a statusbar
pbar = ProgressBar(size)
# read in rpm
total_seeked = 0
rpm_fd = open(rpm_path, 'rb')
rpm_fd.seek(0)
while total_seeked < size:
rpm_data = rpm_fd.read(Constants.UPLOAD_AT_ONCE)
last_offset = total_seeked
total_seeked += len(rpm_data)
juicer.utils.Log.log_notice("Seeked %s data... (total seeked: %s)" % (len(rpm_data), total_seeked))
upload_code = upload.append(fdata=rpm_data, offset=last_offset)
if upload_code != Constants.PULP_PUT_OK:
juicer.utils.Log.log_error("Upload failed.")
pbar.update(len(rpm_data))
pbar.finish()
rpm_fd.close()
juicer.utils.Log.log_notice("Seeked total data: %s" % total_seeked)
# finalize upload
rpm_id = upload.import_upload(nvrea=nvrea, rpm_name=pkg_name)
juicer.utils.Log.log_debug("RPM upload complete. New 'packageid': %s" % rpm_id)
# clean up working dir
upload.clean_upload()
# Run callbacks?
if callback:
try:
juicer.utils.Log.log_debug("Calling upload callack: %s" % str(callback))
callback(pkg_name)
except Exception:
juicer.utils.Log.log_error("Exception raised in callback: %s", str(callback))
pass
return rpm_id | python | def upload_rpm(rpm_path, repoid, connector, callback=None):
"""upload an rpm into pulp
rpm_path: path to an rpm
connector: the connector to use for interacting with pulp
callback: Optional callback to call after an RPM is
uploaded. Callback should accept one argument, the name of the RPM
which was uploaded
"""
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
info = rpm_info(rpm_path)
pkg_name = info['name']
nvrea = info['nvrea']
cksum = info['cksum']
size = info['size']
package_basename = info['package_basename']
juicer.utils.Log.log_notice("Expected amount to seek: %s (package size by os.path.getsize)" % size)
# initiate upload
upload = juicer.utils.Upload.Upload(package_basename, cksum, size, repoid, connector)
#create a statusbar
pbar = ProgressBar(size)
# read in rpm
total_seeked = 0
rpm_fd = open(rpm_path, 'rb')
rpm_fd.seek(0)
while total_seeked < size:
rpm_data = rpm_fd.read(Constants.UPLOAD_AT_ONCE)
last_offset = total_seeked
total_seeked += len(rpm_data)
juicer.utils.Log.log_notice("Seeked %s data... (total seeked: %s)" % (len(rpm_data), total_seeked))
upload_code = upload.append(fdata=rpm_data, offset=last_offset)
if upload_code != Constants.PULP_PUT_OK:
juicer.utils.Log.log_error("Upload failed.")
pbar.update(len(rpm_data))
pbar.finish()
rpm_fd.close()
juicer.utils.Log.log_notice("Seeked total data: %s" % total_seeked)
# finalize upload
rpm_id = upload.import_upload(nvrea=nvrea, rpm_name=pkg_name)
juicer.utils.Log.log_debug("RPM upload complete. New 'packageid': %s" % rpm_id)
# clean up working dir
upload.clean_upload()
# Run callbacks?
if callback:
try:
juicer.utils.Log.log_debug("Calling upload callack: %s" % str(callback))
callback(pkg_name)
except Exception:
juicer.utils.Log.log_error("Exception raised in callback: %s", str(callback))
pass
return rpm_id | ['def', 'upload_rpm', '(', 'rpm_path', ',', 'repoid', ',', 'connector', ',', 'callback', '=', 'None', ')', ':', 'ts', '=', 'rpm', '.', 'TransactionSet', '(', ')', 'ts', '.', 'setVSFlags', '(', 'rpm', '.', '_RPMVSF_NOSIGNATURES', ')', 'info', '=', 'rpm_info', '(', 'rpm_path', ')', 'pkg_name', '=', 'info', '[', "'name'", ']', 'nvrea', '=', 'info', '[', "'nvrea'", ']', 'cksum', '=', 'info', '[', "'cksum'", ']', 'size', '=', 'info', '[', "'size'", ']', 'package_basename', '=', 'info', '[', "'package_basename'", ']', 'juicer', '.', 'utils', '.', 'Log', '.', 'log_notice', '(', '"Expected amount to seek: %s (package size by os.path.getsize)"', '%', 'size', ')', '# initiate upload', 'upload', '=', 'juicer', '.', 'utils', '.', 'Upload', '.', 'Upload', '(', 'package_basename', ',', 'cksum', ',', 'size', ',', 'repoid', ',', 'connector', ')', '#create a statusbar', 'pbar', '=', 'ProgressBar', '(', 'size', ')', '# read in rpm', 'total_seeked', '=', '0', 'rpm_fd', '=', 'open', '(', 'rpm_path', ',', "'rb'", ')', 'rpm_fd', '.', 'seek', '(', '0', ')', 'while', 'total_seeked', '<', 'size', ':', 'rpm_data', '=', 'rpm_fd', '.', 'read', '(', 'Constants', '.', 'UPLOAD_AT_ONCE', ')', 'last_offset', '=', 'total_seeked', 'total_seeked', '+=', 'len', '(', 'rpm_data', ')', 'juicer', '.', 'utils', '.', 'Log', '.', 'log_notice', '(', '"Seeked %s data... (total seeked: %s)"', '%', '(', 'len', '(', 'rpm_data', ')', ',', 'total_seeked', ')', ')', 'upload_code', '=', 'upload', '.', 'append', '(', 'fdata', '=', 'rpm_data', ',', 'offset', '=', 'last_offset', ')', 'if', 'upload_code', '!=', 'Constants', '.', 'PULP_PUT_OK', ':', 'juicer', '.', 'utils', '.', 'Log', '.', 'log_error', '(', '"Upload failed."', ')', 'pbar', '.', 'update', '(', 'len', '(', 'rpm_data', ')', ')', 'pbar', '.', 'finish', '(', ')', 'rpm_fd', '.', 'close', '(', ')', 'juicer', '.', 'utils', '.', 'Log', '.', 'log_notice', '(', '"Seeked total data: %s"', '%', 'total_seeked', ')', '# finalize upload', 'rpm_id', '=', 'upload', '.', 'import_upload', '(', 'nvrea', '=', 'nvrea', ',', 'rpm_name', '=', 'pkg_name', ')', 'juicer', '.', 'utils', '.', 'Log', '.', 'log_debug', '(', '"RPM upload complete. New \'packageid\': %s"', '%', 'rpm_id', ')', '# clean up working dir', 'upload', '.', 'clean_upload', '(', ')', '# Run callbacks?', 'if', 'callback', ':', 'try', ':', 'juicer', '.', 'utils', '.', 'Log', '.', 'log_debug', '(', '"Calling upload callack: %s"', '%', 'str', '(', 'callback', ')', ')', 'callback', '(', 'pkg_name', ')', 'except', 'Exception', ':', 'juicer', '.', 'utils', '.', 'Log', '.', 'log_error', '(', '"Exception raised in callback: %s"', ',', 'str', '(', 'callback', ')', ')', 'pass', 'return', 'rpm_id'] | upload an rpm into pulp
rpm_path: path to an rpm
connector: the connector to use for interacting with pulp
callback: Optional callback to call after an RPM is
uploaded. Callback should accept one argument, the name of the RPM
which was uploaded | ['upload', 'an', 'rpm', 'into', 'pulp'] | train | https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/utils/__init__.py#L622-L685 |
1,157 | spencerahill/aospy | aospy/examples/example_obj_lib.py | conv_precip_frac | def conv_precip_frac(precip_largescale, precip_convective):
"""Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
xarray.DataArray
"""
total = total_precip(precip_largescale, precip_convective)
# Mask using xarray's `where` method to prevent divide-by-zero.
return precip_convective / total.where(total) | python | def conv_precip_frac(precip_largescale, precip_convective):
"""Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
xarray.DataArray
"""
total = total_precip(precip_largescale, precip_convective)
# Mask using xarray's `where` method to prevent divide-by-zero.
return precip_convective / total.where(total) | ['def', 'conv_precip_frac', '(', 'precip_largescale', ',', 'precip_convective', ')', ':', 'total', '=', 'total_precip', '(', 'precip_largescale', ',', 'precip_convective', ')', "# Mask using xarray's `where` method to prevent divide-by-zero.", 'return', 'precip_convective', '/', 'total', '.', 'where', '(', 'total', ')'] | Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
xarray.DataArray | ['Fraction', 'of', 'total', 'precip', 'that', 'is', 'from', 'convection', 'parameterization', '.'] | train | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/examples/example_obj_lib.py#L52-L67 |
1,158 | pecan/pecan | pecan/scaffolds/__init__.py | render_template | def render_template(content, variables):
"""
Return a bytestring representing a templated file based on the
input (content) and the variable names defined (vars).
"""
fsenc = sys.getfilesystemencoding()
def to_native(s, encoding='latin-1', errors='strict'):
if six.PY3:
if isinstance(s, six.text_type):
return s
return str(s, encoding, errors)
else:
if isinstance(s, six.text_type):
return s.encode(encoding, errors)
return str(s)
output = Template(
to_native(content, fsenc)
).substitute(variables)
if isinstance(output, six.text_type):
output = output.encode(fsenc, 'strict')
return output | python | def render_template(content, variables):
"""
Return a bytestring representing a templated file based on the
input (content) and the variable names defined (vars).
"""
fsenc = sys.getfilesystemencoding()
def to_native(s, encoding='latin-1', errors='strict'):
if six.PY3:
if isinstance(s, six.text_type):
return s
return str(s, encoding, errors)
else:
if isinstance(s, six.text_type):
return s.encode(encoding, errors)
return str(s)
output = Template(
to_native(content, fsenc)
).substitute(variables)
if isinstance(output, six.text_type):
output = output.encode(fsenc, 'strict')
return output | ['def', 'render_template', '(', 'content', ',', 'variables', ')', ':', 'fsenc', '=', 'sys', '.', 'getfilesystemencoding', '(', ')', 'def', 'to_native', '(', 's', ',', 'encoding', '=', "'latin-1'", ',', 'errors', '=', "'strict'", ')', ':', 'if', 'six', '.', 'PY3', ':', 'if', 'isinstance', '(', 's', ',', 'six', '.', 'text_type', ')', ':', 'return', 's', 'return', 'str', '(', 's', ',', 'encoding', ',', 'errors', ')', 'else', ':', 'if', 'isinstance', '(', 's', ',', 'six', '.', 'text_type', ')', ':', 'return', 's', '.', 'encode', '(', 'encoding', ',', 'errors', ')', 'return', 'str', '(', 's', ')', 'output', '=', 'Template', '(', 'to_native', '(', 'content', ',', 'fsenc', ')', ')', '.', 'substitute', '(', 'variables', ')', 'if', 'isinstance', '(', 'output', ',', 'six', '.', 'text_type', ')', ':', 'output', '=', 'output', '.', 'encode', '(', 'fsenc', ',', "'strict'", ')', 'return', 'output'] | Return a bytestring representing a templated file based on the
input (content) and the variable names defined (vars). | ['Return', 'a', 'bytestring', 'representing', 'a', 'templated', 'file', 'based', 'on', 'the', 'input', '(', 'content', ')', 'and', 'the', 'variable', 'names', 'defined', '(', 'vars', ')', '.'] | train | https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/scaffolds/__init__.py#L120-L142 |
1,159 | ceph/ceph-deploy | ceph_deploy/util/system.py | start_service | def start_service(conn, service='ceph'):
"""
Stop a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
"""
if is_systemd(conn):
remoto.process.run(
conn,
[
'systemctl',
'start',
'{service}'.format(service=service),
]
) | python | def start_service(conn, service='ceph'):
"""
Stop a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
"""
if is_systemd(conn):
remoto.process.run(
conn,
[
'systemctl',
'start',
'{service}'.format(service=service),
]
) | ['def', 'start_service', '(', 'conn', ',', 'service', '=', "'ceph'", ')', ':', 'if', 'is_systemd', '(', 'conn', ')', ':', 'remoto', '.', 'process', '.', 'run', '(', 'conn', ',', '[', "'systemctl'", ',', "'start'", ',', "'{service}'", '.', 'format', '(', 'service', '=', 'service', ')', ',', ']', ')'] | Stop a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection. | ['Stop', 'a', 'service', 'on', 'a', 'remote', 'host', 'depending', 'on', 'the', 'type', 'of', 'init', 'system', '.', 'Obviously', 'this', 'should', 'be', 'done', 'for', 'RHEL', '/', 'Fedora', '/', 'CentOS', 'systems', '.'] | train | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/system.py#L133-L148 |
1,160 | saltstack/salt | salt/pillar/pillar_ldap.py | ext_pillar | def ext_pillar(minion_id, # pylint: disable=W0613
pillar, # pylint: disable=W0613
config_file):
'''
Execute LDAP searches and return the aggregated data
'''
config_template = None
try:
config_template = _render_template(config_file)
except jinja2.exceptions.TemplateNotFound:
log.debug('pillar_ldap: missing configuration file %s', config_file)
except Exception:
log.debug('pillar_ldap: failed to render template for %s',
config_file, exc_info=True)
if not config_template:
# We don't have a config file
return {}
import salt.utils.yaml
try:
opts = salt.utils.yaml.safe_load(config_template) or {}
opts['conf_file'] = config_file
except Exception as err:
import salt.log
msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'.format(
config_file, err
)
if salt.log.is_console_configured():
log.warning(msg)
else:
print(msg)
return {}
else:
if not isinstance(opts, dict):
log.warning(
'pillar_ldap: %s is invalidly formatted, must be a YAML '
'dictionary. See the documentation for more information.',
config_file
)
return {}
if 'search_order' not in opts:
log.warning(
'pillar_ldap: search_order missing from configuration. See the '
'documentation for more information.'
)
return {}
data = {}
for source in opts['search_order']:
config = opts[source]
result = _do_search(config)
log.debug('source %s got result %s', source, result)
if result:
data = _result_to_dict(data, result, config, source)
return data | python | def ext_pillar(minion_id, # pylint: disable=W0613
pillar, # pylint: disable=W0613
config_file):
'''
Execute LDAP searches and return the aggregated data
'''
config_template = None
try:
config_template = _render_template(config_file)
except jinja2.exceptions.TemplateNotFound:
log.debug('pillar_ldap: missing configuration file %s', config_file)
except Exception:
log.debug('pillar_ldap: failed to render template for %s',
config_file, exc_info=True)
if not config_template:
# We don't have a config file
return {}
import salt.utils.yaml
try:
opts = salt.utils.yaml.safe_load(config_template) or {}
opts['conf_file'] = config_file
except Exception as err:
import salt.log
msg = 'pillar_ldap: error parsing configuration file: {0} - {1}'.format(
config_file, err
)
if salt.log.is_console_configured():
log.warning(msg)
else:
print(msg)
return {}
else:
if not isinstance(opts, dict):
log.warning(
'pillar_ldap: %s is invalidly formatted, must be a YAML '
'dictionary. See the documentation for more information.',
config_file
)
return {}
if 'search_order' not in opts:
log.warning(
'pillar_ldap: search_order missing from configuration. See the '
'documentation for more information.'
)
return {}
data = {}
for source in opts['search_order']:
config = opts[source]
result = _do_search(config)
log.debug('source %s got result %s', source, result)
if result:
data = _result_to_dict(data, result, config, source)
return data | ['def', 'ext_pillar', '(', 'minion_id', ',', '# pylint: disable=W0613', 'pillar', ',', '# pylint: disable=W0613', 'config_file', ')', ':', 'config_template', '=', 'None', 'try', ':', 'config_template', '=', '_render_template', '(', 'config_file', ')', 'except', 'jinja2', '.', 'exceptions', '.', 'TemplateNotFound', ':', 'log', '.', 'debug', '(', "'pillar_ldap: missing configuration file %s'", ',', 'config_file', ')', 'except', 'Exception', ':', 'log', '.', 'debug', '(', "'pillar_ldap: failed to render template for %s'", ',', 'config_file', ',', 'exc_info', '=', 'True', ')', 'if', 'not', 'config_template', ':', "# We don't have a config file", 'return', '{', '}', 'import', 'salt', '.', 'utils', '.', 'yaml', 'try', ':', 'opts', '=', 'salt', '.', 'utils', '.', 'yaml', '.', 'safe_load', '(', 'config_template', ')', 'or', '{', '}', 'opts', '[', "'conf_file'", ']', '=', 'config_file', 'except', 'Exception', 'as', 'err', ':', 'import', 'salt', '.', 'log', 'msg', '=', "'pillar_ldap: error parsing configuration file: {0} - {1}'", '.', 'format', '(', 'config_file', ',', 'err', ')', 'if', 'salt', '.', 'log', '.', 'is_console_configured', '(', ')', ':', 'log', '.', 'warning', '(', 'msg', ')', 'else', ':', 'print', '(', 'msg', ')', 'return', '{', '}', 'else', ':', 'if', 'not', 'isinstance', '(', 'opts', ',', 'dict', ')', ':', 'log', '.', 'warning', '(', "'pillar_ldap: %s is invalidly formatted, must be a YAML '", "'dictionary. See the documentation for more information.'", ',', 'config_file', ')', 'return', '{', '}', 'if', "'search_order'", 'not', 'in', 'opts', ':', 'log', '.', 'warning', '(', "'pillar_ldap: search_order missing from configuration. See the '", "'documentation for more information.'", ')', 'return', '{', '}', 'data', '=', '{', '}', 'for', 'source', 'in', 'opts', '[', "'search_order'", ']', ':', 'config', '=', 'opts', '[', 'source', ']', 'result', '=', '_do_search', '(', 'config', ')', 'log', '.', 'debug', '(', "'source %s got result %s'", ',', 'source', ',', 'result', ')', 'if', 'result', ':', 'data', '=', '_result_to_dict', '(', 'data', ',', 'result', ',', 'config', ',', 'source', ')', 'return', 'data'] | Execute LDAP searches and return the aggregated data | ['Execute', 'LDAP', 'searches', 'and', 'return', 'the', 'aggregated', 'data'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/pillar_ldap.py#L309-L365 |
1,161 | knagra/farnsworth | workshift/views.py | adjust_hours_view | def adjust_hours_view(request, semester):
"""
Adjust members' workshift hours requirements.
"""
page_name = "Adjust Hours"
pools = WorkshiftPool.objects.filter(semester=semester).order_by(
"-is_primary", "title",
)
workshifters = WorkshiftProfile.objects.filter(semester=semester)
pool_hour_forms = []
for workshifter in workshifters:
forms_list = []
for pool in pools:
hours = workshifter.pool_hours.get(pool=pool)
forms_list.append((
AdjustHoursForm(
data=request.POST or None,
prefix="pool_hours-{}".format(hours.pk),
instance=hours,
),
hours,
))
pool_hour_forms.append(forms_list)
if all(
form.is_valid()
for workshifter_forms in pool_hour_forms
for form, pool_hours in workshifter_forms
):
for workshifter_forms in pool_hour_forms:
for form, pool_hours in workshifter_forms:
form.save()
messages.add_message(request, messages.INFO, "Updated hours.")
return HttpResponseRedirect(wurl(
"workshift:adjust_hours",
sem_url=semester.sem_url,
))
return render_to_response("adjust_hours.html", {
"page_name": page_name,
"pools": pools,
"workshifters_tuples": zip(workshifters, pool_hour_forms),
}, context_instance=RequestContext(request)) | python | def adjust_hours_view(request, semester):
"""
Adjust members' workshift hours requirements.
"""
page_name = "Adjust Hours"
pools = WorkshiftPool.objects.filter(semester=semester).order_by(
"-is_primary", "title",
)
workshifters = WorkshiftProfile.objects.filter(semester=semester)
pool_hour_forms = []
for workshifter in workshifters:
forms_list = []
for pool in pools:
hours = workshifter.pool_hours.get(pool=pool)
forms_list.append((
AdjustHoursForm(
data=request.POST or None,
prefix="pool_hours-{}".format(hours.pk),
instance=hours,
),
hours,
))
pool_hour_forms.append(forms_list)
if all(
form.is_valid()
for workshifter_forms in pool_hour_forms
for form, pool_hours in workshifter_forms
):
for workshifter_forms in pool_hour_forms:
for form, pool_hours in workshifter_forms:
form.save()
messages.add_message(request, messages.INFO, "Updated hours.")
return HttpResponseRedirect(wurl(
"workshift:adjust_hours",
sem_url=semester.sem_url,
))
return render_to_response("adjust_hours.html", {
"page_name": page_name,
"pools": pools,
"workshifters_tuples": zip(workshifters, pool_hour_forms),
}, context_instance=RequestContext(request)) | ['def', 'adjust_hours_view', '(', 'request', ',', 'semester', ')', ':', 'page_name', '=', '"Adjust Hours"', 'pools', '=', 'WorkshiftPool', '.', 'objects', '.', 'filter', '(', 'semester', '=', 'semester', ')', '.', 'order_by', '(', '"-is_primary"', ',', '"title"', ',', ')', 'workshifters', '=', 'WorkshiftProfile', '.', 'objects', '.', 'filter', '(', 'semester', '=', 'semester', ')', 'pool_hour_forms', '=', '[', ']', 'for', 'workshifter', 'in', 'workshifters', ':', 'forms_list', '=', '[', ']', 'for', 'pool', 'in', 'pools', ':', 'hours', '=', 'workshifter', '.', 'pool_hours', '.', 'get', '(', 'pool', '=', 'pool', ')', 'forms_list', '.', 'append', '(', '(', 'AdjustHoursForm', '(', 'data', '=', 'request', '.', 'POST', 'or', 'None', ',', 'prefix', '=', '"pool_hours-{}"', '.', 'format', '(', 'hours', '.', 'pk', ')', ',', 'instance', '=', 'hours', ',', ')', ',', 'hours', ',', ')', ')', 'pool_hour_forms', '.', 'append', '(', 'forms_list', ')', 'if', 'all', '(', 'form', '.', 'is_valid', '(', ')', 'for', 'workshifter_forms', 'in', 'pool_hour_forms', 'for', 'form', ',', 'pool_hours', 'in', 'workshifter_forms', ')', ':', 'for', 'workshifter_forms', 'in', 'pool_hour_forms', ':', 'for', 'form', ',', 'pool_hours', 'in', 'workshifter_forms', ':', 'form', '.', 'save', '(', ')', 'messages', '.', 'add_message', '(', 'request', ',', 'messages', '.', 'INFO', ',', '"Updated hours."', ')', 'return', 'HttpResponseRedirect', '(', 'wurl', '(', '"workshift:adjust_hours"', ',', 'sem_url', '=', 'semester', '.', 'sem_url', ',', ')', ')', 'return', 'render_to_response', '(', '"adjust_hours.html"', ',', '{', '"page_name"', ':', 'page_name', ',', '"pools"', ':', 'pools', ',', '"workshifters_tuples"', ':', 'zip', '(', 'workshifters', ',', 'pool_hour_forms', ')', ',', '}', ',', 'context_instance', '=', 'RequestContext', '(', 'request', ')', ')'] | Adjust members' workshift hours requirements. | ['Adjust', 'members', 'workshift', 'hours', 'requirements', '.'] | train | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/workshift/views.py#L1025-L1069 |
1,162 | gwastro/pycbc | pycbc/psd/variation.py | find_trigger_value | def find_trigger_value(psd_var, idx, start, sample_rate):
""" Find the PSD variation value at a particular time
Parameters
----------
psd_var : TimeSeries
Time series of the varaibility in the PSD estimation
idx : numpy.ndarray
Time indices of the triggers
start : float
GPS start time
sample_rate : float
Sample rate defined in ini file
Returns
-------
vals : Array
PSD variation value at a particular time
"""
# Find gps time of the trigger
time = start + idx / sample_rate
# Find where in the psd variation time series the trigger belongs
ind = numpy.digitize(time, psd_var.sample_times)
ind -= 1
vals = psd_var[ind]
return vals | python | def find_trigger_value(psd_var, idx, start, sample_rate):
""" Find the PSD variation value at a particular time
Parameters
----------
psd_var : TimeSeries
Time series of the varaibility in the PSD estimation
idx : numpy.ndarray
Time indices of the triggers
start : float
GPS start time
sample_rate : float
Sample rate defined in ini file
Returns
-------
vals : Array
PSD variation value at a particular time
"""
# Find gps time of the trigger
time = start + idx / sample_rate
# Find where in the psd variation time series the trigger belongs
ind = numpy.digitize(time, psd_var.sample_times)
ind -= 1
vals = psd_var[ind]
return vals | ['def', 'find_trigger_value', '(', 'psd_var', ',', 'idx', ',', 'start', ',', 'sample_rate', ')', ':', '# Find gps time of the trigger', 'time', '=', 'start', '+', 'idx', '/', 'sample_rate', '# Find where in the psd variation time series the trigger belongs', 'ind', '=', 'numpy', '.', 'digitize', '(', 'time', ',', 'psd_var', '.', 'sample_times', ')', 'ind', '-=', '1', 'vals', '=', 'psd_var', '[', 'ind', ']', 'return', 'vals'] | Find the PSD variation value at a particular time
Parameters
----------
psd_var : TimeSeries
Time series of the varaibility in the PSD estimation
idx : numpy.ndarray
Time indices of the triggers
start : float
GPS start time
sample_rate : float
Sample rate defined in ini file
Returns
-------
vals : Array
PSD variation value at a particular time | ['Find', 'the', 'PSD', 'variation', 'value', 'at', 'a', 'particular', 'time'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/psd/variation.py#L136-L162 |
1,163 | mabuchilab/QNET | src/qnet/algebra/core/hilbert_space_algebra.py | LocalSpace.next_basis_label_or_index | def next_basis_label_or_index(self, label_or_index, n=1):
"""Given the label or index of a basis state, return the label/index of
the next basis state.
More generally, if `n` is given, return the `n`'th next basis state
label/index; `n` may also be negative to obtain previous basis state
labels/indices.
The return type is the same as the type of `label_or_index`.
Args:
label_or_index (int or str or SymbolicLabelBase): If `int`, the
index of a basis state; if `str`, the label of a basis state
n (int): The increment
Raises:
IndexError: If going beyond the last or first basis state
ValueError: If `label` is not a label for any basis state in the
Hilbert space
.BasisNotSetError: If the Hilbert space has no defined basis
TypeError: if `label_or_index` is neither a :class:`str` nor an
:class:`int`, nor a :class:`SymbolicLabelBase`
"""
if isinstance(label_or_index, int):
new_index = label_or_index + n
if new_index < 0:
raise IndexError("index %d < 0" % new_index)
if self.has_basis:
if new_index >= self.dimension:
raise IndexError("index %d out of range for basis %s"
% (new_index, self._basis))
return new_index
elif isinstance(label_or_index, str):
label_index = self.basis_labels.index(label_or_index)
new_index = label_index + n
if (new_index < 0) or (new_index >= len(self._basis)):
raise IndexError("index %d out of range for basis %s"
% (new_index, self._basis))
return self._basis[new_index]
elif isinstance(label_or_index, SymbolicLabelBase):
return label_or_index.__class__(expr=label_or_index.expr + n)
else:
raise TypeError(
"Invalid type for label_or_index: %s"
% label_or_index.__class__.__name__) | python | def next_basis_label_or_index(self, label_or_index, n=1):
"""Given the label or index of a basis state, return the label/index of
the next basis state.
More generally, if `n` is given, return the `n`'th next basis state
label/index; `n` may also be negative to obtain previous basis state
labels/indices.
The return type is the same as the type of `label_or_index`.
Args:
label_or_index (int or str or SymbolicLabelBase): If `int`, the
index of a basis state; if `str`, the label of a basis state
n (int): The increment
Raises:
IndexError: If going beyond the last or first basis state
ValueError: If `label` is not a label for any basis state in the
Hilbert space
.BasisNotSetError: If the Hilbert space has no defined basis
TypeError: if `label_or_index` is neither a :class:`str` nor an
:class:`int`, nor a :class:`SymbolicLabelBase`
"""
if isinstance(label_or_index, int):
new_index = label_or_index + n
if new_index < 0:
raise IndexError("index %d < 0" % new_index)
if self.has_basis:
if new_index >= self.dimension:
raise IndexError("index %d out of range for basis %s"
% (new_index, self._basis))
return new_index
elif isinstance(label_or_index, str):
label_index = self.basis_labels.index(label_or_index)
new_index = label_index + n
if (new_index < 0) or (new_index >= len(self._basis)):
raise IndexError("index %d out of range for basis %s"
% (new_index, self._basis))
return self._basis[new_index]
elif isinstance(label_or_index, SymbolicLabelBase):
return label_or_index.__class__(expr=label_or_index.expr + n)
else:
raise TypeError(
"Invalid type for label_or_index: %s"
% label_or_index.__class__.__name__) | ['def', 'next_basis_label_or_index', '(', 'self', ',', 'label_or_index', ',', 'n', '=', '1', ')', ':', 'if', 'isinstance', '(', 'label_or_index', ',', 'int', ')', ':', 'new_index', '=', 'label_or_index', '+', 'n', 'if', 'new_index', '<', '0', ':', 'raise', 'IndexError', '(', '"index %d < 0"', '%', 'new_index', ')', 'if', 'self', '.', 'has_basis', ':', 'if', 'new_index', '>=', 'self', '.', 'dimension', ':', 'raise', 'IndexError', '(', '"index %d out of range for basis %s"', '%', '(', 'new_index', ',', 'self', '.', '_basis', ')', ')', 'return', 'new_index', 'elif', 'isinstance', '(', 'label_or_index', ',', 'str', ')', ':', 'label_index', '=', 'self', '.', 'basis_labels', '.', 'index', '(', 'label_or_index', ')', 'new_index', '=', 'label_index', '+', 'n', 'if', '(', 'new_index', '<', '0', ')', 'or', '(', 'new_index', '>=', 'len', '(', 'self', '.', '_basis', ')', ')', ':', 'raise', 'IndexError', '(', '"index %d out of range for basis %s"', '%', '(', 'new_index', ',', 'self', '.', '_basis', ')', ')', 'return', 'self', '.', '_basis', '[', 'new_index', ']', 'elif', 'isinstance', '(', 'label_or_index', ',', 'SymbolicLabelBase', ')', ':', 'return', 'label_or_index', '.', '__class__', '(', 'expr', '=', 'label_or_index', '.', 'expr', '+', 'n', ')', 'else', ':', 'raise', 'TypeError', '(', '"Invalid type for label_or_index: %s"', '%', 'label_or_index', '.', '__class__', '.', '__name__', ')'] | Given the label or index of a basis state, return the label/index of
the next basis state.
More generally, if `n` is given, return the `n`'th next basis state
label/index; `n` may also be negative to obtain previous basis state
labels/indices.
The return type is the same as the type of `label_or_index`.
Args:
label_or_index (int or str or SymbolicLabelBase): If `int`, the
index of a basis state; if `str`, the label of a basis state
n (int): The increment
Raises:
IndexError: If going beyond the last or first basis state
ValueError: If `label` is not a label for any basis state in the
Hilbert space
.BasisNotSetError: If the Hilbert space has no defined basis
TypeError: if `label_or_index` is neither a :class:`str` nor an
:class:`int`, nor a :class:`SymbolicLabelBase` | ['Given', 'the', 'label', 'or', 'index', 'of', 'a', 'basis', 'state', 'return', 'the', 'label', '/', 'index', 'of', 'the', 'next', 'basis', 'state', '.'] | train | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/hilbert_space_algebra.py#L463-L507 |
1,164 | Scoppio/RagnarokEngine3 | RagnarokEngine3/RE3.py | Sprite.__execute_rot | def __execute_rot(self, surface):
"""Executes the rotating operation"""
self.image = pygame.transform.rotate(surface, self.__rotation)
self.__resize_surface_extents() | python | def __execute_rot(self, surface):
"""Executes the rotating operation"""
self.image = pygame.transform.rotate(surface, self.__rotation)
self.__resize_surface_extents() | ['def', '__execute_rot', '(', 'self', ',', 'surface', ')', ':', 'self', '.', 'image', '=', 'pygame', '.', 'transform', '.', 'rotate', '(', 'surface', ',', 'self', '.', '__rotation', ')', 'self', '.', '__resize_surface_extents', '(', ')'] | Executes the rotating operation | ['Executes', 'the', 'rotating', 'operation'] | train | https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L1403-L1406 |
1,165 | spyder-ide/spyder | spyder/plugins/ipythonconsole/plugin.py | IPythonConsole.set_elapsed_time | def set_elapsed_time(self, client):
"""Set elapsed time for slave clients."""
related_clients = self.get_related_clients(client)
for cl in related_clients:
if cl.timer is not None:
client.create_time_label()
client.t0 = cl.t0
client.timer.timeout.connect(client.show_time)
client.timer.start(1000)
break | python | def set_elapsed_time(self, client):
"""Set elapsed time for slave clients."""
related_clients = self.get_related_clients(client)
for cl in related_clients:
if cl.timer is not None:
client.create_time_label()
client.t0 = cl.t0
client.timer.timeout.connect(client.show_time)
client.timer.start(1000)
break | ['def', 'set_elapsed_time', '(', 'self', ',', 'client', ')', ':', 'related_clients', '=', 'self', '.', 'get_related_clients', '(', 'client', ')', 'for', 'cl', 'in', 'related_clients', ':', 'if', 'cl', '.', 'timer', 'is', 'not', 'None', ':', 'client', '.', 'create_time_label', '(', ')', 'client', '.', 't0', '=', 'cl', '.', 't0', 'client', '.', 'timer', '.', 'timeout', '.', 'connect', '(', 'client', '.', 'show_time', ')', 'client', '.', 'timer', '.', 'start', '(', '1000', ')', 'break'] | Set elapsed time for slave clients. | ['Set', 'elapsed', 'time', 'for', 'slave', 'clients', '.'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/plugin.py#L1106-L1115 |
1,166 | NASA-AMMOS/AIT-Core | ait/core/cfg.py | flatten | def flatten (d, *keys):
"""Flattens the dictionary d by merging keys in order such that later
keys take precedence over earlier keys.
"""
flat = { }
for k in keys:
flat = merge(flat, d.pop(k, { }))
return flat | python | def flatten (d, *keys):
"""Flattens the dictionary d by merging keys in order such that later
keys take precedence over earlier keys.
"""
flat = { }
for k in keys:
flat = merge(flat, d.pop(k, { }))
return flat | ['def', 'flatten', '(', 'd', ',', '*', 'keys', ')', ':', 'flat', '=', '{', '}', 'for', 'k', 'in', 'keys', ':', 'flat', '=', 'merge', '(', 'flat', ',', 'd', '.', 'pop', '(', 'k', ',', '{', '}', ')', ')', 'return', 'flat'] | Flattens the dictionary d by merging keys in order such that later
keys take precedence over earlier keys. | ['Flattens', 'the', 'dictionary', 'd', 'by', 'merging', 'keys', 'in', 'order', 'such', 'that', 'later', 'keys', 'take', 'precedence', 'over', 'earlier', 'keys', '.'] | train | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/cfg.py#L140-L150 |
1,167 | codelv/enaml-native | src/enamlnative/android/android_dialog.py | AndroidDialog.create_widget | def create_widget(self):
""" Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
"""
d = self.declaration
self.dialog = Dialog(self.get_context(), d.style) | python | def create_widget(self):
""" Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
"""
d = self.declaration
self.dialog = Dialog(self.get_context(), d.style) | ['def', 'create_widget', '(', 'self', ')', ':', 'd', '=', 'self', '.', 'declaration', 'self', '.', 'dialog', '=', 'Dialog', '(', 'self', '.', 'get_context', '(', ')', ',', 'd', '.', 'style', ')'] | Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent. | ['Create', 'the', 'underlying', 'widget', '.'] | train | https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_dialog.py#L53-L61 |
1,168 | cslarsen/elv | elv/elv.py | Transactions.balance | def balance(self):
"""Returns a tuple of (total amount deposited, total amount
withdrawn)."""
sin = Decimal("0.00")
sout = Decimal("0.00")
for t in self.trans:
if t.amount < Decimal("0.00"):
sout += t.amount
else:
sin += t.amount
return sin, sout | python | def balance(self):
"""Returns a tuple of (total amount deposited, total amount
withdrawn)."""
sin = Decimal("0.00")
sout = Decimal("0.00")
for t in self.trans:
if t.amount < Decimal("0.00"):
sout += t.amount
else:
sin += t.amount
return sin, sout | ['def', 'balance', '(', 'self', ')', ':', 'sin', '=', 'Decimal', '(', '"0.00"', ')', 'sout', '=', 'Decimal', '(', '"0.00"', ')', 'for', 't', 'in', 'self', '.', 'trans', ':', 'if', 't', '.', 'amount', '<', 'Decimal', '(', '"0.00"', ')', ':', 'sout', '+=', 't', '.', 'amount', 'else', ':', 'sin', '+=', 't', '.', 'amount', 'return', 'sin', ',', 'sout'] | Returns a tuple of (total amount deposited, total amount
withdrawn). | ['Returns', 'a', 'tuple', 'of', '(', 'total', 'amount', 'deposited', 'total', 'amount', 'withdrawn', ')', '.'] | train | https://github.com/cslarsen/elv/blob/4bacf2093a0dcbe6a2b4d79be0fe339bb2b99097/elv/elv.py#L379-L391 |
1,169 | ejeschke/ginga | ginga/gtk3w/ImageViewGtk.py | ImageViewGtk.get_plain_image_as_widget | def get_plain_image_as_widget(self):
"""Used for generating thumbnails. Does not include overlaid
graphics.
"""
pixbuf = self.get_plain_image_as_pixbuf()
image = Gtk.Image()
image.set_from_pixbuf(pixbuf)
image.show()
return image | python | def get_plain_image_as_widget(self):
"""Used for generating thumbnails. Does not include overlaid
graphics.
"""
pixbuf = self.get_plain_image_as_pixbuf()
image = Gtk.Image()
image.set_from_pixbuf(pixbuf)
image.show()
return image | ['def', 'get_plain_image_as_widget', '(', 'self', ')', ':', 'pixbuf', '=', 'self', '.', 'get_plain_image_as_pixbuf', '(', ')', 'image', '=', 'Gtk', '.', 'Image', '(', ')', 'image', '.', 'set_from_pixbuf', '(', 'pixbuf', ')', 'image', '.', 'show', '(', ')', 'return', 'image'] | Used for generating thumbnails. Does not include overlaid
graphics. | ['Used', 'for', 'generating', 'thumbnails', '.', 'Does', 'not', 'include', 'overlaid', 'graphics', '.'] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/gtk3w/ImageViewGtk.py#L65-L73 |
1,170 | QualiSystems/vCenterShell | package/cloudshell/cp/vcenter/commands/command_orchestrator.py | CommandOrchestrator.deploy_clone_from_vm | def deploy_clone_from_vm(self, context, deploy_action, cancellation_context):
"""
Deploy Cloned VM From VM Command, will deploy vm from template
:param CancellationContext cancellation_context:
:param ResourceCommandContext context: the context of the command
:param DeployApp deploy_action:
:return DeployAppResult deploy results
"""
deploy_from_vm_model = self.resource_model_parser.convert_to_resource_model(
attributes=deploy_action.actionParams.deployment.attributes,
resource_model_type=vCenterCloneVMFromVMResourceModel)
data_holder = DeployFromTemplateDetails(deploy_from_vm_model, deploy_action.actionParams.appName)
deploy_result_action = self.command_wrapper.execute_command_with_connection(
context,
self.deploy_command.execute_deploy_clone_from_vm,
data_holder,
cancellation_context,
self.folder_manager)
deploy_result_action.actionId = deploy_action.actionId
return deploy_result_action | python | def deploy_clone_from_vm(self, context, deploy_action, cancellation_context):
"""
Deploy Cloned VM From VM Command, will deploy vm from template
:param CancellationContext cancellation_context:
:param ResourceCommandContext context: the context of the command
:param DeployApp deploy_action:
:return DeployAppResult deploy results
"""
deploy_from_vm_model = self.resource_model_parser.convert_to_resource_model(
attributes=deploy_action.actionParams.deployment.attributes,
resource_model_type=vCenterCloneVMFromVMResourceModel)
data_holder = DeployFromTemplateDetails(deploy_from_vm_model, deploy_action.actionParams.appName)
deploy_result_action = self.command_wrapper.execute_command_with_connection(
context,
self.deploy_command.execute_deploy_clone_from_vm,
data_holder,
cancellation_context,
self.folder_manager)
deploy_result_action.actionId = deploy_action.actionId
return deploy_result_action | ['def', 'deploy_clone_from_vm', '(', 'self', ',', 'context', ',', 'deploy_action', ',', 'cancellation_context', ')', ':', 'deploy_from_vm_model', '=', 'self', '.', 'resource_model_parser', '.', 'convert_to_resource_model', '(', 'attributes', '=', 'deploy_action', '.', 'actionParams', '.', 'deployment', '.', 'attributes', ',', 'resource_model_type', '=', 'vCenterCloneVMFromVMResourceModel', ')', 'data_holder', '=', 'DeployFromTemplateDetails', '(', 'deploy_from_vm_model', ',', 'deploy_action', '.', 'actionParams', '.', 'appName', ')', 'deploy_result_action', '=', 'self', '.', 'command_wrapper', '.', 'execute_command_with_connection', '(', 'context', ',', 'self', '.', 'deploy_command', '.', 'execute_deploy_clone_from_vm', ',', 'data_holder', ',', 'cancellation_context', ',', 'self', '.', 'folder_manager', ')', 'deploy_result_action', '.', 'actionId', '=', 'deploy_action', '.', 'actionId', 'return', 'deploy_result_action'] | Deploy Cloned VM From VM Command, will deploy vm from template
:param CancellationContext cancellation_context:
:param ResourceCommandContext context: the context of the command
:param DeployApp deploy_action:
:return DeployAppResult deploy results | ['Deploy', 'Cloned', 'VM', 'From', 'VM', 'Command', 'will', 'deploy', 'vm', 'from', 'template'] | train | https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/commands/command_orchestrator.py#L242-L264 |
1,171 | OCR-D/core | ocrd_utils/ocrd_utils/__init__.py | polygon_from_points | def polygon_from_points(points):
"""
Constructs a numpy-compatible polygon from a page representation.
"""
polygon = []
for pair in points.split(" "):
x_y = pair.split(",")
polygon.append([float(x_y[0]), float(x_y[1])])
return polygon | python | def polygon_from_points(points):
"""
Constructs a numpy-compatible polygon from a page representation.
"""
polygon = []
for pair in points.split(" "):
x_y = pair.split(",")
polygon.append([float(x_y[0]), float(x_y[1])])
return polygon | ['def', 'polygon_from_points', '(', 'points', ')', ':', 'polygon', '=', '[', ']', 'for', 'pair', 'in', 'points', '.', 'split', '(', '" "', ')', ':', 'x_y', '=', 'pair', '.', 'split', '(', '","', ')', 'polygon', '.', 'append', '(', '[', 'float', '(', 'x_y', '[', '0', ']', ')', ',', 'float', '(', 'x_y', '[', '1', ']', ')', ']', ')', 'return', 'polygon'] | Constructs a numpy-compatible polygon from a page representation. | ['Constructs', 'a', 'numpy', '-', 'compatible', 'polygon', 'from', 'a', 'page', 'representation', '.'] | train | https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd_utils/ocrd_utils/__init__.py#L128-L136 |
1,172 | skymill/automated-ebs-snapshots | automated_ebs_snapshots/connection_manager.py | connect_to_ec2 | def connect_to_ec2(region='us-east-1', access_key=None, secret_key=None):
""" Connect to AWS ec2
:type region: str
:param region: AWS region to connect to
:type access_key: str
:param access_key: AWS access key id
:type secret_key: str
:param secret_key: AWS secret access key
:returns: boto.ec2.connection.EC2Connection -- EC2 connection
"""
if access_key:
# Connect using supplied credentials
logger.info('Connecting to AWS EC2 in {}'.format(region))
connection = ec2.connect_to_region(
region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
else:
# Fetch instance metadata
metadata = get_instance_metadata(timeout=1, num_retries=1)
if metadata:
try:
region = metadata['placement']['availability-zone'][:-1]
except KeyError:
pass
# Connect using env vars or boto credentials
logger.info('Connecting to AWS EC2 in {}'.format(region))
connection = ec2.connect_to_region(region)
if not connection:
logger.error('An error occurred when connecting to EC2')
sys.exit(1)
return connection | python | def connect_to_ec2(region='us-east-1', access_key=None, secret_key=None):
""" Connect to AWS ec2
:type region: str
:param region: AWS region to connect to
:type access_key: str
:param access_key: AWS access key id
:type secret_key: str
:param secret_key: AWS secret access key
:returns: boto.ec2.connection.EC2Connection -- EC2 connection
"""
if access_key:
# Connect using supplied credentials
logger.info('Connecting to AWS EC2 in {}'.format(region))
connection = ec2.connect_to_region(
region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
else:
# Fetch instance metadata
metadata = get_instance_metadata(timeout=1, num_retries=1)
if metadata:
try:
region = metadata['placement']['availability-zone'][:-1]
except KeyError:
pass
# Connect using env vars or boto credentials
logger.info('Connecting to AWS EC2 in {}'.format(region))
connection = ec2.connect_to_region(region)
if not connection:
logger.error('An error occurred when connecting to EC2')
sys.exit(1)
return connection | ['def', 'connect_to_ec2', '(', 'region', '=', "'us-east-1'", ',', 'access_key', '=', 'None', ',', 'secret_key', '=', 'None', ')', ':', 'if', 'access_key', ':', '# Connect using supplied credentials', 'logger', '.', 'info', '(', "'Connecting to AWS EC2 in {}'", '.', 'format', '(', 'region', ')', ')', 'connection', '=', 'ec2', '.', 'connect_to_region', '(', 'region', ',', 'aws_access_key_id', '=', 'access_key', ',', 'aws_secret_access_key', '=', 'secret_key', ')', 'else', ':', '# Fetch instance metadata', 'metadata', '=', 'get_instance_metadata', '(', 'timeout', '=', '1', ',', 'num_retries', '=', '1', ')', 'if', 'metadata', ':', 'try', ':', 'region', '=', 'metadata', '[', "'placement'", ']', '[', "'availability-zone'", ']', '[', ':', '-', '1', ']', 'except', 'KeyError', ':', 'pass', '# Connect using env vars or boto credentials', 'logger', '.', 'info', '(', "'Connecting to AWS EC2 in {}'", '.', 'format', '(', 'region', ')', ')', 'connection', '=', 'ec2', '.', 'connect_to_region', '(', 'region', ')', 'if', 'not', 'connection', ':', 'logger', '.', 'error', '(', "'An error occurred when connecting to EC2'", ')', 'sys', '.', 'exit', '(', '1', ')', 'return', 'connection'] | Connect to AWS ec2
:type region: str
:param region: AWS region to connect to
:type access_key: str
:param access_key: AWS access key id
:type secret_key: str
:param secret_key: AWS secret access key
:returns: boto.ec2.connection.EC2Connection -- EC2 connection | ['Connect', 'to', 'AWS', 'ec2'] | train | https://github.com/skymill/automated-ebs-snapshots/blob/9595bc49d458f6ffb93430722757d2284e878fab/automated_ebs_snapshots/connection_manager.py#L11-L47 |
1,173 | gboeing/osmnx | osmnx/plot.py | make_folium_polyline | def make_folium_polyline(edge, edge_color, edge_width, edge_opacity, popup_attribute=None):
"""
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine
"""
# check if we were able to import folium successfully
if not folium:
raise ImportError('The folium package must be installed to use this optional feature.')
# locations is a list of points for the polyline
# folium takes coords in lat,lon but geopandas provides them in lon,lat
# so we have to flip them around
locations = list([(lat, lon) for lon, lat in edge['geometry'].coords])
# if popup_attribute is None, then create no pop-up
if popup_attribute is None:
popup = None
else:
# folium doesn't interpret html in the html argument (weird), so can't
# do newlines without an iframe
popup_text = json.dumps(edge[popup_attribute])
popup = folium.Popup(html=popup_text)
# create a folium polyline with attributes
pl = folium.PolyLine(locations=locations, popup=popup,
color=edge_color, weight=edge_width, opacity=edge_opacity)
return pl | python | def make_folium_polyline(edge, edge_color, edge_width, edge_opacity, popup_attribute=None):
"""
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine
"""
# check if we were able to import folium successfully
if not folium:
raise ImportError('The folium package must be installed to use this optional feature.')
# locations is a list of points for the polyline
# folium takes coords in lat,lon but geopandas provides them in lon,lat
# so we have to flip them around
locations = list([(lat, lon) for lon, lat in edge['geometry'].coords])
# if popup_attribute is None, then create no pop-up
if popup_attribute is None:
popup = None
else:
# folium doesn't interpret html in the html argument (weird), so can't
# do newlines without an iframe
popup_text = json.dumps(edge[popup_attribute])
popup = folium.Popup(html=popup_text)
# create a folium polyline with attributes
pl = folium.PolyLine(locations=locations, popup=popup,
color=edge_color, weight=edge_width, opacity=edge_opacity)
return pl | ['def', 'make_folium_polyline', '(', 'edge', ',', 'edge_color', ',', 'edge_width', ',', 'edge_opacity', ',', 'popup_attribute', '=', 'None', ')', ':', '# check if we were able to import folium successfully', 'if', 'not', 'folium', ':', 'raise', 'ImportError', '(', "'The folium package must be installed to use this optional feature.'", ')', '# locations is a list of points for the polyline', '# folium takes coords in lat,lon but geopandas provides them in lon,lat', '# so we have to flip them around', 'locations', '=', 'list', '(', '[', '(', 'lat', ',', 'lon', ')', 'for', 'lon', ',', 'lat', 'in', 'edge', '[', "'geometry'", ']', '.', 'coords', ']', ')', '# if popup_attribute is None, then create no pop-up', 'if', 'popup_attribute', 'is', 'None', ':', 'popup', '=', 'None', 'else', ':', "# folium doesn't interpret html in the html argument (weird), so can't", '# do newlines without an iframe', 'popup_text', '=', 'json', '.', 'dumps', '(', 'edge', '[', 'popup_attribute', ']', ')', 'popup', '=', 'folium', '.', 'Popup', '(', 'html', '=', 'popup_text', ')', '# create a folium polyline with attributes', 'pl', '=', 'folium', '.', 'PolyLine', '(', 'locations', '=', 'locations', ',', 'popup', '=', 'popup', ',', 'color', '=', 'edge_color', ',', 'weight', '=', 'edge_width', ',', 'opacity', '=', 'edge_opacity', ')', 'return', 'pl'] | Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine | ['Turn', 'a', 'row', 'from', 'the', 'gdf_edges', 'GeoDataFrame', 'into', 'a', 'folium', 'PolyLine', 'with', 'attributes', '.'] | train | https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/plot.py#L763-L809 |
1,174 | B2W-BIT/aiologger | aiologger/formatters/json.py | ExtendedJsonFormatter.format | def format(self, record) -> str:
"""
:type record: aiologger.loggers.json.LogRecord
"""
msg = dict(self.formatter_fields_for_record(record))
if record.flatten and isinstance(record.msg, dict):
msg.update(record.msg)
else:
msg[MSG_FIELDNAME] = record.msg
if record.extra:
msg.update(record.extra)
if record.exc_info:
msg["exc_info"] = record.exc_info
if record.exc_text:
msg["exc_text"] = record.exc_text
return self.serializer(
msg, default=self._default_handler, **record.serializer_kwargs
) | python | def format(self, record) -> str:
"""
:type record: aiologger.loggers.json.LogRecord
"""
msg = dict(self.formatter_fields_for_record(record))
if record.flatten and isinstance(record.msg, dict):
msg.update(record.msg)
else:
msg[MSG_FIELDNAME] = record.msg
if record.extra:
msg.update(record.extra)
if record.exc_info:
msg["exc_info"] = record.exc_info
if record.exc_text:
msg["exc_text"] = record.exc_text
return self.serializer(
msg, default=self._default_handler, **record.serializer_kwargs
) | ['def', 'format', '(', 'self', ',', 'record', ')', '->', 'str', ':', 'msg', '=', 'dict', '(', 'self', '.', 'formatter_fields_for_record', '(', 'record', ')', ')', 'if', 'record', '.', 'flatten', 'and', 'isinstance', '(', 'record', '.', 'msg', ',', 'dict', ')', ':', 'msg', '.', 'update', '(', 'record', '.', 'msg', ')', 'else', ':', 'msg', '[', 'MSG_FIELDNAME', ']', '=', 'record', '.', 'msg', 'if', 'record', '.', 'extra', ':', 'msg', '.', 'update', '(', 'record', '.', 'extra', ')', 'if', 'record', '.', 'exc_info', ':', 'msg', '[', '"exc_info"', ']', '=', 'record', '.', 'exc_info', 'if', 'record', '.', 'exc_text', ':', 'msg', '[', '"exc_text"', ']', '=', 'record', '.', 'exc_text', 'return', 'self', '.', 'serializer', '(', 'msg', ',', 'default', '=', 'self', '.', '_default_handler', ',', '*', '*', 'record', '.', 'serializer_kwargs', ')'] | :type record: aiologger.loggers.json.LogRecord | [':', 'type', 'record', ':', 'aiologger', '.', 'loggers', '.', 'json', '.', 'LogRecord'] | train | https://github.com/B2W-BIT/aiologger/blob/0b366597a8305d5577a267305e81d5e4784cd398/aiologger/formatters/json.py#L111-L130 |
1,175 | DAI-Lab/Copulas | copulas/multivariate/gaussian.py | GaussianMultivariate._get_covariance | def _get_covariance(self, X):
"""Compute covariance matrix with transformed data.
Args:
X: `numpy.ndarray` or `pandas.DataFrame`.
Returns:
np.ndarray
"""
result = pd.DataFrame(index=range(len(X)))
column_names = self.get_column_names(X)
for column_name in column_names:
column = self.get_column(X, column_name)
distrib = self.distribs[column_name]
# get original distrib's cdf of the column
cdf = distrib.cumulative_distribution(column)
if distrib.constant_value is not None:
# This is to avoid np.inf in the case the column is constant.
cdf = np.ones(column.shape) - EPSILON
# get inverse cdf using standard normal
result = self.set_column(result, column_name, stats.norm.ppf(cdf))
# remove any rows that have infinite values
result = result[(result != np.inf).all(axis=1)]
return pd.DataFrame(data=result).cov().values | python | def _get_covariance(self, X):
"""Compute covariance matrix with transformed data.
Args:
X: `numpy.ndarray` or `pandas.DataFrame`.
Returns:
np.ndarray
"""
result = pd.DataFrame(index=range(len(X)))
column_names = self.get_column_names(X)
for column_name in column_names:
column = self.get_column(X, column_name)
distrib = self.distribs[column_name]
# get original distrib's cdf of the column
cdf = distrib.cumulative_distribution(column)
if distrib.constant_value is not None:
# This is to avoid np.inf in the case the column is constant.
cdf = np.ones(column.shape) - EPSILON
# get inverse cdf using standard normal
result = self.set_column(result, column_name, stats.norm.ppf(cdf))
# remove any rows that have infinite values
result = result[(result != np.inf).all(axis=1)]
return pd.DataFrame(data=result).cov().values | ['def', '_get_covariance', '(', 'self', ',', 'X', ')', ':', 'result', '=', 'pd', '.', 'DataFrame', '(', 'index', '=', 'range', '(', 'len', '(', 'X', ')', ')', ')', 'column_names', '=', 'self', '.', 'get_column_names', '(', 'X', ')', 'for', 'column_name', 'in', 'column_names', ':', 'column', '=', 'self', '.', 'get_column', '(', 'X', ',', 'column_name', ')', 'distrib', '=', 'self', '.', 'distribs', '[', 'column_name', ']', "# get original distrib's cdf of the column", 'cdf', '=', 'distrib', '.', 'cumulative_distribution', '(', 'column', ')', 'if', 'distrib', '.', 'constant_value', 'is', 'not', 'None', ':', '# This is to avoid np.inf in the case the column is constant.', 'cdf', '=', 'np', '.', 'ones', '(', 'column', '.', 'shape', ')', '-', 'EPSILON', '# get inverse cdf using standard normal', 'result', '=', 'self', '.', 'set_column', '(', 'result', ',', 'column_name', ',', 'stats', '.', 'norm', '.', 'ppf', '(', 'cdf', ')', ')', '# remove any rows that have infinite values', 'result', '=', 'result', '[', '(', 'result', '!=', 'np', '.', 'inf', ')', '.', 'all', '(', 'axis', '=', '1', ')', ']', 'return', 'pd', '.', 'DataFrame', '(', 'data', '=', 'result', ')', '.', 'cov', '(', ')', '.', 'values'] | Compute covariance matrix with transformed data.
Args:
X: `numpy.ndarray` or `pandas.DataFrame`.
Returns:
np.ndarray | ['Compute', 'covariance', 'matrix', 'with', 'transformed', 'data', '.'] | train | https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/gaussian.py#L107-L135 |
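A minimal standalone sketch of the transform this record describes, assuming only numpy and scipy: each column is mapped to pseudo-uniform values and then through the standard normal inverse CDF before the covariance is taken. It uses an empirical rank transform in place of the fitted per-column distributions, so the helper name and details are illustrative assumptions, not the Copulas implementation.

import numpy as np
from scipy import stats

def rank_based_copula_covariance(X):
    # X: 2-D array of shape (n_samples, n_columns).
    X = np.asarray(X, dtype=float)
    n_samples = X.shape[0]
    # Double argsort yields per-column ranks; shifting into (0, 1)
    # keeps the inverse normal CDF finite for the extreme ranks.
    ranks = np.argsort(np.argsort(X, axis=0), axis=0) + 1
    uniform = ranks / (n_samples + 1)
    # Probit transform: uniform pseudo-observations -> standard normal scores.
    normal_scores = stats.norm.ppf(uniform)
    # Covariance of the transformed columns (columns as variables).
    return np.cov(normal_scores, rowvar=False)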
1,176 | Alignak-monitoring/alignak | alignak/http/scheduler_interface.py | SchedulerInterface._initial_broks | def _initial_broks(self, broker_name):
"""Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This do not send broks, it only makes scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None
"""
with self.app.conf_lock:
logger.info("A new broker just connected : %s", broker_name)
return self.app.sched.fill_initial_broks(broker_name) | python | def _initial_broks(self, broker_name):
"""Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This do not send broks, it only makes scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None
"""
with self.app.conf_lock:
logger.info("A new broker just connected : %s", broker_name)
return self.app.sched.fill_initial_broks(broker_name) | ['def', '_initial_broks', '(', 'self', ',', 'broker_name', ')', ':', 'with', 'self', '.', 'app', '.', 'conf_lock', ':', 'logger', '.', 'info', '(', '"A new broker just connected : %s"', ',', 'broker_name', ')', 'return', 'self', '.', 'app', '.', 'sched', '.', 'fill_initial_broks', '(', 'broker_name', ')'] | Get initial_broks from the scheduler
This is used by the brokers to prepare the initial status broks
This do not send broks, it only makes scheduler internal processing. Then the broker
must use the *_broks* API to get all the stuff
:param broker_name: broker name, used to filter broks
:type broker_name: str
:return: None | ['Get', 'initial_broks', 'from', 'the', 'scheduler'] | train | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/scheduler_interface.py#L370-L384 |
1,177 | jtwhite79/pyemu | pyemu/pst/pst_handler.py | Pst.prior_groups | def prior_groups(self):
"""get the prior info groups
Returns
-------
prior_groups : list
a list of prior information groups
"""
og = list(self.prior_information.groupby("obgnme").groups.keys())
#og = list(map(pst_utils.SFMT, og))
return og | python | def prior_groups(self):
"""get the prior info groups
Returns
-------
prior_groups : list
a list of prior information groups
"""
og = list(self.prior_information.groupby("obgnme").groups.keys())
#og = list(map(pst_utils.SFMT, og))
return og | ['def', 'prior_groups', '(', 'self', ')', ':', 'og', '=', 'list', '(', 'self', '.', 'prior_information', '.', 'groupby', '(', '"obgnme"', ')', '.', 'groups', '.', 'keys', '(', ')', ')', '#og = list(map(pst_utils.SFMT, og))', 'return', 'og'] | get the prior info groups
Returns
-------
prior_groups : list
a list of prior information groups | ['get', 'the', 'prior', 'info', 'groups'] | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L410-L421 |
1,178 | ev3dev/ev3dev-lang-python | ev3dev2/sensor/__init__.py | Sensor._scale | def _scale(self, mode):
"""
Returns value scaling coefficient for the given mode.
"""
if mode in self._mode_scale:
scale = self._mode_scale[mode]
else:
scale = 10**(-self.decimals)
self._mode_scale[mode] = scale
return scale | python | def _scale(self, mode):
"""
Returns value scaling coefficient for the given mode.
"""
if mode in self._mode_scale:
scale = self._mode_scale[mode]
else:
scale = 10**(-self.decimals)
self._mode_scale[mode] = scale
return scale | ['def', '_scale', '(', 'self', ',', 'mode', ')', ':', 'if', 'mode', 'in', 'self', '.', '_mode_scale', ':', 'scale', '=', 'self', '.', '_mode_scale', '[', 'mode', ']', 'else', ':', 'scale', '=', '10', '**', '(', '-', 'self', '.', 'decimals', ')', 'self', '.', '_mode_scale', '[', 'mode', ']', '=', 'scale', 'return', 'scale'] | Returns value scaling coefficient for the given mode. | ['Returns', 'value', 'scaling', 'coefficient', 'for', 'the', 'given', 'mode', '.'] | train | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sensor/__init__.py#L112-L122 |
1,179 | nicolargo/glances | glances/cpu_percent.py | CpuPercent.__get_percpu | def __get_percpu(self):
"""Update and/or return the per CPU list using the psutil library."""
# Never update more than 1 time per cached_time
if self.timer_percpu.finished():
self.percpu_percent = []
for cpu_number, cputimes in enumerate(psutil.cpu_times_percent(interval=0.0, percpu=True)):
cpu = {'key': self.get_key(),
'cpu_number': cpu_number,
'total': round(100 - cputimes.idle, 1),
'user': cputimes.user,
'system': cputimes.system,
'idle': cputimes.idle}
# The following stats are for API purposes only
if hasattr(cputimes, 'nice'):
cpu['nice'] = cputimes.nice
if hasattr(cputimes, 'iowait'):
cpu['iowait'] = cputimes.iowait
if hasattr(cputimes, 'irq'):
cpu['irq'] = cputimes.irq
if hasattr(cputimes, 'softirq'):
cpu['softirq'] = cputimes.softirq
if hasattr(cputimes, 'steal'):
cpu['steal'] = cputimes.steal
if hasattr(cputimes, 'guest'):
cpu['guest'] = cputimes.guest
if hasattr(cputimes, 'guest_nice'):
cpu['guest_nice'] = cputimes.guest_nice
# Append new CPU to the list
self.percpu_percent.append(cpu)
# Reset timer for cache
self.timer_percpu = Timer(self.cached_time)
return self.percpu_percent | python | def __get_percpu(self):
"""Update and/or return the per CPU list using the psutil library."""
# Never update more than 1 time per cached_time
if self.timer_percpu.finished():
self.percpu_percent = []
for cpu_number, cputimes in enumerate(psutil.cpu_times_percent(interval=0.0, percpu=True)):
cpu = {'key': self.get_key(),
'cpu_number': cpu_number,
'total': round(100 - cputimes.idle, 1),
'user': cputimes.user,
'system': cputimes.system,
'idle': cputimes.idle}
# The following stats are for API purposes only
if hasattr(cputimes, 'nice'):
cpu['nice'] = cputimes.nice
if hasattr(cputimes, 'iowait'):
cpu['iowait'] = cputimes.iowait
if hasattr(cputimes, 'irq'):
cpu['irq'] = cputimes.irq
if hasattr(cputimes, 'softirq'):
cpu['softirq'] = cputimes.softirq
if hasattr(cputimes, 'steal'):
cpu['steal'] = cputimes.steal
if hasattr(cputimes, 'guest'):
cpu['guest'] = cputimes.guest
if hasattr(cputimes, 'guest_nice'):
cpu['guest_nice'] = cputimes.guest_nice
# Append new CPU to the list
self.percpu_percent.append(cpu)
# Reset timer for cache
self.timer_percpu = Timer(self.cached_time)
return self.percpu_percent | ['def', '__get_percpu', '(', 'self', ')', ':', '# Never update more than 1 time per cached_time', 'if', 'self', '.', 'timer_percpu', '.', 'finished', '(', ')', ':', 'self', '.', 'percpu_percent', '=', '[', ']', 'for', 'cpu_number', ',', 'cputimes', 'in', 'enumerate', '(', 'psutil', '.', 'cpu_times_percent', '(', 'interval', '=', '0.0', ',', 'percpu', '=', 'True', ')', ')', ':', 'cpu', '=', '{', "'key'", ':', 'self', '.', 'get_key', '(', ')', ',', "'cpu_number'", ':', 'cpu_number', ',', "'total'", ':', 'round', '(', '100', '-', 'cputimes', '.', 'idle', ',', '1', ')', ',', "'user'", ':', 'cputimes', '.', 'user', ',', "'system'", ':', 'cputimes', '.', 'system', ',', "'idle'", ':', 'cputimes', '.', 'idle', '}', '# The following stats are for API purposes only', 'if', 'hasattr', '(', 'cputimes', ',', "'nice'", ')', ':', 'cpu', '[', "'nice'", ']', '=', 'cputimes', '.', 'nice', 'if', 'hasattr', '(', 'cputimes', ',', "'iowait'", ')', ':', 'cpu', '[', "'iowait'", ']', '=', 'cputimes', '.', 'iowait', 'if', 'hasattr', '(', 'cputimes', ',', "'irq'", ')', ':', 'cpu', '[', "'irq'", ']', '=', 'cputimes', '.', 'irq', 'if', 'hasattr', '(', 'cputimes', ',', "'softirq'", ')', ':', 'cpu', '[', "'softirq'", ']', '=', 'cputimes', '.', 'softirq', 'if', 'hasattr', '(', 'cputimes', ',', "'steal'", ')', ':', 'cpu', '[', "'steal'", ']', '=', 'cputimes', '.', 'steal', 'if', 'hasattr', '(', 'cputimes', ',', "'guest'", ')', ':', 'cpu', '[', "'guest'", ']', '=', 'cputimes', '.', 'guest', 'if', 'hasattr', '(', 'cputimes', ',', "'guest_nice'", ')', ':', 'cpu', '[', "'guest_nice'", ']', '=', 'cputimes', '.', 'guest_nice', '# Append new CPU to the list', 'self', '.', 'percpu_percent', '.', 'append', '(', 'cpu', ')', '# Reset timer for cache', 'self', '.', 'timer_percpu', '=', 'Timer', '(', 'self', '.', 'cached_time', ')', 'return', 'self', '.', 'percpu_percent'] | Update and/or return the per CPU list using the psutil library. | ['Update', 'and', '/', 'or', 'return', 'the', 'per', 'CPU', 'list', 'using', 'the', 'psutil', 'library', '.'] | train | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/cpu_percent.py#L62-L93 |
1,180 | ktbyers/netmiko | netmiko/utilities.py | write_bytes | def write_bytes(out_data, encoding="ascii"):
"""Write Python2 and Python3 compatible byte stream."""
if sys.version_info[0] >= 3:
if isinstance(out_data, type("")):
if encoding == "utf-8":
return out_data.encode("utf-8")
else:
return out_data.encode("ascii", "ignore")
elif isinstance(out_data, type(b"")):
return out_data
else:
if isinstance(out_data, type("")):
if encoding == "utf-8":
return out_data.encode("utf-8")
else:
return out_data.encode("ascii", "ignore")
elif isinstance(out_data, type(str(""))):
return out_data
msg = "Invalid value for out_data neither unicode nor byte string: {}".format(
out_data
)
raise ValueError(msg) | python | def write_bytes(out_data, encoding="ascii"):
"""Write Python2 and Python3 compatible byte stream."""
if sys.version_info[0] >= 3:
if isinstance(out_data, type("")):
if encoding == "utf-8":
return out_data.encode("utf-8")
else:
return out_data.encode("ascii", "ignore")
elif isinstance(out_data, type(b"")):
return out_data
else:
if isinstance(out_data, type("")):
if encoding == "utf-8":
return out_data.encode("utf-8")
else:
return out_data.encode("ascii", "ignore")
elif isinstance(out_data, type(str(""))):
return out_data
msg = "Invalid value for out_data neither unicode nor byte string: {}".format(
out_data
)
raise ValueError(msg) | ['def', 'write_bytes', '(', 'out_data', ',', 'encoding', '=', '"ascii"', ')', ':', 'if', 'sys', '.', 'version_info', '[', '0', ']', '>=', '3', ':', 'if', 'isinstance', '(', 'out_data', ',', 'type', '(', '""', ')', ')', ':', 'if', 'encoding', '==', '"utf-8"', ':', 'return', 'out_data', '.', 'encode', '(', '"utf-8"', ')', 'else', ':', 'return', 'out_data', '.', 'encode', '(', '"ascii"', ',', '"ignore"', ')', 'elif', 'isinstance', '(', 'out_data', ',', 'type', '(', 'b""', ')', ')', ':', 'return', 'out_data', 'else', ':', 'if', 'isinstance', '(', 'out_data', ',', 'type', '(', '""', ')', ')', ':', 'if', 'encoding', '==', '"utf-8"', ':', 'return', 'out_data', '.', 'encode', '(', '"utf-8"', ')', 'else', ':', 'return', 'out_data', '.', 'encode', '(', '"ascii"', ',', '"ignore"', ')', 'elif', 'isinstance', '(', 'out_data', ',', 'type', '(', 'str', '(', '""', ')', ')', ')', ':', 'return', 'out_data', 'msg', '=', '"Invalid value for out_data neither unicode nor byte string: {}"', '.', 'format', '(', 'out_data', ')', 'raise', 'ValueError', '(', 'msg', ')'] | Write Python2 and Python3 compatible byte stream. | ['Write', 'Python2', 'and', 'Python3', 'compatible', 'byte', 'stream', '.'] | train | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/utilities.py#L158-L179 |
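A Python 3-only sketch of the same byte-conversion behaviour, assuming nothing beyond the standard library; the function name is illustrative and this is not the netmiko implementation.

def to_bytes(out_data, encoding="ascii"):
    # Already bytes: pass through unchanged.
    if isinstance(out_data, bytes):
        return out_data
    # Text: encode as UTF-8 when requested, otherwise ASCII with
    # non-encodable characters silently dropped.
    if isinstance(out_data, str):
        if encoding == "utf-8":
            return out_data.encode("utf-8")
        return out_data.encode("ascii", "ignore")
    raise ValueError(
        "Invalid value for out_data neither unicode nor byte string: {}".format(out_data)
    )

# Example: to_bytes("show version\n") == b"show version\n"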
1,181 | sorgerlab/indra | indra/assemblers/pysb/base_agents.py | BaseAgentSet.get_create_base_agent | def get_create_base_agent(self, agent):
"""Return base agent with given name, creating it if needed."""
try:
base_agent = self.agents[_n(agent.name)]
except KeyError:
base_agent = BaseAgent(_n(agent.name))
self.agents[_n(agent.name)] = base_agent
# If it's a molecular agent
if isinstance(agent, Agent):
# Handle bound conditions
for bc in agent.bound_conditions:
bound_base_agent = self.get_create_base_agent(bc.agent)
bound_base_agent.create_site(get_binding_site_name(agent))
base_agent.create_site(get_binding_site_name(bc.agent))
# Handle modification conditions
for mc in agent.mods:
base_agent.create_mod_site(mc)
# Handle mutation conditions
for mc in agent.mutations:
res_from = mc.residue_from if mc.residue_from else 'mut'
res_to = mc.residue_to if mc.residue_to else 'X'
if mc.position is None:
mut_site_name = res_from
else:
mut_site_name = res_from + mc.position
base_agent.create_site(mut_site_name, states=['WT', res_to])
# Handle location condition
if agent.location is not None:
base_agent.create_site('loc', [_n(agent.location)])
# Handle activity
if agent.activity is not None:
site_name = agent.activity.activity_type
base_agent.create_site(site_name, ['inactive', 'active'])
# There might be overwrites here
for db_name, db_ref in agent.db_refs.items():
base_agent.db_refs[db_name] = db_ref
return base_agent | python | def get_create_base_agent(self, agent):
"""Return base agent with given name, creating it if needed."""
try:
base_agent = self.agents[_n(agent.name)]
except KeyError:
base_agent = BaseAgent(_n(agent.name))
self.agents[_n(agent.name)] = base_agent
# If it's a molecular agent
if isinstance(agent, Agent):
# Handle bound conditions
for bc in agent.bound_conditions:
bound_base_agent = self.get_create_base_agent(bc.agent)
bound_base_agent.create_site(get_binding_site_name(agent))
base_agent.create_site(get_binding_site_name(bc.agent))
# Handle modification conditions
for mc in agent.mods:
base_agent.create_mod_site(mc)
# Handle mutation conditions
for mc in agent.mutations:
res_from = mc.residue_from if mc.residue_from else 'mut'
res_to = mc.residue_to if mc.residue_to else 'X'
if mc.position is None:
mut_site_name = res_from
else:
mut_site_name = res_from + mc.position
base_agent.create_site(mut_site_name, states=['WT', res_to])
# Handle location condition
if agent.location is not None:
base_agent.create_site('loc', [_n(agent.location)])
# Handle activity
if agent.activity is not None:
site_name = agent.activity.activity_type
base_agent.create_site(site_name, ['inactive', 'active'])
# There might be overwrites here
for db_name, db_ref in agent.db_refs.items():
base_agent.db_refs[db_name] = db_ref
return base_agent | ['def', 'get_create_base_agent', '(', 'self', ',', 'agent', ')', ':', 'try', ':', 'base_agent', '=', 'self', '.', 'agents', '[', '_n', '(', 'agent', '.', 'name', ')', ']', 'except', 'KeyError', ':', 'base_agent', '=', 'BaseAgent', '(', '_n', '(', 'agent', '.', 'name', ')', ')', 'self', '.', 'agents', '[', '_n', '(', 'agent', '.', 'name', ')', ']', '=', 'base_agent', "# If it's a molecular agent", 'if', 'isinstance', '(', 'agent', ',', 'Agent', ')', ':', '# Handle bound conditions', 'for', 'bc', 'in', 'agent', '.', 'bound_conditions', ':', 'bound_base_agent', '=', 'self', '.', 'get_create_base_agent', '(', 'bc', '.', 'agent', ')', 'bound_base_agent', '.', 'create_site', '(', 'get_binding_site_name', '(', 'agent', ')', ')', 'base_agent', '.', 'create_site', '(', 'get_binding_site_name', '(', 'bc', '.', 'agent', ')', ')', '# Handle modification conditions', 'for', 'mc', 'in', 'agent', '.', 'mods', ':', 'base_agent', '.', 'create_mod_site', '(', 'mc', ')', '# Handle mutation conditions', 'for', 'mc', 'in', 'agent', '.', 'mutations', ':', 'res_from', '=', 'mc', '.', 'residue_from', 'if', 'mc', '.', 'residue_from', 'else', "'mut'", 'res_to', '=', 'mc', '.', 'residue_to', 'if', 'mc', '.', 'residue_to', 'else', "'X'", 'if', 'mc', '.', 'position', 'is', 'None', ':', 'mut_site_name', '=', 'res_from', 'else', ':', 'mut_site_name', '=', 'res_from', '+', 'mc', '.', 'position', 'base_agent', '.', 'create_site', '(', 'mut_site_name', ',', 'states', '=', '[', "'WT'", ',', 'res_to', ']', ')', '# Handle location condition', 'if', 'agent', '.', 'location', 'is', 'not', 'None', ':', 'base_agent', '.', 'create_site', '(', "'loc'", ',', '[', '_n', '(', 'agent', '.', 'location', ')', ']', ')', '# Handle activity', 'if', 'agent', '.', 'activity', 'is', 'not', 'None', ':', 'site_name', '=', 'agent', '.', 'activity', '.', 'activity_type', 'base_agent', '.', 'create_site', '(', 'site_name', ',', '[', "'inactive'", ',', "'active'", ']', ')', '# There might be overwrites here', 'for', 'db_name', ',', 'db_ref', 'in', 'agent', '.', 'db_refs', '.', 'items', '(', ')', ':', 'base_agent', '.', 'db_refs', '[', 'db_name', ']', '=', 'db_ref', 'return', 'base_agent'] | Return base agent with given name, creating it if needed. | ['Return', 'base', 'agent', 'with', 'given', 'name', 'creating', 'it', 'if', 'needed', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/pysb/base_agents.py#L13-L57 |
1,182 | bmuller/kademlia | kademlia/protocol.py | KademliaProtocol.handle_call_response | def handle_call_response(self, result, node):
"""
If we get a response, add the node to the routing table. If
we get no response, make sure it's removed from the routing table.
"""
if not result[0]:
log.warning("no response from %s, removing from router", node)
self.router.remove_contact(node)
return result
log.info("got successful response from %s", node)
self.welcome_if_new(node)
return result | python | def handle_call_response(self, result, node):
"""
If we get a response, add the node to the routing table. If
we get no response, make sure it's removed from the routing table.
"""
if not result[0]:
log.warning("no response from %s, removing from router", node)
self.router.remove_contact(node)
return result
log.info("got successful response from %s", node)
self.welcome_if_new(node)
return result | ['def', 'handle_call_response', '(', 'self', ',', 'result', ',', 'node', ')', ':', 'if', 'not', 'result', '[', '0', ']', ':', 'log', '.', 'warning', '(', '"no response from %s, removing from router"', ',', 'node', ')', 'self', '.', 'router', '.', 'remove_contact', '(', 'node', ')', 'return', 'result', 'log', '.', 'info', '(', '"got successful response from %s"', ',', 'node', ')', 'self', '.', 'welcome_if_new', '(', 'node', ')', 'return', 'result'] | If we get a response, add the node to the routing table. If
we get no response, make sure it's removed from the routing table. | ['If', 'we', 'get', 'a', 'response', 'add', 'the', 'node', 'to', 'the', 'routing', 'table', '.', 'If', 'we', 'get', 'no', 'response', 'make', 'sure', 'it', 's', 'removed', 'from', 'the', 'routing', 'table', '.'] | train | https://github.com/bmuller/kademlia/blob/4a8d445c9ee8f3ca10f56107e4445daed4933c8a/kademlia/protocol.py#L116-L128 |
1,183 | ultrabug/py3status | py3status/core.py | Py3statusWrapper.get_user_modules | def get_user_modules(self):
"""
Search configured include directories for user provided modules.
user_modules: {
'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
}
"""
user_modules = {}
for include_path in self.config["include_paths"]:
for f_name in sorted(os.listdir(include_path)):
if not f_name.endswith(".py"):
continue
module_name = f_name[:-3]
# do not overwrite modules if already found
if module_name in user_modules:
pass
user_modules[module_name] = (include_path, f_name)
return user_modules | python | def get_user_modules(self):
"""
Search configured include directories for user provided modules.
user_modules: {
'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
}
"""
user_modules = {}
for include_path in self.config["include_paths"]:
for f_name in sorted(os.listdir(include_path)):
if not f_name.endswith(".py"):
continue
module_name = f_name[:-3]
# do not overwrite modules if already found
if module_name in user_modules:
pass
user_modules[module_name] = (include_path, f_name)
return user_modules | ['def', 'get_user_modules', '(', 'self', ')', ':', 'user_modules', '=', '{', '}', 'for', 'include_path', 'in', 'self', '.', 'config', '[', '"include_paths"', ']', ':', 'for', 'f_name', 'in', 'sorted', '(', 'os', '.', 'listdir', '(', 'include_path', ')', ')', ':', 'if', 'not', 'f_name', '.', 'endswith', '(', '".py"', ')', ':', 'continue', 'module_name', '=', 'f_name', '[', ':', '-', '3', ']', '# do not overwrite modules if already found', 'if', 'module_name', 'in', 'user_modules', ':', 'pass', 'user_modules', '[', 'module_name', ']', '=', '(', 'include_path', ',', 'f_name', ')', 'return', 'user_modules'] | Search configured include directories for user provided modules.
user_modules: {
'weather_yahoo': ('~/i3/py3status/', 'weather_yahoo.py')
} | ['Search', 'configured', 'include', 'directories', 'for', 'user', 'provided', 'modules', '.'] | train | https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/core.py#L435-L453 |
1,184 | lambdalisue/maidenhair | src/maidenhair/utils/environment.py | get_system_root_directory | def get_system_root_directory():
"""
Get system root directory (application installed root directory)
Returns
-------
string
A full path
"""
root = os.path.dirname(__file__)
root = os.path.dirname(root)
root = os.path.abspath(root)
return root | python | def get_system_root_directory():
"""
Get system root directory (application installed root directory)
Returns
-------
string
A full path
"""
root = os.path.dirname(__file__)
root = os.path.dirname(root)
root = os.path.abspath(root)
return root | ['def', 'get_system_root_directory', '(', ')', ':', 'root', '=', 'os', '.', 'path', '.', 'dirname', '(', '__file__', ')', 'root', '=', 'os', '.', 'path', '.', 'dirname', '(', 'root', ')', 'root', '=', 'os', '.', 'path', '.', 'abspath', '(', 'root', ')', 'return', 'root'] | Get system root directory (application installed root directory)
Returns
-------
string
A full path | ['Get', 'system', 'root', 'directory', '(', 'application', 'installed', 'root', 'directory', ')'] | train | https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/utils/environment.py#L19-L32 |
1,185 | Grunny/zap-cli | zapcli/commands/scripts.py | load_script | def load_script(zap_helper, **options):
"""Load a script from a file."""
with zap_error_handler():
if not os.path.isfile(options['file_path']):
raise ZAPError('No file found at "{0}", cannot load script.'.format(options['file_path']))
if not _is_valid_script_engine(zap_helper.zap, options['engine']):
engines = zap_helper.zap.script.list_engines
raise ZAPError('Invalid script engine provided. Valid engines are: {0}'.format(', '.join(engines)))
console.debug('Loading script "{0}" from "{1}"'.format(options['name'], options['file_path']))
result = zap_helper.zap.script.load(options['name'], options['script_type'], options['engine'],
options['file_path'], scriptdescription=options['description'])
if result != 'OK':
raise ZAPError('Error loading script: {0}'.format(result))
console.info('Script "{0}" loaded'.format(options['name'])) | python | def load_script(zap_helper, **options):
"""Load a script from a file."""
with zap_error_handler():
if not os.path.isfile(options['file_path']):
raise ZAPError('No file found at "{0}", cannot load script.'.format(options['file_path']))
if not _is_valid_script_engine(zap_helper.zap, options['engine']):
engines = zap_helper.zap.script.list_engines
raise ZAPError('Invalid script engine provided. Valid engines are: {0}'.format(', '.join(engines)))
console.debug('Loading script "{0}" from "{1}"'.format(options['name'], options['file_path']))
result = zap_helper.zap.script.load(options['name'], options['script_type'], options['engine'],
options['file_path'], scriptdescription=options['description'])
if result != 'OK':
raise ZAPError('Error loading script: {0}'.format(result))
console.info('Script "{0}" loaded'.format(options['name'])) | ['def', 'load_script', '(', 'zap_helper', ',', '*', '*', 'options', ')', ':', 'with', 'zap_error_handler', '(', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'options', '[', "'file_path'", ']', ')', ':', 'raise', 'ZAPError', '(', '\'No file found at "{0}", cannot load script.\'', '.', 'format', '(', 'options', '[', "'file_path'", ']', ')', ')', 'if', 'not', '_is_valid_script_engine', '(', 'zap_helper', '.', 'zap', ',', 'options', '[', "'engine'", ']', ')', ':', 'engines', '=', 'zap_helper', '.', 'zap', '.', 'script', '.', 'list_engines', 'raise', 'ZAPError', '(', "'Invalid script engine provided. Valid engines are: {0}'", '.', 'format', '(', "', '", '.', 'join', '(', 'engines', ')', ')', ')', 'console', '.', 'debug', '(', '\'Loading script "{0}" from "{1}"\'', '.', 'format', '(', 'options', '[', "'name'", ']', ',', 'options', '[', "'file_path'", ']', ')', ')', 'result', '=', 'zap_helper', '.', 'zap', '.', 'script', '.', 'load', '(', 'options', '[', "'name'", ']', ',', 'options', '[', "'script_type'", ']', ',', 'options', '[', "'engine'", ']', ',', 'options', '[', "'file_path'", ']', ',', 'scriptdescription', '=', 'options', '[', "'description'", ']', ')', 'if', 'result', '!=', "'OK'", ':', 'raise', 'ZAPError', '(', "'Error loading script: {0}'", '.', 'format', '(', 'result', ')', ')', 'console', '.', 'info', '(', '\'Script "{0}" loaded\'', '.', 'format', '(', 'options', '[', "'name'", ']', ')', ')'] | Load a script from a file. | ['Load', 'a', 'script', 'from', 'a', 'file', '.'] | train | https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/commands/scripts.py#L102-L119 |
1,186 | wright-group/WrightTools | WrightTools/_dataset.py | Dataset.max | def max(self):
"""Maximum, ignorning nans."""
if "max" not in self.attrs.keys():
def f(dataset, s):
return np.nanmax(dataset[s])
self.attrs["max"] = np.nanmax(list(self.chunkwise(f).values()))
return self.attrs["max"] | python | def max(self):
"""Maximum, ignorning nans."""
if "max" not in self.attrs.keys():
def f(dataset, s):
return np.nanmax(dataset[s])
self.attrs["max"] = np.nanmax(list(self.chunkwise(f).values()))
return self.attrs["max"] | ['def', 'max', '(', 'self', ')', ':', 'if', '"max"', 'not', 'in', 'self', '.', 'attrs', '.', 'keys', '(', ')', ':', 'def', 'f', '(', 'dataset', ',', 's', ')', ':', 'return', 'np', '.', 'nanmax', '(', 'dataset', '[', 's', ']', ')', 'self', '.', 'attrs', '[', '"max"', ']', '=', 'np', '.', 'nanmax', '(', 'list', '(', 'self', '.', 'chunkwise', '(', 'f', ')', '.', 'values', '(', ')', ')', ')', 'return', 'self', '.', 'attrs', '[', '"max"', ']'] | Maximum, ignorning nans. | ['Maximum', 'ignorning', 'nans', '.'] | train | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/_dataset.py#L371-L379 |
1,187 | calston/rhumba | rhumba/backends/redis.py | Backend.clusterQueues | def clusterQueues(self):
""" Return a dict of queues in cluster and servers running them
"""
servers = yield self.getClusterServers()
queues = {}
for sname in servers:
qs = yield self.get('rhumba.server.%s.queues' % sname)
uuid = yield self.get('rhumba.server.%s.uuid' % sname)
qs = json.loads(qs)
for q in qs:
if q not in queues:
queues[q] = []
queues[q].append({'host': sname, 'uuid': uuid})
defer.returnValue(queues) | python | def clusterQueues(self):
""" Return a dict of queues in cluster and servers running them
"""
servers = yield self.getClusterServers()
queues = {}
for sname in servers:
qs = yield self.get('rhumba.server.%s.queues' % sname)
uuid = yield self.get('rhumba.server.%s.uuid' % sname)
qs = json.loads(qs)
for q in qs:
if q not in queues:
queues[q] = []
queues[q].append({'host': sname, 'uuid': uuid})
defer.returnValue(queues) | ['def', 'clusterQueues', '(', 'self', ')', ':', 'servers', '=', 'yield', 'self', '.', 'getClusterServers', '(', ')', 'queues', '=', '{', '}', 'for', 'sname', 'in', 'servers', ':', 'qs', '=', 'yield', 'self', '.', 'get', '(', "'rhumba.server.%s.queues'", '%', 'sname', ')', 'uuid', '=', 'yield', 'self', '.', 'get', '(', "'rhumba.server.%s.uuid'", '%', 'sname', ')', 'qs', '=', 'json', '.', 'loads', '(', 'qs', ')', 'for', 'q', 'in', 'qs', ':', 'if', 'q', 'not', 'in', 'queues', ':', 'queues', '[', 'q', ']', '=', '[', ']', 'queues', '[', 'q', ']', '.', 'append', '(', '{', "'host'", ':', 'sname', ',', "'uuid'", ':', 'uuid', '}', ')', 'defer', '.', 'returnValue', '(', 'queues', ')'] | Return a dict of queues in cluster and servers running them | ['Return', 'a', 'dict', 'of', 'queues', 'in', 'cluster', 'and', 'servers', 'running', 'them'] | train | https://github.com/calston/rhumba/blob/05e3cbf4e531cc51b4777912eb98a4f006893f5e/rhumba/backends/redis.py#L207-L226 |
1,188 | quantmind/pulsar | pulsar/utils/config.py | Config.get | def get(self, name, default=None):
"""Get the value at ``name`` for this :class:`Config` container
The returned value is obtained from:
* the value at ``name`` in the :attr:`settings` dictionary
if available.
* the value at ``name`` in the :attr:`params` dictionary if available.
* the ``default`` value.
"""
try:
return self._get(name, default)
except KeyError:
return default | python | def get(self, name, default=None):
"""Get the value at ``name`` for this :class:`Config` container
The returned value is obtained from:
* the value at ``name`` in the :attr:`settings` dictionary
if available.
* the value at ``name`` in the :attr:`params` dictionary if available.
* the ``default`` value.
"""
try:
return self._get(name, default)
except KeyError:
return default | ['def', 'get', '(', 'self', ',', 'name', ',', 'default', '=', 'None', ')', ':', 'try', ':', 'return', 'self', '.', '_get', '(', 'name', ',', 'default', ')', 'except', 'KeyError', ':', 'return', 'default'] | Get the value at ``name`` for this :class:`Config` container
The returned value is obtained from:
* the value at ``name`` in the :attr:`settings` dictionary
if available.
* the value at ``name`` in the :attr:`params` dictionary if available.
* the ``default`` value. | ['Get', 'the', 'value', 'at', 'name', 'for', 'this', ':', 'class', ':', 'Config', 'container'] | train | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/config.py#L208-L221 |
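A minimal sketch of the lookup order described in this record's docstring — settings first, then params, then the caller's default — using plain dicts; it is a stand-in for illustration, not pulsar's Config class.

class MiniConfig:
    def __init__(self, settings=None, params=None):
        self.settings = settings or {}   # configured settings, highest priority
        self.params = params or {}       # extra keyword parameters, second priority

    def get(self, name, default=None):
        # 1) prefer the value stored in settings
        if name in self.settings:
            return self.settings[name]
        # 2) fall back to params
        if name in self.params:
            return self.params[name]
        # 3) finally the caller-supplied default
        return default

# Example: MiniConfig(params={"workers": 4}).get("workers") -> 4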
1,189 | ga4gh/ga4gh-client | ga4gh/client/client.py | AbstractClient.search_variant_annotations | def search_variant_annotations(
self, variant_annotation_set_id, reference_name="",
reference_id="", start=0, end=0, effects=[]):
"""
Returns an iterator over the Variant Annotations fulfilling
the specified conditions from the specified VariantSet.
:param str variant_annotation_set_id: The ID of the
:class:`ga4gh.protocol.VariantAnnotationSet` of interest.
:param int start: Required. The beginning of the window (0-based,
inclusive) for which overlapping variants should be returned.
Genomic positions are non-negative integers less than reference
length. Requests spanning the join of circular genomes are
represented as two requests one on each side of the join
(position 0).
:param int end: Required. The end of the window (0-based, exclusive)
for which overlapping variants should be returned.
:param str reference_name: The name of the
:class:`ga4gh.protocol.Reference` we wish to return variants from.
:return: An iterator over the
:class:`ga4gh.protocol.VariantAnnotation` objects
defined by the query parameters.
:rtype: iter
"""
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = variant_annotation_set_id
request.reference_name = reference_name
request.reference_id = reference_id
request.start = start
request.end = end
for effect in effects:
request.effects.add().CopyFrom(protocol.OntologyTerm(**effect))
for effect in request.effects:
if not effect.term_id:
raise exceptions.ErrantRequestException(
"Each ontology term should have an id set")
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "variantannotations",
protocol.SearchVariantAnnotationsResponse) | python | def search_variant_annotations(
self, variant_annotation_set_id, reference_name="",
reference_id="", start=0, end=0, effects=[]):
"""
Returns an iterator over the Variant Annotations fulfilling
the specified conditions from the specified VariantSet.
:param str variant_annotation_set_id: The ID of the
:class:`ga4gh.protocol.VariantAnnotationSet` of interest.
:param int start: Required. The beginning of the window (0-based,
inclusive) for which overlapping variants should be returned.
Genomic positions are non-negative integers less than reference
length. Requests spanning the join of circular genomes are
represented as two requests one on each side of the join
(position 0).
:param int end: Required. The end of the window (0-based, exclusive)
for which overlapping variants should be returned.
:param str reference_name: The name of the
:class:`ga4gh.protocol.Reference` we wish to return variants from.
:return: An iterator over the
:class:`ga4gh.protocol.VariantAnnotation` objects
defined by the query parameters.
:rtype: iter
"""
request = protocol.SearchVariantAnnotationsRequest()
request.variant_annotation_set_id = variant_annotation_set_id
request.reference_name = reference_name
request.reference_id = reference_id
request.start = start
request.end = end
for effect in effects:
request.effects.add().CopyFrom(protocol.OntologyTerm(**effect))
for effect in request.effects:
if not effect.term_id:
raise exceptions.ErrantRequestException(
"Each ontology term should have an id set")
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "variantannotations",
protocol.SearchVariantAnnotationsResponse) | ['def', 'search_variant_annotations', '(', 'self', ',', 'variant_annotation_set_id', ',', 'reference_name', '=', '""', ',', 'reference_id', '=', '""', ',', 'start', '=', '0', ',', 'end', '=', '0', ',', 'effects', '=', '[', ']', ')', ':', 'request', '=', 'protocol', '.', 'SearchVariantAnnotationsRequest', '(', ')', 'request', '.', 'variant_annotation_set_id', '=', 'variant_annotation_set_id', 'request', '.', 'reference_name', '=', 'reference_name', 'request', '.', 'reference_id', '=', 'reference_id', 'request', '.', 'start', '=', 'start', 'request', '.', 'end', '=', 'end', 'for', 'effect', 'in', 'effects', ':', 'request', '.', 'effects', '.', 'add', '(', ')', '.', 'CopyFrom', '(', 'protocol', '.', 'OntologyTerm', '(', '*', '*', 'effect', ')', ')', 'for', 'effect', 'in', 'request', '.', 'effects', ':', 'if', 'not', 'effect', '.', 'term_id', ':', 'raise', 'exceptions', '.', 'ErrantRequestException', '(', '"Each ontology term should have an id set"', ')', 'request', '.', 'page_size', '=', 'pb', '.', 'int', '(', 'self', '.', '_page_size', ')', 'return', 'self', '.', '_run_search_request', '(', 'request', ',', '"variantannotations"', ',', 'protocol', '.', 'SearchVariantAnnotationsResponse', ')'] | Returns an iterator over the Variant Annotations fulfilling
the specified conditions from the specified VariantSet.
:param str variant_annotation_set_id: The ID of the
:class:`ga4gh.protocol.VariantAnnotationSet` of interest.
:param int start: Required. The beginning of the window (0-based,
inclusive) for which overlapping variants should be returned.
Genomic positions are non-negative integers less than reference
length. Requests spanning the join of circular genomes are
represented as two requests one on each side of the join
(position 0).
:param int end: Required. The end of the window (0-based, exclusive)
for which overlapping variants should be returned.
:param str reference_name: The name of the
:class:`ga4gh.protocol.Reference` we wish to return variants from.
:return: An iterator over the
:class:`ga4gh.protocol.VariantAnnotation` objects
defined by the query parameters.
:rtype: iter | ['Returns', 'an', 'iterator', 'over', 'the', 'Variant', 'Annotations', 'fulfilling', 'the', 'specified', 'conditions', 'from', 'the', 'specified', 'VariantSet', '.'] | train | https://github.com/ga4gh/ga4gh-client/blob/d23b00b89112ef0930d45ee75aa3c6de3db615c5/ga4gh/client/client.py#L408-L448 |
1,190 | saltstack/salt | salt/modules/zcbuildout.py | bootstrap | def bootstrap(directory='.',
config='buildout.cfg',
python=sys.executable,
onlyif=None,
unless=None,
runas=None,
env=(),
distribute=None,
buildout_ver=None,
test_release=False,
offline=False,
new_st=None,
use_vt=False,
loglevel=None):
'''
Run the buildout bootstrap dance (python bootstrap.py).
directory
directory to execute in
config
alternative buildout configuration file to use
runas
User used to run buildout as
env
environment variables to set when running
buildout_ver
force a specific buildout version (1 | 2)
test_release
buildout accept test release
offline
are we executing buildout in offline mode
distribute
Forcing use of distribute
new_st
Forcing use of setuptools >= 0.7
python
path to a python executable to use in place of default (salt one)
onlyif
Only execute cmd if statement on the host return 0
unless
Do not execute cmd if statement on the host return 0
use_vt
Use the new salt VT to stream output [experimental]
CLI Example:
.. code-block:: bash
salt '*' buildout.bootstrap /srv/mybuildout
'''
directory = os.path.abspath(directory)
dbuild = _dot_buildout(directory)
bootstrap_args = ''
has_distribute = _has_old_distribute(python=python, runas=runas, env=env)
has_new_st = _has_setuptools7(python=python, runas=runas, env=env)
if (
has_distribute and has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
has_distribute and has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
has_distribute and has_new_st
and distribute and not new_st
):
new_st = True
distribute = False
if (
has_distribute and has_new_st
and not distribute and not new_st
):
new_st = True
distribute = False
if (
not has_distribute and has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
not has_distribute and has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
not has_distribute and has_new_st
and distribute and not new_st
):
new_st = True
distribute = False
if (
not has_distribute and has_new_st
and not distribute and not new_st
):
new_st = True
distribute = False
if (
has_distribute and not has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
has_distribute and not has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
has_distribute and not has_new_st
and distribute and not new_st
):
new_st = False
distribute = True
if (
has_distribute and not has_new_st
and not distribute and not new_st
):
new_st = False
distribute = True
if (
not has_distribute and not has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
not has_distribute and not has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
not has_distribute and not has_new_st
and distribute and not new_st
):
new_st = False
distribute = True
if (
not has_distribute and not has_new_st
and not distribute and not new_st
):
new_st = True
distribute = False
if new_st and distribute:
distribute = False
if new_st:
distribute = False
LOG.warning('Forcing to use setuptools as we have setuptools >= 0.7')
if distribute:
new_st = False
if buildout_ver == 1:
LOG.warning('Using distribute !')
bootstrap_args += ' --distribute'
if not os.path.isdir(dbuild):
os.makedirs(dbuild)
upgrade_bootstrap(directory,
offline=offline,
buildout_ver=buildout_ver)
# be sure which buildout bootstrap we have
b_py = os.path.join(directory, 'bootstrap.py')
with salt.utils.files.fopen(b_py) as fic:
content = salt.utils.stringutils.to_unicode(fic.read())
if (
(test_release is not False)
and ' --accept-buildout-test-releases' in content
):
bootstrap_args += ' --accept-buildout-test-releases'
if config and '"-c"' in content:
bootstrap_args += ' -c {0}'.format(config)
# be sure that the bootstrap belongs to the running user
try:
if runas:
uid = __salt__['user.info'](runas)['uid']
gid = __salt__['user.info'](runas)['gid']
os.chown('bootstrap.py', uid, gid)
except (IOError, OSError) as exc:
# don't block here, try to execute it if can pass
_logger.error('BUILDOUT bootstrap permissions error: %s',
exc, exc_info=_logger.isEnabledFor(logging.DEBUG))
cmd = '{0} bootstrap.py {1}'.format(python, bootstrap_args)
ret = _Popen(cmd, directory=directory, runas=runas, loglevel=loglevel,
env=env, use_vt=use_vt)
output = ret['output']
return {'comment': cmd, 'out': output} | python | def bootstrap(directory='.',
config='buildout.cfg',
python=sys.executable,
onlyif=None,
unless=None,
runas=None,
env=(),
distribute=None,
buildout_ver=None,
test_release=False,
offline=False,
new_st=None,
use_vt=False,
loglevel=None):
'''
Run the buildout bootstrap dance (python bootstrap.py).
directory
directory to execute in
config
alternative buildout configuration file to use
runas
User used to run buildout as
env
environment variables to set when running
buildout_ver
force a specific buildout version (1 | 2)
test_release
buildout accept test release
offline
are we executing buildout in offline mode
distribute
Forcing use of distribute
new_st
Forcing use of setuptools >= 0.7
python
path to a python executable to use in place of default (salt one)
onlyif
Only execute cmd if statement on the host return 0
unless
Do not execute cmd if statement on the host return 0
use_vt
Use the new salt VT to stream output [experimental]
CLI Example:
.. code-block:: bash
salt '*' buildout.bootstrap /srv/mybuildout
'''
directory = os.path.abspath(directory)
dbuild = _dot_buildout(directory)
bootstrap_args = ''
has_distribute = _has_old_distribute(python=python, runas=runas, env=env)
has_new_st = _has_setuptools7(python=python, runas=runas, env=env)
if (
has_distribute and has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
has_distribute and has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
has_distribute and has_new_st
and distribute and not new_st
):
new_st = True
distribute = False
if (
has_distribute and has_new_st
and not distribute and not new_st
):
new_st = True
distribute = False
if (
not has_distribute and has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
not has_distribute and has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
not has_distribute and has_new_st
and distribute and not new_st
):
new_st = True
distribute = False
if (
not has_distribute and has_new_st
and not distribute and not new_st
):
new_st = True
distribute = False
if (
has_distribute and not has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
has_distribute and not has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
has_distribute and not has_new_st
and distribute and not new_st
):
new_st = False
distribute = True
if (
has_distribute and not has_new_st
and not distribute and not new_st
):
new_st = False
distribute = True
if (
not has_distribute and not has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
not has_distribute and not has_new_st
and not distribute and new_st
):
new_st = True
distribute = False
if (
not has_distribute and not has_new_st
and distribute and not new_st
):
new_st = False
distribute = True
if (
not has_distribute and not has_new_st
and not distribute and not new_st
):
new_st = True
distribute = False
if new_st and distribute:
distribute = False
if new_st:
distribute = False
LOG.warning('Forcing to use setuptools as we have setuptools >= 0.7')
if distribute:
new_st = False
if buildout_ver == 1:
LOG.warning('Using distribute !')
bootstrap_args += ' --distribute'
if not os.path.isdir(dbuild):
os.makedirs(dbuild)
upgrade_bootstrap(directory,
offline=offline,
buildout_ver=buildout_ver)
# be sure which buildout bootstrap we have
b_py = os.path.join(directory, 'bootstrap.py')
with salt.utils.files.fopen(b_py) as fic:
content = salt.utils.stringutils.to_unicode(fic.read())
if (
(test_release is not False)
and ' --accept-buildout-test-releases' in content
):
bootstrap_args += ' --accept-buildout-test-releases'
if config and '"-c"' in content:
bootstrap_args += ' -c {0}'.format(config)
# be sure that the bootstrap belongs to the running user
try:
if runas:
uid = __salt__['user.info'](runas)['uid']
gid = __salt__['user.info'](runas)['gid']
os.chown('bootstrap.py', uid, gid)
except (IOError, OSError) as exc:
# don't block here, try to execute it if can pass
_logger.error('BUILDOUT bootstrap permissions error: %s',
exc, exc_info=_logger.isEnabledFor(logging.DEBUG))
cmd = '{0} bootstrap.py {1}'.format(python, bootstrap_args)
ret = _Popen(cmd, directory=directory, runas=runas, loglevel=loglevel,
env=env, use_vt=use_vt)
output = ret['output']
return {'comment': cmd, 'out': output} | ['def', 'bootstrap', '(', 'directory', '=', "'.'", ',', 'config', '=', "'buildout.cfg'", ',', 'python', '=', 'sys', '.', 'executable', ',', 'onlyif', '=', 'None', ',', 'unless', '=', 'None', ',', 'runas', '=', 'None', ',', 'env', '=', '(', ')', ',', 'distribute', '=', 'None', ',', 'buildout_ver', '=', 'None', ',', 'test_release', '=', 'False', ',', 'offline', '=', 'False', ',', 'new_st', '=', 'None', ',', 'use_vt', '=', 'False', ',', 'loglevel', '=', 'None', ')', ':', 'directory', '=', 'os', '.', 'path', '.', 'abspath', '(', 'directory', ')', 'dbuild', '=', '_dot_buildout', '(', 'directory', ')', 'bootstrap_args', '=', "''", 'has_distribute', '=', '_has_old_distribute', '(', 'python', '=', 'python', ',', 'runas', '=', 'runas', ',', 'env', '=', 'env', ')', 'has_new_st', '=', '_has_setuptools7', '(', 'python', '=', 'python', ',', 'runas', '=', 'runas', ',', 'env', '=', 'env', ')', 'if', '(', 'has_distribute', 'and', 'has_new_st', 'and', 'not', 'distribute', 'and', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'has_distribute', 'and', 'has_new_st', 'and', 'not', 'distribute', 'and', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'has_distribute', 'and', 'has_new_st', 'and', 'distribute', 'and', 'not', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'has_distribute', 'and', 'has_new_st', 'and', 'not', 'distribute', 'and', 'not', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'not', 'has_distribute', 'and', 'has_new_st', 'and', 'not', 'distribute', 'and', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'not', 'has_distribute', 'and', 'has_new_st', 'and', 'not', 'distribute', 'and', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'not', 'has_distribute', 'and', 'has_new_st', 'and', 'distribute', 'and', 'not', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'not', 'has_distribute', 'and', 'has_new_st', 'and', 'not', 'distribute', 'and', 'not', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'has_distribute', 'and', 'not', 'has_new_st', 'and', 'not', 'distribute', 'and', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'has_distribute', 'and', 'not', 'has_new_st', 'and', 'not', 'distribute', 'and', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'has_distribute', 'and', 'not', 'has_new_st', 'and', 'distribute', 'and', 'not', 'new_st', ')', ':', 'new_st', '=', 'False', 'distribute', '=', 'True', 'if', '(', 'has_distribute', 'and', 'not', 'has_new_st', 'and', 'not', 'distribute', 'and', 'not', 'new_st', ')', ':', 'new_st', '=', 'False', 'distribute', '=', 'True', 'if', '(', 'not', 'has_distribute', 'and', 'not', 'has_new_st', 'and', 'not', 'distribute', 'and', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'not', 'has_distribute', 'and', 'not', 'has_new_st', 'and', 'not', 'distribute', 'and', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', '=', 'False', 'if', '(', 'not', 'has_distribute', 'and', 'not', 'has_new_st', 'and', 'distribute', 'and', 'not', 'new_st', ')', ':', 'new_st', '=', 'False', 'distribute', '=', 'True', 'if', '(', 'not', 'has_distribute', 'and', 'not', 'has_new_st', 'and', 'not', 'distribute', 'and', 'not', 'new_st', ')', ':', 'new_st', '=', 'True', 'distribute', 
'=', 'False', 'if', 'new_st', 'and', 'distribute', ':', 'distribute', '=', 'False', 'if', 'new_st', ':', 'distribute', '=', 'False', 'LOG', '.', 'warning', '(', "'Forcing to use setuptools as we have setuptools >= 0.7'", ')', 'if', 'distribute', ':', 'new_st', '=', 'False', 'if', 'buildout_ver', '==', '1', ':', 'LOG', '.', 'warning', '(', "'Using distribute !'", ')', 'bootstrap_args', '+=', "' --distribute'", 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'dbuild', ')', ':', 'os', '.', 'makedirs', '(', 'dbuild', ')', 'upgrade_bootstrap', '(', 'directory', ',', 'offline', '=', 'offline', ',', 'buildout_ver', '=', 'buildout_ver', ')', '# be sure which buildout bootstrap we have', 'b_py', '=', 'os', '.', 'path', '.', 'join', '(', 'directory', ',', "'bootstrap.py'", ')', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'b_py', ')', 'as', 'fic', ':', 'content', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'fic', '.', 'read', '(', ')', ')', 'if', '(', '(', 'test_release', 'is', 'not', 'False', ')', 'and', "' --accept-buildout-test-releases'", 'in', 'content', ')', ':', 'bootstrap_args', '+=', "' --accept-buildout-test-releases'", 'if', 'config', 'and', '\'"-c"\'', 'in', 'content', ':', 'bootstrap_args', '+=', "' -c {0}'", '.', 'format', '(', 'config', ')', '# be sure that the bootstrap belongs to the running user', 'try', ':', 'if', 'runas', ':', 'uid', '=', '__salt__', '[', "'user.info'", ']', '(', 'runas', ')', '[', "'uid'", ']', 'gid', '=', '__salt__', '[', "'user.info'", ']', '(', 'runas', ')', '[', "'gid'", ']', 'os', '.', 'chown', '(', "'bootstrap.py'", ',', 'uid', ',', 'gid', ')', 'except', '(', 'IOError', ',', 'OSError', ')', 'as', 'exc', ':', "# don't block here, try to execute it if can pass", '_logger', '.', 'error', '(', "'BUILDOUT bootstrap permissions error: %s'", ',', 'exc', ',', 'exc_info', '=', '_logger', '.', 'isEnabledFor', '(', 'logging', '.', 'DEBUG', ')', ')', 'cmd', '=', "'{0} bootstrap.py {1}'", '.', 'format', '(', 'python', ',', 'bootstrap_args', ')', 'ret', '=', '_Popen', '(', 'cmd', ',', 'directory', '=', 'directory', ',', 'runas', '=', 'runas', ',', 'loglevel', '=', 'loglevel', ',', 'env', '=', 'env', ',', 'use_vt', '=', 'use_vt', ')', 'output', '=', 'ret', '[', "'output'", ']', 'return', '{', "'comment'", ':', 'cmd', ',', "'out'", ':', 'output', '}'] | Run the buildout bootstrap dance (python bootstrap.py).
directory
directory to execute in
config
alternative buildout configuration file to use
runas
User used to run buildout as
env
environment variables to set when running
buildout_ver
force a specific buildout version (1 | 2)
test_release
buildout accept test release
offline
are we executing buildout in offline mode
distribute
Forcing use of distribute
new_st
Forcing use of setuptools >= 0.7
python
path to a python executable to use in place of default (salt one)
onlyif
Only execute cmd if statement on the host return 0
unless
Do not execute cmd if statement on the host return 0
use_vt
Use the new salt VT to stream output [experimental]
CLI Example:
.. code-block:: bash
salt '*' buildout.bootstrap /srv/mybuildout | ['Run', 'the', 'buildout', 'bootstrap', 'dance', '(', 'python', 'bootstrap', '.', 'py', ')', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zcbuildout.py#L557-L764 |
1,191 | PeerAssets/pypeerassets | pypeerassets/networks.py | net_query | def net_query(name: str) -> Constants:
'''Find the NetworkParams for a network by its long or short name. Raises
UnsupportedNetwork if no NetworkParams is found.
'''
for net_params in networks:
if name in (net_params.name, net_params.shortname,):
return net_params
raise UnsupportedNetwork | python | def net_query(name: str) -> Constants:
'''Find the NetworkParams for a network by its long or short name. Raises
UnsupportedNetwork if no NetworkParams is found.
'''
for net_params in networks:
if name in (net_params.name, net_params.shortname,):
return net_params
raise UnsupportedNetwork | ['def', 'net_query', '(', 'name', ':', 'str', ')', '->', 'Constants', ':', 'for', 'net_params', 'in', 'networks', ':', 'if', 'name', 'in', '(', 'net_params', '.', 'name', ',', 'net_params', '.', 'shortname', ',', ')', ':', 'return', 'net_params', 'raise', 'UnsupportedNetwork'] | Find the NetworkParams for a network by its long or short name. Raises
UnsupportedNetwork if no NetworkParams is found. | ['Find', 'the', 'NetworkParams', 'for', 'a', 'network', 'by', 'its', 'long', 'or', 'short', 'name', '.', 'Raises', 'UnsupportedNetwork', 'if', 'no', 'NetworkParams', 'is', 'found', '.'] | train | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/networks.py#L100-L109 |
1,192 | bitesofcode/projexui | projexui/widgets/xtreewidget/xtreewidgetitem.py | XTreeWidgetItem.setHoverIcon | def setHoverIcon( self, column, icon ):
"""
Returns the icon to use when coloring when the user hovers over
the item for the given column.
:param column | <int>
icon | <QtGui.QIcon)
"""
self._hoverIcon[column] = QtGui.QIcon(icon) | python | def setHoverIcon( self, column, icon ):
"""
Returns the icon to use when coloring when the user hovers over
the item for the given column.
:param column | <int>
icon | <QtGui.QIcon)
"""
self._hoverIcon[column] = QtGui.QIcon(icon) | ['def', 'setHoverIcon', '(', 'self', ',', 'column', ',', 'icon', ')', ':', 'self', '.', '_hoverIcon', '[', 'column', ']', '=', 'QtGui', '.', 'QIcon', '(', 'icon', ')'] | Returns the icon to use when coloring when the user hovers over
the item for the given column.
:param column | <int>
icon | <QtGui.QIcon) | ['Returns', 'the', 'icon', 'to', 'use', 'when', 'coloring', 'when', 'the', 'user', 'hovers', 'over', 'the', 'item', 'for', 'the', 'given', 'column', '.', ':', 'param', 'column', '|', '<int', '>', 'icon', '|', '<QtGui', '.', 'QIcon', ')'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidgetitem.py#L442-L450 |
1,193 | bennyrowland/suspect | suspect/fitting/singlet.py | complex_to_real | def complex_to_real(complex_fid):
"""
Standard optimization routines as used in lmfit require real data. This
function takes a complex FID and constructs a real version by concatenating
the imaginary part to the complex part. The imaginary part is also reversed
to keep the maxima at each end of the FID and avoid discontinuities in the
center.
:param complex_fid: the complex FID to be converted to real.
:return: the real FID, which has twice as many points as the input.
"""
np = complex_fid.shape[0]
real_fid = numpy.zeros(np * 2)
real_fid[:np] = complex_fid.real
real_fid[np:] = complex_fid.imag[::-1]
return real_fid | python | def complex_to_real(complex_fid):
"""
Standard optimization routines as used in lmfit require real data. This
function takes a complex FID and constructs a real version by concatenating
the imaginary part to the complex part. The imaginary part is also reversed
to keep the maxima at each end of the FID and avoid discontinuities in the
center.
:param complex_fid: the complex FID to be converted to real.
:return: the real FID, which has twice as many points as the input.
"""
np = complex_fid.shape[0]
real_fid = numpy.zeros(np * 2)
real_fid[:np] = complex_fid.real
real_fid[np:] = complex_fid.imag[::-1]
return real_fid | ['def', 'complex_to_real', '(', 'complex_fid', ')', ':', 'np', '=', 'complex_fid', '.', 'shape', '[', '0', ']', 'real_fid', '=', 'numpy', '.', 'zeros', '(', 'np', '*', '2', ')', 'real_fid', '[', ':', 'np', ']', '=', 'complex_fid', '.', 'real', 'real_fid', '[', 'np', ':', ']', '=', 'complex_fid', '.', 'imag', '[', ':', ':', '-', '1', ']', 'return', 'real_fid'] | Standard optimization routines as used in lmfit require real data. This
function takes a complex FID and constructs a real version by concatenating
the imaginary part to the complex part. The imaginary part is also reversed
to keep the maxima at each end of the FID and avoid discontinuities in the
center.
:param complex_fid: the complex FID to be converted to real.
:return: the real FID, which has twice as many points as the input. | ['Standard', 'optimization', 'routines', 'as', 'used', 'in', 'lmfit', 'require', 'real', 'data', '.', 'This', 'function', 'takes', 'a', 'complex', 'FID', 'and', 'constructs', 'a', 'real', 'version', 'by', 'concatenating', 'the', 'imaginary', 'part', 'to', 'the', 'complex', 'part', '.', 'The', 'imaginary', 'part', 'is', 'also', 'reversed', 'to', 'keep', 'the', 'maxima', 'at', 'each', 'end', 'of', 'the', 'FID', 'and', 'avoid', 'discontinuities', 'in', 'the', 'center', '.'] | train | https://github.com/bennyrowland/suspect/blob/c09ab0a5013c5a199218214cdd791659243d7e41/suspect/fitting/singlet.py#L10-L25 |
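A self-contained sketch of the packing described here, plus the inverse that recovers the complex FID; only numpy is assumed, and the inverse helper is an illustrative addition rather than part of the suspect package.

import numpy as np

def complex_to_real(complex_fid):
    # First half: real part; second half: imaginary part reversed, so the
    # large early-time samples sit at both ends of the real vector and no
    # discontinuity appears in the middle.
    n = complex_fid.shape[0]
    real_fid = np.zeros(2 * n)
    real_fid[:n] = complex_fid.real
    real_fid[n:] = complex_fid.imag[::-1]
    return real_fid

def real_to_complex(real_fid):
    # Undo the packing above: un-reverse the second half to get the imaginary part.
    n = real_fid.shape[0] // 2
    return real_fid[:n] + 1j * real_fid[n:][::-1]

# Round-trip check:
# fid = np.exp(-np.arange(8) / 4) * np.exp(1j * 0.3 * np.arange(8))
# assert np.allclose(real_to_complex(complex_to_real(fid)), fid)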
1,194 | benfred/implicit | setup.py | extract_gcc_binaries | def extract_gcc_binaries():
"""Try to find GCC on OSX for OpenMP support."""
patterns = ['/opt/local/bin/g++-mp-[0-9].[0-9]',
'/opt/local/bin/g++-mp-[0-9]',
'/usr/local/bin/g++-[0-9].[0-9]',
'/usr/local/bin/g++-[0-9]']
if 'darwin' in platform.platform().lower():
gcc_binaries = []
for pattern in patterns:
gcc_binaries += glob.glob(pattern)
gcc_binaries.sort()
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
return gcc
else:
return None
else:
return None | python | def extract_gcc_binaries():
"""Try to find GCC on OSX for OpenMP support."""
patterns = ['/opt/local/bin/g++-mp-[0-9].[0-9]',
'/opt/local/bin/g++-mp-[0-9]',
'/usr/local/bin/g++-[0-9].[0-9]',
'/usr/local/bin/g++-[0-9]']
if 'darwin' in platform.platform().lower():
gcc_binaries = []
for pattern in patterns:
gcc_binaries += glob.glob(pattern)
gcc_binaries.sort()
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
return gcc
else:
return None
else:
return None | ['def', 'extract_gcc_binaries', '(', ')', ':', 'patterns', '=', '[', "'/opt/local/bin/g++-mp-[0-9].[0-9]'", ',', "'/opt/local/bin/g++-mp-[0-9]'", ',', "'/usr/local/bin/g++-[0-9].[0-9]'", ',', "'/usr/local/bin/g++-[0-9]'", ']', 'if', "'darwin'", 'in', 'platform', '.', 'platform', '(', ')', '.', 'lower', '(', ')', ':', 'gcc_binaries', '=', '[', ']', 'for', 'pattern', 'in', 'patterns', ':', 'gcc_binaries', '+=', 'glob', '.', 'glob', '(', 'pattern', ')', 'gcc_binaries', '.', 'sort', '(', ')', 'if', 'gcc_binaries', ':', '_', ',', 'gcc', '=', 'os', '.', 'path', '.', 'split', '(', 'gcc_binaries', '[', '-', '1', ']', ')', 'return', 'gcc', 'else', ':', 'return', 'None', 'else', ':', 'return', 'None'] | Try to find GCC on OSX for OpenMP support. | ['Try', 'to', 'find', 'GCC', 'on', 'OSX', 'for', 'OpenMP', 'support', '.'] | train | https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/setup.py#L81-L98 |
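For context, a setup script would typically feed the discovered compiler back into the build so that OpenMP extensions can compile on macOS, where the stock clang does not accept -fopenmp. The snippet below is only a sketch of that pattern with a simplified search; the CC/CXX environment-variable handoff is an assumption about usage, not code from the implicit project.

import glob
import os
import platform

def find_macos_gpp():
    # Simplified restatement of the search above: newest Homebrew/MacPorts g++ wins.
    if "darwin" not in platform.platform().lower():
        return None
    matches = sorted(glob.glob("/usr/local/bin/g++-[0-9]*") +
                     glob.glob("/opt/local/bin/g++-mp-[0-9]*"))
    return os.path.basename(matches[-1]) if matches else None

gcc = find_macos_gpp()
if gcc is not None:
    # Steer setuptools/distutils toward the GCC that understands -fopenmp.
    os.environ.setdefault("CC", gcc)
    os.environ.setdefault("CXX", gcc)
openmp_args = ["-fopenmp"] if gcc else []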
1,195 | pjuren/pyokit | src/pyokit/io/genomeAlignment.py | _build_index | def _build_index(maf_strm, ref_spec):
"""Build an index for a MAF genome alig file and return StringIO of it."""
idx_strm = StringIO.StringIO()
bound_iter = functools.partial(genome_alignment_iterator,
reference_species=ref_spec)
hash_func = JustInTimeGenomeAlignmentBlock.build_hash
idx = IndexedFile(maf_strm, bound_iter, hash_func)
idx.write_index(idx_strm)
idx_strm.seek(0) # seek to the start
return idx_strm | python | def _build_index(maf_strm, ref_spec):
"""Build an index for a MAF genome alig file and return StringIO of it."""
idx_strm = StringIO.StringIO()
bound_iter = functools.partial(genome_alignment_iterator,
reference_species=ref_spec)
hash_func = JustInTimeGenomeAlignmentBlock.build_hash
idx = IndexedFile(maf_strm, bound_iter, hash_func)
idx.write_index(idx_strm)
idx_strm.seek(0) # seek to the start
return idx_strm | ['def', '_build_index', '(', 'maf_strm', ',', 'ref_spec', ')', ':', 'idx_strm', '=', 'StringIO', '.', 'StringIO', '(', ')', 'bound_iter', '=', 'functools', '.', 'partial', '(', 'genome_alignment_iterator', ',', 'reference_species', '=', 'ref_spec', ')', 'hash_func', '=', 'JustInTimeGenomeAlignmentBlock', '.', 'build_hash', 'idx', '=', 'IndexedFile', '(', 'maf_strm', ',', 'bound_iter', ',', 'hash_func', ')', 'idx', '.', 'write_index', '(', 'idx_strm', ')', 'idx_strm', '.', 'seek', '(', '0', ')', '# seek to the start', 'return', 'idx_strm'] | Build an index for a MAF genome alig file and return StringIO of it. | ['Build', 'an', 'index', 'for', 'a', 'MAF', 'genome', 'alig', 'file', 'and', 'return', 'StringIO', 'of', 'it', '.'] | train | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/genomeAlignment.py#L297-L306 |
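The pyokit-specific pieces here (IndexedFile, the bound block iterator, the block hash) reduce to a general pattern: scan the stream once, remember the byte offset where each record starts, and keep that mapping so later lookups can seek straight to a record. The standard-library sketch below illustrates the pattern on a toy record format; it is not the pyokit API and the record layout is invented.

import io

def build_offset_index(stream, key_func):
    # Single pass over the stream, recording where each record begins.
    index = {}
    offset = stream.tell()
    for line in iter(stream.readline, ""):
        record = line.rstrip("\n")
        if record:
            index[key_func(record)] = offset
        offset = stream.tell()
    return index

data = io.StringIO("chr1:0 block-a\nchr1:100 block-b\nchr2:50 block-c\n")
idx = build_offset_index(data, key_func=lambda rec: rec.split()[0])
data.seek(idx["chr2:50"])       # jump directly to the wanted block
print(data.readline().strip())  # chr2:50 block-c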
1,196 | Erotemic/utool | utool/util_str.py | _rectify_countdown_or_bool | def _rectify_countdown_or_bool(count_or_bool):
"""
used by recrusive functions to specify which level to turn a bool on in
counting down yeilds True, True, ..., False
conting up yeilds False, False, False, ... True
Args:
count_or_bool (bool or int): if positive will count down, if negative
will count up, if bool will remain same
Returns:
int or bool: count_or_bool_
CommandLine:
python -m utool.util_str --test-_rectify_countdown_or_bool
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import _rectify_countdown_or_bool # NOQA
>>> count_or_bool = True
>>> a1 = (_rectify_countdown_or_bool(2))
>>> a2 = (_rectify_countdown_or_bool(1))
>>> a3 = (_rectify_countdown_or_bool(0))
>>> a4 = (_rectify_countdown_or_bool(-1))
>>> a5 = (_rectify_countdown_or_bool(-2))
>>> a6 = (_rectify_countdown_or_bool(True))
>>> a7 = (_rectify_countdown_or_bool(False))
>>> result = [a1, a2, a3, a4, a5, a6, a7]
>>> print(result)
[1.0, 0.0, 0, 0.0, -1.0, True, False]
[1.0, True, False, False, -1.0, True, False]
"""
if count_or_bool is True or count_or_bool is False:
count_or_bool_ = count_or_bool
elif isinstance(count_or_bool, int):
if count_or_bool == 0:
return 0
sign_ = math.copysign(1, count_or_bool)
count_or_bool_ = int(count_or_bool - sign_)
#if count_or_bool_ == 0:
# return sign_ == 1
else:
count_or_bool_ = False
return count_or_bool_ | python | def _rectify_countdown_or_bool(count_or_bool):
"""
used by recrusive functions to specify which level to turn a bool on in
counting down yeilds True, True, ..., False
conting up yeilds False, False, False, ... True
Args:
count_or_bool (bool or int): if positive will count down, if negative
will count up, if bool will remain same
Returns:
int or bool: count_or_bool_
CommandLine:
python -m utool.util_str --test-_rectify_countdown_or_bool
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import _rectify_countdown_or_bool # NOQA
>>> count_or_bool = True
>>> a1 = (_rectify_countdown_or_bool(2))
>>> a2 = (_rectify_countdown_or_bool(1))
>>> a3 = (_rectify_countdown_or_bool(0))
>>> a4 = (_rectify_countdown_or_bool(-1))
>>> a5 = (_rectify_countdown_or_bool(-2))
>>> a6 = (_rectify_countdown_or_bool(True))
>>> a7 = (_rectify_countdown_or_bool(False))
>>> result = [a1, a2, a3, a4, a5, a6, a7]
>>> print(result)
[1.0, 0.0, 0, 0.0, -1.0, True, False]
[1.0, True, False, False, -1.0, True, False]
"""
if count_or_bool is True or count_or_bool is False:
count_or_bool_ = count_or_bool
elif isinstance(count_or_bool, int):
if count_or_bool == 0:
return 0
sign_ = math.copysign(1, count_or_bool)
count_or_bool_ = int(count_or_bool - sign_)
#if count_or_bool_ == 0:
# return sign_ == 1
else:
count_or_bool_ = False
return count_or_bool_ | ['def', '_rectify_countdown_or_bool', '(', 'count_or_bool', ')', ':', 'if', 'count_or_bool', 'is', 'True', 'or', 'count_or_bool', 'is', 'False', ':', 'count_or_bool_', '=', 'count_or_bool', 'elif', 'isinstance', '(', 'count_or_bool', ',', 'int', ')', ':', 'if', 'count_or_bool', '==', '0', ':', 'return', '0', 'sign_', '=', 'math', '.', 'copysign', '(', '1', ',', 'count_or_bool', ')', 'count_or_bool_', '=', 'int', '(', 'count_or_bool', '-', 'sign_', ')', '#if count_or_bool_ == 0:', '# return sign_ == 1', 'else', ':', 'count_or_bool_', '=', 'False', 'return', 'count_or_bool_'] | used by recrusive functions to specify which level to turn a bool on in
counting down yeilds True, True, ..., False
conting up yeilds False, False, False, ... True
Args:
count_or_bool (bool or int): if positive will count down, if negative
will count up, if bool will remain same
Returns:
int or bool: count_or_bool_
CommandLine:
python -m utool.util_str --test-_rectify_countdown_or_bool
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_str import _rectify_countdown_or_bool # NOQA
>>> count_or_bool = True
>>> a1 = (_rectify_countdown_or_bool(2))
>>> a2 = (_rectify_countdown_or_bool(1))
>>> a3 = (_rectify_countdown_or_bool(0))
>>> a4 = (_rectify_countdown_or_bool(-1))
>>> a5 = (_rectify_countdown_or_bool(-2))
>>> a6 = (_rectify_countdown_or_bool(True))
>>> a7 = (_rectify_countdown_or_bool(False))
>>> result = [a1, a2, a3, a4, a5, a6, a7]
>>> print(result)
[1.0, 0.0, 0, 0.0, -1.0, True, False]
[1.0, True, False, False, -1.0, True, False] | ['used', 'by', 'recrusive', 'functions', 'to', 'specify', 'which', 'level', 'to', 'turn', 'a', 'bool', 'on', 'in', 'counting', 'down', 'yeilds', 'True', 'True', '...', 'False', 'conting', 'up', 'yeilds', 'False', 'False', 'False', '...', 'True'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L1266-L1310 |
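What the helper above is really for is threading a depth budget through recursion: a positive count steps toward zero on each level and goes falsy at the chosen depth, while plain bools pass through unchanged. The sketch below is an invented illustration of that use, not utool code; describe() and its nested-list input are assumptions made for the example.

import math

def rectify_countdown_or_bool(count_or_bool):
    # Standalone copy of the helper above: bools pass through, ints step toward zero.
    if count_or_bool is True or count_or_bool is False:
        return count_or_bool
    if isinstance(count_or_bool, int):
        if count_or_bool == 0:
            return 0
        sign = math.copysign(1, count_or_bool)
        return int(count_or_bool - sign)
    return False

def describe(obj, expand=2):
    # Recurse into nested lists only while the countdown is still truthy.
    if isinstance(obj, list) and expand:
        inner = rectify_countdown_or_bool(expand)
        return "[" + ", ".join(describe(item, inner) for item in obj) + "]"
    return "[...]" if isinstance(obj, list) else repr(obj)

print(describe([1, [2, [3, [4]]]], expand=2))  # [1, [2, [...]]]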
1,197 | saltstack/salt | salt/master.py | Maintenance.handle_key_cache | def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
if six.PY2:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
else:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
self.serial.dump(keys, cache_file) | python | def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
if six.PY2:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
else:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
self.serial.dump(keys, cache_file) | ['def', 'handle_key_cache', '(', 'self', ')', ':', 'if', 'self', '.', 'opts', '[', "'key_cache'", ']', '==', "'sched'", ':', 'keys', '=', '[', ']', '#TODO DRY from CKMinions', 'if', 'self', '.', 'opts', '[', "'transport'", ']', 'in', '(', "'zeromq'", ',', "'tcp'", ')', ':', 'acc', '=', "'minions'", 'else', ':', 'acc', '=', "'accepted'", 'for', 'fn_', 'in', 'os', '.', 'listdir', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'pki_dir'", ']', ',', 'acc', ')', ')', ':', 'if', 'not', 'fn_', '.', 'startswith', '(', "'.'", ')', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'pki_dir'", ']', ',', 'acc', ',', 'fn_', ')', ')', ':', 'keys', '.', 'append', '(', 'fn_', ')', 'log', '.', 'debug', '(', "'Writing master key cache'", ')', '# Write a temporary file securely', 'if', 'six', '.', 'PY2', ':', 'with', 'salt', '.', 'utils', '.', 'atomicfile', '.', 'atomic_open', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'pki_dir'", ']', ',', 'acc', ',', "'.key_cache'", ')', ')', 'as', 'cache_file', ':', 'self', '.', 'serial', '.', 'dump', '(', 'keys', ',', 'cache_file', ')', 'else', ':', 'with', 'salt', '.', 'utils', '.', 'atomicfile', '.', 'atomic_open', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'pki_dir'", ']', ',', 'acc', ',', "'.key_cache'", ')', ',', 'mode', '=', "'wb'", ')', 'as', 'cache_file', ':', 'self', '.', 'serial', '.', 'dump', '(', 'keys', ',', 'cache_file', ')'] | Evaluate accepted keys and create a msgpack file
which contains a list | ['Evaluate', 'accepted', 'keys', 'and', 'create', 'a', 'msgpack', 'file', 'which', 'contains', 'a', 'list'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L247-L270 |
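Outside of Salt the same idea, list the non-hidden key files in a directory and publish that list through an atomic write so readers never observe a half-written cache, can be sketched with the standard library alone. In the sketch below json stands in for Salt's serializer, a temp-file-plus-rename stands in for salt.utils.atomicfile, and write_key_cache is an invented name.

import json
import os
import tempfile

def write_key_cache(key_dir, cache_name=".key_cache"):
    # Collect regular, non-hidden entries (the accepted key files).
    keys = [name for name in os.listdir(key_dir)
            if not name.startswith(".")
            and os.path.isfile(os.path.join(key_dir, name))]
    # Write to a temp file in the same directory, then atomically swap it into place.
    fd, tmp_path = tempfile.mkstemp(dir=key_dir)
    try:
        with os.fdopen(fd, "w") as tmp:
            json.dump(keys, tmp)
        os.replace(tmp_path, os.path.join(key_dir, cache_name))
    except BaseException:
        os.unlink(tmp_path)
        raise
    return keys

demo_dir = tempfile.mkdtemp()
open(os.path.join(demo_dir, "minion-01.pub"), "w").close()
print(write_key_cache(demo_dir))  # ['minion-01.pub']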
1,198 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_sensors.py | SensorsModule.report_change | def report_change(self, name, value, maxdiff=1, deltat=10):
'''report a sensor change'''
r = self.reports[name]
if time.time() < r.last_report + deltat:
return
r.last_report = time.time()
if math.fabs(r.value - value) < maxdiff:
return
r.value = value
self.say("%s %u" % (name, value)) | python | def report_change(self, name, value, maxdiff=1, deltat=10):
'''report a sensor change'''
r = self.reports[name]
if time.time() < r.last_report + deltat:
return
r.last_report = time.time()
if math.fabs(r.value - value) < maxdiff:
return
r.value = value
self.say("%s %u" % (name, value)) | ['def', 'report_change', '(', 'self', ',', 'name', ',', 'value', ',', 'maxdiff', '=', '1', ',', 'deltat', '=', '10', ')', ':', 'r', '=', 'self', '.', 'reports', '[', 'name', ']', 'if', 'time', '.', 'time', '(', ')', '<', 'r', '.', 'last_report', '+', 'deltat', ':', 'return', 'r', '.', 'last_report', '=', 'time', '.', 'time', '(', ')', 'if', 'math', '.', 'fabs', '(', 'r', '.', 'value', '-', 'value', ')', '<', 'maxdiff', ':', 'return', 'r', '.', 'value', '=', 'value', 'self', '.', 'say', '(', '"%s %u"', '%', '(', 'name', ',', 'value', ')', ')'] | report a sensor change | ['report', 'a', 'sensor', 'change'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_sensors.py#L95-L104 |
1,199 | ClericPy/torequests | torequests/dummy.py | Loop.apply | def apply(self, coro_function, args=None, kwargs=None, callback=None):
"""Submit a coro_function(*args, **kwargs) as NewTask to self.loop with loop.frequncy control.
::
from torequests.dummy import Loop
import asyncio
loop = Loop()
async def test(i):
result = await asyncio.sleep(1)
return (loop.frequency, i)
task = loop.apply(test, [1])
print(task)
# loop.x can be ignore
loop.x
print(task.x)
# <NewTask pending coro=<new_coro_func() running at torequests/torequests/dummy.py:154>>
# (Frequency(sem=<0/0>, interval=0, name=loop_sem), 1)
"""
args = args or ()
kwargs = kwargs or {}
coro = self._wrap_coro_function_with_sem(coro_function)(*args, **kwargs)
return self.submit(coro, callback=callback) | python | def apply(self, coro_function, args=None, kwargs=None, callback=None):
"""Submit a coro_function(*args, **kwargs) as NewTask to self.loop with loop.frequncy control.
::
from torequests.dummy import Loop
import asyncio
loop = Loop()
async def test(i):
result = await asyncio.sleep(1)
return (loop.frequency, i)
task = loop.apply(test, [1])
print(task)
# loop.x can be ignore
loop.x
print(task.x)
# <NewTask pending coro=<new_coro_func() running at torequests/torequests/dummy.py:154>>
# (Frequency(sem=<0/0>, interval=0, name=loop_sem), 1)
"""
args = args or ()
kwargs = kwargs or {}
coro = self._wrap_coro_function_with_sem(coro_function)(*args, **kwargs)
return self.submit(coro, callback=callback) | ['def', 'apply', '(', 'self', ',', 'coro_function', ',', 'args', '=', 'None', ',', 'kwargs', '=', 'None', ',', 'callback', '=', 'None', ')', ':', 'args', '=', 'args', 'or', '(', ')', 'kwargs', '=', 'kwargs', 'or', '{', '}', 'coro', '=', 'self', '.', '_wrap_coro_function_with_sem', '(', 'coro_function', ')', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'self', '.', 'submit', '(', 'coro', ',', 'callback', '=', 'callback', ')'] | Submit a coro_function(*args, **kwargs) as NewTask to self.loop with loop.frequncy control.
::
from torequests.dummy import Loop
import asyncio
loop = Loop()
async def test(i):
result = await asyncio.sleep(1)
return (loop.frequency, i)
task = loop.apply(test, [1])
print(task)
# loop.x can be ignore
loop.x
print(task.x)
# <NewTask pending coro=<new_coro_func() running at torequests/torequests/dummy.py:154>>
# (Frequency(sem=<0/0>, interval=0, name=loop_sem), 1) | ['Submit', 'a', 'coro_function', '(', '*', 'args', '**', 'kwargs', ')', 'as', 'NewTask', 'to', 'self', '.', 'loop', 'with', 'loop', '.', 'frequncy', 'control', '.'] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/dummy.py#L208-L233 |
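Stripped of the torequests plumbing, apply() does two things: wrap the coroutine function so each call runs under a concurrency limit, and schedule the wrapped coroutine as a task on the loop. Below is a plain-asyncio sketch of that pattern; Loop, NewTask, frequency control and callbacks are not reproduced, and the semaphore limit of 2 is arbitrary.

import asyncio

def apply_with_sem(coro_function, sem, *args, **kwargs):
    # Wrap the coroutine function so every call acquires the semaphore first,
    # then hand the resulting coroutine to the running loop as a task.
    async def limited():
        async with sem:
            return await coro_function(*args, **kwargs)
    return asyncio.ensure_future(limited())

async def main():
    sem = asyncio.Semaphore(2)          # at most two test() calls in flight
    async def test(i):
        await asyncio.sleep(0.1)
        return i
    tasks = [apply_with_sem(test, sem, i) for i in range(5)]
    print(await asyncio.gather(*tasks))  # [0, 1, 2, 3, 4]

asyncio.run(main())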