Unnamed: 0 (int64, 0 to 10k) | repository_name (string, length 7 to 54) | func_path_in_repository (string, length 5 to 223) | func_name (string, length 1 to 134) | whole_func_string (string, length 100 to 30.3k) | language (string, 1 class) | func_code_string (string, length 100 to 30.3k) | func_code_tokens (string, length 138 to 33.2k) | func_documentation_string (string, length 1 to 15k) | func_documentation_tokens (string, length 5 to 5.14k) | split_name (string, 1 class) | func_code_url (string, length 91 to 315) |
---|---|---|---|---|---|---|---|---|---|---|---|
8,900 | DataDog/integrations-core | kubelet/datadog_checks/kubelet/common.py | get_pod_by_uid | def get_pod_by_uid(uid, podlist):
"""
Searches for a pod uid in the podlist and returns the pod if found
:param uid: pod uid
:param podlist: podlist dict object
:return: pod dict object if found, None if not found
"""
for pod in podlist.get("items", []):
try:
if pod["metadata"]["uid"] == uid:
return pod
except KeyError:
continue
return None | python | def get_pod_by_uid(uid, podlist):
"""
Searches for a pod uid in the podlist and returns the pod if found
:param uid: pod uid
:param podlist: podlist dict object
:return: pod dict object if found, None if not found
"""
for pod in podlist.get("items", []):
try:
if pod["metadata"]["uid"] == uid:
return pod
except KeyError:
continue
return None | ['def', 'get_pod_by_uid', '(', 'uid', ',', 'podlist', ')', ':', 'for', 'pod', 'in', 'podlist', '.', 'get', '(', '"items"', ',', '[', ']', ')', ':', 'try', ':', 'if', 'pod', '[', '"metadata"', ']', '[', '"uid"', ']', '==', 'uid', ':', 'return', 'pod', 'except', 'KeyError', ':', 'continue', 'return', 'None'] | Searches for a pod uid in the podlist and returns the pod if found
:param uid: pod uid
:param podlist: podlist dict object
:return: pod dict object if found, None if not found | ['Searches', 'for', 'a', 'pod', 'uid', 'in', 'the', 'podlist', 'and', 'returns', 'the', 'pod', 'if', 'found', ':', 'param', 'uid', ':', 'pod', 'uid', ':', 'param', 'podlist', ':', 'podlist', 'dict', 'object', ':', 'return', ':', 'pod', 'dict', 'object', 'if', 'found', 'None', 'if', 'not', 'found'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/kubelet/datadog_checks/kubelet/common.py#L41-L54 |
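A minimal usage sketch for the `get_pod_by_uid` helper in the row above; the pod-list dict here is a made-up example in the same shape the kubelet check passes in:

```python
# Hypothetical pod list shaped like a Kubernetes PodList response.
podlist = {
    "items": [
        {"metadata": {"uid": "1234-abcd", "name": "web-0"}},
        {"metadata": {"name": "pod-without-uid"}},  # missing "uid" is skipped via KeyError
    ]
}

pod = get_pod_by_uid("1234-abcd", podlist)
print(pod["metadata"]["name"])                 # -> web-0
print(get_pod_by_uid("missing-uid", podlist))  # -> None
```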
8,901 | pushyzheng/flask-rabbitmq | example/producer/flask_rabbitmq/RabbitMQ.py | RabbitMQ.bind_topic_exchange | def bind_topic_exchange(self, exchange_name, routing_key, queue_name):
"""
Bind a topic exchange to a queue.
:param exchange_name: name of the exchange to bind
:param routing_key:
:param queue_name: name of the queue to bind to the exchange
:return:
"""
self._channel.queue_declare(
queue=queue_name,
auto_delete=True,
durable=True,
)
self._channel.exchange_declare(
exchange=exchange_name,
exchange_type='topic',
auto_delete=True,
)
self._channel.queue_bind(
exchange=exchange_name,
queue=queue_name,
routing_key=routing_key
) | python | def bind_topic_exchange(self, exchange_name, routing_key, queue_name):
"""
Bind a topic exchange to a queue.
:param exchange_name: name of the exchange to bind
:param routing_key:
:param queue_name: name of the queue to bind to the exchange
:return:
"""
self._channel.queue_declare(
queue=queue_name,
auto_delete=True,
durable=True,
)
self._channel.exchange_declare(
exchange=exchange_name,
exchange_type='topic',
auto_delete=True,
)
self._channel.queue_bind(
exchange=exchange_name,
queue=queue_name,
routing_key=routing_key
) | ['def', 'bind_topic_exchange', '(', 'self', ',', 'exchange_name', ',', 'routing_key', ',', 'queue_name', ')', ':', 'self', '.', '_channel', '.', 'queue_declare', '(', 'queue', '=', 'queue_name', ',', 'auto_delete', '=', 'True', ',', 'durable', '=', 'True', ',', ')', 'self', '.', '_channel', '.', 'exchange_declare', '(', 'exchange', '=', 'exchange_name', ',', 'exchange_type', '=', "'topic'", ',', 'auto_delete', '=', 'True', ',', ')', 'self', '.', '_channel', '.', 'queue_bind', '(', 'exchange', '=', 'exchange_name', ',', 'queue', '=', 'queue_name', ',', 'routing_key', '=', 'routing_key', ')'] | Bind a topic exchange to a queue.
:param exchange_name: name of the exchange to bind
:param routing_key:
:param queue_name: name of the queue to bind to the exchange
:return: | ['绑定主题交换机和队列', ':', 'param', 'exchange_name', ':', '需要绑定的交换机名', ':', 'param', 'routing_key', ':', ':', 'param', 'queue_name', ':', '需要绑定的交换机队列名', ':', 'return', ':'] | train | https://github.com/pushyzheng/flask-rabbitmq/blob/beecefdf7bb6ff0892388e2bc303aa96931588bd/example/producer/flask_rabbitmq/RabbitMQ.py#L61-L83 |
8,902 | Cog-Creators/Red-Lavalink | lavalink/player_manager.py | Player.stop | async def stop(self):
"""
Stops playback from lavalink.
.. important::
This method will clear the queue.
"""
await self.node.stop(self.channel.guild.id)
self.queue = []
self.current = None
self.position = 0
self._paused = False | python | async def stop(self):
"""
Stops playback from lavalink.
.. important::
This method will clear the queue.
"""
await self.node.stop(self.channel.guild.id)
self.queue = []
self.current = None
self.position = 0
self._paused = False | ['async', 'def', 'stop', '(', 'self', ')', ':', 'await', 'self', '.', 'node', '.', 'stop', '(', 'self', '.', 'channel', '.', 'guild', '.', 'id', ')', 'self', '.', 'queue', '=', '[', ']', 'self', '.', 'current', '=', 'None', 'self', '.', 'position', '=', '0', 'self', '.', '_paused', '=', 'False'] | Stops playback from lavalink.
.. important::
This method will clear the queue. | ['Stops', 'playback', 'from', 'lavalink', '.'] | train | https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/player_manager.py#L265-L277 |
8,903 | limodou/uliweb | uliweb/i18n/po_merge.py | split_comments | def split_comments(comments):
"""Split COMMENTS into flag comments and other comments. Flag
comments are those that begin with '#,', e.g. '#,fuzzy'."""
flags = []
other = []
for c in comments:
if len(c) > 1 and c[1] == ',':
flags.append(c)
else:
other.append(c)
return flags, other | python | def split_comments(comments):
"""Split COMMENTS into flag comments and other comments. Flag
comments are those that begin with '#,', e.g. '#,fuzzy'."""
flags = []
other = []
for c in comments:
if len(c) > 1 and c[1] == ',':
flags.append(c)
else:
other.append(c)
return flags, other | ['def', 'split_comments', '(', 'comments', ')', ':', 'flags', '=', '[', ']', 'other', '=', '[', ']', 'for', 'c', 'in', 'comments', ':', 'if', 'len', '(', 'c', ')', '>', '1', 'and', 'c', '[', '1', ']', '==', "','", ':', 'flags', '.', 'append', '(', 'c', ')', 'else', ':', 'other', '.', 'append', '(', 'c', ')', 'return', 'flags', ',', 'other'] | Split COMMENTS into flag comments and other comments. Flag
comments are those that begin with '#,', e.g. '#,fuzzy'. | ['Split', 'COMMENTS', 'into', 'flag', 'comments', 'and', 'other', 'comments', '.', 'Flag', 'comments', 'are', 'those', 'that', 'begin', 'with', '#', 'e', '.', 'g', '.', '#', 'fuzzy', '.'] | train | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/i18n/po_merge.py#L61-L71 |
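A short sketch of how `split_comments` above behaves on a few PO-file comment lines (the sample lines are invented for illustration):

```python
comments = ["#, fuzzy", "#,python-format", "# translator note", "#: src/main.c:42"]
flags, other = split_comments(comments)
print(flags)  # ['#, fuzzy', '#,python-format']  (second character is ',')
print(other)  # ['# translator note', '#: src/main.c:42']
```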
8,904 | numenta/htmresearch | projects/sequence_prediction/continuous_sequence/run_adaptive_filter.py | normalizeSequence | def normalizeSequence(sequence):
"""
normalize sequence by subtracting the mean and dividing by the standard deviation
:param sequence: a list of data samples
:param considerDimensions: a list of dimensions to consider
:return: normalized sequence
"""
seq = np.array(sequence).astype('float64')
meanSeq = np.mean(seq)
stdSeq = np.std(seq)
seq = (seq - np.mean(seq)) / np.std(seq)
sequence = seq.tolist()
return sequence, meanSeq, stdSeq | python | def normalizeSequence(sequence):
"""
normalize sequence by subtracting the mean and dividing by the standard deviation
:param sequence: a list of data samples
:param considerDimensions: a list of dimensions to consider
:return: normalized sequence
"""
seq = np.array(sequence).astype('float64')
meanSeq = np.mean(seq)
stdSeq = np.std(seq)
seq = (seq - np.mean(seq)) / np.std(seq)
sequence = seq.tolist()
return sequence, meanSeq, stdSeq | ['def', 'normalizeSequence', '(', 'sequence', ')', ':', 'seq', '=', 'np', '.', 'array', '(', 'sequence', ')', '.', 'astype', '(', "'float64'", ')', 'meanSeq', '=', 'np', '.', 'mean', '(', 'seq', ')', 'stdSeq', '=', 'np', '.', 'std', '(', 'seq', ')', 'seq', '=', '(', 'seq', '-', 'np', '.', 'mean', '(', 'seq', ')', ')', '/', 'np', '.', 'std', '(', 'seq', ')', 'sequence', '=', 'seq', '.', 'tolist', '(', ')', 'return', 'sequence', ',', 'meanSeq', ',', 'stdSeq'] | normalize sequence by subtracting the mean and
:param sequence: a list of data samples
:param considerDimensions: a list of dimensions to consider
:return: normalized sequence | ['normalize', 'sequence', 'by', 'subtracting', 'the', 'mean', 'and', ':', 'param', 'sequence', ':', 'a', 'list', 'of', 'data', 'samples', ':', 'param', 'considerDimensions', ':', 'a', 'list', 'of', 'dimensions', 'to', 'consider', ':', 'return', ':', 'normalized', 'sequence'] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_prediction/continuous_sequence/run_adaptive_filter.py#L109-L123 |
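A quick sketch of calling `normalizeSequence` above on a toy list (values chosen arbitrarily); it returns the z-scored list together with the mean and standard deviation used for the scaling:

```python
import numpy as np  # normalizeSequence relies on numpy

data = [2.0, 4.0, 6.0, 8.0]
normalized, mean_val, std_val = normalizeSequence(data)
print(mean_val)    # 5.0
print(std_val)     # ~2.236 (population standard deviation, numpy's default)
print(normalized)  # approximately [-1.342, -0.447, 0.447, 1.342]
```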
8,905 | garnaat/placebo | placebo/pill.py | Pill._mock_request | def _mock_request(self, **kwargs):
"""
A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined.
"""
model = kwargs.get('model')
service = model.service_model.endpoint_prefix
operation = model.name
LOG.debug('_make_request: %s.%s', service, operation)
return self.load_response(service, operation) | python | def _mock_request(self, **kwargs):
"""
A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined.
"""
model = kwargs.get('model')
service = model.service_model.endpoint_prefix
operation = model.name
LOG.debug('_make_request: %s.%s', service, operation)
return self.load_response(service, operation) | ['def', '_mock_request', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'model', '=', 'kwargs', '.', 'get', '(', "'model'", ')', 'service', '=', 'model', '.', 'service_model', '.', 'endpoint_prefix', 'operation', '=', 'model', '.', 'name', 'LOG', '.', 'debug', '(', "'_make_request: %s.%s'", ',', 'service', ',', 'operation', ')', 'return', 'self', '.', 'load_response', '(', 'service', ',', 'operation', ')'] | A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined. | ['A', 'mocked', 'out', 'make_request', 'call', 'that', 'bypasses', 'all', 'network', 'calls', 'and', 'simply', 'returns', 'any', 'mocked', 'responses', 'defined', '.'] | train | https://github.com/garnaat/placebo/blob/1e8ab91b92fa7c5bb1fdbce2331f0c55455093d7/placebo/pill.py#L290-L299 |
8,906 | ska-sa/katcp-python | katcp/server.py | KATCPServer.start | def start(self, timeout=None):
"""Install the server on its IOLoop, optionally starting the IOLoop.
Parameters
----------
timeout : float or None, optional
Time in seconds to wait for server thread to start.
"""
if self._running.isSet():
raise RuntimeError('Server already started')
self._stopped.clear()
# Make sure we have an ioloop
self.ioloop = self._ioloop_manager.get_ioloop()
self._ioloop_manager.start()
# Set max_buffer_size to ensure streams are closed
# if too-large messages are received
self._tcp_server = tornado.tcpserver.TCPServer(
self.ioloop, max_buffer_size=self.MAX_MSG_SIZE)
self._tcp_server.handle_stream = self._handle_stream
self._server_sock = self._bind_socket(self._bindaddr)
self._bindaddr = self._server_sock.getsockname()
self.ioloop.add_callback(self._install)
if timeout:
return self._running.wait(timeout) | python | def start(self, timeout=None):
"""Install the server on its IOLoop, optionally starting the IOLoop.
Parameters
----------
timeout : float or None, optional
Time in seconds to wait for server thread to start.
"""
if self._running.isSet():
raise RuntimeError('Server already started')
self._stopped.clear()
# Make sure we have an ioloop
self.ioloop = self._ioloop_manager.get_ioloop()
self._ioloop_manager.start()
# Set max_buffer_size to ensure streams are closed
# if too-large messages are received
self._tcp_server = tornado.tcpserver.TCPServer(
self.ioloop, max_buffer_size=self.MAX_MSG_SIZE)
self._tcp_server.handle_stream = self._handle_stream
self._server_sock = self._bind_socket(self._bindaddr)
self._bindaddr = self._server_sock.getsockname()
self.ioloop.add_callback(self._install)
if timeout:
return self._running.wait(timeout) | ['def', 'start', '(', 'self', ',', 'timeout', '=', 'None', ')', ':', 'if', 'self', '.', '_running', '.', 'isSet', '(', ')', ':', 'raise', 'RuntimeError', '(', "'Server already started'", ')', 'self', '.', '_stopped', '.', 'clear', '(', ')', '# Make sure we have an ioloop', 'self', '.', 'ioloop', '=', 'self', '.', '_ioloop_manager', '.', 'get_ioloop', '(', ')', 'self', '.', '_ioloop_manager', '.', 'start', '(', ')', '# Set max_buffer_size to ensure streams are closed', '# if too-large messages are received', 'self', '.', '_tcp_server', '=', 'tornado', '.', 'tcpserver', '.', 'TCPServer', '(', 'self', '.', 'ioloop', ',', 'max_buffer_size', '=', 'self', '.', 'MAX_MSG_SIZE', ')', 'self', '.', '_tcp_server', '.', 'handle_stream', '=', 'self', '.', '_handle_stream', 'self', '.', '_server_sock', '=', 'self', '.', '_bind_socket', '(', 'self', '.', '_bindaddr', ')', 'self', '.', '_bindaddr', '=', 'self', '.', '_server_sock', '.', 'getsockname', '(', ')', 'self', '.', 'ioloop', '.', 'add_callback', '(', 'self', '.', '_install', ')', 'if', 'timeout', ':', 'return', 'self', '.', '_running', '.', 'wait', '(', 'timeout', ')'] | Install the server on its IOLoop, optionally starting the IOLoop.
Parameters
----------
timeout : float or None, optional
Time in seconds to wait for server thread to start. | ['Install', 'the', 'server', 'on', 'its', 'IOLoop', 'optionally', 'starting', 'the', 'IOLoop', '.'] | train | https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/server.py#L369-L394 |
8,907 | evolbioinfo/pastml | pastml/ml.py | choose_ancestral_states_mppa | def choose_ancestral_states_mppa(tree, feature, states, force_joint=True):
"""
Chooses node ancestral states based on their marginal probabilities using MPPA method.
:param force_joint: make sure that Joint state is chosen even if it has a low probability.
:type force_joint: bool
:param tree: tree of interest
:type tree: ete3.Tree
:param feature: character for which the ancestral states are to be chosen
:type feature: str
:param states: possible character states in order corresponding to the probabilities array
:type states: numpy.array
:return: number of ancestral scenarios selected,
calculated by multiplying the number of selected states for all nodes.
Also modified the get_personalized_feature_name(feature, ALLOWED_STATES) feature of each node
to only contain the selected states.
:rtype: int
"""
lh_feature = get_personalized_feature_name(feature, LH)
allowed_state_feature = get_personalized_feature_name(feature, ALLOWED_STATES)
joint_state_feature = get_personalized_feature_name(feature, JOINT_STATE)
n = len(states)
_, state2array = get_state2allowed_states(states, False)
num_scenarios = 1
unresolved_nodes = 0
num_states = 0
# If force_joint == True,
# we make sure that the joint state is always chosen,
# for this we sort the marginal probabilities array as [lowest_non_joint_mp, ..., highest_non_joint_mp, joint_mp]
# select k in 1:n such as the correction between choosing 0, 0, ..., 1/k, ..., 1/k and our sorted array is min
# and return the corresponding states
for node in tree.traverse():
marginal_likelihoods = getattr(node, lh_feature)
marginal_probs = marginal_likelihoods / marginal_likelihoods.sum()
if force_joint:
joint_index = getattr(node, joint_state_feature)
joint_prob = marginal_probs[joint_index]
marginal_probs = np.hstack((np.sort(np.delete(marginal_probs, joint_index)), [joint_prob]))
else:
marginal_probs = np.sort(marginal_probs)
best_k = n
best_correstion = np.inf
for k in range(1, n + 1):
correction = np.hstack((np.zeros(n - k), np.ones(k) / k)) - marginal_probs
correction = correction.dot(correction)
if correction < best_correstion:
best_correstion = correction
best_k = k
num_scenarios *= best_k
num_states += best_k
if force_joint:
indices_selected = sorted(range(n),
key=lambda _: (0 if n == joint_index else 1, -marginal_likelihoods[_]))[:best_k]
else:
indices_selected = sorted(range(n), key=lambda _: -marginal_likelihoods[_])[:best_k]
if best_k == 1:
allowed_states = state2array[indices_selected[0]]
else:
allowed_states = np.zeros(len(states), dtype=np.int)
allowed_states[indices_selected] = 1
unresolved_nodes += 1
node.add_feature(allowed_state_feature, allowed_states)
return num_scenarios, unresolved_nodes, num_states | python | def choose_ancestral_states_mppa(tree, feature, states, force_joint=True):
"""
Chooses node ancestral states based on their marginal probabilities using MPPA method.
:param force_joint: make sure that Joint state is chosen even if it has a low probability.
:type force_joint: bool
:param tree: tree of interest
:type tree: ete3.Tree
:param feature: character for which the ancestral states are to be chosen
:type feature: str
:param states: possible character states in order corresponding to the probabilities array
:type states: numpy.array
:return: number of ancestral scenarios selected,
calculated by multiplying the number of selected states for all nodes.
Also modified the get_personalized_feature_name(feature, ALLOWED_STATES) feature of each node
to only contain the selected states.
:rtype: int
"""
lh_feature = get_personalized_feature_name(feature, LH)
allowed_state_feature = get_personalized_feature_name(feature, ALLOWED_STATES)
joint_state_feature = get_personalized_feature_name(feature, JOINT_STATE)
n = len(states)
_, state2array = get_state2allowed_states(states, False)
num_scenarios = 1
unresolved_nodes = 0
num_states = 0
# If force_joint == True,
# we make sure that the joint state is always chosen,
# for this we sort the marginal probabilities array as [lowest_non_joint_mp, ..., highest_non_joint_mp, joint_mp]
# select k in 1:n such as the correction between choosing 0, 0, ..., 1/k, ..., 1/k and our sorted array is min
# and return the corresponding states
for node in tree.traverse():
marginal_likelihoods = getattr(node, lh_feature)
marginal_probs = marginal_likelihoods / marginal_likelihoods.sum()
if force_joint:
joint_index = getattr(node, joint_state_feature)
joint_prob = marginal_probs[joint_index]
marginal_probs = np.hstack((np.sort(np.delete(marginal_probs, joint_index)), [joint_prob]))
else:
marginal_probs = np.sort(marginal_probs)
best_k = n
best_correstion = np.inf
for k in range(1, n + 1):
correction = np.hstack((np.zeros(n - k), np.ones(k) / k)) - marginal_probs
correction = correction.dot(correction)
if correction < best_correstion:
best_correstion = correction
best_k = k
num_scenarios *= best_k
num_states += best_k
if force_joint:
indices_selected = sorted(range(n),
key=lambda _: (0 if n == joint_index else 1, -marginal_likelihoods[_]))[:best_k]
else:
indices_selected = sorted(range(n), key=lambda _: -marginal_likelihoods[_])[:best_k]
if best_k == 1:
allowed_states = state2array[indices_selected[0]]
else:
allowed_states = np.zeros(len(states), dtype=np.int)
allowed_states[indices_selected] = 1
unresolved_nodes += 1
node.add_feature(allowed_state_feature, allowed_states)
return num_scenarios, unresolved_nodes, num_states | ['def', 'choose_ancestral_states_mppa', '(', 'tree', ',', 'feature', ',', 'states', ',', 'force_joint', '=', 'True', ')', ':', 'lh_feature', '=', 'get_personalized_feature_name', '(', 'feature', ',', 'LH', ')', 'allowed_state_feature', '=', 'get_personalized_feature_name', '(', 'feature', ',', 'ALLOWED_STATES', ')', 'joint_state_feature', '=', 'get_personalized_feature_name', '(', 'feature', ',', 'JOINT_STATE', ')', 'n', '=', 'len', '(', 'states', ')', '_', ',', 'state2array', '=', 'get_state2allowed_states', '(', 'states', ',', 'False', ')', 'num_scenarios', '=', '1', 'unresolved_nodes', '=', '0', 'num_states', '=', '0', '# If force_joint == True,', '# we make sure that the joint state is always chosen,', '# for this we sort the marginal probabilities array as [lowest_non_joint_mp, ..., highest_non_joint_mp, joint_mp]', '# select k in 1:n such as the correction between choosing 0, 0, ..., 1/k, ..., 1/k and our sorted array is min', '# and return the corresponding states', 'for', 'node', 'in', 'tree', '.', 'traverse', '(', ')', ':', 'marginal_likelihoods', '=', 'getattr', '(', 'node', ',', 'lh_feature', ')', 'marginal_probs', '=', 'marginal_likelihoods', '/', 'marginal_likelihoods', '.', 'sum', '(', ')', 'if', 'force_joint', ':', 'joint_index', '=', 'getattr', '(', 'node', ',', 'joint_state_feature', ')', 'joint_prob', '=', 'marginal_probs', '[', 'joint_index', ']', 'marginal_probs', '=', 'np', '.', 'hstack', '(', '(', 'np', '.', 'sort', '(', 'np', '.', 'delete', '(', 'marginal_probs', ',', 'joint_index', ')', ')', ',', '[', 'joint_prob', ']', ')', ')', 'else', ':', 'marginal_probs', '=', 'np', '.', 'sort', '(', 'marginal_probs', ')', 'best_k', '=', 'n', 'best_correstion', '=', 'np', '.', 'inf', 'for', 'k', 'in', 'range', '(', '1', ',', 'n', '+', '1', ')', ':', 'correction', '=', 'np', '.', 'hstack', '(', '(', 'np', '.', 'zeros', '(', 'n', '-', 'k', ')', ',', 'np', '.', 'ones', '(', 'k', ')', '/', 'k', ')', ')', '-', 'marginal_probs', 'correction', '=', 'correction', '.', 'dot', '(', 'correction', ')', 'if', 'correction', '<', 'best_correstion', ':', 'best_correstion', '=', 'correction', 'best_k', '=', 'k', 'num_scenarios', '*=', 'best_k', 'num_states', '+=', 'best_k', 'if', 'force_joint', ':', 'indices_selected', '=', 'sorted', '(', 'range', '(', 'n', ')', ',', 'key', '=', 'lambda', '_', ':', '(', '0', 'if', 'n', '==', 'joint_index', 'else', '1', ',', '-', 'marginal_likelihoods', '[', '_', ']', ')', ')', '[', ':', 'best_k', ']', 'else', ':', 'indices_selected', '=', 'sorted', '(', 'range', '(', 'n', ')', ',', 'key', '=', 'lambda', '_', ':', '-', 'marginal_likelihoods', '[', '_', ']', ')', '[', ':', 'best_k', ']', 'if', 'best_k', '==', '1', ':', 'allowed_states', '=', 'state2array', '[', 'indices_selected', '[', '0', ']', ']', 'else', ':', 'allowed_states', '=', 'np', '.', 'zeros', '(', 'len', '(', 'states', ')', ',', 'dtype', '=', 'np', '.', 'int', ')', 'allowed_states', '[', 'indices_selected', ']', '=', '1', 'unresolved_nodes', '+=', '1', 'node', '.', 'add_feature', '(', 'allowed_state_feature', ',', 'allowed_states', ')', 'return', 'num_scenarios', ',', 'unresolved_nodes', ',', 'num_states'] | Chooses node ancestral states based on their marginal probabilities using MPPA method.
:param force_joint: make sure that Joint state is chosen even if it has a low probability.
:type force_joint: bool
:param tree: tree of interest
:type tree: ete3.Tree
:param feature: character for which the ancestral states are to be chosen
:type feature: str
:param states: possible character states in order corresponding to the probabilities array
:type states: numpy.array
:return: number of ancestral scenarios selected,
calculated by multiplying the number of selected states for all nodes.
Also modified the get_personalized_feature_name(feature, ALLOWED_STATES) feature of each node
to only contain the selected states.
:rtype: int | ['Chooses', 'node', 'ancestral', 'states', 'based', 'on', 'their', 'marginal', 'probabilities', 'using', 'MPPA', 'method', '.'] | train | https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/ml.py#L496-L563 |
8,908 | phaethon/kamene | kamene/arch/windows/__init__.py | sniff | def sniff(count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None, stop_callback=None, *arg, **karg):
"""Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
Select interface to sniff by setting conf.iface. Use show_interfaces() to see interface names.
count: number of packets to capture. 0 means infinity
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned,
it is displayed. Ex:
ex: prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
if further action may be done
ex: lfilter = lambda x: x.haslayer(Padding)
offline: pcap file to read packets from, instead of sniffing them
timeout: stop sniffing after a given time (default: None)
L2socket: use the provided L2socket
stop_callback: Call every loop to determine if we need
to stop the capture
"""
c = 0
if offline is None:
log_runtime.info('Sniffing on %s' % conf.iface)
if L2socket is None:
L2socket = conf.L2listen
s = L2socket(type=ETH_P_ALL, *arg, **karg)
else:
s = PcapReader(offline)
lst = []
if timeout is not None:
stoptime = time.time()+timeout
remain = None
while 1:
try:
if timeout is not None:
remain = stoptime-time.time()
if remain <= 0:
break
if stop_callback and stop_callback():
break
try:
p = s.recv(MTU)
except PcapTimeoutElapsed:
continue
if p is None:
break
if lfilter and not lfilter(p):
continue
if store:
lst.append(p)
c += 1
if prn:
r = prn(p)
if r is not None:
print(r)
if count > 0 and c >= count:
break
except KeyboardInterrupt:
break
s.close()
return plist.PacketList(lst,"Sniffed") | python | def sniff(count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None, stop_callback=None, *arg, **karg):
"""Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
Select interface to sniff by setting conf.iface. Use show_interfaces() to see interface names.
count: number of packets to capture. 0 means infinity
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned,
it is displayed. Ex:
ex: prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
if further action may be done
ex: lfilter = lambda x: x.haslayer(Padding)
offline: pcap file to read packets from, instead of sniffing them
timeout: stop sniffing after a given time (default: None)
L2socket: use the provided L2socket
stop_callback: Call every loop to determine if we need
to stop the capture
"""
c = 0
if offline is None:
log_runtime.info('Sniffing on %s' % conf.iface)
if L2socket is None:
L2socket = conf.L2listen
s = L2socket(type=ETH_P_ALL, *arg, **karg)
else:
s = PcapReader(offline)
lst = []
if timeout is not None:
stoptime = time.time()+timeout
remain = None
while 1:
try:
if timeout is not None:
remain = stoptime-time.time()
if remain <= 0:
break
if stop_callback and stop_callback():
break
try:
p = s.recv(MTU)
except PcapTimeoutElapsed:
continue
if p is None:
break
if lfilter and not lfilter(p):
continue
if store:
lst.append(p)
c += 1
if prn:
r = prn(p)
if r is not None:
print(r)
if count > 0 and c >= count:
break
except KeyboardInterrupt:
break
s.close()
return plist.PacketList(lst,"Sniffed") | ['def', 'sniff', '(', 'count', '=', '0', ',', 'store', '=', '1', ',', 'offline', '=', 'None', ',', 'prn', '=', 'None', ',', 'lfilter', '=', 'None', ',', 'L2socket', '=', 'None', ',', 'timeout', '=', 'None', ',', 'stop_callback', '=', 'None', ',', '*', 'arg', ',', '*', '*', 'karg', ')', ':', 'c', '=', '0', 'if', 'offline', 'is', 'None', ':', 'log_runtime', '.', 'info', '(', "'Sniffing on %s'", '%', 'conf', '.', 'iface', ')', 'if', 'L2socket', 'is', 'None', ':', 'L2socket', '=', 'conf', '.', 'L2listen', 's', '=', 'L2socket', '(', 'type', '=', 'ETH_P_ALL', ',', '*', 'arg', ',', '*', '*', 'karg', ')', 'else', ':', 's', '=', 'PcapReader', '(', 'offline', ')', 'lst', '=', '[', ']', 'if', 'timeout', 'is', 'not', 'None', ':', 'stoptime', '=', 'time', '.', 'time', '(', ')', '+', 'timeout', 'remain', '=', 'None', 'while', '1', ':', 'try', ':', 'if', 'timeout', 'is', 'not', 'None', ':', 'remain', '=', 'stoptime', '-', 'time', '.', 'time', '(', ')', 'if', 'remain', '<=', '0', ':', 'break', 'if', 'stop_callback', 'and', 'stop_callback', '(', ')', ':', 'break', 'try', ':', 'p', '=', 's', '.', 'recv', '(', 'MTU', ')', 'except', 'PcapTimeoutElapsed', ':', 'continue', 'if', 'p', 'is', 'None', ':', 'break', 'if', 'lfilter', 'and', 'not', 'lfilter', '(', 'p', ')', ':', 'continue', 'if', 'store', ':', 'lst', '.', 'append', '(', 'p', ')', 'c', '+=', '1', 'if', 'prn', ':', 'r', '=', 'prn', '(', 'p', ')', 'if', 'r', 'is', 'not', 'None', ':', 'print', '(', 'r', ')', 'if', 'count', '>', '0', 'and', 'c', '>=', 'count', ':', 'break', 'except', 'KeyboardInterrupt', ':', 'break', 's', '.', 'close', '(', ')', 'return', 'plist', '.', 'PacketList', '(', 'lst', ',', '"Sniffed"', ')'] | Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
Select interface to sniff by setting conf.iface. Use show_interfaces() to see interface names.
count: number of packets to capture. 0 means infinity
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned,
it is displayed. Ex:
ex: prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
if further action may be done
ex: lfilter = lambda x: x.haslayer(Padding)
offline: pcap file to read packets from, instead of sniffing them
timeout: stop sniffing after a given time (default: None)
L2socket: use the provided L2socket
stop_callback: Call every loop to determine if we need
to stop the capture | ['Sniff', 'packets', 'sniff', '(', '[', 'count', '=', '0', ']', '[', 'prn', '=', 'None', ']', '[', 'store', '=', '1', ']', '[', 'offline', '=', 'None', ']', '[', 'lfilter', '=', 'None', ']', '+', 'L2ListenSocket', 'args', ')', '-', '>', 'list', 'of', 'packets', 'Select', 'interface', 'to', 'sniff', 'by', 'setting', 'conf', '.', 'iface', '.', 'Use', 'show_interfaces', '()', 'to', 'see', 'interface', 'names', '.', 'count', ':', 'number', 'of', 'packets', 'to', 'capture', '.', '0', 'means', 'infinity', 'store', ':', 'wether', 'to', 'store', 'sniffed', 'packets', 'or', 'discard', 'them', 'prn', ':', 'function', 'to', 'apply', 'to', 'each', 'packet', '.', 'If', 'something', 'is', 'returned', 'it', 'is', 'displayed', '.', 'Ex', ':', 'ex', ':', 'prn', '=', 'lambda', 'x', ':', 'x', '.', 'summary', '()', 'lfilter', ':', 'python', 'function', 'applied', 'to', 'each', 'packet', 'to', 'determine', 'if', 'further', 'action', 'may', 'be', 'done', 'ex', ':', 'lfilter', '=', 'lambda', 'x', ':', 'x', '.', 'haslayer', '(', 'Padding', ')', 'offline', ':', 'pcap', 'file', 'to', 'read', 'packets', 'from', 'instead', 'of', 'sniffing', 'them', 'timeout', ':', 'stop', 'sniffing', 'after', 'a', 'given', 'time', '(', 'default', ':', 'None', ')', 'L2socket', ':', 'use', 'the', 'provided', 'L2socket', 'stop_callback', ':', 'Call', 'every', 'loop', 'to', 'determine', 'if', 'we', 'need', 'to', 'stop', 'the', 'capture'] | train | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/arch/windows/__init__.py#L445-L506 |
8,909 | fermiPy/fermipy | fermipy/stats_utils.py | norm | def norm(x, mu, sigma=1.0):
""" Scipy norm function """
return stats.norm(loc=mu, scale=sigma).pdf(x) | python | def norm(x, mu, sigma=1.0):
""" Scipy norm function """
return stats.norm(loc=mu, scale=sigma).pdf(x) | ['def', 'norm', '(', 'x', ',', 'mu', ',', 'sigma', '=', '1.0', ')', ':', 'return', 'stats', '.', 'norm', '(', 'loc', '=', 'mu', ',', 'scale', '=', 'sigma', ')', '.', 'pdf', '(', 'x', ')'] | Scipy norm function | ['Scipy', 'norm', 'function'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/stats_utils.py#L14-L16 |
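A minimal sketch of the `norm` wrapper above; it simply evaluates a Gaussian probability density at `x`, so for example:

```python
import numpy as np
from scipy import stats  # the module the wrapper delegates to

print(norm(0.0, mu=0.0, sigma=1.0))              # ~0.3989, the standard normal pdf at 0
print(norm(np.array([-1.0, 0.0, 1.0]), mu=0.0))  # pdf() also accepts arrays
```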
8,910 | klen/aioauth-client | aioauth_client.py | OAuth1Client.request | def request(self, method, url, params=None, **aio_kwargs):
"""Make a request to provider."""
oparams = {
'oauth_consumer_key': self.consumer_key,
'oauth_nonce': sha1(str(RANDOM()).encode('ascii')).hexdigest(),
'oauth_signature_method': self.signature.name,
'oauth_timestamp': str(int(time.time())),
'oauth_version': self.version,
}
oparams.update(params or {})
if self.oauth_token:
oparams['oauth_token'] = self.oauth_token
url = self._get_url(url)
if urlsplit(url).query:
raise ValueError(
'Request parameters should be in the "params" parameter, '
'not inlined in the URL')
oparams['oauth_signature'] = self.signature.sign(
self.consumer_secret, method, url,
oauth_token_secret=self.oauth_token_secret, **oparams)
self.logger.debug("%s %s", url, oparams)
return self._request(method, url, params=oparams, **aio_kwargs) | python | def request(self, method, url, params=None, **aio_kwargs):
"""Make a request to provider."""
oparams = {
'oauth_consumer_key': self.consumer_key,
'oauth_nonce': sha1(str(RANDOM()).encode('ascii')).hexdigest(),
'oauth_signature_method': self.signature.name,
'oauth_timestamp': str(int(time.time())),
'oauth_version': self.version,
}
oparams.update(params or {})
if self.oauth_token:
oparams['oauth_token'] = self.oauth_token
url = self._get_url(url)
if urlsplit(url).query:
raise ValueError(
'Request parameters should be in the "params" parameter, '
'not inlined in the URL')
oparams['oauth_signature'] = self.signature.sign(
self.consumer_secret, method, url,
oauth_token_secret=self.oauth_token_secret, **oparams)
self.logger.debug("%s %s", url, oparams)
return self._request(method, url, params=oparams, **aio_kwargs) | ['def', 'request', '(', 'self', ',', 'method', ',', 'url', ',', 'params', '=', 'None', ',', '*', '*', 'aio_kwargs', ')', ':', 'oparams', '=', '{', "'oauth_consumer_key'", ':', 'self', '.', 'consumer_key', ',', "'oauth_nonce'", ':', 'sha1', '(', 'str', '(', 'RANDOM', '(', ')', ')', '.', 'encode', '(', "'ascii'", ')', ')', '.', 'hexdigest', '(', ')', ',', "'oauth_signature_method'", ':', 'self', '.', 'signature', '.', 'name', ',', "'oauth_timestamp'", ':', 'str', '(', 'int', '(', 'time', '.', 'time', '(', ')', ')', ')', ',', "'oauth_version'", ':', 'self', '.', 'version', ',', '}', 'oparams', '.', 'update', '(', 'params', 'or', '{', '}', ')', 'if', 'self', '.', 'oauth_token', ':', 'oparams', '[', "'oauth_token'", ']', '=', 'self', '.', 'oauth_token', 'url', '=', 'self', '.', '_get_url', '(', 'url', ')', 'if', 'urlsplit', '(', 'url', ')', '.', 'query', ':', 'raise', 'ValueError', '(', '\'Request parameters should be in the "params" parameter, \'', "'not inlined in the URL'", ')', 'oparams', '[', "'oauth_signature'", ']', '=', 'self', '.', 'signature', '.', 'sign', '(', 'self', '.', 'consumer_secret', ',', 'method', ',', 'url', ',', 'oauth_token_secret', '=', 'self', '.', 'oauth_token_secret', ',', '*', '*', 'oparams', ')', 'self', '.', 'logger', '.', 'debug', '(', '"%s %s"', ',', 'url', ',', 'oparams', ')', 'return', 'self', '.', '_request', '(', 'method', ',', 'url', ',', 'params', '=', 'oparams', ',', '*', '*', 'aio_kwargs', ')'] | Make a request to provider. | ['Make', 'a', 'request', 'to', 'provider', '.'] | train | https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L220-L246 |
8,911 | seibert-media/Highton | highton/fields/list_field.py | ListField.encode | def encode(self):
"""
Just iterate over the child elements and append them to the current element
:return: the encoded element
:rtype: xml.etree.ElementTree.Element
"""
element = ElementTree.Element(
self.name,
attrib={'type': FieldConstants.ARRAY},
)
for item in self.value:
element.append(item.encode())
return element | python | def encode(self):
"""
Just iterate over the child elements and append them to the current element
:return: the encoded element
:rtype: xml.etree.ElementTree.Element
"""
element = ElementTree.Element(
self.name,
attrib={'type': FieldConstants.ARRAY},
)
for item in self.value:
element.append(item.encode())
return element | ['def', 'encode', '(', 'self', ')', ':', 'element', '=', 'ElementTree', '.', 'Element', '(', 'self', '.', 'name', ',', 'attrib', '=', '{', "'type'", ':', 'FieldConstants', '.', 'ARRAY', '}', ',', ')', 'for', 'item', 'in', 'self', '.', 'value', ':', 'element', '.', 'append', '(', 'item', '.', 'encode', '(', ')', ')', 'return', 'element'] | Just iterate over the child elements and append them to the current element
:return: the encoded element
:rtype: xml.etree.ElementTree.Element | ['Just', 'iterate', 'over', 'the', 'child', 'elements', 'and', 'append', 'them', 'to', 'the', 'current', 'element'] | train | https://github.com/seibert-media/Highton/blob/1519e4fb105f62882c2e7bc81065d994649558d8/highton/fields/list_field.py#L18-L31 |
8,912 | abe-winter/pg13-py | pg13/pg.py | Row.updatewhere | def updatewhere(clas,pool_or_cursor,where_keys,**update_keys):
"this doesn't allow raw_keys for now"
# if clas.JSONFIELDS: raise NotImplementedError # todo(awinter): do I need to make the same change for SpecialField?
if not where_keys or not update_keys: raise ValueError
setclause=','.join(k+'=%s' for k in update_keys)
whereclause=' and '.join(eqexpr(k,v) for k,v in where_keys.items())
q='update %s set %s where %s'%(clas.TABLE,setclause,whereclause)
vals = tuple(update_keys.values()+where_keys.values())
commit_or_execute(pool_or_cursor,q,vals) | python | def updatewhere(clas,pool_or_cursor,where_keys,**update_keys):
"this doesn't allow raw_keys for now"
# if clas.JSONFIELDS: raise NotImplementedError # todo(awinter): do I need to make the same change for SpecialField?
if not where_keys or not update_keys: raise ValueError
setclause=','.join(k+'=%s' for k in update_keys)
whereclause=' and '.join(eqexpr(k,v) for k,v in where_keys.items())
q='update %s set %s where %s'%(clas.TABLE,setclause,whereclause)
vals = tuple(update_keys.values()+where_keys.values())
commit_or_execute(pool_or_cursor,q,vals) | ['def', 'updatewhere', '(', 'clas', ',', 'pool_or_cursor', ',', 'where_keys', ',', '*', '*', 'update_keys', ')', ':', '# if clas.JSONFIELDS: raise NotImplementedError # todo(awinter): do I need to make the same change for SpecialField?\r', 'if', 'not', 'where_keys', 'or', 'not', 'update_keys', ':', 'raise', 'ValueError', 'setclause', '=', "','", '.', 'join', '(', 'k', '+', "'=%s'", 'for', 'k', 'in', 'update_keys', ')', 'whereclause', '=', "' and '", '.', 'join', '(', 'eqexpr', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'where_keys', '.', 'items', '(', ')', ')', 'q', '=', "'update %s set %s where %s'", '%', '(', 'clas', '.', 'TABLE', ',', 'setclause', ',', 'whereclause', ')', 'vals', '=', 'tuple', '(', 'update_keys', '.', 'values', '(', ')', '+', 'where_keys', '.', 'values', '(', ')', ')', 'commit_or_execute', '(', 'pool_or_cursor', ',', 'q', ',', 'vals', ')'] | this doesn't allow raw_keys for now | ['this', 'doesn', 't', 'allow', 'raw_keys', 'for', 'now'] | train | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/pg.py#L277-L285 |
8,913 | GNS3/gns3-server | gns3server/compute/dynamips/nodes/router.py | Router.set_idlesleep | def set_idlesleep(self, idlesleep):
"""
Sets CPU idle sleep time value.
:param idlesleep: idle sleep value (integer)
"""
is_running = yield from self.is_running()
if is_running: # router is running
yield from self._hypervisor.send('vm set_idle_sleep_time "{name}" 0 {idlesleep}'.format(name=self._name,
idlesleep=idlesleep))
log.info('Router "{name}" [{id}]: idlesleep updated from {old_idlesleep} to {new_idlesleep}'.format(name=self._name,
id=self._id,
old_idlesleep=self._idlesleep,
new_idlesleep=idlesleep))
self._idlesleep = idlesleep | python | def set_idlesleep(self, idlesleep):
"""
Sets CPU idle sleep time value.
:param idlesleep: idle sleep value (integer)
"""
is_running = yield from self.is_running()
if is_running: # router is running
yield from self._hypervisor.send('vm set_idle_sleep_time "{name}" 0 {idlesleep}'.format(name=self._name,
idlesleep=idlesleep))
log.info('Router "{name}" [{id}]: idlesleep updated from {old_idlesleep} to {new_idlesleep}'.format(name=self._name,
id=self._id,
old_idlesleep=self._idlesleep,
new_idlesleep=idlesleep))
self._idlesleep = idlesleep | ['def', 'set_idlesleep', '(', 'self', ',', 'idlesleep', ')', ':', 'is_running', '=', 'yield', 'from', 'self', '.', 'is_running', '(', ')', 'if', 'is_running', ':', '# router is running', 'yield', 'from', 'self', '.', '_hypervisor', '.', 'send', '(', '\'vm set_idle_sleep_time "{name}" 0 {idlesleep}\'', '.', 'format', '(', 'name', '=', 'self', '.', '_name', ',', 'idlesleep', '=', 'idlesleep', ')', ')', 'log', '.', 'info', '(', '\'Router "{name}" [{id}]: idlesleep updated from {old_idlesleep} to {new_idlesleep}\'', '.', 'format', '(', 'name', '=', 'self', '.', '_name', ',', 'id', '=', 'self', '.', '_id', ',', 'old_idlesleep', '=', 'self', '.', '_idlesleep', ',', 'new_idlesleep', '=', 'idlesleep', ')', ')', 'self', '.', '_idlesleep', '=', 'idlesleep'] | Sets CPU idle sleep time value.
:param idlesleep: idle sleep value (integer) | ['Sets', 'CPU', 'idle', 'sleep', 'time', 'value', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/router.py#L767-L784 |
8,914 | weso/CWR-DataApi | cwr/grammar/field/basic.py | _check_above_value_float | def _check_above_value_float(string, minimum):
"""
Checks that the number parsed from the string is above a minimum.
This is used on compulsory numeric fields.
If the value is not above the minimum an exception is thrown.
:param string: the field value
:param minimum: minimum value
"""
value = float(string)
if value < minimum:
message = 'The Numeric Field value should be above %s' % minimum
raise pp.ParseException(message) | python | def _check_above_value_float(string, minimum):
"""
Checks that the number parsed from the string is above a minimum.
This is used on compulsory numeric fields.
If the value is not above the minimum an exception is thrown.
:param string: the field value
:param minimum: minimum value
"""
value = float(string)
if value < minimum:
message = 'The Numeric Field value should be above %s' % minimum
raise pp.ParseException(message) | ['def', '_check_above_value_float', '(', 'string', ',', 'minimum', ')', ':', 'value', '=', 'float', '(', 'string', ')', 'if', 'value', '<', 'minimum', ':', 'message', '=', "'The Numeric Field value should be above %s'", '%', 'minimum', 'raise', 'pp', '.', 'ParseException', '(', 'message', ')'] | Checks that the number parsed from the string is above a minimum.
This is used on compulsory numeric fields.
If the value is not above the minimum an exception is thrown.
:param string: the field value
:param minimum: minimum value | ['Checks', 'that', 'the', 'number', 'parsed', 'from', 'the', 'string', 'is', 'above', 'a', 'minimum', '.'] | train | https://github.com/weso/CWR-DataApi/blob/f3b6ba8308c901b6ab87073c155c08e30692333c/cwr/grammar/field/basic.py#L261-L276 |
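A small sketch of how `_check_above_value_float` above is meant to be used; it either returns silently or raises a pyparsing `ParseException` (the values below are arbitrary):

```python
import pyparsing as pp  # imported as pp in the source module

_check_above_value_float('12.50', 0)     # 12.5 >= 0, passes silently
try:
    _check_above_value_float('-3.1', 0)  # -3.1 < 0, so an exception is raised
except pp.ParseException as error:
    print(error)  # message: 'The Numeric Field value should be above 0'
```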
8,915 | fermiPy/fermipy | fermipy/diffuse/name_policy.py | NameFactory.generic | def generic(self, input_string, **kwargs):
""" return a generic filename for a given dataset and component
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
return input_string.format(**kwargs_copy) | python | def generic(self, input_string, **kwargs):
""" return a generic filename for a given dataset and component
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
return input_string.format(**kwargs_copy) | ['def', 'generic', '(', 'self', ',', 'input_string', ',', '*', '*', 'kwargs', ')', ':', 'kwargs_copy', '=', 'self', '.', 'base_dict', '.', 'copy', '(', ')', 'kwargs_copy', '.', 'update', '(', '*', '*', 'kwargs', ')', 'kwargs_copy', '[', "'dataset'", ']', '=', 'kwargs', '.', 'get', '(', "'dataset'", ',', 'self', '.', 'dataset', '(', '*', '*', 'kwargs', ')', ')', 'kwargs_copy', '[', "'component'", ']', '=', 'kwargs', '.', 'get', '(', "'component'", ',', 'self', '.', 'component', '(', '*', '*', 'kwargs', ')', ')', 'self', '.', '_replace_none', '(', 'kwargs_copy', ')', 'return', 'input_string', '.', 'format', '(', '*', '*', 'kwargs_copy', ')'] | return a generic filename for a given dataset and component | ['return', 'a', 'generic', 'filename', 'for', 'a', 'given', 'dataset', 'and', 'component'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L594-L603 |
8,916 | tensorpack/tensorpack | examples/basics/mnist-visualizations.py | visualize_conv_activations | def visualize_conv_activations(activation, name):
"""Visualize activations for convolution layers.
Remarks:
This tries to place all activations into a square.
Args:
activation: tensor with the activation [B,H,W,C]
name: label for tensorboard
Returns:
image of almost all activations
"""
import math
with tf.name_scope('visualize_act_' + name):
_, h, w, c = activation.get_shape().as_list()
rows = []
c_per_row = int(math.sqrt(c))
for y in range(0, c - c_per_row, c_per_row):
row = activation[:, :, :, y:y + c_per_row] # [?, H, W, 32] --> [?, H, W, 5]
cols = tf.unstack(row, axis=3) # [?, H, W, 5] --> 5 * [?, H, W]
row = tf.concat(cols, 1)
rows.append(row)
viz = tf.concat(rows, 2)
tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1)) | python | def visualize_conv_activations(activation, name):
"""Visualize activations for convolution layers.
Remarks:
This tries to place all activations into a square.
Args:
activation: tensor with the activation [B,H,W,C]
name: label for tensorboard
Returns:
image of almost all activations
"""
import math
with tf.name_scope('visualize_act_' + name):
_, h, w, c = activation.get_shape().as_list()
rows = []
c_per_row = int(math.sqrt(c))
for y in range(0, c - c_per_row, c_per_row):
row = activation[:, :, :, y:y + c_per_row] # [?, H, W, 32] --> [?, H, W, 5]
cols = tf.unstack(row, axis=3) # [?, H, W, 5] --> 5 * [?, H, W]
row = tf.concat(cols, 1)
rows.append(row)
viz = tf.concat(rows, 2)
tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1)) | ['def', 'visualize_conv_activations', '(', 'activation', ',', 'name', ')', ':', 'import', 'math', 'with', 'tf', '.', 'name_scope', '(', "'visualize_act_'", '+', 'name', ')', ':', '_', ',', 'h', ',', 'w', ',', 'c', '=', 'activation', '.', 'get_shape', '(', ')', '.', 'as_list', '(', ')', 'rows', '=', '[', ']', 'c_per_row', '=', 'int', '(', 'math', '.', 'sqrt', '(', 'c', ')', ')', 'for', 'y', 'in', 'range', '(', '0', ',', 'c', '-', 'c_per_row', ',', 'c_per_row', ')', ':', 'row', '=', 'activation', '[', ':', ',', ':', ',', ':', ',', 'y', ':', 'y', '+', 'c_per_row', ']', '# [?, H, W, 32] --> [?, H, W, 5]', 'cols', '=', 'tf', '.', 'unstack', '(', 'row', ',', 'axis', '=', '3', ')', '# [?, H, W, 5] --> 5 * [?, H, W]', 'row', '=', 'tf', '.', 'concat', '(', 'cols', ',', '1', ')', 'rows', '.', 'append', '(', 'row', ')', 'viz', '=', 'tf', '.', 'concat', '(', 'rows', ',', '2', ')', 'tf', '.', 'summary', '.', 'image', '(', "'visualize_act_'", '+', 'name', ',', 'tf', '.', 'expand_dims', '(', 'viz', ',', '-', '1', ')', ')'] | Visualize activations for convolution layers.
Remarks:
This tries to place all activations into a square.
Args:
activation: tensor with the activation [B,H,W,C]
name: label for tensorboard
Returns:
image of almost all activations | ['Visualize', 'activations', 'for', 'convolution', 'layers', '.'] | train | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/basics/mnist-visualizations.py#L39-L64 |
8,917 | bitcraft/PyTMX | pytmx/pytmx.py | TiledObject.parse_xml | def parse_xml(self, node):
""" Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
def read_points(text):
"""parse a text string of float tuples and return [(x,...),...]
"""
return tuple(tuple(map(float, i.split(','))) for i in text.split())
self._set_properties(node)
# correctly handle "tile objects" (object with gid set)
if self.gid:
self.gid = self.parent.register_gid(self.gid)
points = None
polygon = node.find('polygon')
if polygon is not None:
points = read_points(polygon.get('points'))
self.closed = True
polyline = node.find('polyline')
if polyline is not None:
points = read_points(polyline.get('points'))
self.closed = False
if points:
x1 = x2 = y1 = y2 = 0
for x, y in points:
if x < x1: x1 = x
if x > x2: x2 = x
if y < y1: y1 = y
if y > y2: y2 = y
self.width = abs(x1) + abs(x2)
self.height = abs(y1) + abs(y2)
self.points = tuple(
[(i[0] + self.x, i[1] + self.y) for i in points])
return self | python | def parse_xml(self, node):
""" Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self
"""
def read_points(text):
"""parse a text string of float tuples and return [(x,...),...]
"""
return tuple(tuple(map(float, i.split(','))) for i in text.split())
self._set_properties(node)
# correctly handle "tile objects" (object with gid set)
if self.gid:
self.gid = self.parent.register_gid(self.gid)
points = None
polygon = node.find('polygon')
if polygon is not None:
points = read_points(polygon.get('points'))
self.closed = True
polyline = node.find('polyline')
if polyline is not None:
points = read_points(polyline.get('points'))
self.closed = False
if points:
x1 = x2 = y1 = y2 = 0
for x, y in points:
if x < x1: x1 = x
if x > x2: x2 = x
if y < y1: y1 = y
if y > y2: y2 = y
self.width = abs(x1) + abs(x2)
self.height = abs(y1) + abs(y2)
self.points = tuple(
[(i[0] + self.x, i[1] + self.y) for i in points])
return self | ['def', 'parse_xml', '(', 'self', ',', 'node', ')', ':', 'def', 'read_points', '(', 'text', ')', ':', '"""parse a text string of float tuples and return [(x,...),...]\n """', 'return', 'tuple', '(', 'tuple', '(', 'map', '(', 'float', ',', 'i', '.', 'split', '(', "','", ')', ')', ')', 'for', 'i', 'in', 'text', '.', 'split', '(', ')', ')', 'self', '.', '_set_properties', '(', 'node', ')', '# correctly handle "tile objects" (object with gid set)', 'if', 'self', '.', 'gid', ':', 'self', '.', 'gid', '=', 'self', '.', 'parent', '.', 'register_gid', '(', 'self', '.', 'gid', ')', 'points', '=', 'None', 'polygon', '=', 'node', '.', 'find', '(', "'polygon'", ')', 'if', 'polygon', 'is', 'not', 'None', ':', 'points', '=', 'read_points', '(', 'polygon', '.', 'get', '(', "'points'", ')', ')', 'self', '.', 'closed', '=', 'True', 'polyline', '=', 'node', '.', 'find', '(', "'polyline'", ')', 'if', 'polyline', 'is', 'not', 'None', ':', 'points', '=', 'read_points', '(', 'polyline', '.', 'get', '(', "'points'", ')', ')', 'self', '.', 'closed', '=', 'False', 'if', 'points', ':', 'x1', '=', 'x2', '=', 'y1', '=', 'y2', '=', '0', 'for', 'x', ',', 'y', 'in', 'points', ':', 'if', 'x', '<', 'x1', ':', 'x1', '=', 'x', 'if', 'x', '>', 'x2', ':', 'x2', '=', 'x', 'if', 'y', '<', 'y1', ':', 'y1', '=', 'y', 'if', 'y', '>', 'y2', ':', 'y2', '=', 'y', 'self', '.', 'width', '=', 'abs', '(', 'x1', ')', '+', 'abs', '(', 'x2', ')', 'self', '.', 'height', '=', 'abs', '(', 'y1', ')', '+', 'abs', '(', 'y2', ')', 'self', '.', 'points', '=', 'tuple', '(', '[', '(', 'i', '[', '0', ']', '+', 'self', '.', 'x', ',', 'i', '[', '1', ']', '+', 'self', '.', 'y', ')', 'for', 'i', 'in', 'points', ']', ')', 'return', 'self'] | Parse an Object from ElementTree xml node
:param node: ElementTree xml node
:return: self | ['Parse', 'an', 'Object', 'from', 'ElementTree', 'xml', 'node'] | train | https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L1146-L1187 |
8,918 | bxlab/bx-python | lib/bx_extras/stats.py | lfprob | def lfprob (dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
return p | python | def lfprob (dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
return p | ['def', 'lfprob', '(', 'dfnum', ',', 'dfden', ',', 'F', ')', ':', 'p', '=', 'betai', '(', '0.5', '*', 'dfden', ',', '0.5', '*', 'dfnum', ',', 'dfden', '/', 'float', '(', 'dfden', '+', 'dfnum', '*', 'F', ')', ')', 'return', 'p'] | Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn | ['Returns', 'the', '(', '1', '-', 'tailed', ')', 'significance', 'level', '(', 'p', '-', 'value', ')', 'of', 'an', 'F', 'statistic', 'given', 'the', 'degrees', 'of', 'freedom', 'for', 'the', 'numerator', '(', 'dfR', '-', 'dfF', ')', 'and', 'the', 'degrees', 'of', 'freedom', 'for', 'the', 'denominator', '(', 'dfF', ')', '.'] | train | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1424-L1433 |
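A small sketch of calling `lfprob` above; note it depends on `betai` from the same stats module, so it is only usable in that context (the numbers here are arbitrary):

```python
# One-way ANOVA style call: dfnum = k - 1 groups, dfden = residual degrees of freedom.
p_value = lfprob(dfnum=2, dfden=27, F=3.35)
print(p_value)  # one-tailed p-value of the F statistic, a float between 0 and 1
```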
8,919 | nschloe/meshplex | meshplex/mesh_tri.py | MeshTri._compute_surface_areas | def _compute_surface_areas(self, cell_ids):
"""For each edge, one half of the the edge goes to each of the end
points. Used for Neumann boundary conditions if on the boundary of the
mesh and transition conditions if in the interior.
"""
# Each of the three edges may contribute to the surface areas of all
# three vertices. Here, only the two adjacent nodes receive a
# contribution, but other approaches (e.g., the flat cell corrector),
# may contribute to all three nodes.
cn = self.cells["nodes"][cell_ids]
ids = numpy.stack([cn, cn, cn], axis=1)
half_el = 0.5 * self.edge_lengths[..., cell_ids]
zero = numpy.zeros([half_el.shape[1]])
vals = numpy.stack(
[
numpy.column_stack([zero, half_el[0], half_el[0]]),
numpy.column_stack([half_el[1], zero, half_el[1]]),
numpy.column_stack([half_el[2], half_el[2], zero]),
],
axis=1,
)
return ids, vals | python | def _compute_surface_areas(self, cell_ids):
"""For each edge, one half of the the edge goes to each of the end
points. Used for Neumann boundary conditions if on the boundary of the
mesh and transition conditions if in the interior.
"""
# Each of the three edges may contribute to the surface areas of all
# three vertices. Here, only the two adjacent nodes receive a
# contribution, but other approaches (e.g., the flat cell corrector),
# may contribute to all three nodes.
cn = self.cells["nodes"][cell_ids]
ids = numpy.stack([cn, cn, cn], axis=1)
half_el = 0.5 * self.edge_lengths[..., cell_ids]
zero = numpy.zeros([half_el.shape[1]])
vals = numpy.stack(
[
numpy.column_stack([zero, half_el[0], half_el[0]]),
numpy.column_stack([half_el[1], zero, half_el[1]]),
numpy.column_stack([half_el[2], half_el[2], zero]),
],
axis=1,
)
return ids, vals | ['def', '_compute_surface_areas', '(', 'self', ',', 'cell_ids', ')', ':', '# Each of the three edges may contribute to the surface areas of all', '# three vertices. Here, only the two adjacent nodes receive a', '# contribution, but other approaches (e.g., the flat cell corrector),', '# may contribute to all three nodes.', 'cn', '=', 'self', '.', 'cells', '[', '"nodes"', ']', '[', 'cell_ids', ']', 'ids', '=', 'numpy', '.', 'stack', '(', '[', 'cn', ',', 'cn', ',', 'cn', ']', ',', 'axis', '=', '1', ')', 'half_el', '=', '0.5', '*', 'self', '.', 'edge_lengths', '[', '...', ',', 'cell_ids', ']', 'zero', '=', 'numpy', '.', 'zeros', '(', '[', 'half_el', '.', 'shape', '[', '1', ']', ']', ')', 'vals', '=', 'numpy', '.', 'stack', '(', '[', 'numpy', '.', 'column_stack', '(', '[', 'zero', ',', 'half_el', '[', '0', ']', ',', 'half_el', '[', '0', ']', ']', ')', ',', 'numpy', '.', 'column_stack', '(', '[', 'half_el', '[', '1', ']', ',', 'zero', ',', 'half_el', '[', '1', ']', ']', ')', ',', 'numpy', '.', 'column_stack', '(', '[', 'half_el', '[', '2', ']', ',', 'half_el', '[', '2', ']', ',', 'zero', ']', ')', ',', ']', ',', 'axis', '=', '1', ',', ')', 'return', 'ids', ',', 'vals'] | For each edge, one half of the the edge goes to each of the end
points. Used for Neumann boundary conditions if on the boundary of the
mesh and transition conditions if in the interior. | ['For', 'each', 'edge', 'one', 'half', 'of', 'the', 'the', 'edge', 'goes', 'to', 'each', 'of', 'the', 'end', 'points', '.', 'Used', 'for', 'Neumann', 'boundary', 'conditions', 'if', 'on', 'the', 'boundary', 'of', 'the', 'mesh', 'and', 'transition', 'conditions', 'if', 'in', 'the', 'interior', '.'] | train | https://github.com/nschloe/meshplex/blob/376cfe8ce7b9917e5398c5d60c87455ff5590913/meshplex/mesh_tri.py#L537-L560 |
8,920 | scott-griffiths/bitstring | bitstring.py | Bits._readintle | def _readintle(self, length, start):
"""Read bits and interpret as a little-endian signed int."""
ui = self._readuintle(length, start)
if not ui >> (length - 1):
# Top bit not set, number is positive
return ui
# Top bit is set, so number is negative
tmp = (~(ui - 1)) & ((1 << length) - 1)
return -tmp | python | def _readintle(self, length, start):
"""Read bits and interpret as a little-endian signed int."""
ui = self._readuintle(length, start)
if not ui >> (length - 1):
# Top bit not set, number is positive
return ui
# Top bit is set, so number is negative
tmp = (~(ui - 1)) & ((1 << length) - 1)
return -tmp | ['def', '_readintle', '(', 'self', ',', 'length', ',', 'start', ')', ':', 'ui', '=', 'self', '.', '_readuintle', '(', 'length', ',', 'start', ')', 'if', 'not', 'ui', '>>', '(', 'length', '-', '1', ')', ':', '# Top bit not set, number is positive', 'return', 'ui', '# Top bit is set, so number is negative', 'tmp', '=', '(', '~', '(', 'ui', '-', '1', ')', ')', '&', '(', '(', '1', '<<', 'length', ')', '-', '1', ')', 'return', '-', 'tmp'] | Read bits and interpret as a little-endian signed int. | ['Read', 'bits', 'and', 'interpret', 'as', 'a', 'little', '-', 'endian', 'signed', 'int', '.'] | train | https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L1528-L1536 |
8,921 | d11wtq/dockerpty | dockerpty/pty.py | WINCHHandler.start | def start(self):
"""
Start trapping WINCH signals and resizing the PTY.
This method saves the previous WINCH handler so it can be restored on
`stop()`.
"""
def handle(signum, frame):
if signum == signal.SIGWINCH:
self.pty.resize()
self.original_handler = signal.signal(signal.SIGWINCH, handle) | python | def start(self):
"""
Start trapping WINCH signals and resizing the PTY.
This method saves the previous WINCH handler so it can be restored on
`stop()`.
"""
def handle(signum, frame):
if signum == signal.SIGWINCH:
self.pty.resize()
self.original_handler = signal.signal(signal.SIGWINCH, handle) | ['def', 'start', '(', 'self', ')', ':', 'def', 'handle', '(', 'signum', ',', 'frame', ')', ':', 'if', 'signum', '==', 'signal', '.', 'SIGWINCH', ':', 'self', '.', 'pty', '.', 'resize', '(', ')', 'self', '.', 'original_handler', '=', 'signal', '.', 'signal', '(', 'signal', '.', 'SIGWINCH', ',', 'handle', ')'] | Start trapping WINCH signals and resizing the PTY.
This method saves the previous WINCH handler so it can be restored on
`stop()`. | ['Start', 'trapping', 'WINCH', 'signals', 'and', 'resizing', 'the', 'PTY', '.'] | train | https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L57-L69 |
8,922 | GetmeUK/MongoFrames | mongoframes/factory/__init__.py | Factory.reassemble | def reassemble(self, blueprint, fields, documents):
"""
Reassemble the given set of fields for a list of pre-assembled documents.
NOTE: Reassembly is done in place, since the data you send the method
should be JSON type safe. If you need to retain the existing documents,
it is recommended that you copy them using `copy.deepcopy`.
"""
# Reset the blueprint
blueprint.reset()
# Reassemble the documents
for document in documents:
blueprint.reassemble(fields, document) | python | def reassemble(self, blueprint, fields, documents):
"""
Reassemble the given set of fields for a list of pre-assembled documents.
NOTE: Reassembly is done in place, since the data you send the method
should be JSON type safe. If you need to retain the existing documents,
it is recommended that you copy them using `copy.deepcopy`.
"""
# Reset the blueprint
blueprint.reset()
# Reassemble the documents
for document in documents:
blueprint.reassemble(fields, document) | ['def', 'reassemble', '(', 'self', ',', 'blueprint', ',', 'fields', ',', 'documents', ')', ':', '# Reset the blueprint', 'blueprint', '.', 'reset', '(', ')', '# Reassemble the documents', 'for', 'document', 'in', 'documents', ':', 'blueprint', '.', 'reassemble', '(', 'fields', ',', 'document', ')'] | Reassemble the given set of fields for a list of pre-assembed documents.
NOTE: Reassembly is done in place, since the data you send the method
should be JSON type safe, if you need to retain the existing document
it is recommended that you copy them using `copy.deepcopy`. | ['Reassemble', 'the', 'given', 'set', 'of', 'fields', 'for', 'a', 'list', 'of', 'pre', '-', 'assembed', 'documents', '.'] | train | https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/factory/__init__.py#L96-L110 |
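The copy.deepcopy advice in the docstring amounts to copying before mutating; a tiny standalone illustration, with a made-up in-place function standing in for the blueprint's reassemble:

import copy

def reassemble_in_place(fields, document):
    # Hypothetical stand-in for Blueprint.reassemble: it mutates the document it is given.
    for field in fields:
        document[field] = 'regenerated'

originals = [{'name': 'a'}, {'name': 'b'}]
working_copies = copy.deepcopy(originals)       # keep the originals untouched
for doc in working_copies:
    reassemble_in_place({'name'}, doc)

assert originals[0]['name'] == 'a'              # originals preserved
assert working_copies[0]['name'] == 'regenerated'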
8,923 | pydata/xarray | xarray/core/variable.py | as_variable | def as_variable(obj, name=None) -> 'Union[Variable, IndexVariable]':
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
from .dataarray import DataArray
# TODO: consider extending this method to automatically handle Iris and
if isinstance(obj, DataArray):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except (TypeError, ValueError) as error:
# use .format() instead of % because it handles tuples consistently
raise error.__class__('Could not convert tuple of form '
'(dims, data[, attrs, encoding]): '
'{} to Variable.'.format(obj))
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, (set, dict)):
raise TypeError(
"variable %r has invalid type %r" % (name, type(obj)))
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
'cannot set variable %r with %r-dimensional data '
'without explicit dimension names. Pass a tuple of '
'(dims, data) instead.' % (name, data.ndim))
obj = Variable(name, data, fastpath=True)
else:
raise TypeError('unable to convert object into a variable without an '
'explicit list of dimensions: %r' % obj)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
'%r has more than 1-dimension and the same name as one of its '
'dimensions %r. xarray disallows such variables because they '
'conflict with the coordinates used to label '
'dimensions.' % (name, obj.dims))
obj = obj.to_index_variable()
return obj | python | def as_variable(obj, name=None) -> 'Union[Variable, IndexVariable]':
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
from .dataarray import DataArray
# TODO: consider extending this method to automatically handle Iris and
if isinstance(obj, DataArray):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except (TypeError, ValueError) as error:
# use .format() instead of % because it handles tuples consistently
raise error.__class__('Could not convert tuple of form '
'(dims, data[, attrs, encoding]): '
'{} to Variable.'.format(obj))
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, (set, dict)):
raise TypeError(
"variable %r has invalid type %r" % (name, type(obj)))
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
'cannot set variable %r with %r-dimensional data '
'without explicit dimension names. Pass a tuple of '
'(dims, data) instead.' % (name, data.ndim))
obj = Variable(name, data, fastpath=True)
else:
raise TypeError('unable to convert object into a variable without an '
'explicit list of dimensions: %r' % obj)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
'%r has more than 1-dimension and the same name as one of its '
'dimensions %r. xarray disallows such variables because they '
'conflict with the coordinates used to label '
'dimensions.' % (name, obj.dims))
obj = obj.to_index_variable()
return obj | ['def', 'as_variable', '(', 'obj', ',', 'name', '=', 'None', ')', '->', "'Union[Variable, IndexVariable]'", ':', 'from', '.', 'dataarray', 'import', 'DataArray', '# TODO: consider extending this method to automatically handle Iris and', 'if', 'isinstance', '(', 'obj', ',', 'DataArray', ')', ':', '# extract the primary Variable from DataArrays', 'obj', '=', 'obj', '.', 'variable', 'if', 'isinstance', '(', 'obj', ',', 'Variable', ')', ':', 'obj', '=', 'obj', '.', 'copy', '(', 'deep', '=', 'False', ')', 'elif', 'isinstance', '(', 'obj', ',', 'tuple', ')', ':', 'try', ':', 'obj', '=', 'Variable', '(', '*', 'obj', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', 'as', 'error', ':', '# use .format() instead of % because it handles tuples consistently', 'raise', 'error', '.', '__class__', '(', "'Could not convert tuple of form '", "'(dims, data[, attrs, encoding]): '", "'{} to Variable.'", '.', 'format', '(', 'obj', ')', ')', 'elif', 'utils', '.', 'is_scalar', '(', 'obj', ')', ':', 'obj', '=', 'Variable', '(', '[', ']', ',', 'obj', ')', 'elif', 'isinstance', '(', 'obj', ',', '(', 'pd', '.', 'Index', ',', 'IndexVariable', ')', ')', 'and', 'obj', '.', 'name', 'is', 'not', 'None', ':', 'obj', '=', 'Variable', '(', 'obj', '.', 'name', ',', 'obj', ')', 'elif', 'isinstance', '(', 'obj', ',', '(', 'set', ',', 'dict', ')', ')', ':', 'raise', 'TypeError', '(', '"variable %r has invalid type %r"', '%', '(', 'name', ',', 'type', '(', 'obj', ')', ')', ')', 'elif', 'name', 'is', 'not', 'None', ':', 'data', '=', 'as_compatible_data', '(', 'obj', ')', 'if', 'data', '.', 'ndim', '!=', '1', ':', 'raise', 'MissingDimensionsError', '(', "'cannot set variable %r with %r-dimensional data '", "'without explicit dimension names. Pass a tuple of '", "'(dims, data) instead.'", '%', '(', 'name', ',', 'data', '.', 'ndim', ')', ')', 'obj', '=', 'Variable', '(', 'name', ',', 'data', ',', 'fastpath', '=', 'True', ')', 'else', ':', 'raise', 'TypeError', '(', "'unable to convert object into a variable without an '", "'explicit list of dimensions: %r'", '%', 'obj', ')', 'if', 'name', 'is', 'not', 'None', 'and', 'name', 'in', 'obj', '.', 'dims', ':', '# convert the Variable into an Index', 'if', 'obj', '.', 'ndim', '!=', '1', ':', 'raise', 'MissingDimensionsError', '(', "'%r has more than 1-dimension and the same name as one of its '", "'dimensions %r. xarray disallows such variables because they '", "'conflict with the coordinates used to label '", "'dimensions.'", '%', '(', 'name', ',', 'obj', '.', 'dims', ')', ')', 'obj', '=', 'obj', '.', 'to_index_variable', '(', ')', 'return', 'obj'] | Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable. | ['Convert', 'an', 'object', 'into', 'a', 'Variable', '.'] | train | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/variable.py#L46-L119 |
8,924 | bcbio/bcbio-nextgen | bcbio/structural/purple.py | _run_amber | def _run_amber(paired, work_dir, lenient=False):
"""AMBER: calculate allele frequencies at likely heterozygous sites.
lenient flag allows amber runs on small test sets.
"""
amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(paired.tumor_data))
if not utils.file_exists(out_file) or not utils.file_exists(out_file + ".pcf"):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
key = "germline_het_pon"
het_bed = tz.get_in(["genome_resources", "variation", key], paired.tumor_data)
cmd = ["AMBER"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \
["-threads", dd.get_num_cores(paired.tumor_data),
"-tumor", dd.get_sample_name(paired.tumor_data),
"-tumor_bam", dd.get_align_bam(paired.tumor_data),
"-reference", dd.get_sample_name(paired.normal_data),
"-reference_bam", dd.get_align_bam(paired.normal_data),
"-ref_genome", dd.get_ref_file(paired.tumor_data),
"-bed", het_bed,
"-output_dir", os.path.dirname(tx_out_file)]
if lenient:
cmd += ["-max_het_af_percent", "1.0"]
try:
do.run(cmd, "PURPLE: AMBER baf generation")
except subprocess.CalledProcessError as msg:
if not lenient and _amber_allowed_errors(str(msg)):
return _run_amber(paired, work_dir, True)
for f in os.listdir(os.path.dirname(tx_out_file)):
if f != os.path.basename(tx_out_file):
shutil.move(os.path.join(os.path.dirname(tx_out_file), f),
os.path.join(amber_dir, f))
return out_file | python | def _run_amber(paired, work_dir, lenient=False):
"""AMBER: calculate allele frequencies at likely heterozygous sites.
lenient flag allows amber runs on small test sets.
"""
amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(paired.tumor_data))
if not utils.file_exists(out_file) or not utils.file_exists(out_file + ".pcf"):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
key = "germline_het_pon"
het_bed = tz.get_in(["genome_resources", "variation", key], paired.tumor_data)
cmd = ["AMBER"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \
["-threads", dd.get_num_cores(paired.tumor_data),
"-tumor", dd.get_sample_name(paired.tumor_data),
"-tumor_bam", dd.get_align_bam(paired.tumor_data),
"-reference", dd.get_sample_name(paired.normal_data),
"-reference_bam", dd.get_align_bam(paired.normal_data),
"-ref_genome", dd.get_ref_file(paired.tumor_data),
"-bed", het_bed,
"-output_dir", os.path.dirname(tx_out_file)]
if lenient:
cmd += ["-max_het_af_percent", "1.0"]
try:
do.run(cmd, "PURPLE: AMBER baf generation")
except subprocess.CalledProcessError as msg:
if not lenient and _amber_allowed_errors(str(msg)):
return _run_amber(paired, work_dir, True)
for f in os.listdir(os.path.dirname(tx_out_file)):
if f != os.path.basename(tx_out_file):
shutil.move(os.path.join(os.path.dirname(tx_out_file), f),
os.path.join(amber_dir, f))
return out_file | ['def', '_run_amber', '(', 'paired', ',', 'work_dir', ',', 'lenient', '=', 'False', ')', ':', 'amber_dir', '=', 'utils', '.', 'safe_makedir', '(', 'os', '.', 'path', '.', 'join', '(', 'work_dir', ',', '"amber"', ')', ')', 'out_file', '=', 'os', '.', 'path', '.', 'join', '(', 'amber_dir', ',', '"%s.amber.baf"', '%', 'dd', '.', 'get_sample_name', '(', 'paired', '.', 'tumor_data', ')', ')', 'if', 'not', 'utils', '.', 'file_exists', '(', 'out_file', ')', 'or', 'not', 'utils', '.', 'file_exists', '(', 'out_file', '+', '".pcf"', ')', ':', 'with', 'file_transaction', '(', 'paired', '.', 'tumor_data', ',', 'out_file', ')', 'as', 'tx_out_file', ':', 'key', '=', '"germline_het_pon"', 'het_bed', '=', 'tz', '.', 'get_in', '(', '[', '"genome_resources"', ',', '"variation"', ',', 'key', ']', ',', 'paired', '.', 'tumor_data', ')', 'cmd', '=', '[', '"AMBER"', ']', '+', '_get_jvm_opts', '(', 'tx_out_file', ',', 'paired', '.', 'tumor_data', ')', '+', '[', '"-threads"', ',', 'dd', '.', 'get_num_cores', '(', 'paired', '.', 'tumor_data', ')', ',', '"-tumor"', ',', 'dd', '.', 'get_sample_name', '(', 'paired', '.', 'tumor_data', ')', ',', '"-tumor_bam"', ',', 'dd', '.', 'get_align_bam', '(', 'paired', '.', 'tumor_data', ')', ',', '"-reference"', ',', 'dd', '.', 'get_sample_name', '(', 'paired', '.', 'normal_data', ')', ',', '"-reference_bam"', ',', 'dd', '.', 'get_align_bam', '(', 'paired', '.', 'normal_data', ')', ',', '"-ref_genome"', ',', 'dd', '.', 'get_ref_file', '(', 'paired', '.', 'tumor_data', ')', ',', '"-bed"', ',', 'het_bed', ',', '"-output_dir"', ',', 'os', '.', 'path', '.', 'dirname', '(', 'tx_out_file', ')', ']', 'if', 'lenient', ':', 'cmd', '+=', '[', '"-max_het_af_percent"', ',', '"1.0"', ']', 'try', ':', 'do', '.', 'run', '(', 'cmd', ',', '"PURPLE: AMBER baf generation"', ')', 'except', 'subprocess', '.', 'CalledProcessError', 'as', 'msg', ':', 'if', 'not', 'lenient', 'and', '_amber_allowed_errors', '(', 'str', '(', 'msg', ')', ')', ':', 'return', '_run_amber', '(', 'paired', ',', 'work_dir', ',', 'True', ')', 'for', 'f', 'in', 'os', '.', 'listdir', '(', 'os', '.', 'path', '.', 'dirname', '(', 'tx_out_file', ')', ')', ':', 'if', 'f', '!=', 'os', '.', 'path', '.', 'basename', '(', 'tx_out_file', ')', ':', 'shutil', '.', 'move', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', 'tx_out_file', ')', ',', 'f', ')', ',', 'os', '.', 'path', '.', 'join', '(', 'amber_dir', ',', 'f', ')', ')', 'return', 'out_file'] | AMBER: calculate allele frequencies at likely heterozygous sites.
lenient flag allows amber runs on small test sets. | ['AMBER', ':', 'calculate', 'allele', 'frequencies', 'at', 'likely', 'heterozygous', 'sites', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/purple.py#L199-L230 |
8,925 | brocade/pynos | pynos/versions/ver_7/ver_7_0_0/interface.py | Interface.bfd | def bfd(self, **kwargs):
"""Configure BFD for Interface.
Args:
name (str): name of the interface to configure (230/0/1 etc)
int_type (str): interface type (gigabitethernet etc)
tx (str): BFD transmit interval in milliseconds (300, 500, etc)
rx (str): BFD receive interval in milliseconds (300, 500, etc)
multiplier (str): BFD multiplier. (3, 7, 5, etc)
delete (bool): True if BFD configuration should be deleted.
Default value will be False if not specified.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `tx`, `rx`, or `multiplier` is not passed.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3', int_type='tengigabitethernet')
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3',
... int_type='tengigabitethernet', get=True)
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3',
... int_type='tengigabitethernet', delete=True)
"""
int_type = str(kwargs.pop('int_type').lower())
kwargs['name'] = str(kwargs.pop('name'))
kwargs['min_tx'] = kwargs.pop('tx')
kwargs['min_rx'] = kwargs.pop('rx')
kwargs['delete'] = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet']
if int_type not in valid_int_types:
raise ValueError('int_type must be one of: %s' %
repr(valid_int_types))
kwargs['int_type'] = int_type
bfd_tx = self._bfd_tx(**kwargs)
bfd_rx = self._bfd_rx(**kwargs)
bfd_multiplier = self._bfd_multiplier(**kwargs)
if kwargs.pop('get', False):
return self._get_bfd(bfd_tx, bfd_rx, bfd_multiplier)
config = pynos.utilities.merge_xml(bfd_tx, bfd_rx)
config = pynos.utilities.merge_xml(config, bfd_multiplier)
return callback(config) | python | def bfd(self, **kwargs):
"""Configure BFD for Interface.
Args:
name (str): name of the interface to configure (230/0/1 etc)
int_type (str): interface type (gigabitethernet etc)
tx (str): BFD transmit interval in milliseconds (300, 500, etc)
rx (str): BFD receive interval in milliseconds (300, 500, etc)
multiplier (str): BFD multiplier. (3, 7, 5, etc)
delete (bool): True if BFD configuration should be deleted.
Default value will be False if not specified.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `tx`, `rx`, or `multiplier` is not passed.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3', int_type='tengigabitethernet')
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3',
... int_type='tengigabitethernet', get=True)
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3',
... int_type='tengigabitethernet', delete=True)
"""
int_type = str(kwargs.pop('int_type').lower())
kwargs['name'] = str(kwargs.pop('name'))
kwargs['min_tx'] = kwargs.pop('tx')
kwargs['min_rx'] = kwargs.pop('rx')
kwargs['delete'] = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet']
if int_type not in valid_int_types:
raise ValueError('int_type must be one of: %s' %
repr(valid_int_types))
kwargs['int_type'] = int_type
bfd_tx = self._bfd_tx(**kwargs)
bfd_rx = self._bfd_rx(**kwargs)
bfd_multiplier = self._bfd_multiplier(**kwargs)
if kwargs.pop('get', False):
return self._get_bfd(bfd_tx, bfd_rx, bfd_multiplier)
config = pynos.utilities.merge_xml(bfd_tx, bfd_rx)
config = pynos.utilities.merge_xml(config, bfd_multiplier)
return callback(config) | ['def', 'bfd', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'int_type', '=', 'str', '(', 'kwargs', '.', 'pop', '(', "'int_type'", ')', '.', 'lower', '(', ')', ')', 'kwargs', '[', "'name'", ']', '=', 'str', '(', 'kwargs', '.', 'pop', '(', "'name'", ')', ')', 'kwargs', '[', "'min_tx'", ']', '=', 'kwargs', '.', 'pop', '(', "'tx'", ')', 'kwargs', '[', "'min_rx'", ']', '=', 'kwargs', '.', 'pop', '(', "'rx'", ')', 'kwargs', '[', "'delete'", ']', '=', 'kwargs', '.', 'pop', '(', "'delete'", ',', 'False', ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'valid_int_types', '=', '[', "'gigabitethernet'", ',', "'tengigabitethernet'", ',', "'fortygigabitethernet'", ',', "'hundredgigabitethernet'", ']', 'if', 'int_type', 'not', 'in', 'valid_int_types', ':', 'raise', 'ValueError', '(', "'int_type must be one of: %s'", '%', 'repr', '(', 'valid_int_types', ')', ')', 'kwargs', '[', "'int_type'", ']', '=', 'int_type', 'bfd_tx', '=', 'self', '.', '_bfd_tx', '(', '*', '*', 'kwargs', ')', 'bfd_rx', '=', 'self', '.', '_bfd_rx', '(', '*', '*', 'kwargs', ')', 'bfd_multiplier', '=', 'self', '.', '_bfd_multiplier', '(', '*', '*', 'kwargs', ')', 'if', 'kwargs', '.', 'pop', '(', "'get'", ',', 'False', ')', ':', 'return', 'self', '.', '_get_bfd', '(', 'bfd_tx', ',', 'bfd_rx', ',', 'bfd_multiplier', ')', 'config', '=', 'pynos', '.', 'utilities', '.', 'merge_xml', '(', 'bfd_tx', ',', 'bfd_rx', ')', 'config', '=', 'pynos', '.', 'utilities', '.', 'merge_xml', '(', 'config', ',', 'bfd_multiplier', ')', 'return', 'callback', '(', 'config', ')'] | Configure BFD for Interface.
Args:
name (str): name of the interface to configure (230/0/1 etc)
int_type (str): interface type (gigabitethernet etc)
tx (str): BFD transmit interval in milliseconds (300, 500, etc)
rx (str): BFD receive interval in milliseconds (300, 500, etc)
multiplier (str): BFD multiplier. (3, 7, 5, etc)
delete (bool): True if BFD configuration should be deleted.
Default value will be False if not specified.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `tx`, `rx`, or `multiplier` is not passed.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3', int_type='tengigabitethernet')
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3',
... int_type='tengigabitethernet', get=True)
... output = dev.interface.bfd(name='230/0/4', rx='300',
... tx='300', multiplier='3',
... int_type='tengigabitethernet', delete=True) | ['Configure', 'BFD', 'for', 'Interface', '.'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_0_0/interface.py#L257-L316 |
8,926 | google/grr | grr/server/grr_response_server/databases/mem_hunts.py | InMemoryDBHuntMixin.UpdateHuntObject | def UpdateHuntObject(self, hunt_id, start_time=None, **kwargs):
"""Updates the hunt object by applying the update function."""
hunt_obj = self.ReadHuntObject(hunt_id)
delta_suffix = "_delta"
for k, v in kwargs.items():
if v is None:
continue
if k.endswith(delta_suffix):
key = k[:-len(delta_suffix)]
current_value = getattr(hunt_obj, key)
setattr(hunt_obj, key, current_value + v)
else:
setattr(hunt_obj, k, v)
if start_time is not None:
hunt_obj.init_start_time = hunt_obj.init_start_time or start_time
hunt_obj.last_start_time = start_time
hunt_obj.last_update_time = rdfvalue.RDFDatetime.Now()
self.hunts[hunt_obj.hunt_id] = hunt_obj | python | def UpdateHuntObject(self, hunt_id, start_time=None, **kwargs):
"""Updates the hunt object by applying the update function."""
hunt_obj = self.ReadHuntObject(hunt_id)
delta_suffix = "_delta"
for k, v in kwargs.items():
if v is None:
continue
if k.endswith(delta_suffix):
key = k[:-len(delta_suffix)]
current_value = getattr(hunt_obj, key)
setattr(hunt_obj, key, current_value + v)
else:
setattr(hunt_obj, k, v)
if start_time is not None:
hunt_obj.init_start_time = hunt_obj.init_start_time or start_time
hunt_obj.last_start_time = start_time
hunt_obj.last_update_time = rdfvalue.RDFDatetime.Now()
self.hunts[hunt_obj.hunt_id] = hunt_obj | ['def', 'UpdateHuntObject', '(', 'self', ',', 'hunt_id', ',', 'start_time', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'hunt_obj', '=', 'self', '.', 'ReadHuntObject', '(', 'hunt_id', ')', 'delta_suffix', '=', '"_delta"', 'for', 'k', ',', 'v', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'if', 'v', 'is', 'None', ':', 'continue', 'if', 'k', '.', 'endswith', '(', 'delta_suffix', ')', ':', 'key', '=', 'k', '[', ':', '-', 'len', '(', 'delta_suffix', ')', ']', 'current_value', '=', 'getattr', '(', 'hunt_obj', ',', 'key', ')', 'setattr', '(', 'hunt_obj', ',', 'key', ',', 'current_value', '+', 'v', ')', 'else', ':', 'setattr', '(', 'hunt_obj', ',', 'k', ',', 'v', ')', 'if', 'start_time', 'is', 'not', 'None', ':', 'hunt_obj', '.', 'init_start_time', '=', 'hunt_obj', '.', 'init_start_time', 'or', 'start_time', 'hunt_obj', '.', 'last_start_time', '=', 'start_time', 'hunt_obj', '.', 'last_update_time', '=', 'rdfvalue', '.', 'RDFDatetime', '.', 'Now', '(', ')', 'self', '.', 'hunts', '[', 'hunt_obj', '.', 'hunt_id', ']', '=', 'hunt_obj'] | Updates the hunt object by applying the update function. | ['Updates', 'the', 'hunt', 'object', 'by', 'applying', 'the', 'update', 'function', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_hunts.py#L41-L63 |
8,927 | zsimic/runez | src/runez/logsetup.py | LogManager._fix_logging_shortcuts | def _fix_logging_shortcuts(cls):
"""
Fix standard logging shortcuts to correctly report logging module.
This is only useful if you:
- actually use %(name) and care about it being correct
- you would still like to use the logging.info() etc shortcuts
So basically you'd like to write this:
import logging
logging.info("hello")
Instead of this:
import logging
LOG = logging.getLogger(__name__)
LOG.info("hello")
"""
if cls.is_using_format("%(pathname)s %(filename)s %(funcName)s %(module)s"):
logging._srcfile = cls._logging_snapshot._srcfile
else:
logging._srcfile = None
logging.logProcesses = cls.is_using_format("%(process)d")
logging.logThreads = cls.is_using_format("%(thread)d %(threadName)s")
def getframe():
return sys._getframe(4)
def log(level, msg, *args, **kwargs):
"""Wrapper to make logging.info() etc report the right module %(name)"""
name = get_caller_name()
logger = logging.getLogger(name)
try:
logging.currentframe = getframe
logger.log(level, msg, *args, **kwargs)
finally:
logging.currentframe = ORIGINAL_CF
def wrap(level, **kwargs):
"""Wrap corresponding logging shortcut function"""
original = getattr(logging, logging.getLevelName(level).lower())
f = partial(log, level, **kwargs)
f.__doc__ = original.__doc__
return f
logging.critical = wrap(logging.CRITICAL)
logging.fatal = logging.critical
logging.error = wrap(logging.ERROR)
logging.exception = partial(logging.error, exc_info=True)
logging.warning = wrap(logging.WARNING)
logging.info = wrap(logging.INFO)
logging.debug = wrap(logging.DEBUG)
logging.log = log | python | def _fix_logging_shortcuts(cls):
"""
Fix standard logging shortcuts to correctly report logging module.
This is only useful if you:
- actually use %(name) and care about it being correct
- you would still like to use the logging.info() etc shortcuts
So basically you'd like to write this:
import logging
logging.info("hello")
Instead of this:
import logging
LOG = logging.getLogger(__name__)
LOG.info("hello")
"""
if cls.is_using_format("%(pathname)s %(filename)s %(funcName)s %(module)s"):
logging._srcfile = cls._logging_snapshot._srcfile
else:
logging._srcfile = None
logging.logProcesses = cls.is_using_format("%(process)d")
logging.logThreads = cls.is_using_format("%(thread)d %(threadName)s")
def getframe():
return sys._getframe(4)
def log(level, msg, *args, **kwargs):
"""Wrapper to make logging.info() etc report the right module %(name)"""
name = get_caller_name()
logger = logging.getLogger(name)
try:
logging.currentframe = getframe
logger.log(level, msg, *args, **kwargs)
finally:
logging.currentframe = ORIGINAL_CF
def wrap(level, **kwargs):
"""Wrap corresponding logging shortcut function"""
original = getattr(logging, logging.getLevelName(level).lower())
f = partial(log, level, **kwargs)
f.__doc__ = original.__doc__
return f
logging.critical = wrap(logging.CRITICAL)
logging.fatal = logging.critical
logging.error = wrap(logging.ERROR)
logging.exception = partial(logging.error, exc_info=True)
logging.warning = wrap(logging.WARNING)
logging.info = wrap(logging.INFO)
logging.debug = wrap(logging.DEBUG)
logging.log = log | ['def', '_fix_logging_shortcuts', '(', 'cls', ')', ':', 'if', 'cls', '.', 'is_using_format', '(', '"%(pathname)s %(filename)s %(funcName)s %(module)s"', ')', ':', 'logging', '.', '_srcfile', '=', 'cls', '.', '_logging_snapshot', '.', '_srcfile', 'else', ':', 'logging', '.', '_srcfile', '=', 'None', 'logging', '.', 'logProcesses', '=', 'cls', '.', 'is_using_format', '(', '"%(process)d"', ')', 'logging', '.', 'logThreads', '=', 'cls', '.', 'is_using_format', '(', '"%(thread)d %(threadName)s"', ')', 'def', 'getframe', '(', ')', ':', 'return', 'sys', '.', '_getframe', '(', '4', ')', 'def', 'log', '(', 'level', ',', 'msg', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '"""Wrapper to make logging.info() etc report the right module %(name)"""', 'name', '=', 'get_caller_name', '(', ')', 'logger', '=', 'logging', '.', 'getLogger', '(', 'name', ')', 'try', ':', 'logging', '.', 'currentframe', '=', 'getframe', 'logger', '.', 'log', '(', 'level', ',', 'msg', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'finally', ':', 'logging', '.', 'currentframe', '=', 'ORIGINAL_CF', 'def', 'wrap', '(', 'level', ',', '*', '*', 'kwargs', ')', ':', '"""Wrap corresponding logging shortcut function"""', 'original', '=', 'getattr', '(', 'logging', ',', 'logging', '.', 'getLevelName', '(', 'level', ')', '.', 'lower', '(', ')', ')', 'f', '=', 'partial', '(', 'log', ',', 'level', ',', '*', '*', 'kwargs', ')', 'f', '.', '__doc__', '=', 'original', '.', '__doc__', 'return', 'f', 'logging', '.', 'critical', '=', 'wrap', '(', 'logging', '.', 'CRITICAL', ')', 'logging', '.', 'fatal', '=', 'logging', '.', 'critical', 'logging', '.', 'error', '=', 'wrap', '(', 'logging', '.', 'ERROR', ')', 'logging', '.', 'exception', '=', 'partial', '(', 'logging', '.', 'error', ',', 'exc_info', '=', 'True', ')', 'logging', '.', 'warning', '=', 'wrap', '(', 'logging', '.', 'WARNING', ')', 'logging', '.', 'info', '=', 'wrap', '(', 'logging', '.', 'INFO', ')', 'logging', '.', 'debug', '=', 'wrap', '(', 'logging', '.', 'DEBUG', ')', 'logging', '.', 'log', '=', 'log'] | Fix standard logging shortcuts to correctly report logging module.
This is only useful if you:
- actually use %(name) and care about it being correct
- you would still like to use the logging.info() etc shortcuts
So basically you'd like to write this:
import logging
logging.info("hello")
Instead of this:
import logging
LOG = logging.getLogger(__name__)
LOG.info("hello") | ['Fix', 'standard', 'logging', 'shortcuts', 'to', 'correctly', 'report', 'logging', 'module', '.'] | train | https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L439-L491 |
8,928 | spencerahill/aospy | aospy/calc.py | Calc._save_files | def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path)
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset()
reg_data.update(data)
data_out = reg_data
else:
data_out = data
if isinstance(data_out, xr.DataArray):
data_out = xr.Dataset({self.name: data_out})
data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT') | python | def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path)
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset()
reg_data.update(data)
data_out = reg_data
else:
data_out = data
if isinstance(data_out, xr.DataArray):
data_out = xr.Dataset({self.name: data_out})
data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT') | ['def', '_save_files', '(', 'self', ',', 'data', ',', 'dtype_out_time', ')', ':', 'path', '=', 'self', '.', 'path_out', '[', 'dtype_out_time', ']', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'self', '.', 'dir_out', ')', ':', 'os', '.', 'makedirs', '(', 'self', '.', 'dir_out', ')', 'if', "'reg'", 'in', 'dtype_out_time', ':', 'try', ':', 'reg_data', '=', 'xr', '.', 'open_dataset', '(', 'path', ')', 'except', '(', 'EOFError', ',', 'RuntimeError', ',', 'IOError', ')', ':', 'reg_data', '=', 'xr', '.', 'Dataset', '(', ')', 'reg_data', '.', 'update', '(', 'data', ')', 'data_out', '=', 'reg_data', 'else', ':', 'data_out', '=', 'data', 'if', 'isinstance', '(', 'data_out', ',', 'xr', '.', 'DataArray', ')', ':', 'data_out', '=', 'xr', '.', 'Dataset', '(', '{', 'self', '.', 'name', ':', 'data_out', '}', ')', 'data_out', '.', 'to_netcdf', '(', 'path', ',', 'engine', '=', "'netcdf4'", ',', 'format', '=', "'NETCDF3_64BIT'", ')'] | Save the data to netcdf files in direc_out. | ['Save', 'the', 'data', 'to', 'netcdf', 'files', 'in', 'direc_out', '.'] | train | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L452-L468 |
8,929 | tdsmith/eleven | eleven/eleven.py | calculate_v | def calculate_v(nfs):
"""Calculates V(n+1/n) values. Useful for establishing the quality of
your normalization regime. See Vandesompele 2002 for advice on
interpretation.
:param DataFrame nfs: A matrix of all normalization factors, produced by
`calculate_all_nfs`.
:return: a Series of values [V(2/1), V(3/2), V(4/3), ...].
"""
v = []
if (nfs.columns != range(1, nfs.columns[-1]+1)).any():
raise ValueError("Column names invalid in nf_v_frame")
for i in nfs.columns[:-1]:
v.append(std(log2(nfs[i]/nfs[i+1]), ddof=1))
return pd.Series(v, index=nfs.columns[:-1]) | python | def calculate_v(nfs):
"""Calculates V(n+1/n) values. Useful for establishing the quality of
your normalization regime. See Vandesompele 2002 for advice on
interpretation.
:param DataFrame nfs: A matrix of all normalization factors, produced by
`calculate_all_nfs`.
:return: a Series of values [V(2/1), V(3/2), V(4/3), ...].
"""
v = []
if (nfs.columns != range(1, nfs.columns[-1]+1)).any():
raise ValueError("Column names invalid in nf_v_frame")
for i in nfs.columns[:-1]:
v.append(std(log2(nfs[i]/nfs[i+1]), ddof=1))
return pd.Series(v, index=nfs.columns[:-1]) | ['def', 'calculate_v', '(', 'nfs', ')', ':', 'v', '=', '[', ']', 'if', '(', 'nfs', '.', 'columns', '!=', 'range', '(', '1', ',', 'nfs', '.', 'columns', '[', '-', '1', ']', '+', '1', ')', ')', '.', 'any', '(', ')', ':', 'raise', 'ValueError', '(', '"Column names invalid in nf_v_frame"', ')', 'for', 'i', 'in', 'nfs', '.', 'columns', '[', ':', '-', '1', ']', ':', 'v', '.', 'append', '(', 'std', '(', 'log2', '(', 'nfs', '[', 'i', ']', '/', 'nfs', '[', 'i', '+', '1', ']', ')', ',', 'ddof', '=', '1', ')', ')', 'return', 'pd', '.', 'Series', '(', 'v', ',', 'index', '=', 'nfs', '.', 'columns', '[', ':', '-', '1', ']', ')'] | Calculates V(n+1/n) values. Useful for establishing the quality of
your normalization regime. See Vandesompele 2002 for advice on
interpretation.
:param DataFrame nfs: A matrix of all normalization factors, produced by
`calculate_all_nfs`.
:return: a Series of values [V(2/1), V(3/2), V(4/3), ...]. | ['Calculates', 'V', '(', 'n', '+', '1', '/', 'n', ')', 'values', '.', 'Useful', 'for', 'establishing', 'the', 'quality', 'of', 'your', 'normalization', 'regime', '.', 'See', 'Vandesompele', '2002', 'for', 'advice', 'on', 'interpretation', '.'] | train | https://github.com/tdsmith/eleven/blob/c79b7e784f6d4a76eb4371e69d5ee6f471fe56e1/eleven/eleven.py#L230-L244 |
8,930 | pip-services3-python/pip-services3-commons-python | pip_services3_commons/run/Parameters.py | Parameters.from_config | def from_config(config):
"""
Creates new Parameters from ConfigMap object.
:param config: a ConfigParams that contain parameters.
:return: a new Parameters object.
"""
result = Parameters()
if config == None or len(config) == 0:
return result
for (key, value) in config.items():
result.put(key, value)
return result | python | def from_config(config):
"""
Creates new Parameters from ConfigMap object.
:param config: a ConfigParams that contain parameters.
:return: a new Parameters object.
"""
result = Parameters()
if config == None or len(config) == 0:
return result
for (key, value) in config.items():
result.put(key, value)
return result | ['def', 'from_config', '(', 'config', ')', ':', 'result', '=', 'Parameters', '(', ')', 'if', 'config', '==', 'None', 'or', 'len', '(', 'config', ')', '==', '0', ':', 'return', 'result', 'for', '(', 'key', ',', 'value', ')', 'in', 'config', '.', 'items', '(', ')', ':', 'result', '.', 'put', '(', 'key', ',', 'value', ')', 'return', 'result'] | Creates new Parameters from ConfigMap object.
:param config: a ConfigParams that contain parameters.
:return: a new Parameters object. | ['Creates', 'new', 'Parameters', 'from', 'ConfigMap', 'object', '.'] | train | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/run/Parameters.py#L261-L277 |
8,931 | quantopian/zipline | zipline/utils/preprocess.py | preprocess | def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
`func` is the the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
... return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
"""
if _unused:
raise TypeError("preprocess() doesn't accept positional arguments")
def _decorator(f):
args, varargs, varkw, defaults = argspec = getargspec(f)
if defaults is None:
defaults = ()
no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults))
args_defaults = list(zip(args, no_defaults + defaults))
if varargs:
args_defaults.append((varargs, NO_DEFAULT))
if varkw:
args_defaults.append((varkw, NO_DEFAULT))
argset = set(args) | {varargs, varkw} - {None}
# Arguments can be declared as tuples in Python 2.
if not all(isinstance(arg, str) for arg in args):
raise TypeError(
"Can't validate functions using tuple unpacking: %s" %
(argspec,)
)
# Ensure that all processors map to valid names.
bad_names = viewkeys(processors) - argset
if bad_names:
raise TypeError(
"Got processors for unknown arguments: %s." % bad_names
)
return _build_preprocessed_function(
f, processors, args_defaults, varargs, varkw,
)
return _decorator | python | def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
`func` is the the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
... return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
"""
if _unused:
raise TypeError("preprocess() doesn't accept positional arguments")
def _decorator(f):
args, varargs, varkw, defaults = argspec = getargspec(f)
if defaults is None:
defaults = ()
no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults))
args_defaults = list(zip(args, no_defaults + defaults))
if varargs:
args_defaults.append((varargs, NO_DEFAULT))
if varkw:
args_defaults.append((varkw, NO_DEFAULT))
argset = set(args) | {varargs, varkw} - {None}
# Arguments can be declared as tuples in Python 2.
if not all(isinstance(arg, str) for arg in args):
raise TypeError(
"Can't validate functions using tuple unpacking: %s" %
(argspec,)
)
# Ensure that all processors map to valid names.
bad_names = viewkeys(processors) - argset
if bad_names:
raise TypeError(
"Got processors for unknown arguments: %s." % bad_names
)
return _build_preprocessed_function(
f, processors, args_defaults, varargs, varkw,
)
return _decorator | ['def', 'preprocess', '(', '*', '_unused', ',', '*', '*', 'processors', ')', ':', 'if', '_unused', ':', 'raise', 'TypeError', '(', '"preprocess() doesn\'t accept positional arguments"', ')', 'def', '_decorator', '(', 'f', ')', ':', 'args', ',', 'varargs', ',', 'varkw', ',', 'defaults', '=', 'argspec', '=', 'getargspec', '(', 'f', ')', 'if', 'defaults', 'is', 'None', ':', 'defaults', '=', '(', ')', 'no_defaults', '=', '(', 'NO_DEFAULT', ',', ')', '*', '(', 'len', '(', 'args', ')', '-', 'len', '(', 'defaults', ')', ')', 'args_defaults', '=', 'list', '(', 'zip', '(', 'args', ',', 'no_defaults', '+', 'defaults', ')', ')', 'if', 'varargs', ':', 'args_defaults', '.', 'append', '(', '(', 'varargs', ',', 'NO_DEFAULT', ')', ')', 'if', 'varkw', ':', 'args_defaults', '.', 'append', '(', '(', 'varkw', ',', 'NO_DEFAULT', ')', ')', 'argset', '=', 'set', '(', 'args', ')', '|', '{', 'varargs', ',', 'varkw', '}', '-', '{', 'None', '}', '# Arguments can be declared as tuples in Python 2.', 'if', 'not', 'all', '(', 'isinstance', '(', 'arg', ',', 'str', ')', 'for', 'arg', 'in', 'args', ')', ':', 'raise', 'TypeError', '(', '"Can\'t validate functions using tuple unpacking: %s"', '%', '(', 'argspec', ',', ')', ')', '# Ensure that all processors map to valid names.', 'bad_names', '=', 'viewkeys', '(', 'processors', ')', '-', 'argset', 'if', 'bad_names', ':', 'raise', 'TypeError', '(', '"Got processors for unknown arguments: %s."', '%', 'bad_names', ')', 'return', '_build_preprocessed_function', '(', 'f', ',', 'processors', ',', 'args_defaults', ',', 'varargs', ',', 'varkw', ',', ')', 'return', '_decorator'] | Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
`func` is the the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
... return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead. | ['Decorator', 'that', 'applies', 'pre', '-', 'processors', 'to', 'the', 'arguments', 'of', 'a', 'function', 'before', 'calling', 'the', 'function', '.'] | train | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/preprocess.py#L35-L112 |
8,932 | GNS3/gns3-server | gns3server/compute/vpcs/vpcs_vm.py | VPCSVM.script_file | def script_file(self):
"""
Returns the startup script file for this VPCS VM.
:returns: path to startup script file
"""
# use the default VPCS file if it exists
path = os.path.join(self.working_dir, 'startup.vpc')
if os.path.exists(path):
return path
else:
return None | python | def script_file(self):
"""
Returns the startup script file for this VPCS VM.
:returns: path to startup script file
"""
# use the default VPCS file if it exists
path = os.path.join(self.working_dir, 'startup.vpc')
if os.path.exists(path):
return path
else:
return None | ['def', 'script_file', '(', 'self', ')', ':', '# use the default VPCS file if it exists', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'working_dir', ',', "'startup.vpc'", ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'return', 'path', 'else', ':', 'return', 'None'] | Returns the startup script file for this VPCS VM.
:returns: path to startup script file | ['Returns', 'the', 'startup', 'script', 'file', 'for', 'this', 'VPCS', 'VM', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vpcs/vpcs_vm.py#L539-L551 |
8,933 | cloudnull/turbolift | turbolift/clouderator/actions.py | CloudActions.delete_items | def delete_items(self, url, container, container_object=None):
"""Deletes an objects in a container.
:param url:
:param container:
"""
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_object=container_object
)
return self._deleter(uri=container_uri, headers=headers) | python | def delete_items(self, url, container, container_object=None):
"""Deletes an objects in a container.
:param url:
:param container:
"""
headers, container_uri = self._return_base_data(
url=url,
container=container,
container_object=container_object
)
return self._deleter(uri=container_uri, headers=headers) | ['def', 'delete_items', '(', 'self', ',', 'url', ',', 'container', ',', 'container_object', '=', 'None', ')', ':', 'headers', ',', 'container_uri', '=', 'self', '.', '_return_base_data', '(', 'url', '=', 'url', ',', 'container', '=', 'container', ',', 'container_object', '=', 'container_object', ')', 'return', 'self', '.', '_deleter', '(', 'uri', '=', 'container_uri', ',', 'headers', '=', 'headers', ')'] | Deletes an objects in a container.
:param url:
:param container: | ['Deletes', 'an', 'objects', 'in', 'a', 'container', '.'] | train | https://github.com/cloudnull/turbolift/blob/da33034e88959226529ce762e2895e6f6356c448/turbolift/clouderator/actions.py#L642-L655 |
8,934 | nvbn/thefuck | thefuck/argument_parser.py | Parser._add_conflicting_arguments | def _add_conflicting_arguments(self):
"""It's too dangerous to use `-y` and `-r` together."""
group = self._parser.add_mutually_exclusive_group()
group.add_argument(
'-y', '--yes', '--yeah',
action='store_true',
help='execute fixed command without confirmation')
group.add_argument(
'-r', '--repeat',
action='store_true',
help='repeat on failure') | python | def _add_conflicting_arguments(self):
"""It's too dangerous to use `-y` and `-r` together."""
group = self._parser.add_mutually_exclusive_group()
group.add_argument(
'-y', '--yes', '--yeah',
action='store_true',
help='execute fixed command without confirmation')
group.add_argument(
'-r', '--repeat',
action='store_true',
help='repeat on failure') | ['def', '_add_conflicting_arguments', '(', 'self', ')', ':', 'group', '=', 'self', '.', '_parser', '.', 'add_mutually_exclusive_group', '(', ')', 'group', '.', 'add_argument', '(', "'-y'", ',', "'--yes'", ',', "'--yeah'", ',', 'action', '=', "'store_true'", ',', 'help', '=', "'execute fixed command without confirmation'", ')', 'group', '.', 'add_argument', '(', "'-r'", ',', "'--repeat'", ',', 'action', '=', "'store_true'", ',', 'help', '=', "'repeat on failure'", ')'] | It's too dangerous to use `-y` and `-r` together. | ['It', 's', 'too', 'dangerous', 'to', 'use', '-', 'y', 'and', '-', 'r', 'together', '.'] | train | https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/argument_parser.py#L54-L64 |
8,935 | edx/opaque-keys | opaque_keys/edx/locator.py | CourseLocator.offering | def offering(self):
"""
Deprecated. Use course and run independently.
"""
warnings.warn(
"Offering is no longer a supported property of Locator. Please use the course and run properties.",
DeprecationWarning,
stacklevel=2
)
if not self.course and not self.run:
return None
elif not self.run and self.course:
return self.course
return "/".join([self.course, self.run]) | python | def offering(self):
"""
Deprecated. Use course and run independently.
"""
warnings.warn(
"Offering is no longer a supported property of Locator. Please use the course and run properties.",
DeprecationWarning,
stacklevel=2
)
if not self.course and not self.run:
return None
elif not self.run and self.course:
return self.course
return "/".join([self.course, self.run]) | ['def', 'offering', '(', 'self', ')', ':', 'warnings', '.', 'warn', '(', '"Offering is no longer a supported property of Locator. Please use the course and run properties."', ',', 'DeprecationWarning', ',', 'stacklevel', '=', '2', ')', 'if', 'not', 'self', '.', 'course', 'and', 'not', 'self', '.', 'run', ':', 'return', 'None', 'elif', 'not', 'self', '.', 'run', 'and', 'self', '.', 'course', ':', 'return', 'self', '.', 'course', 'return', '"/"', '.', 'join', '(', '[', 'self', '.', 'course', ',', 'self', '.', 'run', ']', ')'] | Deprecated. Use course and run independently. | ['Deprecated', '.', 'Use', 'course', 'and', 'run', 'independently', '.'] | train | https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L234-L247 |
8,936 | lmcinnes/umap | umap/rp_tree.py | num_nodes | def num_nodes(tree):
"""Determine the number of nodes in a tree"""
if tree.is_leaf:
return 1
else:
return 1 + num_nodes(tree.left_child) + num_nodes(tree.right_child) | python | def num_nodes(tree):
"""Determine the number of nodes in a tree"""
if tree.is_leaf:
return 1
else:
return 1 + num_nodes(tree.left_child) + num_nodes(tree.right_child) | ['def', 'num_nodes', '(', 'tree', ')', ':', 'if', 'tree', '.', 'is_leaf', ':', 'return', '1', 'else', ':', 'return', '1', '+', 'num_nodes', '(', 'tree', '.', 'left_child', ')', '+', 'num_nodes', '(', 'tree', '.', 'right_child', ')'] | Determine the number of nodes in a tree | ['Determine', 'the', 'number', 'of', 'nodes', 'in', 'a', 'tree'] | train | https://github.com/lmcinnes/umap/blob/bbb01c03ba49f7bff8f77fd662d00e50d6686c77/umap/rp_tree.py#L580-L585 |
8,937 | aichaos/rivescript-python | rivescript/rivescript.py | RiveScript.reply | def reply(self, user, msg, errors_as_replies=True):
"""Fetch a reply from the RiveScript brain.
Arguments:
user (str): A unique user ID for the person requesting a reply.
This could be e.g. a screen name or nickname. It's used internally
to store user variables (including topic and history), so if your
bot has multiple users each one should have a unique ID.
msg (str): The user's message. This is allowed to contain
punctuation and such, but any extraneous data such as HTML tags
should be removed in advance.
errors_as_replies (bool): When errors are encountered (such as a
deep recursion error, no reply matched, etc.) this will make the
reply be a text representation of the error message. If you set
this to ``False``, errors will instead raise an exception, such as
a ``DeepRecursionError`` or ``NoReplyError``. By default, no
exceptions are raised and errors are set in the reply instead.
Returns:
str: The reply output.
"""
return self._brain.reply(user, msg, errors_as_replies) | python | def reply(self, user, msg, errors_as_replies=True):
"""Fetch a reply from the RiveScript brain.
Arguments:
user (str): A unique user ID for the person requesting a reply.
This could be e.g. a screen name or nickname. It's used internally
to store user variables (including topic and history), so if your
bot has multiple users each one should have a unique ID.
msg (str): The user's message. This is allowed to contain
punctuation and such, but any extraneous data such as HTML tags
should be removed in advance.
errors_as_replies (bool): When errors are encountered (such as a
deep recursion error, no reply matched, etc.) this will make the
reply be a text representation of the error message. If you set
this to ``False``, errors will instead raise an exception, such as
a ``DeepRecursionError`` or ``NoReplyError``. By default, no
exceptions are raised and errors are set in the reply instead.
Returns:
str: The reply output.
"""
return self._brain.reply(user, msg, errors_as_replies) | ['def', 'reply', '(', 'self', ',', 'user', ',', 'msg', ',', 'errors_as_replies', '=', 'True', ')', ':', 'return', 'self', '.', '_brain', '.', 'reply', '(', 'user', ',', 'msg', ',', 'errors_as_replies', ')'] | Fetch a reply from the RiveScript brain.
Arguments:
user (str): A unique user ID for the person requesting a reply.
This could be e.g. a screen name or nickname. It's used internally
to store user variables (including topic and history), so if your
bot has multiple users each one should have a unique ID.
msg (str): The user's message. This is allowed to contain
punctuation and such, but any extraneous data such as HTML tags
should be removed in advance.
errors_as_replies (bool): When errors are encountered (such as a
deep recursion error, no reply matched, etc.) this will make the
reply be a text representation of the error message. If you set
this to ``False``, errors will instead raise an exception, such as
a ``DeepRecursionError`` or ``NoReplyError``. By default, no
exceptions are raised and errors are set in the reply instead.
Returns:
str: The reply output. | ['Fetch', 'a', 'reply', 'from', 'the', 'RiveScript', 'brain', '.'] | train | https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L926-L947 |
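A minimal usage sketch for the reply() API documented above; the brain directory and user IDs are illustrative assumptions.
from rivescript import RiveScript

bot = RiveScript()
bot.load_directory("./brain")   # hypothetical directory of *.rive files
bot.sort_replies()

# Each end user gets a distinct ID so topic and history state stay separate.
print(bot.reply("alice", "Hello, bot!"))
print(bot.reply("bob", "What is your name?"))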
8,938 | BD2KGenomics/protect | attic/precision_immuno.py | get_file_from_s3 | def get_file_from_s3(job, s3_url, encryption_key=None, write_to_jobstore=True):
"""
Downloads a supplied URL that points to an unencrypted, unprotected file on Amazon S3. The file
is downloaded and subsequently written to the jobstore, and the return value is the path to
the file in the jobstore.
"""
work_dir = job.fileStore.getLocalTempDir()
filename = '/'.join([work_dir, os.path.basename(s3_url)])
# This is common to encrypted and unencrypted downloads
download_call = ['curl', '-fs', '--retry', '5']
# If an encryption key was provided, use it to create the headers that need to be injected into
# the curl script and append to the call
if encryption_key:
key = generate_unique_key(encryption_key, s3_url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode( hashlib.md5(key).digest() )
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
download_call.extend(['-H', h1, '-H', h2, '-H', h3])
# This is also common to both types of downloads
download_call.extend([s3_url, '-o', filename])
try:
subprocess.check_call(download_call)
except subprocess.CalledProcessError:
raise RuntimeError('Curl returned a non-zero exit status processing %s. Do you ' % s3_url +
                   'have permissions to access the file?')
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(filename)
if write_to_jobstore:
filename = job.fileStore.writeGlobalFile(filename)
return filename | python | def get_file_from_s3(job, s3_url, encryption_key=None, write_to_jobstore=True):
"""
Downloads a supplied URL that points to an unencrypted, unprotected file on Amazon S3. The file
is downloaded and subsequently written to the jobstore, and the return value is the path to
the file in the jobstore.
"""
work_dir = job.fileStore.getLocalTempDir()
filename = '/'.join([work_dir, os.path.basename(s3_url)])
# This is common to encrypted and unencrypted downloads
download_call = ['curl', '-fs', '--retry', '5']
# If an encryption key was provided, use it to create the headers that need to be injected into
# the curl script and append to the call
if encryption_key:
key = generate_unique_key(encryption_key, s3_url)
encoded_key = base64.b64encode(key)
encoded_key_md5 = base64.b64encode( hashlib.md5(key).digest() )
h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256'
h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key)
h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5)
download_call.extend(['-H', h1, '-H', h2, '-H', h3])
# This is also common to both types of downloads
download_call.extend([s3_url, '-o', filename])
try:
subprocess.check_call(download_call)
except subprocess.CalledProcessError:
raise RuntimeError('Curl returned a non-zero exit status processing %s. Do you ' % s3_url +
                   'have permissions to access the file?')
except OSError:
raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"')
assert os.path.exists(filename)
if write_to_jobstore:
filename = job.fileStore.writeGlobalFile(filename)
return filename | ['def', 'get_file_from_s3', '(', 'job', ',', 's3_url', ',', 'encryption_key', '=', 'None', ',', 'write_to_jobstore', '=', 'True', ')', ':', 'work_dir', '=', 'job', '.', 'fileStore', '.', 'getLocalTempDir', '(', ')', 'filename', '=', "'/'", '.', 'join', '(', '[', 'work_dir', ',', 'os', '.', 'path', '.', 'basename', '(', 's3_url', ')', ']', ')', '# This is common to encrypted and unencrypted downloads', 'download_call', '=', '[', "'curl'", ',', "'-fs'", ',', "'--retry'", ',', "'5'", ']', '# If an encryption key was provided, use it to create teh headers that need to be injected into', '# the curl script and append to the call', 'if', 'encryption_key', ':', 'key', '=', 'generate_unique_key', '(', 'encryption_key', ',', 's3_url', ')', 'encoded_key', '=', 'base64', '.', 'b64encode', '(', 'key', ')', 'encoded_key_md5', '=', 'base64', '.', 'b64encode', '(', 'hashlib', '.', 'md5', '(', 'key', ')', '.', 'digest', '(', ')', ')', 'h1', '=', "'x-amz-server-side-encryption-customer-algorithm:AES256'", 'h2', '=', "'x-amz-server-side-encryption-customer-key:{}'", '.', 'format', '(', 'encoded_key', ')', 'h3', '=', "'x-amz-server-side-encryption-customer-key-md5:{}'", '.', 'format', '(', 'encoded_key_md5', ')', 'download_call', '.', 'extend', '(', '[', "'-H'", ',', 'h1', ',', "'-H'", ',', 'h2', ',', "'-H'", ',', 'h3', ']', ')', '# This is also common to both types of downloads', 'download_call', '.', 'extend', '(', '[', 's3_url', ',', "'-o'", ',', 'filename', ']', ')', 'try', ':', 'subprocess', '.', 'check_call', '(', 'download_call', ')', 'except', 'subprocess', '.', 'CalledProcessError', ':', 'raise', 'RuntimeError', '(', "'Curl returned a non-zero exit status processing %s. Do you'", '%', 's3_url', '+', "'have premssions to access the file?'", ')', 'except', 'OSError', ':', 'raise', 'RuntimeError', '(', '\'Failed to find "curl". Install via "apt-get install curl"\'', ')', 'assert', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', 'if', 'write_to_jobstore', ':', 'filename', '=', 'job', '.', 'fileStore', '.', 'writeGlobalFile', '(', 'filename', ')', 'return', 'filename'] | Downloads a supplied URL that points to an unencrypted, unprotected file on Amazon S3. The file
is downloaded and subsequently written to the jobstore, and the return value is the path to
the file in the jobstore. | ['Downloads', 'a', 'supplied', 'URL', 'that', 'points', 'to', 'an', 'unencrypted', 'unprotected', 'file', 'on', 'Amazon', 'S3', '.', 'The', 'file', 'is', 'downloaded', 'and', 'a', 'subsequently', 'written', 'to', 'the', 'jobstore', 'and', 'the', 'return', 'value', 'is', 'a', 'the', 'path', 'to', 'the', 'file', 'in', 'the', 'jobstore', '.'] | train | https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/attic/precision_immuno.py#L2159-L2191 |
8,939 | hotdoc/hotdoc | hotdoc/core/tree.py | Tree.write_out | def write_out(self, output):
"""Banana banana
"""
for page in self.walk():
ext = self.project.extensions[page.extension_name]
ext.write_out_page(output, page) | python | def write_out(self, output):
"""Banana banana
"""
for page in self.walk():
ext = self.project.extensions[page.extension_name]
ext.write_out_page(output, page) | ['def', 'write_out', '(', 'self', ',', 'output', ')', ':', 'for', 'page', 'in', 'self', '.', 'walk', '(', ')', ':', 'ext', '=', 'self', '.', 'project', '.', 'extensions', '[', 'page', '.', 'extension_name', ']', 'ext', '.', 'write_out_page', '(', 'output', ',', 'page', ')'] | Banana banana | ['Banana', 'banana'] | train | https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/tree.py#L623-L628 |
8,940 | PmagPy/PmagPy | programs/plot_magmap.py | main | def main():
"""
NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k_2,shadif14k,cals10k] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: 0 longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu)
"""
cmap = 'RdYlBu'
date = 2016.
if not ccrs:
print("-W- You must intstall the cartopy module to run plot_magmap.py")
sys.exit()
dir_path = '.'
lincr = 1 # level increment for contours
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if fmt == 'jpg':
print('jpg not a supported option')
print(main.__doc__)
sys.exit()
else:
fmt = 'png'
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
cmap = sys.argv[ind+1]
if '-el' in sys.argv:
ind = sys.argv.index('-el')
el = sys.argv[ind+1]
else:
el = 'B'
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = sys.argv[ind+1]
else:
alt = 0
if '-lon0' in sys.argv:
ind = sys.argv.index('-lon0')
lon_0 = float(sys.argv[ind+1])
else:
lon_0 = 0
if '-mod' in sys.argv:
ind = sys.argv.index('-mod')
mod = sys.argv[ind+1]
ghfile = ''
elif '-f' in sys.argv:
ind = sys.argv.index('-f')
ghfile = sys.argv[ind+1]
mod = 'custom'
date = ''
else:
mod, ghfile = 'cals10k', ''
if '-age' in sys.argv:
ind = sys.argv.index('-age')
date = float(sys.argv[ind+1])
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = float(sys.argv[ind+1])
else:
alt = 0
# doesn't work correctly with mod other than default
Ds, Is, Bs, Brs, lons, lats = pmag.do_mag_map(
date, mod=mod, lon_0=lon_0, alt=alt, file=ghfile)
ax = plt.axes(projection=ccrs.Mollweide(central_longitude=lon_0))
ax.coastlines()
xx, yy = meshgrid(lons, lats)
if mod == 'custom':
str_date = 'Custom'
else:
str_date = str(date)
if el == 'B':
levmax = Bs.max()+lincr
levmin = round(Bs.min()-lincr)
levels = np.arange(levmin, levmax, lincr)
plt.contourf(xx, yy, Bs, levels=levels, cmap=cmap,
transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Field strength ($\mu$T): '+ str_date)
if el == 'Br':
levmax = Brs.max()+lincr
levmin = round(Brs.min()-lincr)
plt.contourf(xx, yy, Brs,
levels=np.arange(levmin, levmax, lincr),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Radial field strength ($\mu$T): '+ str_date)
if el == 'I':
levmax = Is.max()+lincr
levmin = round(Is.min()-lincr)
plt.contourf(xx, yy, Is,
levels=np.arange(levmin, levmax, lincr),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.contour(xx, yy, Is, levels=np.arange(-80, 90, 10),
colors='black', transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Field inclination: '+ str_date)
if el == 'D':
plt.contourf(xx, yy, Ds,
levels=np.arange(-180, 180, 10),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.contour(xx, yy, Ds, levels=np.arange(-180,
180, 10), colors='black')
# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)
# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)
# m.contour(x,y,Ds,levels=np.arange(-180,180,10),colors='black')
plt.title('Field declination: '+ str_date)
cbar = plt.colorbar(orientation='horizontal')
figname = 'geomagnetic_field_' + str_date + '.'+fmt
plt.savefig(figname)
print('Figure saved as: ', figname) | python | def main():
"""
NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k_2,shadif14k,cals10k] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: 0 longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu)
"""
cmap = 'RdYlBu'
date = 2016.
if not ccrs:
print("-W- You must intstall the cartopy module to run plot_magmap.py")
sys.exit()
dir_path = '.'
lincr = 1 # level increment for contours
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if fmt == 'jpg':
print('jpg not a supported option')
print(main.__doc__)
sys.exit()
else:
fmt = 'png'
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
cmap = sys.argv[ind+1]
if '-el' in sys.argv:
ind = sys.argv.index('-el')
el = sys.argv[ind+1]
else:
el = 'B'
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = sys.argv[ind+1]
else:
alt = 0
if '-lon0' in sys.argv:
ind = sys.argv.index('-lon0')
lon_0 = float(sys.argv[ind+1])
else:
lon_0 = 0
if '-mod' in sys.argv:
ind = sys.argv.index('-mod')
mod = sys.argv[ind+1]
ghfile = ''
elif '-f' in sys.argv:
ind = sys.argv.index('-f')
ghfile = sys.argv[ind+1]
mod = 'custom'
date = ''
else:
mod, ghfile = 'cals10k', ''
if '-age' in sys.argv:
ind = sys.argv.index('-age')
date = float(sys.argv[ind+1])
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = float(sys.argv[ind+1])
else:
alt = 0
# doesn't work correctly with mod other than default
Ds, Is, Bs, Brs, lons, lats = pmag.do_mag_map(
date, mod=mod, lon_0=lon_0, alt=alt, file=ghfile)
ax = plt.axes(projection=ccrs.Mollweide(central_longitude=lon_0))
ax.coastlines()
xx, yy = meshgrid(lons, lats)
if mod == 'custom':
str_date = 'Custom'
else:
str_date = str(date)
if el == 'B':
levmax = Bs.max()+lincr
levmin = round(Bs.min()-lincr)
levels = np.arange(levmin, levmax, lincr)
plt.contourf(xx, yy, Bs, levels=levels, cmap=cmap,
transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Field strength ($\mu$T): '+ str_date)
if el == 'Br':
levmax = Brs.max()+lincr
levmin = round(Brs.min()-lincr)
plt.contourf(xx, yy, Brs,
levels=np.arange(levmin, levmax, lincr),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Radial field strength ($\mu$T): '+ str_date)
if el == 'I':
levmax = Is.max()+lincr
levmin = round(Is.min()-lincr)
plt.contourf(xx, yy, Is,
levels=np.arange(levmin, levmax, lincr),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.contour(xx, yy, Is, levels=np.arange(-80, 90, 10),
colors='black', transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Field inclination: '+ str_date)
if el == 'D':
plt.contourf(xx, yy, Ds,
levels=np.arange(-180, 180, 10),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.contour(xx, yy, Ds, levels=np.arange(-180,
180, 10), colors='black')
# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)
# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)
# m.contour(x,y,Ds,levels=np.arange(-180,180,10),colors='black')
plt.title('Field declination: '+ str_date)
cbar = plt.colorbar(orientation='horizontal')
figname = 'geomagnetic_field_' + str_date + '.'+fmt
plt.savefig(figname)
print('Figure saved as: ', figname) | ['def', 'main', '(', ')', ':', 'cmap', '=', "'RdYlBu'", 'date', '=', '2016.', 'if', 'not', 'ccrs', ':', 'print', '(', '"-W- You must intstall the cartopy module to run plot_magmap.py"', ')', 'sys', '.', 'exit', '(', ')', 'dir_path', '=', "'.'", 'lincr', '=', '1', '# level increment for contours', 'if', "'-WD'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-WD'", ')', 'dir_path', '=', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', 'if', "'-h'", 'in', 'sys', '.', 'argv', ':', 'print', '(', 'main', '.', '__doc__', ')', 'sys', '.', 'exit', '(', ')', 'if', "'-fmt'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-fmt'", ')', 'fmt', '=', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', 'if', 'fmt', '==', "'jpg'", ':', 'print', '(', "'jpg not a supported option'", ')', 'print', '(', 'main', '.', '__doc__', ')', 'sys', '.', 'exit', '(', ')', 'else', ':', 'fmt', '=', "'png'", 'if', "'-cm'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-cm'", ')', 'cmap', '=', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', 'if', "'-el'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-el'", ')', 'el', '=', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', 'else', ':', 'el', '=', "'B'", 'if', "'-alt'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-alt'", ')', 'alt', '=', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', 'else', ':', 'alt', '=', '0', 'if', "'-lon0'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-lon0'", ')', 'lon_0', '=', 'float', '(', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', ')', 'else', ':', 'lon_0', '=', '0', 'if', "'-mod'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-mod'", ')', 'mod', '=', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', 'ghfile', '=', "''", 'elif', "'-f'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-f'", ')', 'ghfile', '=', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', 'mod', '=', "'custom'", 'date', '=', "''", 'else', ':', 'mod', ',', 'ghfile', '=', "'cals10k'", ',', "''", 'if', "'-age'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-age'", ')', 'date', '=', 'float', '(', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', ')', 'if', "'-alt'", 'in', 'sys', '.', 'argv', ':', 'ind', '=', 'sys', '.', 'argv', '.', 'index', '(', "'-alt'", ')', 'alt', '=', 'float', '(', 'sys', '.', 'argv', '[', 'ind', '+', '1', ']', ')', 'else', ':', 'alt', '=', '0', "# doesn't work correctly with mod other than default", 'Ds', ',', 'Is', ',', 'Bs', ',', 'Brs', ',', 'lons', ',', 'lats', '=', 'pmag', '.', 'do_mag_map', '(', 'date', ',', 'mod', '=', 'mod', ',', 'lon_0', '=', 'lon_0', ',', 'alt', '=', 'alt', ',', 'file', '=', 'ghfile', ')', 'ax', '=', 'plt', '.', 'axes', '(', 'projection', '=', 'ccrs', '.', 'Mollweide', '(', 'central_longitude', '=', 'lon_0', ')', ')', 'ax', '.', 'coastlines', '(', ')', 'xx', ',', 'yy', '=', 'meshgrid', '(', 'lons', ',', 'lats', ')', 'if', 'mod', '==', "'custom'", ':', 'str_date', '=', "'Custom'", 'else', ':', 'str_date', '=', 'str', '(', 'date', ')', 'if', 'el', '==', "'B'", ':', 'levmax', '=', 'Bs', '.', 'max', '(', ')', '+', 'lincr', 'levmin', '=', 'round', '(', 'Bs', '.', 'min', '(', ')', '-', 'lincr', ')', 'levels', '=', 'np', '.', 'arange', '(', 'levmin', ',', 'levmax', ',', 'lincr', ')', 
'plt', '.', 'contourf', '(', 'xx', ',', 'yy', ',', 'Bs', ',', 'levels', '=', 'levels', ',', 'cmap', '=', 'cmap', ',', 'transform', '=', 'ccrs', '.', 'PlateCarree', '(', 'central_longitude', '=', 'lon_0', ')', ')', 'plt', '.', 'title', '(', "'Field strength ($\\mu$T): '", '+', 'str_date', ')', 'if', 'el', '==', "'Br'", ':', 'levmax', '=', 'Brs', '.', 'max', '(', ')', '+', 'lincr', 'levmin', '=', 'round', '(', 'Brs', '.', 'min', '(', ')', '-', 'lincr', ')', 'plt', '.', 'contourf', '(', 'xx', ',', 'yy', ',', 'Brs', ',', 'levels', '=', 'np', '.', 'arange', '(', 'levmin', ',', 'levmax', ',', 'lincr', ')', ',', 'cmap', '=', 'cmap', ',', 'transform', '=', 'ccrs', '.', 'PlateCarree', '(', 'central_longitude', '=', 'lon_0', ')', ')', 'plt', '.', 'title', '(', "'Radial field strength ($\\mu$T): '", '+', 'str_date', ')', 'if', 'el', '==', "'I'", ':', 'levmax', '=', 'Is', '.', 'max', '(', ')', '+', 'lincr', 'levmin', '=', 'round', '(', 'Is', '.', 'min', '(', ')', '-', 'lincr', ')', 'plt', '.', 'contourf', '(', 'xx', ',', 'yy', ',', 'Is', ',', 'levels', '=', 'np', '.', 'arange', '(', 'levmin', ',', 'levmax', ',', 'lincr', ')', ',', 'cmap', '=', 'cmap', ',', 'transform', '=', 'ccrs', '.', 'PlateCarree', '(', 'central_longitude', '=', 'lon_0', ')', ')', 'plt', '.', 'contour', '(', 'xx', ',', 'yy', ',', 'Is', ',', 'levels', '=', 'np', '.', 'arange', '(', '-', '80', ',', '90', ',', '10', ')', ',', 'colors', '=', "'black'", ',', 'transform', '=', 'ccrs', '.', 'PlateCarree', '(', 'central_longitude', '=', 'lon_0', ')', ')', 'plt', '.', 'title', '(', "'Field inclination: '", '+', 'str_date', ')', 'if', 'el', '==', "'D'", ':', 'plt', '.', 'contourf', '(', 'xx', ',', 'yy', ',', 'Ds', ',', 'levels', '=', 'np', '.', 'arange', '(', '-', '180', ',', '180', ',', '10', ')', ',', 'cmap', '=', 'cmap', ',', 'transform', '=', 'ccrs', '.', 'PlateCarree', '(', 'central_longitude', '=', 'lon_0', ')', ')', 'plt', '.', 'contour', '(', 'xx', ',', 'yy', ',', 'Ds', ',', 'levels', '=', 'np', '.', 'arange', '(', '-', '180', ',', '180', ',', '10', ')', ',', 'colors', '=', "'black'", ')', '# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)', '# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)', "# m.contour(x,y,Ds,levels=np.arange(-180,180,10),colors='black')", 'plt', '.', 'title', '(', "'Field declination: '", '+', 'str_date', ')', 'cbar', '=', 'plt', '.', 'colorbar', '(', 'orientation', '=', "'horizontal'", ')', 'figname', '=', "'geomagnetic_field_'", '+', 'str_date', '+', "'.'", '+', 'fmt', 'plt', '.', 'savefig', '(', 'figname', ')', 'print', '(', "'Figure saved as: '", ',', 'figname', ')'] | NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k_2,shadif14k,cals10k] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: 0 longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu) | ['NAME', 'plot_magmap', '.', 'py'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/plot_magmap.py#L24-L153 |
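The script above is command-line driven; a hedged invocation sketch (flag values are arbitrary examples) would save a file named geomagnetic_field_1800.0.pdf.
import subprocess

# Plot total field strength for 1800 CE from the cals10k model as a PDF.
subprocess.run(
    ["python", "plot_magmap.py", "-mod", "cals10k", "-age", "1800", "-el", "B", "-fmt", "pdf"],
    check=True,
)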
8,941 | django-parler/django-parler | parler/cache.py | _cache_translation | def _cache_translation(translation, timeout=cache.default_timeout):
"""
Store a new translation in the cache.
"""
if not appsettings.PARLER_ENABLE_CACHING:
return
if translation.master_id is None:
raise ValueError("Can't cache unsaved translation")
# Cache a translation object.
# For internal usage, object parameters are not suited for outside usage.
fields = translation.get_translated_fields()
values = {'id': translation.id}
for name in fields:
values[name] = getattr(translation, name)
key = get_translation_cache_key(translation.__class__, translation.master_id, translation.language_code)
cache.set(key, values, timeout=timeout) | python | def _cache_translation(translation, timeout=cache.default_timeout):
"""
Store a new translation in the cache.
"""
if not appsettings.PARLER_ENABLE_CACHING:
return
if translation.master_id is None:
raise ValueError("Can't cache unsaved translation")
# Cache a translation object.
# For internal usage, object parameters are not suited for outside usage.
fields = translation.get_translated_fields()
values = {'id': translation.id}
for name in fields:
values[name] = getattr(translation, name)
key = get_translation_cache_key(translation.__class__, translation.master_id, translation.language_code)
cache.set(key, values, timeout=timeout) | ['def', '_cache_translation', '(', 'translation', ',', 'timeout', '=', 'cache', '.', 'default_timeout', ')', ':', 'if', 'not', 'appsettings', '.', 'PARLER_ENABLE_CACHING', ':', 'return', 'if', 'translation', '.', 'master_id', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Can\'t cache unsaved translation"', ')', '# Cache a translation object.', '# For internal usage, object parameters are not suited for outside usage.', 'fields', '=', 'translation', '.', 'get_translated_fields', '(', ')', 'values', '=', '{', "'id'", ':', 'translation', '.', 'id', '}', 'for', 'name', 'in', 'fields', ':', 'values', '[', 'name', ']', '=', 'getattr', '(', 'translation', ',', 'name', ')', 'key', '=', 'get_translation_cache_key', '(', 'translation', '.', '__class__', ',', 'translation', '.', 'master_id', ',', 'translation', '.', 'language_code', ')', 'cache', '.', 'set', '(', 'key', ',', 'values', ',', 'timeout', '=', 'timeout', ')'] | Store a new translation in the cache. | ['Store', 'a', 'new', 'translation', 'in', 'the', 'cache', '.'] | train | https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/cache.py#L147-L165 |
8,942 | cemsbr/yala | yala/main.py | Main.print_results | def print_results(cls, stdout, stderr):
"""Print linter results and exits with an error if there's any."""
for line in stderr:
print(line, file=sys.stderr)
if stdout:
if stderr: # blank line to separate stdout from stderr
print(file=sys.stderr)
cls._print_stdout(stdout)
else:
print(':) No issues found.') | python | def print_results(cls, stdout, stderr):
"""Print linter results and exits with an error if there's any."""
for line in stderr:
print(line, file=sys.stderr)
if stdout:
if stderr: # blank line to separate stdout from stderr
print(file=sys.stderr)
cls._print_stdout(stdout)
else:
print(':) No issues found.') | ['def', 'print_results', '(', 'cls', ',', 'stdout', ',', 'stderr', ')', ':', 'for', 'line', 'in', 'stderr', ':', 'print', '(', 'line', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'if', 'stdout', ':', 'if', 'stderr', ':', '# blank line to separate stdout from stderr', 'print', '(', 'file', '=', 'sys', '.', 'stderr', ')', 'cls', '.', '_print_stdout', '(', 'stdout', ')', 'else', ':', 'print', '(', "':) No issues found.'", ')'] | Print linter results and exits with an error if there's any. | ['Print', 'linter', 'results', 'and', 'exits', 'with', 'an', 'error', 'if', 'there', 's', 'any', '.'] | train | https://github.com/cemsbr/yala/blob/efceb044cb3de8d1c12140087ae9d5f8269bfbf9/yala/main.py#L136-L145 |
8,943 | stbraun/fuzzing | fuzzing/fuzzer.py | fuzz_string | def fuzz_string(seed_str, runs=100, fuzz_factor=50):
"""A random fuzzer for a simulated text viewer application.
It takes a string as seed and generates <runs> variant of it.
:param seed_str: the string to use as seed for fuzzing.
:param runs: number of fuzzed variants to supply.
:param fuzz_factor: degree of fuzzing = 1 / fuzz_factor.
:return: list of fuzzed variants of seed_str.
:rtype: [str]
"""
buf = bytearray(seed_str, encoding="utf8")
variants = []
for _ in range(runs):
fuzzed = fuzzer(buf, fuzz_factor)
variants.append(''.join([chr(b) for b in fuzzed]))
logger().info('Fuzzed strings: {}'.format(variants))
return variants | python | def fuzz_string(seed_str, runs=100, fuzz_factor=50):
"""A random fuzzer for a simulated text viewer application.
It takes a string as seed and generates <runs> variant of it.
:param seed_str: the string to use as seed for fuzzing.
:param runs: number of fuzzed variants to supply.
:param fuzz_factor: degree of fuzzing = 1 / fuzz_factor.
:return: list of fuzzed variants of seed_str.
:rtype: [str]
"""
buf = bytearray(seed_str, encoding="utf8")
variants = []
for _ in range(runs):
fuzzed = fuzzer(buf, fuzz_factor)
variants.append(''.join([chr(b) for b in fuzzed]))
logger().info('Fuzzed strings: {}'.format(variants))
return variants | ['def', 'fuzz_string', '(', 'seed_str', ',', 'runs', '=', '100', ',', 'fuzz_factor', '=', '50', ')', ':', 'buf', '=', 'bytearray', '(', 'seed_str', ',', 'encoding', '=', '"utf8"', ')', 'variants', '=', '[', ']', 'for', '_', 'in', 'range', '(', 'runs', ')', ':', 'fuzzed', '=', 'fuzzer', '(', 'buf', ',', 'fuzz_factor', ')', 'variants', '.', 'append', '(', "''", '.', 'join', '(', '[', 'chr', '(', 'b', ')', 'for', 'b', 'in', 'fuzzed', ']', ')', ')', 'logger', '(', ')', '.', 'info', '(', "'Fuzzed strings: {}'", '.', 'format', '(', 'variants', ')', ')', 'return', 'variants'] | A random fuzzer for a simulated text viewer application.
It takes a string as seed and generates <runs> variant of it.
:param seed_str: the string to use as seed for fuzzing.
:param runs: number of fuzzed variants to supply.
:param fuzz_factor: degree of fuzzing = 1 / fuzz_factor.
:return: list of fuzzed variants of seed_str.
:rtype: [str] | ['A', 'random', 'fuzzer', 'for', 'a', 'simulated', 'text', 'viewer', 'application', '.'] | train | https://github.com/stbraun/fuzzing/blob/974a64472732d4e40db919d242149bf0856fe199/fuzzing/fuzzer.py#L55-L72 |
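A quick, hedged usage sketch of the string fuzzer above; the seed text and counts are arbitrary.
variants = fuzz_string("The quick brown fox", runs=5, fuzz_factor=10)
for v in variants:
    print(repr(v))   # each variant has a handful of randomly replaced characters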
8,944 | binux/pyspider | pyspider/libs/counter.py | CounterManager.trim | def trim(self):
"""Clear not used counters"""
for key, value in list(iteritems(self.counters)):
if value.empty():
del self.counters[key] | python | def trim(self):
"""Clear not used counters"""
for key, value in list(iteritems(self.counters)):
if value.empty():
del self.counters[key] | ['def', 'trim', '(', 'self', ')', ':', 'for', 'key', ',', 'value', 'in', 'list', '(', 'iteritems', '(', 'self', '.', 'counters', ')', ')', ':', 'if', 'value', '.', 'empty', '(', ')', ':', 'del', 'self', '.', 'counters', '[', 'key', ']'] | Clear not used counters | ['Clear', 'not', 'used', 'counters'] | train | https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/counter.py#L366-L370 |
8,945 | darkfeline/animanager | animanager/commands/reset.py | command | def command(state, args):
"""Reset anime watched episodes."""
args = parser.parse_args(args[1:])
aid = state.results.parse_aid(args.aid, default_key='db')
query.update.reset(state.db, aid, args.episode) | python | def command(state, args):
"""Reset anime watched episodes."""
args = parser.parse_args(args[1:])
aid = state.results.parse_aid(args.aid, default_key='db')
query.update.reset(state.db, aid, args.episode) | ['def', 'command', '(', 'state', ',', 'args', ')', ':', 'args', '=', 'parser', '.', 'parse_args', '(', 'args', '[', '1', ':', ']', ')', 'aid', '=', 'state', '.', 'results', '.', 'parse_aid', '(', 'args', '.', 'aid', ',', 'default_key', '=', "'db'", ')', 'query', '.', 'update', '.', 'reset', '(', 'state', '.', 'db', ',', 'aid', ',', 'args', '.', 'episode', ')'] | Reset anime watched episodes. | ['Reset', 'anime', 'watched', 'episodes', '.'] | train | https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/reset.py#L22-L26 |
8,946 | serkanyersen/underscore.py | src/underscore.py | underscore.without | def without(self, *values):
"""
Return a version of the array that does not
contain the specified value(s).
"""
if self._clean.isDict():
newlist = {}
for i, k in enumerate(self.obj):
# if k not in values: # use indexof to check identity
if _(values).indexOf(k) is -1:
newlist.set(k, self.obj[k])
else:
newlist = []
for i, v in enumerate(self.obj):
# if v not in values: # use indexof to check identity
if _(values).indexOf(v) is -1:
newlist.append(v)
return self._wrap(newlist) | python | def without(self, *values):
"""
Return a version of the array that does not
contain the specified value(s).
"""
if self._clean.isDict():
newlist = {}
for i, k in enumerate(self.obj):
# if k not in values: # use indexof to check identity
if _(values).indexOf(k) is -1:
newlist.set(k, self.obj[k])
else:
newlist = []
for i, v in enumerate(self.obj):
# if v not in values: # use indexof to check identity
if _(values).indexOf(v) is -1:
newlist.append(v)
return self._wrap(newlist) | ['def', 'without', '(', 'self', ',', '*', 'values', ')', ':', 'if', 'self', '.', '_clean', '.', 'isDict', '(', ')', ':', 'newlist', '=', '{', '}', 'for', 'i', ',', 'k', 'in', 'enumerate', '(', 'self', '.', 'obj', ')', ':', '# if k not in values: # use indexof to check identity', 'if', '_', '(', 'values', ')', '.', 'indexOf', '(', 'k', ')', 'is', '-', '1', ':', 'newlist', '.', 'set', '(', 'k', ',', 'self', '.', 'obj', '[', 'k', ']', ')', 'else', ':', 'newlist', '=', '[', ']', 'for', 'i', ',', 'v', 'in', 'enumerate', '(', 'self', '.', 'obj', ')', ':', '# if v not in values: # use indexof to check identity', 'if', '_', '(', 'values', ')', '.', 'indexOf', '(', 'v', ')', 'is', '-', '1', ':', 'newlist', '.', 'append', '(', 'v', ')', 'return', 'self', '.', '_wrap', '(', 'newlist', ')'] | Return a version of the array that does not
contain the specified value(s). | ['Return', 'a', 'version', 'of', 'the', 'array', 'that', 'does', 'not', 'contain', 'the', 'specified', 'value', '(', 's', ')', '.'] | train | https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L561-L579 |
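A minimal sketch of without() in underscore.py's wrapper style; it assumes the package is importable as underscore.
from underscore import _

print(_([1, 2, 1, 0, 3, 1, 4]).without(0, 1))   # -> [2, 3, 4]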
8,947 | python-rope/rope | rope/contrib/fixmodnames.py | FixModuleNames.get_changes | def get_changes(self, fixer=str.lower,
task_handle=taskhandle.NullTaskHandle()):
"""Fix module names
`fixer` is a function that takes and returns a `str`. Given
the name of a module, it should return the fixed name.
"""
stack = changestack.ChangeStack(self.project, 'Fixing module names')
jobset = task_handle.create_jobset('Fixing module names',
self._count_fixes(fixer) + 1)
try:
while True:
for resource in self._tobe_fixed(fixer):
jobset.started_job(resource.path)
renamer = rename.Rename(self.project, resource)
changes = renamer.get_changes(fixer(self._name(resource)))
stack.push(changes)
jobset.finished_job()
break
else:
break
finally:
jobset.started_job('Reverting to original state')
stack.pop_all()
jobset.finished_job()
return stack.merged() | python | def get_changes(self, fixer=str.lower,
task_handle=taskhandle.NullTaskHandle()):
"""Fix module names
`fixer` is a function that takes and returns a `str`. Given
the name of a module, it should return the fixed name.
"""
stack = changestack.ChangeStack(self.project, 'Fixing module names')
jobset = task_handle.create_jobset('Fixing module names',
self._count_fixes(fixer) + 1)
try:
while True:
for resource in self._tobe_fixed(fixer):
jobset.started_job(resource.path)
renamer = rename.Rename(self.project, resource)
changes = renamer.get_changes(fixer(self._name(resource)))
stack.push(changes)
jobset.finished_job()
break
else:
break
finally:
jobset.started_job('Reverting to original state')
stack.pop_all()
jobset.finished_job()
return stack.merged() | ['def', 'get_changes', '(', 'self', ',', 'fixer', '=', 'str', '.', 'lower', ',', 'task_handle', '=', 'taskhandle', '.', 'NullTaskHandle', '(', ')', ')', ':', 'stack', '=', 'changestack', '.', 'ChangeStack', '(', 'self', '.', 'project', ',', "'Fixing module names'", ')', 'jobset', '=', 'task_handle', '.', 'create_jobset', '(', "'Fixing module names'", ',', 'self', '.', '_count_fixes', '(', 'fixer', ')', '+', '1', ')', 'try', ':', 'while', 'True', ':', 'for', 'resource', 'in', 'self', '.', '_tobe_fixed', '(', 'fixer', ')', ':', 'jobset', '.', 'started_job', '(', 'resource', '.', 'path', ')', 'renamer', '=', 'rename', '.', 'Rename', '(', 'self', '.', 'project', ',', 'resource', ')', 'changes', '=', 'renamer', '.', 'get_changes', '(', 'fixer', '(', 'self', '.', '_name', '(', 'resource', ')', ')', ')', 'stack', '.', 'push', '(', 'changes', ')', 'jobset', '.', 'finished_job', '(', ')', 'break', 'else', ':', 'break', 'finally', ':', 'jobset', '.', 'started_job', '(', "'Reverting to original state'", ')', 'stack', '.', 'pop_all', '(', ')', 'jobset', '.', 'finished_job', '(', ')', 'return', 'stack', '.', 'merged', '(', ')'] | Fix module names
`fixer` is a function that takes and returns a `str`. Given
the name of a module, it should return the fixed name. | ['Fix', 'module', 'names'] | train | https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/contrib/fixmodnames.py#L28-L54 |
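A hedged sketch of driving the module-name fixer from a rope project; the project path is a placeholder and str.lower is the default fixer.
from rope.base.project import Project
from rope.contrib.fixmodnames import FixModuleNames

project = Project("/path/to/project")          # placeholder path
changes = FixModuleNames(project).get_changes(fixer=str.lower)
project.do(changes)                            # apply the collected renames
project.close()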
8,948 | ironfroggy/django-better-cache | bettercache/utils.py | get_header_dict | def get_header_dict(response, header):
""" returns a dictionary of the cache control headers
the same as is used by django.utils.cache.patch_cache_control
if there are no Cache-Control headers returns and empty dict
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
if response.has_header(header):
hd = dict([dictitem(el) for el in cc_delim_re.split(response[header])])
else:
hd= {}
return hd | python | def get_header_dict(response, header):
""" returns a dictionary of the cache control headers
the same as is used by django.utils.cache.patch_cache_control
if there are no Cache-Control headers returns and empty dict
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
if response.has_header(header):
hd = dict([dictitem(el) for el in cc_delim_re.split(response[header])])
else:
hd= {}
return hd | ['def', 'get_header_dict', '(', 'response', ',', 'header', ')', ':', 'def', 'dictitem', '(', 's', ')', ':', 't', '=', 's', '.', 'split', '(', "'='", ',', '1', ')', 'if', 'len', '(', 't', ')', '>', '1', ':', 'return', '(', 't', '[', '0', ']', '.', 'lower', '(', ')', ',', 't', '[', '1', ']', ')', 'else', ':', 'return', '(', 't', '[', '0', ']', '.', 'lower', '(', ')', ',', 'True', ')', 'if', 'response', '.', 'has_header', '(', 'header', ')', ':', 'hd', '=', 'dict', '(', '[', 'dictitem', '(', 'el', ')', 'for', 'el', 'in', 'cc_delim_re', '.', 'split', '(', 'response', '[', 'header', ']', ')', ']', ')', 'else', ':', 'hd', '=', '{', '}', 'return', 'hd'] | returns a dictionary of the cache control headers
the same as is used by django.utils.cache.patch_cache_control
if there are no Cache-Control headers returns and empty dict | ['returns', 'a', 'dictionary', 'of', 'the', 'cache', 'control', 'headers', 'the', 'same', 'as', 'is', 'used', 'by', 'django', '.', 'utils', '.', 'cache', '.', 'patch_cache_control', 'if', 'there', 'are', 'no', 'Cache', '-', 'Control', 'headers', 'returns', 'and', 'empty', 'dict'] | train | https://github.com/ironfroggy/django-better-cache/blob/5350e8c646cef1c1ca74eab176f856ddd9eaf5c3/bettercache/utils.py#L161-L177 |
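A small sketch of what the helper above returns for a typical Cache-Control header, assuming a Django HttpResponse.
from django.http import HttpResponse

response = HttpResponse("ok")
response["Cache-Control"] = "max-age=600, public"

print(get_header_dict(response, "Cache-Control"))   # {'max-age': '600', 'public': True}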
8,949 | fedora-infra/fedmsg | fedmsg/consumers/__init__.py | FedmsgConsumer._backlog | def _backlog(self, data):
"""Find all the datagrepper messages between 'then' and 'now'.
Put those on our work queue.
Should be called in a thread so as not to block the hub at startup.
"""
try:
data = json.loads(data)
except ValueError as e:
self.log.info("Status contents are %r" % data)
self.log.exception(e)
self.log.info("Skipping backlog retrieval.")
return
last = data['message']['body']
if isinstance(last, str):
last = json.loads(last)
then = last['timestamp']
now = int(time.time())
retrieved = 0
for message in self.get_datagrepper_results(then, now):
# Take the messages from datagrepper and remove any keys that were
# artificially added to the message. The presence of these would
# otherwise cause message crypto validation to fail.
message = fedmsg.crypto.utils.fix_datagrepper_message(message)
if message['msg_id'] != last['msg_id']:
retrieved = retrieved + 1
self.incoming.put(dict(body=message, topic=message['topic']))
else:
self.log.warning("Already seen %r; Skipping." % last['msg_id'])
self.log.info("Retrieved %i messages from datagrepper." % retrieved) | python | def _backlog(self, data):
"""Find all the datagrepper messages between 'then' and 'now'.
Put those on our work queue.
Should be called in a thread so as not to block the hub at startup.
"""
try:
data = json.loads(data)
except ValueError as e:
self.log.info("Status contents are %r" % data)
self.log.exception(e)
self.log.info("Skipping backlog retrieval.")
return
last = data['message']['body']
if isinstance(last, str):
last = json.loads(last)
then = last['timestamp']
now = int(time.time())
retrieved = 0
for message in self.get_datagrepper_results(then, now):
# Take the messages from datagrepper and remove any keys that were
# artificially added to the message. The presence of these would
# otherwise cause message crypto validation to fail.
message = fedmsg.crypto.utils.fix_datagrepper_message(message)
if message['msg_id'] != last['msg_id']:
retrieved = retrieved + 1
self.incoming.put(dict(body=message, topic=message['topic']))
else:
self.log.warning("Already seen %r; Skipping." % last['msg_id'])
self.log.info("Retrieved %i messages from datagrepper." % retrieved) | ['def', '_backlog', '(', 'self', ',', 'data', ')', ':', 'try', ':', 'data', '=', 'json', '.', 'loads', '(', 'data', ')', 'except', 'ValueError', 'as', 'e', ':', 'self', '.', 'log', '.', 'info', '(', '"Status contents are %r"', '%', 'data', ')', 'self', '.', 'log', '.', 'exception', '(', 'e', ')', 'self', '.', 'log', '.', 'info', '(', '"Skipping backlog retrieval."', ')', 'return', 'last', '=', 'data', '[', "'message'", ']', '[', "'body'", ']', 'if', 'isinstance', '(', 'last', ',', 'str', ')', ':', 'last', '=', 'json', '.', 'loads', '(', 'last', ')', 'then', '=', 'last', '[', "'timestamp'", ']', 'now', '=', 'int', '(', 'time', '.', 'time', '(', ')', ')', 'retrieved', '=', '0', 'for', 'message', 'in', 'self', '.', 'get_datagrepper_results', '(', 'then', ',', 'now', ')', ':', '# Take the messages from datagrepper and remove any keys that were', '# artificially added to the message. The presence of these would', '# otherwise cause message crypto validation to fail.', 'message', '=', 'fedmsg', '.', 'crypto', '.', 'utils', '.', 'fix_datagrepper_message', '(', 'message', ')', 'if', 'message', '[', "'msg_id'", ']', '!=', 'last', '[', "'msg_id'", ']', ':', 'retrieved', '=', 'retrieved', '+', '1', 'self', '.', 'incoming', '.', 'put', '(', 'dict', '(', 'body', '=', 'message', ',', 'topic', '=', 'message', '[', "'topic'", ']', ')', ')', 'else', ':', 'self', '.', 'log', '.', 'warning', '(', '"Already seen %r; Skipping."', '%', 'last', '[', "'msg_id'", ']', ')', 'self', '.', 'log', '.', 'info', '(', '"Retrieved %i messages from datagrepper."', '%', 'retrieved', ')'] | Find all the datagrepper messages between 'then' and 'now'.
Put those on our work queue.
Should be called in a thread so as not to block the hub at startup. | ['Find', 'all', 'the', 'datagrepper', 'messages', 'between', 'then', 'and', 'now', '.'] | train | https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/consumers/__init__.py#L161-L197 |
8,950 | huseyin/color | color/color.py | colorize | def colorize(bg, base, fg, *text):
""" colorize(bg, base, fg, *text)
"""
# All argument types must be str.
rtext = [str(f) for f in text]
return COLORIZE_FORMAT.format(
_to_int(bg), _to_int(base), _to_int(fg), ''.join(rtext)
) | python | def colorize(bg, base, fg, *text):
""" colorize(bg, base, fg, *text)
"""
# All argument types must be str.
rtext = [str(f) for f in text]
return COLORIZE_FORMAT.format(
_to_int(bg), _to_int(base), _to_int(fg), ''.join(rtext)
) | ['def', 'colorize', '(', 'bg', ',', 'base', ',', 'fg', ',', '*', 'text', ')', ':', '# All argument types must be str.', 'rtext', '=', '[', 'str', '(', 'f', ')', 'for', 'f', 'in', 'text', ']', 'return', 'COLORIZE_FORMAT', '.', 'format', '(', '_to_int', '(', 'bg', ')', ',', '_to_int', '(', 'base', ')', ',', '_to_int', '(', 'fg', ')', ',', "''", '.', 'join', '(', 'rtext', ')', ')'] | colorize(bg, base, fg, *text) | ['colorize', '(', 'bg', 'base', 'fg', '*', 'text', ')'] | train | https://github.com/huseyin/color/blob/00d1e38f0f0cf9a94ad6c65fa21590984a575a05/color/color.py#L53-L61 |
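A hedged usage sketch of colorize(); the numeric codes below are assumed ANSI-style values, since the module's own constants are not shown in this record.
# 40 = black background, 1 = bold, 37 = white foreground (assumed ANSI codes)
print(colorize(40, 1, 37, "hello ", "world"))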
8,951 | brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py | brocade_arp.get_arp_output_arp_entry_interface_name | def get_arp_output_arp_entry_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
interface_name = ET.SubElement(arp_entry, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def get_arp_output_arp_entry_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
interface_name = ET.SubElement(arp_entry, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'get_arp_output_arp_entry_interface_name', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_arp', '=', 'ET', '.', 'Element', '(', '"get_arp"', ')', 'config', '=', 'get_arp', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_arp', ',', '"output"', ')', 'arp_entry', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"arp-entry"', ')', 'ip_address_key', '=', 'ET', '.', 'SubElement', '(', 'arp_entry', ',', '"ip-address"', ')', 'ip_address_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'ip_address'", ')', 'interface_name', '=', 'ET', '.', 'SubElement', '(', 'arp_entry', ',', '"interface-name"', ')', 'interface_name', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'interface_name'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py#L279-L293 |
8,952 | saltstack/salt | salt/cli/daemons.py | DaemonsMixin.verify_hash_type | def verify_hash_type(self):
'''
Verify and display a nag-message to the log if vulnerable hash-type is used.
:return:
'''
if self.config['hash_type'].lower() in ['md5', 'sha1']:
log.warning(
'IMPORTANT: Do not use %s hashing algorithm! Please set '
'"hash_type" to sha256 in Salt %s config!',
self.config['hash_type'], self.__class__.__name__
) | python | def verify_hash_type(self):
'''
Verify and display a nag-message to the log if vulnerable hash-type is used.
:return:
'''
if self.config['hash_type'].lower() in ['md5', 'sha1']:
log.warning(
'IMPORTANT: Do not use %s hashing algorithm! Please set '
'"hash_type" to sha256 in Salt %s config!',
self.config['hash_type'], self.__class__.__name__
) | ['def', 'verify_hash_type', '(', 'self', ')', ':', 'if', 'self', '.', 'config', '[', "'hash_type'", ']', '.', 'lower', '(', ')', 'in', '[', "'md5'", ',', "'sha1'", ']', ':', 'log', '.', 'warning', '(', "'IMPORTANT: Do not use %s hashing algorithm! Please set '", '\'"hash_type" to sha256 in Salt %s config!\'', ',', 'self', '.', 'config', '[', "'hash_type'", ']', ',', 'self', '.', '__class__', '.', '__name__', ')'] | Verify and display a nag-messsage to the log if vulnerable hash-type is used.
:return: | ['Verify', 'and', 'display', 'a', 'nag', '-', 'messsage', 'to', 'the', 'log', 'if', 'vulnerable', 'hash', '-', 'type', 'is', 'used', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/daemons.py#L65-L76 |
8,953 | lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.set_session | def set_session(self, autocommit=None, readonly=None):
"""Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connection to read-only mode.
"""
props = {}
if autocommit is not None:
props['autoCommit'] = bool(autocommit)
if readonly is not None:
props['readOnly'] = bool(readonly)
props = self._client.connection_sync(self._id, props)
self._autocommit = props.auto_commit
self._readonly = props.read_only
self._transactionisolation = props.transaction_isolation | python | def set_session(self, autocommit=None, readonly=None):
"""Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connection to read-only mode.
"""
props = {}
if autocommit is not None:
props['autoCommit'] = bool(autocommit)
if readonly is not None:
props['readOnly'] = bool(readonly)
props = self._client.connection_sync(self._id, props)
self._autocommit = props.auto_commit
self._readonly = props.read_only
self._transactionisolation = props.transaction_isolation | ['def', 'set_session', '(', 'self', ',', 'autocommit', '=', 'None', ',', 'readonly', '=', 'None', ')', ':', 'props', '=', '{', '}', 'if', 'autocommit', 'is', 'not', 'None', ':', 'props', '[', "'autoCommit'", ']', '=', 'bool', '(', 'autocommit', ')', 'if', 'readonly', 'is', 'not', 'None', ':', 'props', '[', "'readOnly'", ']', '=', 'bool', '(', 'readonly', ')', 'props', '=', 'self', '.', '_client', '.', 'connection_sync', '(', 'self', '.', '_id', ',', 'props', ')', 'self', '.', '_autocommit', '=', 'props', '.', 'auto_commit', 'self', '.', '_readonly', '=', 'props', '.', 'read_only', 'self', '.', '_transactionisolation', '=', 'props', '.', 'transaction_isolation'] | Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connection to read-only mode. | ['Sets', 'one', 'or', 'more', 'parameters', 'in', 'the', 'current', 'connection', '.'] | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L128-L147 |
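Typical phoenixdb usage around set_session(); the server URL is a placeholder.
import phoenixdb

conn = phoenixdb.connect("http://localhost:8765/", autocommit=True)   # placeholder URL
conn.set_session(autocommit=True, readonly=False)
cursor = conn.cursor()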
8,954 | yeasy/hyperledger-py | hyperledger/api/network.py | NetworkApiMixin.peer_list | def peer_list(self):
""" GET /network/peers
Use the Network APIs to retrieve information about the network of peer
nodes comprising the blockchain network.
```golang
message PeersMessage {
repeated PeerEndpoint peers = 1;
}
message PeerEndpoint {
PeerID ID = 1;
string address = 2;
enum Type {
UNDEFINED = 0;
VALIDATOR = 1;
NON_VALIDATOR = 2;
}
Type type = 3;
bytes pkiID = 4;
}
message PeerID {
string name = 1;
}
```
:return: json body of the network peers info
"""
res = self._get(self._url("/network/peers"))
return self._result(res, True) | python | def peer_list(self):
""" GET /network/peers
Use the Network APIs to retrieve information about the network of peer
nodes comprising the blockchain network.
```golang
message PeersMessage {
repeated PeerEndpoint peers = 1;
}
message PeerEndpoint {
PeerID ID = 1;
string address = 2;
enum Type {
UNDEFINED = 0;
VALIDATOR = 1;
NON_VALIDATOR = 2;
}
Type type = 3;
bytes pkiID = 4;
}
message PeerID {
string name = 1;
}
```
:return: json body of the network peers info
"""
res = self._get(self._url("/network/peers"))
return self._result(res, True) | ['def', 'peer_list', '(', 'self', ')', ':', 'res', '=', 'self', '.', '_get', '(', 'self', '.', '_url', '(', '"/network/peers"', ')', ')', 'return', 'self', '.', '_result', '(', 'res', ',', 'True', ')'] | GET /network/peers
Use the Network APIs to retrieve information about the network of peer
nodes comprising the blockchain network.
```golang
message PeersMessage {
repeated PeerEndpoint peers = 1;
}
message PeerEndpoint {
PeerID ID = 1;
string address = 2;
enum Type {
UNDEFINED = 0;
VALIDATOR = 1;
NON_VALIDATOR = 2;
}
Type type = 3;
bytes pkiID = 4;
}
message PeerID {
string name = 1;
}
```
:return: json body of the network peers info | ['GET', '/', 'network', '/', 'peers'] | train | https://github.com/yeasy/hyperledger-py/blob/f24e9cc409b50628b911950466786be6fe74f09f/hyperledger/api/network.py#L16-L46 |
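A hedged sketch of calling the endpoint through the hyperledger-py client; the REST address is a placeholder and the field names follow the protobuf message shown above.
from hyperledger.client import Client

client = Client(base_url="http://127.0.0.1:7050")   # placeholder REST endpoint
peers = client.peer_list()
for peer in peers.get("peers", []):
    print(peer["ID"]["name"], peer["address"], peer["type"])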
8,955 | kodexlab/reliure | reliure/offline.py | run | def run(pipeline, input_gen, options={}):
""" Run a pipeline over a input generator
>>> # if we have a simple component
>>> from reliure.pipeline import Composable
>>> @Composable
... def print_each(letters):
... for letter in letters:
... print(letter)
... yield letter
>>> # that we want to run over a given input:
>>> input = "abcde"
>>> # we just have to do :
>>> res = run(print_each, input)
a
b
c
d
e
it is also possible to run any reliure pipeline this way:
>>> import string
>>> pipeline = Composable(lambda letters: (l.upper() for l in letters)) | print_each
>>> res = run(pipeline, input)
A
B
C
D
E
"""
logger = logging.getLogger("reliure.run")
t0 = time()
res = [output for output in pipeline(input_gen, **options)]
logger.info("Pipeline executed in %1.3f sec" % (time() - t0))
return res | python | def run(pipeline, input_gen, options={}):
""" Run a pipeline over a input generator
>>> # if we have a simple component
>>> from reliure.pipeline import Composable
>>> @Composable
... def print_each(letters):
... for letter in letters:
... print(letter)
... yield letter
>>> # that we want to run over a given input:
>>> input = "abcde"
>>> # we just have to do :
>>> res = run(print_each, input)
a
b
c
d
e
it is also possible to run any reliure pipeline this way:
>>> import string
>>> pipeline = Composable(lambda letters: (l.upper() for l in letters)) | print_each
>>> res = run(pipeline, input)
A
B
C
D
E
"""
logger = logging.getLogger("reliure.run")
t0 = time()
res = [output for output in pipeline(input_gen, **options)]
logger.info("Pipeline executed in %1.3f sec" % (time() - t0))
return res | ['def', 'run', '(', 'pipeline', ',', 'input_gen', ',', 'options', '=', '{', '}', ')', ':', 'logger', '=', 'logging', '.', 'getLogger', '(', '"reliure.run"', ')', 't0', '=', 'time', '(', ')', 'res', '=', '[', 'output', 'for', 'output', 'in', 'pipeline', '(', 'input_gen', ',', '*', '*', 'options', ')', ']', 'logger', '.', 'info', '(', '"Pipeline executed in %1.3f sec"', '%', '(', 'time', '(', ')', '-', 't0', ')', ')', 'return', 'res'] | Run a pipeline over a input generator
>>> # if we have a simple component
>>> from reliure.pipeline import Composable
>>> @Composable
... def print_each(letters):
... for letter in letters:
... print(letter)
... yield letter
>>> # that we want to run over a given input:
>>> input = "abcde"
>>> # we just have to do :
>>> res = run(print_each, input)
a
b
c
d
e
it is also possible to run any reliure pipeline this way:
>>> import string
>>> pipeline = Composable(lambda letters: (l.upper() for l in letters)) | print_each
>>> res = run(pipeline, input)
A
B
C
D
E | ['Run', 'a', 'pipeline', 'over', 'a', 'input', 'generator'] | train | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/offline.py#L11-L45 |
8,956 | zerok/flask-compass | flaskext/compass.py | CompassConfig.compile | def compile(self, compass):
"""
Calls the compass script specified in the compass extension
with the paths provided by the config.rb.
"""
try:
output = subprocess.check_output(
[compass.compass_path, 'compile', '-q'],
cwd=self.base_dir)
os.utime(self.dest, None)
compass.log.debug(output)
except OSError, e:
if e.errno == errno.ENOENT:
compass.log.error("Compass could not be found in the PATH " +
"and/or in the COMPASS_PATH setting! " +
"Disabling compilation.")
compass.disabled = True
else:
raise e | python | def compile(self, compass):
"""
Calls the compass script specified in the compass extension
with the paths provided by the config.rb.
"""
try:
output = subprocess.check_output(
[compass.compass_path, 'compile', '-q'],
cwd=self.base_dir)
os.utime(self.dest, None)
compass.log.debug(output)
except OSError, e:
if e.errno == errno.ENOENT:
compass.log.error("Compass could not be found in the PATH " +
"and/or in the COMPASS_PATH setting! " +
"Disabling compilation.")
compass.disabled = True
else:
raise e | ['def', 'compile', '(', 'self', ',', 'compass', ')', ':', 'try', ':', 'output', '=', 'subprocess', '.', 'check_output', '(', '[', 'compass', '.', 'compass_path', ',', "'compile'", ',', "'-q'", ']', ',', 'cwd', '=', 'self', '.', 'base_dir', ')', 'os', '.', 'utime', '(', 'self', '.', 'dest', ',', 'None', ')', 'compass', '.', 'log', '.', 'debug', '(', 'output', ')', 'except', 'OSError', ',', 'e', ':', 'if', 'e', '.', 'errno', '==', 'errno', '.', 'ENOENT', ':', 'compass', '.', 'log', '.', 'error', '(', '"Compass could not be found in the PATH "', '+', '"and/or in the COMPASS_PATH setting! "', '+', '"Disabling compilation."', ')', 'compass', '.', 'disabled', '=', 'True', 'else', ':', 'raise', 'e'] | Calls the compass script specified in the compass extension
with the paths provided by the config.rb. | ['Calls', 'the', 'compass', 'script', 'specified', 'in', 'the', 'compass', 'extension', 'with', 'the', 'paths', 'provided', 'by', 'the', 'config', '.', 'rb', '.'] | train | https://github.com/zerok/flask-compass/blob/633ef4bcbfbf0882a337d84f776b3c090ef5f464/flaskext/compass.py#L179-L197 |
8,957 | SmartTeleMax/iktomi | iktomi/web/url_templates.py | UrlTemplate.match | def match(self, path, **kw):
'''
path - str (urlencoded)
'''
m = self._pattern.match(path)
if m:
kwargs = m.groupdict()
# convert params
for url_arg_name, value_urlencoded in kwargs.items():
conv_obj = self._url_params[url_arg_name]
unicode_value = unquote(value_urlencoded)
if isinstance(unicode_value, six.binary_type):
# XXX ??
unicode_value = unicode_value.decode('utf-8', 'replace')
try:
kwargs[url_arg_name] = conv_obj.to_python(unicode_value, **kw)
except ConvertError as err:
logger.debug('ConvertError in parameter "%s" '
'by %r, value "%s"',
url_arg_name,
err.converter.__class__,
err.value)
return None, {}
return m.group(), kwargs
return None, {} | python | def match(self, path, **kw):
'''
path - str (urlencoded)
'''
m = self._pattern.match(path)
if m:
kwargs = m.groupdict()
# convert params
for url_arg_name, value_urlencoded in kwargs.items():
conv_obj = self._url_params[url_arg_name]
unicode_value = unquote(value_urlencoded)
if isinstance(unicode_value, six.binary_type):
# XXX ??
unicode_value = unicode_value.decode('utf-8', 'replace')
try:
kwargs[url_arg_name] = conv_obj.to_python(unicode_value, **kw)
except ConvertError as err:
logger.debug('ConvertError in parameter "%s" '
'by %r, value "%s"',
url_arg_name,
err.converter.__class__,
err.value)
return None, {}
return m.group(), kwargs
return None, {} | ['def', 'match', '(', 'self', ',', 'path', ',', '*', '*', 'kw', ')', ':', 'm', '=', 'self', '.', '_pattern', '.', 'match', '(', 'path', ')', 'if', 'm', ':', 'kwargs', '=', 'm', '.', 'groupdict', '(', ')', '# convert params', 'for', 'url_arg_name', ',', 'value_urlencoded', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'conv_obj', '=', 'self', '.', '_url_params', '[', 'url_arg_name', ']', 'unicode_value', '=', 'unquote', '(', 'value_urlencoded', ')', 'if', 'isinstance', '(', 'unicode_value', ',', 'six', '.', 'binary_type', ')', ':', '# XXX ??', 'unicode_value', '=', 'unicode_value', '.', 'decode', '(', "'utf-8'", ',', "'replace'", ')', 'try', ':', 'kwargs', '[', 'url_arg_name', ']', '=', 'conv_obj', '.', 'to_python', '(', 'unicode_value', ',', '*', '*', 'kw', ')', 'except', 'ConvertError', 'as', 'err', ':', 'logger', '.', 'debug', '(', '\'ConvertError in parameter "%s" \'', '\'by %r, value "%s"\'', ',', 'url_arg_name', ',', 'err', '.', 'converter', '.', '__class__', ',', 'err', '.', 'value', ')', 'return', 'None', ',', '{', '}', 'return', 'm', '.', 'group', '(', ')', ',', 'kwargs', 'return', 'None', ',', '{', '}'] | path - str (urlencoded) | ['path', '-', 'str', '(', 'urlencoded', ')'] | train | https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/web/url_templates.py#L111-L135 |
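The UrlTemplate.match row above pairs named regex groups with per-parameter converters and treats a failed conversion as no match. A standalone sketch of that idea with plain re; the pattern and converter table below are invented for illustration and are not iktomi's API:

import re
from urllib.parse import unquote

pattern = re.compile(r"/users/(?P<user_id>[^/]+)/")
converters = {"user_id": int}

def match(path):
    m = pattern.match(path)
    if not m:
        return None, {}
    kwargs = m.groupdict()
    for name, raw in kwargs.items():
        try:
            # Unquote the captured text, then convert it; a failed
            # conversion means the path does not really match.
            kwargs[name] = converters[name](unquote(raw))
        except ValueError:
            return None, {}
    return m.group(), kwargs

print(match("/users/42/profile"))   # ('/users/42/', {'user_id': 42})
print(match("/users/abc/profile"))  # (None, {})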
8,958 | pypa/pipenv | pipenv/patched/notpip/_vendor/html5lib/treebuilders/etree_lxml.py | tostring | def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv) | python | def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv) | ['def', 'tostring', '(', 'element', ')', ':', 'rv', '=', '[', ']', 'def', 'serializeElement', '(', 'element', ')', ':', 'if', 'not', 'hasattr', '(', 'element', ',', '"tag"', ')', ':', 'if', 'element', '.', 'docinfo', '.', 'internalDTD', ':', 'if', 'element', '.', 'docinfo', '.', 'doctype', ':', 'dtd_str', '=', 'element', '.', 'docinfo', '.', 'doctype', 'else', ':', 'dtd_str', '=', '"<!DOCTYPE %s>"', '%', 'element', '.', 'docinfo', '.', 'root_name', 'rv', '.', 'append', '(', 'dtd_str', ')', 'serializeElement', '(', 'element', '.', 'getroot', '(', ')', ')', 'elif', 'element', '.', 'tag', '==', 'comment_type', ':', 'rv', '.', 'append', '(', '"<!--%s-->"', '%', '(', 'element', '.', 'text', ',', ')', ')', 'else', ':', '# This is assumed to be an ordinary element', 'if', 'not', 'element', '.', 'attrib', ':', 'rv', '.', 'append', '(', '"<%s>"', '%', '(', 'element', '.', 'tag', ',', ')', ')', 'else', ':', 'attr', '=', '" "', '.', 'join', '(', '[', '"%s=\\"%s\\""', '%', '(', 'name', ',', 'value', ')', 'for', 'name', ',', 'value', 'in', 'element', '.', 'attrib', '.', 'items', '(', ')', ']', ')', 'rv', '.', 'append', '(', '"<%s %s>"', '%', '(', 'element', '.', 'tag', ',', 'attr', ')', ')', 'if', 'element', '.', 'text', ':', 'rv', '.', 'append', '(', 'element', '.', 'text', ')', 'for', 'child', 'in', 'element', ':', 'serializeElement', '(', 'child', ')', 'rv', '.', 'append', '(', '"</%s>"', '%', '(', 'element', '.', 'tag', ',', ')', ')', 'if', 'hasattr', '(', 'element', ',', '"tail"', ')', 'and', 'element', '.', 'tail', ':', 'rv', '.', 'append', '(', 'element', '.', 'tail', ')', 'serializeElement', '(', 'element', ')', 'return', '""', '.', 'join', '(', 'rv', ')'] | Serialize an element and its child nodes to a string | ['Serialize', 'an', 'element', 'and', 'its', 'child', 'nodes', 'to', 'a', 'string'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/html5lib/treebuilders/etree_lxml.py#L134-L172 |
8,959 | davgeo/clear | clear/util.py | UserAcceptance | def UserAcceptance(
matchList,
recursiveLookup = True,
promptComment = None,
promptOnly = False,
xStrOverride = "to skip this selection"
):
"""
Prompt user to select a entry from a given match list or to enter a new
string to look up. If the match list is empty user must enter a new string
or exit.
Parameters
----------
matchList : list
A list of entries which the user can select a valid match from.
recursiveLookup : boolean [optional: default = True]
Allow user to enter a new string to look up.
promptComment : string [optional: default = None]
Add an additional comment on the end of the prompt message.
promptOnly : boolean [optional: default = False]
Set to true if match list is expected to be empty. In which case
the presence of an empty match list will not be mentioned and user
will be expected to enter a new response to look up.
xStrOverride : string [optional: default = "to skip this selection"]
Override the string for 'x' response. This can be used if
the behaviour of the 'x' response is changed.
Returns
----------
string or None
Either a entry from matchList, another valid response or a new
string to look up. If match list is empty and recursive lookup is
disabled or if the user response is 'x' this will return None.
"""
matchString = ', '.join(matchList)
if len(matchList) == 1:
goodlogging.Log.Info("UTIL", "Match found: {0}".format(matchString))
prompt = "Enter 'y' to accept this match or e"
elif len(matchList) > 1:
goodlogging.Log.Info("UTIL", "Multiple possible matches found: {0}".format(matchString))
prompt = "Enter correct match from list or e"
else:
if promptOnly is False:
goodlogging.Log.Info("UTIL", "No match found")
prompt = "E"
if not recursiveLookup:
return None
if recursiveLookup:
prompt = prompt + "nter a different string to look up or e"
prompt = prompt + "nter 'x' {0} or enter 'exit' to quit this program".format(xStrOverride)
if promptComment is None:
prompt = prompt + ": "
else:
prompt = prompt + " ({0}): ".format(promptComment)
while(1):
response = goodlogging.Log.Input('UTIL', prompt)
if response.lower() == 'exit':
goodlogging.Log.Fatal("UTIL", "Program terminated by user 'exit'")
if response.lower() == 'x':
return None
elif response.lower() == 'y' and len(matchList) == 1:
return matchList[0]
elif len(matchList) > 1:
for match in matchList:
if response.lower() == match.lower():
return match
if recursiveLookup:
return response | python | def UserAcceptance(
matchList,
recursiveLookup = True,
promptComment = None,
promptOnly = False,
xStrOverride = "to skip this selection"
):
"""
Prompt user to select a entry from a given match list or to enter a new
string to look up. If the match list is empty user must enter a new string
or exit.
Parameters
----------
matchList : list
A list of entries which the user can select a valid match from.
recursiveLookup : boolean [optional: default = True]
Allow user to enter a new string to look up.
promptComment : string [optional: default = None]
Add an additional comment on the end of the prompt message.
promptOnly : boolean [optional: default = False]
Set to true if match list is expected to be empty. In which case
the presence of an empty match list will not be mentioned and user
will be expected to enter a new response to look up.
xStrOverride : string [optional: default = "to skip this selection"]
Override the string for 'x' response. This can be used if
the behaviour of the 'x' response is changed.
Returns
----------
string or None
Either a entry from matchList, another valid response or a new
string to look up. If match list is empty and recursive lookup is
disabled or if the user response is 'x' this will return None.
"""
matchString = ', '.join(matchList)
if len(matchList) == 1:
goodlogging.Log.Info("UTIL", "Match found: {0}".format(matchString))
prompt = "Enter 'y' to accept this match or e"
elif len(matchList) > 1:
goodlogging.Log.Info("UTIL", "Multiple possible matches found: {0}".format(matchString))
prompt = "Enter correct match from list or e"
else:
if promptOnly is False:
goodlogging.Log.Info("UTIL", "No match found")
prompt = "E"
if not recursiveLookup:
return None
if recursiveLookup:
prompt = prompt + "nter a different string to look up or e"
prompt = prompt + "nter 'x' {0} or enter 'exit' to quit this program".format(xStrOverride)
if promptComment is None:
prompt = prompt + ": "
else:
prompt = prompt + " ({0}): ".format(promptComment)
while(1):
response = goodlogging.Log.Input('UTIL', prompt)
if response.lower() == 'exit':
goodlogging.Log.Fatal("UTIL", "Program terminated by user 'exit'")
if response.lower() == 'x':
return None
elif response.lower() == 'y' and len(matchList) == 1:
return matchList[0]
elif len(matchList) > 1:
for match in matchList:
if response.lower() == match.lower():
return match
if recursiveLookup:
return response | ['def', 'UserAcceptance', '(', 'matchList', ',', 'recursiveLookup', '=', 'True', ',', 'promptComment', '=', 'None', ',', 'promptOnly', '=', 'False', ',', 'xStrOverride', '=', '"to skip this selection"', ')', ':', 'matchString', '=', "', '", '.', 'join', '(', 'matchList', ')', 'if', 'len', '(', 'matchList', ')', '==', '1', ':', 'goodlogging', '.', 'Log', '.', 'Info', '(', '"UTIL"', ',', '"Match found: {0}"', '.', 'format', '(', 'matchString', ')', ')', 'prompt', '=', '"Enter \'y\' to accept this match or e"', 'elif', 'len', '(', 'matchList', ')', '>', '1', ':', 'goodlogging', '.', 'Log', '.', 'Info', '(', '"UTIL"', ',', '"Multiple possible matches found: {0}"', '.', 'format', '(', 'matchString', ')', ')', 'prompt', '=', '"Enter correct match from list or e"', 'else', ':', 'if', 'promptOnly', 'is', 'False', ':', 'goodlogging', '.', 'Log', '.', 'Info', '(', '"UTIL"', ',', '"No match found"', ')', 'prompt', '=', '"E"', 'if', 'not', 'recursiveLookup', ':', 'return', 'None', 'if', 'recursiveLookup', ':', 'prompt', '=', 'prompt', '+', '"nter a different string to look up or e"', 'prompt', '=', 'prompt', '+', '"nter \'x\' {0} or enter \'exit\' to quit this program"', '.', 'format', '(', 'xStrOverride', ')', 'if', 'promptComment', 'is', 'None', ':', 'prompt', '=', 'prompt', '+', '": "', 'else', ':', 'prompt', '=', 'prompt', '+', '" ({0}): "', '.', 'format', '(', 'promptComment', ')', 'while', '(', '1', ')', ':', 'response', '=', 'goodlogging', '.', 'Log', '.', 'Input', '(', "'UTIL'", ',', 'prompt', ')', 'if', 'response', '.', 'lower', '(', ')', '==', "'exit'", ':', 'goodlogging', '.', 'Log', '.', 'Fatal', '(', '"UTIL"', ',', '"Program terminated by user \'exit\'"', ')', 'if', 'response', '.', 'lower', '(', ')', '==', "'x'", ':', 'return', 'None', 'elif', 'response', '.', 'lower', '(', ')', '==', "'y'", 'and', 'len', '(', 'matchList', ')', '==', '1', ':', 'return', 'matchList', '[', '0', ']', 'elif', 'len', '(', 'matchList', ')', '>', '1', ':', 'for', 'match', 'in', 'matchList', ':', 'if', 'response', '.', 'lower', '(', ')', '==', 'match', '.', 'lower', '(', ')', ':', 'return', 'match', 'if', 'recursiveLookup', ':', 'return', 'response'] | Prompt user to select a entry from a given match list or to enter a new
string to look up. If the match list is empty user must enter a new string
or exit.
Parameters
----------
matchList : list
A list of entries which the user can select a valid match from.
recursiveLookup : boolean [optional: default = True]
Allow user to enter a new string to look up.
promptComment : string [optional: default = None]
Add an additional comment on the end of the prompt message.
promptOnly : boolean [optional: default = False]
Set to true if match list is expected to be empty. In which case
the presence of an empty match list will not be mentioned and user
will be expected to enter a new response to look up.
xStrOverride : string [optional: default = "to skip this selection"]
Override the string for 'x' response. This can be used if
the behaviour of the 'x' response is changed.
Returns
----------
string or None
Either a entry from matchList, another valid response or a new
string to look up. If match list is empty and recursive lookup is
disabled or if the user response is 'x' this will return None. | ['Prompt', 'user', 'to', 'select', 'a', 'entry', 'from', 'a', 'given', 'match', 'list', 'or', 'to', 'enter', 'a', 'new', 'string', 'to', 'look', 'up', '.', 'If', 'the', 'match', 'list', 'is', 'empty', 'user', 'must', 'enter', 'a', 'new', 'string', 'or', 'exit', '.'] | train | https://github.com/davgeo/clear/blob/5ec85d27efd28afddfcd4c3f44df17f0115a77aa/clear/util.py#L162-L240 |
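The UserAcceptance row above documents an interactive accept/skip/exit prompt loop. A condensed sketch with the goodlogging calls swapped for builtin input(), print() and sys.exit(); this is an approximation of the documented behaviour, not a drop-in replacement:

import sys

def user_acceptance(match_list, recursive_lookup=True):
    # Build a prompt that reflects how many candidate matches there are.
    if len(match_list) == 1:
        prompt = "Enter 'y' to accept '%s', a new string, 'x' to skip or 'exit': " % match_list[0]
    elif match_list:
        prompt = "Enter one of [%s], a new string, 'x' to skip or 'exit': " % ", ".join(match_list)
    else:
        prompt = "Enter a string to look up, 'x' to skip or 'exit': "
    while True:
        response = input(prompt)
        if response.lower() == "exit":
            sys.exit("Program terminated by user 'exit'")
        if response.lower() == "x":
            return None
        if response.lower() == "y" and len(match_list) == 1:
            return match_list[0]
        for match in match_list:
            if response.lower() == match.lower():
                return match
        if recursive_lookup:
            return response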
8,960 | ratt-ru/PyMORESANE | pymoresane/main.py | DataImage.moresane_by_scale | def moresane_by_scale(self, start_scale=1, stop_scale=20, subregion=None, sigma_level=4, loop_gain=0.1,
tolerance=0.75, accuracy=1e-6, major_loop_miter=100, minor_loop_miter=30, all_on_gpu=False,
decom_mode="ser", core_count=1, conv_device='cpu', conv_mode='linear', extraction_mode='cpu',
enforce_positivity=False, edge_suppression=False,
edge_offset=0, flux_threshold=0, neg_comp=False, edge_excl=0, int_excl=0):
"""
Extension of the MORESANE algorithm. This takes a scale-by-scale approach, attempting to remove all sources
at the lower scales before moving onto the higher ones. At each step the algorithm may return to previous
scales to remove the sources uncovered by the deconvolution.
INPUTS:
start_scale (default=1) The first scale which is to be considered.
stop_scale (default=20) The maximum scale which is to be considered. Optional.
subregion (default=None): Size, in pixels, of the central region to be analyzed and deconvolved.
sigma_level (default=4) Number of sigma at which thresholding is to be performed.
loop_gain (default=0.1): Loop gain for the deconvolution.
tolerance (default=0.75): Tolerance level for object extraction. Significant objects contain
wavelet coefficients greater than the tolerance multiplied by the
maximum wavelet coefficient in the scale under consideration.
accuracy (default=1e-6): Threshold on the standard deviation of the residual noise. Exit main
loop when this threshold is reached.
major_loop_miter (default=100): Maximum number of iterations allowed in the major loop. Exit
condition.
minor_loop_miter (default=30): Maximum number of iterations allowed in the minor loop. Serves as an
exit condition when the SNR does not reach a maximum.
all_on_gpu (default=False): Boolean specifier to toggle all gpu modes on.
decom_mode (default='ser'): Specifier for decomposition mode - serial, multiprocessing, or gpu.
core_count (default=1): In the event that multiprocessing, specifies the number of cores.
conv_device (default='cpu'): Specifier for device to be used - cpu or gpu.
conv_mode (default='linear'): Specifier for convolution mode - linear or circular.
extraction_mode (default='cpu'): Specifier for mode to be used - cpu or gpu.
enforce_positivity (default=False): Boolean specifier for whether or not a model must be strictly positive.
edge_suppression (default=False): Boolean specifier for whether or not the edges are to be suprressed.
edge_offset (default=0): Numeric value for an additional user-specified number of edge pixels
to be ignored. This is added to the minimum suppression.
OUTPUTS:
self.model (no default): Model extracted by the algorithm.
self.residual (no default): Residual signal after deconvolution.
"""
# The following preserves the dirty image as it will be changed on every iteration.
dirty_data = self.dirty_data
scale_count = start_scale
while not (self.complete):
logger.info("MORESANE at scale {}".format(scale_count))
self.moresane(subregion=subregion, scale_count=scale_count, sigma_level=sigma_level, loop_gain=loop_gain,
tolerance=tolerance, accuracy=accuracy, major_loop_miter=major_loop_miter,
minor_loop_miter=minor_loop_miter, all_on_gpu=all_on_gpu, decom_mode=decom_mode,
core_count=core_count, conv_device=conv_device, conv_mode=conv_mode,
extraction_mode=extraction_mode, enforce_positivity=enforce_positivity,
edge_suppression=edge_suppression, edge_offset=edge_offset,
flux_threshold=flux_threshold, neg_comp=neg_comp,
edge_excl=edge_excl, int_excl=int_excl)
self.dirty_data = self.residual
scale_count += 1
if (scale_count>(np.log2(self.dirty_data.shape[0]))-1):
logger.info("Maximum scale reached - finished.")
break
if (scale_count>stop_scale):
logger.info("Maximum scale reached - finished.")
break
# Restores the original dirty image.
self.dirty_data = dirty_data
self.complete = False | python | def moresane_by_scale(self, start_scale=1, stop_scale=20, subregion=None, sigma_level=4, loop_gain=0.1,
tolerance=0.75, accuracy=1e-6, major_loop_miter=100, minor_loop_miter=30, all_on_gpu=False,
decom_mode="ser", core_count=1, conv_device='cpu', conv_mode='linear', extraction_mode='cpu',
enforce_positivity=False, edge_suppression=False,
edge_offset=0, flux_threshold=0, neg_comp=False, edge_excl=0, int_excl=0):
"""
Extension of the MORESANE algorithm. This takes a scale-by-scale approach, attempting to remove all sources
at the lower scales before moving onto the higher ones. At each step the algorithm may return to previous
scales to remove the sources uncovered by the deconvolution.
INPUTS:
start_scale (default=1) The first scale which is to be considered.
stop_scale (default=20) The maximum scale which is to be considered. Optional.
subregion (default=None): Size, in pixels, of the central region to be analyzed and deconvolved.
sigma_level (default=4) Number of sigma at which thresholding is to be performed.
loop_gain (default=0.1): Loop gain for the deconvolution.
tolerance (default=0.75): Tolerance level for object extraction. Significant objects contain
wavelet coefficients greater than the tolerance multiplied by the
maximum wavelet coefficient in the scale under consideration.
accuracy (default=1e-6): Threshold on the standard deviation of the residual noise. Exit main
loop when this threshold is reached.
major_loop_miter (default=100): Maximum number of iterations allowed in the major loop. Exit
condition.
minor_loop_miter (default=30): Maximum number of iterations allowed in the minor loop. Serves as an
exit condition when the SNR does not reach a maximum.
all_on_gpu (default=False): Boolean specifier to toggle all gpu modes on.
decom_mode (default='ser'): Specifier for decomposition mode - serial, multiprocessing, or gpu.
core_count (default=1): In the event that multiprocessing, specifies the number of cores.
conv_device (default='cpu'): Specifier for device to be used - cpu or gpu.
conv_mode (default='linear'): Specifier for convolution mode - linear or circular.
extraction_mode (default='cpu'): Specifier for mode to be used - cpu or gpu.
enforce_positivity (default=False): Boolean specifier for whether or not a model must be strictly positive.
edge_suppression (default=False): Boolean specifier for whether or not the edges are to be suprressed.
edge_offset (default=0): Numeric value for an additional user-specified number of edge pixels
to be ignored. This is added to the minimum suppression.
OUTPUTS:
self.model (no default): Model extracted by the algorithm.
self.residual (no default): Residual signal after deconvolution.
"""
# The following preserves the dirty image as it will be changed on every iteration.
dirty_data = self.dirty_data
scale_count = start_scale
while not (self.complete):
logger.info("MORESANE at scale {}".format(scale_count))
self.moresane(subregion=subregion, scale_count=scale_count, sigma_level=sigma_level, loop_gain=loop_gain,
tolerance=tolerance, accuracy=accuracy, major_loop_miter=major_loop_miter,
minor_loop_miter=minor_loop_miter, all_on_gpu=all_on_gpu, decom_mode=decom_mode,
core_count=core_count, conv_device=conv_device, conv_mode=conv_mode,
extraction_mode=extraction_mode, enforce_positivity=enforce_positivity,
edge_suppression=edge_suppression, edge_offset=edge_offset,
flux_threshold=flux_threshold, neg_comp=neg_comp,
edge_excl=edge_excl, int_excl=int_excl)
self.dirty_data = self.residual
scale_count += 1
if (scale_count>(np.log2(self.dirty_data.shape[0]))-1):
logger.info("Maximum scale reached - finished.")
break
if (scale_count>stop_scale):
logger.info("Maximum scale reached - finished.")
break
# Restores the original dirty image.
self.dirty_data = dirty_data
self.complete = False | ['def', 'moresane_by_scale', '(', 'self', ',', 'start_scale', '=', '1', ',', 'stop_scale', '=', '20', ',', 'subregion', '=', 'None', ',', 'sigma_level', '=', '4', ',', 'loop_gain', '=', '0.1', ',', 'tolerance', '=', '0.75', ',', 'accuracy', '=', '1e-6', ',', 'major_loop_miter', '=', '100', ',', 'minor_loop_miter', '=', '30', ',', 'all_on_gpu', '=', 'False', ',', 'decom_mode', '=', '"ser"', ',', 'core_count', '=', '1', ',', 'conv_device', '=', "'cpu'", ',', 'conv_mode', '=', "'linear'", ',', 'extraction_mode', '=', "'cpu'", ',', 'enforce_positivity', '=', 'False', ',', 'edge_suppression', '=', 'False', ',', 'edge_offset', '=', '0', ',', 'flux_threshold', '=', '0', ',', 'neg_comp', '=', 'False', ',', 'edge_excl', '=', '0', ',', 'int_excl', '=', '0', ')', ':', '# The following preserves the dirty image as it will be changed on every iteration.', 'dirty_data', '=', 'self', '.', 'dirty_data', 'scale_count', '=', 'start_scale', 'while', 'not', '(', 'self', '.', 'complete', ')', ':', 'logger', '.', 'info', '(', '"MORESANE at scale {}"', '.', 'format', '(', 'scale_count', ')', ')', 'self', '.', 'moresane', '(', 'subregion', '=', 'subregion', ',', 'scale_count', '=', 'scale_count', ',', 'sigma_level', '=', 'sigma_level', ',', 'loop_gain', '=', 'loop_gain', ',', 'tolerance', '=', 'tolerance', ',', 'accuracy', '=', 'accuracy', ',', 'major_loop_miter', '=', 'major_loop_miter', ',', 'minor_loop_miter', '=', 'minor_loop_miter', ',', 'all_on_gpu', '=', 'all_on_gpu', ',', 'decom_mode', '=', 'decom_mode', ',', 'core_count', '=', 'core_count', ',', 'conv_device', '=', 'conv_device', ',', 'conv_mode', '=', 'conv_mode', ',', 'extraction_mode', '=', 'extraction_mode', ',', 'enforce_positivity', '=', 'enforce_positivity', ',', 'edge_suppression', '=', 'edge_suppression', ',', 'edge_offset', '=', 'edge_offset', ',', 'flux_threshold', '=', 'flux_threshold', ',', 'neg_comp', '=', 'neg_comp', ',', 'edge_excl', '=', 'edge_excl', ',', 'int_excl', '=', 'int_excl', ')', 'self', '.', 'dirty_data', '=', 'self', '.', 'residual', 'scale_count', '+=', '1', 'if', '(', 'scale_count', '>', '(', 'np', '.', 'log2', '(', 'self', '.', 'dirty_data', '.', 'shape', '[', '0', ']', ')', ')', '-', '1', ')', ':', 'logger', '.', 'info', '(', '"Maximum scale reached - finished."', ')', 'break', 'if', '(', 'scale_count', '>', 'stop_scale', ')', ':', 'logger', '.', 'info', '(', '"Maximum scale reached - finished."', ')', 'break', '# Restores the original dirty image.', 'self', '.', 'dirty_data', '=', 'dirty_data', 'self', '.', 'complete', '=', 'False'] | Extension of the MORESANE algorithm. This takes a scale-by-scale approach, attempting to remove all sources
at the lower scales before moving onto the higher ones. At each step the algorithm may return to previous
scales to remove the sources uncovered by the deconvolution.
INPUTS:
start_scale (default=1) The first scale which is to be considered.
stop_scale (default=20) The maximum scale which is to be considered. Optional.
subregion (default=None): Size, in pixels, of the central region to be analyzed and deconvolved.
sigma_level (default=4) Number of sigma at which thresholding is to be performed.
loop_gain (default=0.1): Loop gain for the deconvolution.
tolerance (default=0.75): Tolerance level for object extraction. Significant objects contain
wavelet coefficients greater than the tolerance multiplied by the
maximum wavelet coefficient in the scale under consideration.
accuracy (default=1e-6): Threshold on the standard deviation of the residual noise. Exit main
loop when this threshold is reached.
major_loop_miter (default=100): Maximum number of iterations allowed in the major loop. Exit
condition.
minor_loop_miter (default=30): Maximum number of iterations allowed in the minor loop. Serves as an
exit condition when the SNR does not reach a maximum.
all_on_gpu (default=False): Boolean specifier to toggle all gpu modes on.
decom_mode (default='ser'): Specifier for decomposition mode - serial, multiprocessing, or gpu.
core_count (default=1): In the event that multiprocessing, specifies the number of cores.
conv_device (default='cpu'): Specifier for device to be used - cpu or gpu.
conv_mode (default='linear'): Specifier for convolution mode - linear or circular.
extraction_mode (default='cpu'): Specifier for mode to be used - cpu or gpu.
enforce_positivity (default=False): Boolean specifier for whether or not a model must be strictly positive.
edge_suppression (default=False): Boolean specifier for whether or not the edges are to be suprressed.
edge_offset (default=0): Numeric value for an additional user-specified number of edge pixels
to be ignored. This is added to the minimum suppression.
OUTPUTS:
self.model (no default): Model extracted by the algorithm.
self.residual (no default): Residual signal after deconvolution. | ['Extension', 'of', 'the', 'MORESANE', 'algorithm', '.', 'This', 'takes', 'a', 'scale', '-', 'by', '-', 'scale', 'approach', 'attempting', 'to', 'remove', 'all', 'sources', 'at', 'the', 'lower', 'scales', 'before', 'moving', 'onto', 'the', 'higher', 'ones', '.', 'At', 'each', 'step', 'the', 'algorithm', 'may', 'return', 'to', 'previous', 'scales', 'to', 'remove', 'the', 'sources', 'uncovered', 'by', 'the', 'deconvolution', '.'] | train | https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/main.py#L523-L599 |
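Stripped of the deconvolution details, moresane_by_scale above is a scale-by-scale control loop: run one pass, feed the residual back in as the next dirty image, and stop at stop_scale or at the largest scale the image size supports. A skeleton of just that loop, with deconvolve_at_scale standing in for the real per-scale step:

import numpy as np

def by_scale(dirty, deconvolve_at_scale, start_scale=1, stop_scale=20):
    model = np.zeros_like(dirty)
    residual = dirty
    max_scale = int(np.log2(dirty.shape[0])) - 1  # largest scale the image supports
    scale = start_scale
    while scale <= min(stop_scale, max_scale):
        step_model, residual = deconvolve_at_scale(residual, scale)
        model += step_model
        scale += 1
    return model, residual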
8,961 | Dallinger/Dallinger | dallinger/notifications.py | EmailConfig.validate | def validate(self):
"""Could this config be used to send a real email?"""
missing = []
for k, v in self._map.items():
attr = getattr(self, k, False)
if not attr or attr == CONFIG_PLACEHOLDER:
missing.append(v)
if missing:
return "Missing or invalid config values: {}".format(
", ".join(sorted(missing))
) | python | def validate(self):
"""Could this config be used to send a real email?"""
missing = []
for k, v in self._map.items():
attr = getattr(self, k, False)
if not attr or attr == CONFIG_PLACEHOLDER:
missing.append(v)
if missing:
return "Missing or invalid config values: {}".format(
", ".join(sorted(missing))
) | ['def', 'validate', '(', 'self', ')', ':', 'missing', '=', '[', ']', 'for', 'k', ',', 'v', 'in', 'self', '.', '_map', '.', 'items', '(', ')', ':', 'attr', '=', 'getattr', '(', 'self', ',', 'k', ',', 'False', ')', 'if', 'not', 'attr', 'or', 'attr', '==', 'CONFIG_PLACEHOLDER', ':', 'missing', '.', 'append', '(', 'v', ')', 'if', 'missing', ':', 'return', '"Missing or invalid config values: {}"', '.', 'format', '(', '", "', '.', 'join', '(', 'sorted', '(', 'missing', ')', ')', ')'] | Could this config be used to send a real email? | ['Could', 'this', 'config', 'be', 'used', 'to', 'send', 'a', 'real', 'email?'] | train | https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/notifications.py#L42-L52 |
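The validate row above reports every mapped attribute that is unset or still equal to a sentinel placeholder. A self-contained sketch of that check; the _map contents and the placeholder value here are illustrative, not Dallinger's actual configuration keys:

CONFIG_PLACEHOLDER = "???"

class EmailConfig(object):
    # Maps attribute names onto the config labels reported back to the user.
    _map = {"username": "smtp_username", "toaddr": "contact_email_on_error"}

    def __init__(self, **kwargs):
        for attr in self._map:
            setattr(self, attr, kwargs.get(attr))

    def validate(self):
        missing = [label for attr, label in self._map.items()
                   if not getattr(self, attr, False)
                   or getattr(self, attr) == CONFIG_PLACEHOLDER]
        if missing:
            return "Missing or invalid config values: {}".format(", ".join(sorted(missing)))

print(EmailConfig(username="bob").validate())
# Missing or invalid config values: contact_email_on_error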
8,962 | ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py | AggregatorAccountAdminApi.update_account_api_key | def update_account_api_key(self, account_id, api_key, body, **kwargs): # noqa: E501
"""Update API key details. # noqa: E501
An endpoint for updating API key details. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -d '{\"name\": \"TestApiKey25\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_account_api_key(account_id, api_key, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str api_key: The ID of the API key to be updated. (required)
:param ApiKeyUpdateReq body: New API key attributes to be stored. (required)
:return: ApiKeyInfoResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_account_api_key_with_http_info(account_id, api_key, body, **kwargs) # noqa: E501
else:
(data) = self.update_account_api_key_with_http_info(account_id, api_key, body, **kwargs) # noqa: E501
return data | python | def update_account_api_key(self, account_id, api_key, body, **kwargs): # noqa: E501
"""Update API key details. # noqa: E501
An endpoint for updating API key details. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -d '{\"name\": \"TestApiKey25\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_account_api_key(account_id, api_key, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str api_key: The ID of the API key to be updated. (required)
:param ApiKeyUpdateReq body: New API key attributes to be stored. (required)
:return: ApiKeyInfoResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_account_api_key_with_http_info(account_id, api_key, body, **kwargs) # noqa: E501
else:
(data) = self.update_account_api_key_with_http_info(account_id, api_key, body, **kwargs) # noqa: E501
return data | ['def', 'update_account_api_key', '(', 'self', ',', 'account_id', ',', 'api_key', ',', 'body', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'asynchronous'", ')', ':', 'return', 'self', '.', 'update_account_api_key_with_http_info', '(', 'account_id', ',', 'api_key', ',', 'body', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'update_account_api_key_with_http_info', '(', 'account_id', ',', 'api_key', ',', 'body', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data'] | Update API key details. # noqa: E501
An endpoint for updating API key details. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey} -d '{\"name\": \"TestApiKey25\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_account_api_key(account_id, api_key, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str api_key: The ID of the API key to be updated. (required)
:param ApiKeyUpdateReq body: New API key attributes to be stored. (required)
:return: ApiKeyInfoResp
If the method is called asynchronously,
returns the request thread. | ['Update', 'API', 'key', 'details', '.', '#', 'noqa', ':', 'E501'] | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L4163-L4185 |
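The generated SDK wrapper above dispatches on an asynchronous flag: run the underlying call inline, or hand the caller something to wait on. A standalone sketch of that dispatch pattern using concurrent.futures, so the caller gets a Future with .result() rather than the SDK's thread with .get(); the _do_update body is a placeholder, not the real HTTP call:

from concurrent.futures import ThreadPoolExecutor

_pool = ThreadPoolExecutor(max_workers=2)

def _do_update(account_id, api_key, body):
    # Placeholder for the real PUT against /accounts/{account_id}/api-keys/{api_key}.
    return {"account": account_id, "key": api_key, "name": body.get("name")}

def update_account_api_key(account_id, api_key, body, **kwargs):
    if kwargs.get("asynchronous"):
        return _pool.submit(_do_update, account_id, api_key, body)
    return _do_update(account_id, api_key, body)

future = update_account_api_key("acct1", "ak_1", {"name": "TestApiKey25"}, asynchronous=True)
print(future.result())
print(update_account_api_key("acct1", "ak_1", {"name": "TestApiKey25"}))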
8,963 | aroberge/experimental | experimental/core/transforms.py | add_transformers | def add_transformers(line):
'''Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict
'''
assert FROM_EXPERIMENTAL.match(line)
line = FROM_EXPERIMENTAL.sub(' ', line)
# we now have: " transformer1 [,...]"
line = line.split("#")[0] # remove any end of line comments
# and insert each transformer as an item in a list
for trans in line.replace(' ', '').split(','):
import_transformer(trans) | python | def add_transformers(line):
'''Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict
'''
assert FROM_EXPERIMENTAL.match(line)
line = FROM_EXPERIMENTAL.sub(' ', line)
# we now have: " transformer1 [,...]"
line = line.split("#")[0] # remove any end of line comments
# and insert each transformer as an item in a list
for trans in line.replace(' ', '').split(','):
import_transformer(trans) | ['def', 'add_transformers', '(', 'line', ')', ':', 'assert', 'FROM_EXPERIMENTAL', '.', 'match', '(', 'line', ')', 'line', '=', 'FROM_EXPERIMENTAL', '.', 'sub', '(', "' '", ',', 'line', ')', '# we now have: " transformer1 [,...]"', 'line', '=', 'line', '.', 'split', '(', '"#"', ')', '[', '0', ']', '# remove any end of line comments', '# and insert each transformer as an item in a list', 'for', 'trans', 'in', 'line', '.', 'replace', '(', "' '", ',', "''", ')', '.', 'split', '(', "','", ')', ':', 'import_transformer', '(', 'trans', ')'] | Extract the transformers names from a line of code of the form
from __experimental__ import transformer1 [,...]
and adds them to the globally known dict | ['Extract', 'the', 'transformers', 'names', 'from', 'a', 'line', 'of', 'code', 'of', 'the', 'form', 'from', '__experimental__', 'import', 'transformer1', '[', '...', ']', 'and', 'adds', 'them', 'to', 'the', 'globally', 'known', 'dict'] | train | https://github.com/aroberge/experimental/blob/031a9be10698b429998436da748b8fdb86f18b47/experimental/core/transforms.py#L19-L31 |
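add_transformers above boils down to: strip the `from __experimental__ import` prefix and any trailing comment, then split the remainder on commas. A runnable sketch; the regex below is an assumption, since the real FROM_EXPERIMENTAL pattern is defined elsewhere in the package:

import re

FROM_EXPERIMENTAL = re.compile(r"^\s*from\s+__experimental__\s+import\s+")

def transformer_names(line):
    assert FROM_EXPERIMENTAL.match(line)
    rest = FROM_EXPERIMENTAL.sub(" ", line).split("#")[0]
    return [name for name in rest.replace(" ", "").split(",") if name]

print(transformer_names("from __experimental__ import t1, t2  # end-of-line comment"))
# ['t1', 't2']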
8,964 | gabstopper/smc-python | smc/policy/rule.py | IPv4Rule.create | def create(self, name, sources=None, destinations=None,
services=None, action='allow', log_options=None,
authentication_options=None, connection_tracking=None,
is_disabled=False, vpn_policy=None, mobile_vpn=False,
add_pos=None, after=None, before=None,
sub_policy=None, comment=None, **kw):
"""
Create a layer 3 firewall rule
:param str name: name of rule
:param sources: source/s for rule
:type sources: list[str, Element]
:param destinations: destination/s for rule
:type destinations: list[str, Element]
:param services: service/s for rule
:type services: list[str, Element]
:param action: allow,continue,discard,refuse,enforce_vpn,
apply_vpn,forward_vpn, blacklist (default: allow)
:type action: Action or str
:param LogOptions log_options: LogOptions object
:param ConnectionTracking connection_tracking: custom connection tracking settings
:param AuthenticationOptions authentication_options: options for auth if any
:param PolicyVPN,str vpn_policy: policy element or str href; required for
enforce_vpn, use_vpn and apply_vpn actions
:param bool mobile_vpn: if using a vpn action, you can set mobile_vpn to True and
omit the vpn_policy setting if you want this VPN to apply to any mobile VPN based
on the policy VPN associated with the engine
:param str,Element sub_policy: sub policy required when rule has an action of 'jump'.
Can be the FirewallSubPolicy element or href.
:param int add_pos: position to insert the rule, starting with position 1. If
the position value is greater than the number of rules, the rule is inserted at
the bottom. If add_pos is not provided, rule is inserted in position 1. Mutually
exclusive with ``after`` and ``before`` params.
:param str after: Rule tag to add this rule after. Mutually exclusive with ``add_pos``
and ``before`` params.
:param str before: Rule tag to add this rule before. Mutually exclusive with ``add_pos``
and ``after`` params.
:param str comment: optional comment for this rule
:raises MissingRequiredInput: when options are specified the need additional
setting, i.e. use_vpn action requires a vpn policy be specified.
:raises CreateRuleFailed: rule creation failure
:return: the created ipv4 rule
:rtype: IPv4Rule
"""
rule_values = self.update_targets(sources, destinations, services)
rule_values.update(name=name, comment=comment)
if isinstance(action, Action):
rule_action = action
else:
rule_action = Action()
rule_action.action = action
if not rule_action.action in self._actions:
raise CreateRuleFailed('Action specified is not valid for this '
'rule type; action: {}'.format(rule_action.action))
if rule_action.action in ('apply_vpn', 'enforce_vpn', 'forward_vpn'):
if vpn_policy is None and not mobile_vpn:
raise MissingRequiredInput('You must either specify a vpn_policy or set '
'mobile_vpn when using a rule with a VPN action')
if mobile_vpn:
rule_action.mobile_vpn = True
else:
try:
vpn = element_resolver(vpn_policy) # VPNPolicy
rule_action.vpn = vpn
except ElementNotFound:
raise MissingRequiredInput('Cannot find VPN policy specified: {}, '
.format(vpn_policy))
elif rule_action.action == 'jump':
try:
rule_action.sub_policy = element_resolver(sub_policy)
except ElementNotFound:
raise MissingRequiredInput('Cannot find sub policy specified: {} '
.format(sub_policy))
#rule_values.update(action=rule_action.data)
log_options = LogOptions() if not log_options else log_options
if connection_tracking is not None:
rule_action.connection_tracking_options.update(**connection_tracking)
auth_options = AuthenticationOptions() if not authentication_options \
else authentication_options
rule_values.update(
action=rule_action.data,
options=log_options.data,
authentication_options=auth_options.data,
is_disabled=is_disabled)
params = None
href = self.href
if add_pos is not None:
href = self.add_at_position(add_pos)
elif before or after:
params = self.add_before_after(before, after)
return ElementCreator(
self.__class__,
exception=CreateRuleFailed,
href=href,
params=params,
json=rule_values) | python | def create(self, name, sources=None, destinations=None,
services=None, action='allow', log_options=None,
authentication_options=None, connection_tracking=None,
is_disabled=False, vpn_policy=None, mobile_vpn=False,
add_pos=None, after=None, before=None,
sub_policy=None, comment=None, **kw):
"""
Create a layer 3 firewall rule
:param str name: name of rule
:param sources: source/s for rule
:type sources: list[str, Element]
:param destinations: destination/s for rule
:type destinations: list[str, Element]
:param services: service/s for rule
:type services: list[str, Element]
:param action: allow,continue,discard,refuse,enforce_vpn,
apply_vpn,forward_vpn, blacklist (default: allow)
:type action: Action or str
:param LogOptions log_options: LogOptions object
:param ConnectionTracking connection_tracking: custom connection tracking settings
:param AuthenticationOptions authentication_options: options for auth if any
:param PolicyVPN,str vpn_policy: policy element or str href; required for
enforce_vpn, use_vpn and apply_vpn actions
:param bool mobile_vpn: if using a vpn action, you can set mobile_vpn to True and
omit the vpn_policy setting if you want this VPN to apply to any mobile VPN based
on the policy VPN associated with the engine
:param str,Element sub_policy: sub policy required when rule has an action of 'jump'.
Can be the FirewallSubPolicy element or href.
:param int add_pos: position to insert the rule, starting with position 1. If
the position value is greater than the number of rules, the rule is inserted at
the bottom. If add_pos is not provided, rule is inserted in position 1. Mutually
exclusive with ``after`` and ``before`` params.
:param str after: Rule tag to add this rule after. Mutually exclusive with ``add_pos``
and ``before`` params.
:param str before: Rule tag to add this rule before. Mutually exclusive with ``add_pos``
and ``after`` params.
:param str comment: optional comment for this rule
:raises MissingRequiredInput: when options are specified the need additional
setting, i.e. use_vpn action requires a vpn policy be specified.
:raises CreateRuleFailed: rule creation failure
:return: the created ipv4 rule
:rtype: IPv4Rule
"""
rule_values = self.update_targets(sources, destinations, services)
rule_values.update(name=name, comment=comment)
if isinstance(action, Action):
rule_action = action
else:
rule_action = Action()
rule_action.action = action
if not rule_action.action in self._actions:
raise CreateRuleFailed('Action specified is not valid for this '
'rule type; action: {}'.format(rule_action.action))
if rule_action.action in ('apply_vpn', 'enforce_vpn', 'forward_vpn'):
if vpn_policy is None and not mobile_vpn:
raise MissingRequiredInput('You must either specify a vpn_policy or set '
'mobile_vpn when using a rule with a VPN action')
if mobile_vpn:
rule_action.mobile_vpn = True
else:
try:
vpn = element_resolver(vpn_policy) # VPNPolicy
rule_action.vpn = vpn
except ElementNotFound:
raise MissingRequiredInput('Cannot find VPN policy specified: {}, '
.format(vpn_policy))
elif rule_action.action == 'jump':
try:
rule_action.sub_policy = element_resolver(sub_policy)
except ElementNotFound:
raise MissingRequiredInput('Cannot find sub policy specified: {} '
.format(sub_policy))
#rule_values.update(action=rule_action.data)
log_options = LogOptions() if not log_options else log_options
if connection_tracking is not None:
rule_action.connection_tracking_options.update(**connection_tracking)
auth_options = AuthenticationOptions() if not authentication_options \
else authentication_options
rule_values.update(
action=rule_action.data,
options=log_options.data,
authentication_options=auth_options.data,
is_disabled=is_disabled)
params = None
href = self.href
if add_pos is not None:
href = self.add_at_position(add_pos)
elif before or after:
params = self.add_before_after(before, after)
return ElementCreator(
self.__class__,
exception=CreateRuleFailed,
href=href,
params=params,
json=rule_values) | ['def', 'create', '(', 'self', ',', 'name', ',', 'sources', '=', 'None', ',', 'destinations', '=', 'None', ',', 'services', '=', 'None', ',', 'action', '=', "'allow'", ',', 'log_options', '=', 'None', ',', 'authentication_options', '=', 'None', ',', 'connection_tracking', '=', 'None', ',', 'is_disabled', '=', 'False', ',', 'vpn_policy', '=', 'None', ',', 'mobile_vpn', '=', 'False', ',', 'add_pos', '=', 'None', ',', 'after', '=', 'None', ',', 'before', '=', 'None', ',', 'sub_policy', '=', 'None', ',', 'comment', '=', 'None', ',', '*', '*', 'kw', ')', ':', 'rule_values', '=', 'self', '.', 'update_targets', '(', 'sources', ',', 'destinations', ',', 'services', ')', 'rule_values', '.', 'update', '(', 'name', '=', 'name', ',', 'comment', '=', 'comment', ')', 'if', 'isinstance', '(', 'action', ',', 'Action', ')', ':', 'rule_action', '=', 'action', 'else', ':', 'rule_action', '=', 'Action', '(', ')', 'rule_action', '.', 'action', '=', 'action', 'if', 'not', 'rule_action', '.', 'action', 'in', 'self', '.', '_actions', ':', 'raise', 'CreateRuleFailed', '(', "'Action specified is not valid for this '", "'rule type; action: {}'", '.', 'format', '(', 'rule_action', '.', 'action', ')', ')', 'if', 'rule_action', '.', 'action', 'in', '(', "'apply_vpn'", ',', "'enforce_vpn'", ',', "'forward_vpn'", ')', ':', 'if', 'vpn_policy', 'is', 'None', 'and', 'not', 'mobile_vpn', ':', 'raise', 'MissingRequiredInput', '(', "'You must either specify a vpn_policy or set '", "'mobile_vpn when using a rule with a VPN action'", ')', 'if', 'mobile_vpn', ':', 'rule_action', '.', 'mobile_vpn', '=', 'True', 'else', ':', 'try', ':', 'vpn', '=', 'element_resolver', '(', 'vpn_policy', ')', '# VPNPolicy', 'rule_action', '.', 'vpn', '=', 'vpn', 'except', 'ElementNotFound', ':', 'raise', 'MissingRequiredInput', '(', "'Cannot find VPN policy specified: {}, '", '.', 'format', '(', 'vpn_policy', ')', ')', 'elif', 'rule_action', '.', 'action', '==', "'jump'", ':', 'try', ':', 'rule_action', '.', 'sub_policy', '=', 'element_resolver', '(', 'sub_policy', ')', 'except', 'ElementNotFound', ':', 'raise', 'MissingRequiredInput', '(', "'Cannot find sub policy specified: {} '", '.', 'format', '(', 'sub_policy', ')', ')', '#rule_values.update(action=rule_action.data)', 'log_options', '=', 'LogOptions', '(', ')', 'if', 'not', 'log_options', 'else', 'log_options', 'if', 'connection_tracking', 'is', 'not', 'None', ':', 'rule_action', '.', 'connection_tracking_options', '.', 'update', '(', '*', '*', 'connection_tracking', ')', 'auth_options', '=', 'AuthenticationOptions', '(', ')', 'if', 'not', 'authentication_options', 'else', 'authentication_options', 'rule_values', '.', 'update', '(', 'action', '=', 'rule_action', '.', 'data', ',', 'options', '=', 'log_options', '.', 'data', ',', 'authentication_options', '=', 'auth_options', '.', 'data', ',', 'is_disabled', '=', 'is_disabled', ')', 'params', '=', 'None', 'href', '=', 'self', '.', 'href', 'if', 'add_pos', 'is', 'not', 'None', ':', 'href', '=', 'self', '.', 'add_at_position', '(', 'add_pos', ')', 'elif', 'before', 'or', 'after', ':', 'params', '=', 'self', '.', 'add_before_after', '(', 'before', ',', 'after', ')', 'return', 'ElementCreator', '(', 'self', '.', '__class__', ',', 'exception', '=', 'CreateRuleFailed', ',', 'href', '=', 'href', ',', 'params', '=', 'params', ',', 'json', '=', 'rule_values', ')'] | Create a layer 3 firewall rule
:param str name: name of rule
:param sources: source/s for rule
:type sources: list[str, Element]
:param destinations: destination/s for rule
:type destinations: list[str, Element]
:param services: service/s for rule
:type services: list[str, Element]
:param action: allow,continue,discard,refuse,enforce_vpn,
apply_vpn,forward_vpn, blacklist (default: allow)
:type action: Action or str
:param LogOptions log_options: LogOptions object
:param ConnectionTracking connection_tracking: custom connection tracking settings
:param AuthenticationOptions authentication_options: options for auth if any
:param PolicyVPN,str vpn_policy: policy element or str href; required for
enforce_vpn, use_vpn and apply_vpn actions
:param bool mobile_vpn: if using a vpn action, you can set mobile_vpn to True and
omit the vpn_policy setting if you want this VPN to apply to any mobile VPN based
on the policy VPN associated with the engine
:param str,Element sub_policy: sub policy required when rule has an action of 'jump'.
Can be the FirewallSubPolicy element or href.
:param int add_pos: position to insert the rule, starting with position 1. If
the position value is greater than the number of rules, the rule is inserted at
the bottom. If add_pos is not provided, rule is inserted in position 1. Mutually
exclusive with ``after`` and ``before`` params.
:param str after: Rule tag to add this rule after. Mutually exclusive with ``add_pos``
and ``before`` params.
:param str before: Rule tag to add this rule before. Mutually exclusive with ``add_pos``
and ``after`` params.
:param str comment: optional comment for this rule
:raises MissingRequiredInput: when options are specified the need additional
setting, i.e. use_vpn action requires a vpn policy be specified.
:raises CreateRuleFailed: rule creation failure
:return: the created ipv4 rule
:rtype: IPv4Rule | ['Create', 'a', 'layer', '3', 'firewall', 'rule'] | train | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/policy/rule.py#L469-L575 |
8,965 | scanny/python-pptx | pptx/chart/series.py | LineSeries.smooth | def smooth(self):
"""
Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points.
"""
smooth = self._element.smooth
if smooth is None:
return True
return smooth.val | python | def smooth(self):
"""
Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points.
"""
smooth = self._element.smooth
if smooth is None:
return True
return smooth.val | ['def', 'smooth', '(', 'self', ')', ':', 'smooth', '=', 'self', '.', '_element', '.', 'smooth', 'if', 'smooth', 'is', 'None', ':', 'return', 'True', 'return', 'smooth', '.', 'val'] | Read/write boolean specifying whether to use curve smoothing to
form the line connecting the data points in this series into
a continuous curve. If |False|, a series of straight line segments
are used to connect the points. | ['Read', '/', 'write', 'boolean', 'specifying', 'whether', 'to', 'use', 'curve', 'smoothing', 'to', 'form', 'the', 'line', 'connecting', 'the', 'data', 'points', 'in', 'this', 'series', 'into', 'a', 'continuous', 'curve', '.', 'If', '|False|', 'a', 'series', 'of', 'straight', 'line', 'segments', 'are', 'used', 'to', 'connect', 'the', 'points', '.'] | train | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/series.py#L140-L150 |
8,966 | DLR-RM/RAFCON | source/rafcon/core/states/state.py | State.outcomes | def outcomes(self, outcomes):
""" Setter for _outcomes field
See property.
:param dict outcomes: Dictionary outcomes[outcome_id] that maps :class:`int` outcome_ids onto values of type
:class:`rafcon.core.state_elements.logical_port.Outcome`
:raises exceptions.TypeError: if outcomes parameter has the wrong type
:raises exceptions.AttributeError: if the key of the outcome dictionary and the id of the outcome do not match
"""
if not isinstance(outcomes, dict):
raise TypeError("outcomes must be of type dict")
if [outcome_id for outcome_id, outcome in outcomes.items() if not isinstance(outcome, Outcome)]:
raise TypeError("element of outcomes must be of type Outcome")
if [outcome_id for outcome_id, outcome in outcomes.items() if not outcome_id == outcome.outcome_id]:
raise AttributeError("The key of the outcomes dictionary and the id of the outcome do not match")
old_outcomes = self.outcomes
self._outcomes = outcomes
for outcome_id, outcome in outcomes.items():
try:
outcome.parent = self
except ValueError:
self._outcomes = old_outcomes
raise
# aborted and preempted must always exist
if -1 not in outcomes:
self._outcomes[-1] = Outcome(outcome_id=-1, name="aborted", parent=self)
if -2 not in outcomes:
self._outcomes[-2] = Outcome(outcome_id=-2, name="preempted", parent=self)
# check that all old_outcomes are no more referencing self as there parent
for old_outcome in old_outcomes.values():
if old_outcome not in iter(list(self._outcomes.values())) and old_outcome.parent is self:
old_outcome.parent = None | python | def outcomes(self, outcomes):
""" Setter for _outcomes field
See property.
:param dict outcomes: Dictionary outcomes[outcome_id] that maps :class:`int` outcome_ids onto values of type
:class:`rafcon.core.state_elements.logical_port.Outcome`
:raises exceptions.TypeError: if outcomes parameter has the wrong type
:raises exceptions.AttributeError: if the key of the outcome dictionary and the id of the outcome do not match
"""
if not isinstance(outcomes, dict):
raise TypeError("outcomes must be of type dict")
if [outcome_id for outcome_id, outcome in outcomes.items() if not isinstance(outcome, Outcome)]:
raise TypeError("element of outcomes must be of type Outcome")
if [outcome_id for outcome_id, outcome in outcomes.items() if not outcome_id == outcome.outcome_id]:
raise AttributeError("The key of the outcomes dictionary and the id of the outcome do not match")
old_outcomes = self.outcomes
self._outcomes = outcomes
for outcome_id, outcome in outcomes.items():
try:
outcome.parent = self
except ValueError:
self._outcomes = old_outcomes
raise
# aborted and preempted must always exist
if -1 not in outcomes:
self._outcomes[-1] = Outcome(outcome_id=-1, name="aborted", parent=self)
if -2 not in outcomes:
self._outcomes[-2] = Outcome(outcome_id=-2, name="preempted", parent=self)
# check that all old_outcomes are no more referencing self as there parent
for old_outcome in old_outcomes.values():
if old_outcome not in iter(list(self._outcomes.values())) and old_outcome.parent is self:
old_outcome.parent = None | ['def', 'outcomes', '(', 'self', ',', 'outcomes', ')', ':', 'if', 'not', 'isinstance', '(', 'outcomes', ',', 'dict', ')', ':', 'raise', 'TypeError', '(', '"outcomes must be of type dict"', ')', 'if', '[', 'outcome_id', 'for', 'outcome_id', ',', 'outcome', 'in', 'outcomes', '.', 'items', '(', ')', 'if', 'not', 'isinstance', '(', 'outcome', ',', 'Outcome', ')', ']', ':', 'raise', 'TypeError', '(', '"element of outcomes must be of type Outcome"', ')', 'if', '[', 'outcome_id', 'for', 'outcome_id', ',', 'outcome', 'in', 'outcomes', '.', 'items', '(', ')', 'if', 'not', 'outcome_id', '==', 'outcome', '.', 'outcome_id', ']', ':', 'raise', 'AttributeError', '(', '"The key of the outcomes dictionary and the id of the outcome do not match"', ')', 'old_outcomes', '=', 'self', '.', 'outcomes', 'self', '.', '_outcomes', '=', 'outcomes', 'for', 'outcome_id', ',', 'outcome', 'in', 'outcomes', '.', 'items', '(', ')', ':', 'try', ':', 'outcome', '.', 'parent', '=', 'self', 'except', 'ValueError', ':', 'self', '.', '_outcomes', '=', 'old_outcomes', 'raise', '# aborted and preempted must always exist', 'if', '-', '1', 'not', 'in', 'outcomes', ':', 'self', '.', '_outcomes', '[', '-', '1', ']', '=', 'Outcome', '(', 'outcome_id', '=', '-', '1', ',', 'name', '=', '"aborted"', ',', 'parent', '=', 'self', ')', 'if', '-', '2', 'not', 'in', 'outcomes', ':', 'self', '.', '_outcomes', '[', '-', '2', ']', '=', 'Outcome', '(', 'outcome_id', '=', '-', '2', ',', 'name', '=', '"preempted"', ',', 'parent', '=', 'self', ')', '# check that all old_outcomes are no more referencing self as there parent', 'for', 'old_outcome', 'in', 'old_outcomes', '.', 'values', '(', ')', ':', 'if', 'old_outcome', 'not', 'in', 'iter', '(', 'list', '(', 'self', '.', '_outcomes', '.', 'values', '(', ')', ')', ')', 'and', 'old_outcome', '.', 'parent', 'is', 'self', ':', 'old_outcome', '.', 'parent', '=', 'None'] | Setter for _outcomes field
See property.
:param dict outcomes: Dictionary outcomes[outcome_id] that maps :class:`int` outcome_ids onto values of type
:class:`rafcon.core.state_elements.logical_port.Outcome`
:raises exceptions.TypeError: if outcomes parameter has the wrong type
:raises exceptions.AttributeError: if the key of the outcome dictionary and the id of the outcome do not match | ['Setter', 'for', '_outcomes', 'field'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L1192-L1227 |
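The outcomes setter above follows a validate-then-commit pattern: type-check the dict, require each key to equal its outcome's id, swap the new dict in, and roll back if re-parenting fails. A minimal standalone sketch of that pattern with a simplified Outcome and without the aborted/preempted defaults:

class Outcome(object):
    def __init__(self, outcome_id, name):
        self.outcome_id = outcome_id
        self.name = name
        self.parent = None

def set_outcomes(state, outcomes):
    if not isinstance(outcomes, dict):
        raise TypeError("outcomes must be of type dict")
    if any(not isinstance(o, Outcome) for o in outcomes.values()):
        raise TypeError("element of outcomes must be of type Outcome")
    if any(key != o.outcome_id for key, o in outcomes.items()):
        raise AttributeError("dictionary key and outcome id do not match")
    old = getattr(state, "outcomes", {})
    state.outcomes = outcomes
    try:
        for o in outcomes.values():
            o.parent = state  # stands in for the re-parenting step that may raise
    except ValueError:
        state.outcomes = old
        raise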
8,967 | KelSolaar/Umbra | umbra/preferences.py | Preferences.get_key | def get_key(self, section, key):
"""
Gets key value from settings file.
:param section: Current section to retrieve key from.
:type section: unicode
:param key: Current key to retrieve.
:type key: unicode
:return: Current key value.
:rtype: object
"""
LOGGER.debug("> Retrieving '{0}' in '{1}' section.".format(key, section))
self.__settings.beginGroup(section)
value = self.__settings.value(key)
LOGGER.debug("> Key value: '{0}'.".format(value))
self.__settings.endGroup()
return value | python | def get_key(self, section, key):
"""
Gets key value from settings file.
:param section: Current section to retrieve key from.
:type section: unicode
:param key: Current key to retrieve.
:type key: unicode
:return: Current key value.
:rtype: object
"""
LOGGER.debug("> Retrieving '{0}' in '{1}' section.".format(key, section))
self.__settings.beginGroup(section)
value = self.__settings.value(key)
LOGGER.debug("> Key value: '{0}'.".format(value))
self.__settings.endGroup()
return value | ['def', 'get_key', '(', 'self', ',', 'section', ',', 'key', ')', ':', 'LOGGER', '.', 'debug', '(', '"> Retrieving \'{0}\' in \'{1}\' section."', '.', 'format', '(', 'key', ',', 'section', ')', ')', 'self', '.', '__settings', '.', 'beginGroup', '(', 'section', ')', 'value', '=', 'self', '.', '__settings', '.', 'value', '(', 'key', ')', 'LOGGER', '.', 'debug', '(', '"> Key value: \'{0}\'."', '.', 'format', '(', 'value', ')', ')', 'self', '.', '__settings', '.', 'endGroup', '(', ')', 'return', 'value'] | Gets key value from settings file.
:param section: Current section to retrieve key from.
:type section: unicode
:param key: Current key to retrieve.
:type key: unicode
:return: Current key value.
:rtype: object | ['Gets', 'key', 'value', 'from', 'settings', 'file', '.'] | train | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/preferences.py#L225-L244 |
8,968 | cakebread/yolk | yolk/pypi.py | check_proxy_setting | def check_proxy_setting():
"""
If the environmental variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:
proxyhost:8080
http://proxyhost:8080
    urllib2 requires the proxy URL to start with 'http://'
This routine does that, and returns the transport for xmlrpc.
"""
try:
http_proxy = os.environ['HTTP_PROXY']
except KeyError:
return
if not http_proxy.startswith('http://'):
match = re.match('(http://)?([-_\.A-Za-z]+):(\d+)', http_proxy)
#if not match:
# raise Exception('Proxy format not recognised: [%s]' % http_proxy)
os.environ['HTTP_PROXY'] = 'http://%s:%s' % (match.group(2),
match.group(3))
return | python | def check_proxy_setting():
"""
If the environmental variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:
proxyhost:8080
http://proxyhost:8080
    urllib2 requires the proxy URL to start with 'http://'
This routine does that, and returns the transport for xmlrpc.
"""
try:
http_proxy = os.environ['HTTP_PROXY']
except KeyError:
return
if not http_proxy.startswith('http://'):
match = re.match('(http://)?([-_\.A-Za-z]+):(\d+)', http_proxy)
#if not match:
# raise Exception('Proxy format not recognised: [%s]' % http_proxy)
os.environ['HTTP_PROXY'] = 'http://%s:%s' % (match.group(2),
match.group(3))
return | ['def', 'check_proxy_setting', '(', ')', ':', 'try', ':', 'http_proxy', '=', 'os', '.', 'environ', '[', "'HTTP_PROXY'", ']', 'except', 'KeyError', ':', 'return', 'if', 'not', 'http_proxy', '.', 'startswith', '(', "'http://'", ')', ':', 'match', '=', 're', '.', 'match', '(', "'(http://)?([-_\\.A-Za-z]+):(\\d+)'", ',', 'http_proxy', ')', '#if not match:', "# raise Exception('Proxy format not recognised: [%s]' % http_proxy)", 'os', '.', 'environ', '[', "'HTTP_PROXY'", ']', '=', "'http://%s:%s'", '%', '(', 'match', '.', 'group', '(', '2', ')', ',', 'match', '.', 'group', '(', '3', ')', ')', 'return'] | If the environmental variable 'HTTP_PROXY' is set, it will most likely be
in one of these forms:
proxyhost:8080
http://proxyhost:8080
    urllib2 requires the proxy URL to start with 'http://'
This routine does that, and returns the transport for xmlrpc. | ['If', 'the', 'environmental', 'variable', 'HTTP_PROXY', 'is', 'set', 'it', 'will', 'most', 'likely', 'be', 'in', 'one', 'of', 'these', 'forms', ':'] | train | https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/pypi.py#L95-L117 |
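A standalone sketch of the normalization check_proxy_setting applies, run outside the function with an illustrative HTTP_PROXY value:

import re

http_proxy = 'proxyhost:8080'  # illustrative value that lacks the scheme
if not http_proxy.startswith('http://'):
    match = re.match(r'(http://)?([-_\.A-Za-z]+):(\d+)', http_proxy)
    if match:  # the original assumes the value matches
        http_proxy = 'http://%s:%s' % (match.group(2), match.group(3))
print(http_proxy)  # -> http://proxyhost:8080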
8,969 | knipknap/exscript | Exscript/util/interact.py | InputHistory.set | def set(self, key, value):
"""
Saves the input with the given key in the section that was
passed to the constructor. If either the section or the key
are not found, they are created.
Does nothing if the given value is None.
:type key: str
:param key: The key for which to define a value.
:type value: str|None
:param value: The value that is defined, or None.
:rtype: str|None
:return: The given value.
"""
if value is None:
return None
self.parser.set(self.section, key, value)
# Unfortunately ConfigParser attempts to write a string to the file
        # object, and NamedTemporaryFile uses binary mode. So we need to create
# the tempfile, and then re-open it.
with NamedTemporaryFile(delete=False) as tmpfile:
pass
with codecs.open(tmpfile.name, 'w', encoding='utf8') as fp:
self.parser.write(fp)
self.file.close()
shutil.move(tmpfile.name, self.file.name)
self.file = open(self.file.name)
return value | python | def set(self, key, value):
"""
Saves the input with the given key in the section that was
passed to the constructor. If either the section or the key
are not found, they are created.
Does nothing if the given value is None.
:type key: str
:param key: The key for which to define a value.
:type value: str|None
:param value: The value that is defined, or None.
:rtype: str|None
:return: The given value.
"""
if value is None:
return None
self.parser.set(self.section, key, value)
# Unfortunately ConfigParser attempts to write a string to the file
        # object, and NamedTemporaryFile uses binary mode. So we need to create
# the tempfile, and then re-open it.
with NamedTemporaryFile(delete=False) as tmpfile:
pass
with codecs.open(tmpfile.name, 'w', encoding='utf8') as fp:
self.parser.write(fp)
self.file.close()
shutil.move(tmpfile.name, self.file.name)
self.file = open(self.file.name)
return value | ['def', 'set', '(', 'self', ',', 'key', ',', 'value', ')', ':', 'if', 'value', 'is', 'None', ':', 'return', 'None', 'self', '.', 'parser', '.', 'set', '(', 'self', '.', 'section', ',', 'key', ',', 'value', ')', '# Unfortunately ConfigParser attempts to write a string to the file', '# object, and NamedTemporaryFile uses binary mode. So we nee to create', '# the tempfile, and then re-open it.', 'with', 'NamedTemporaryFile', '(', 'delete', '=', 'False', ')', 'as', 'tmpfile', ':', 'pass', 'with', 'codecs', '.', 'open', '(', 'tmpfile', '.', 'name', ',', "'w'", ',', 'encoding', '=', "'utf8'", ')', 'as', 'fp', ':', 'self', '.', 'parser', '.', 'write', '(', 'fp', ')', 'self', '.', 'file', '.', 'close', '(', ')', 'shutil', '.', 'move', '(', 'tmpfile', '.', 'name', ',', 'self', '.', 'file', '.', 'name', ')', 'self', '.', 'file', '=', 'open', '(', 'self', '.', 'file', '.', 'name', ')', 'return', 'value'] | Saves the input with the given key in the section that was
passed to the constructor. If either the section or the key
are not found, they are created.
Does nothing if the given value is None.
:type key: str
:param key: The key for which to define a value.
:type value: str|None
:param value: The value that is defined, or None.
:rtype: str|None
:return: The given value. | ['Saves', 'the', 'input', 'with', 'the', 'given', 'key', 'in', 'the', 'section', 'that', 'was', 'passed', 'to', 'the', 'constructor', '.', 'If', 'either', 'the', 'section', 'or', 'the', 'key', 'are', 'not', 'found', 'they', 'are', 'created', '.'] | train | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/interact.py#L108-L139 |
8,970 | quantumlib/Cirq | cirq/protocols/apply_unitary.py | ApplyUnitaryArgs.subspace_index | def subspace_index(self, little_endian_bits_int: int
) -> Tuple[Union[slice, int, 'ellipsis'], ...]:
"""An index for the subspace where the target axes equal a value.
Args:
little_endian_bits_int: The desired value of the qubits at the
targeted `axes`, packed into an integer. The least significant
bit of the integer is the desired bit for the first axis, and
so forth in increasing order.
Returns:
A value that can be used to index into `target_tensor` and
`available_buffer`, and manipulate only the part of Hilbert space
corresponding to a given bit assignment.
Example:
If `target_tensor` is a 4 qubit tensor and `axes` is `[1, 3]` and
then this method will return the following when given
`little_endian_bits=0b01`:
`(slice(None), 0, slice(None), 1, Ellipsis)`
Therefore the following two lines would be equivalent:
args.target_tensor[args.subspace_index(0b01)] += 1
args.target_tensor[:, 0, :, 1] += 1
"""
return linalg.slice_for_qubits_equal_to(self.axes,
little_endian_bits_int) | python | def subspace_index(self, little_endian_bits_int: int
) -> Tuple[Union[slice, int, 'ellipsis'], ...]:
"""An index for the subspace where the target axes equal a value.
Args:
little_endian_bits_int: The desired value of the qubits at the
targeted `axes`, packed into an integer. The least significant
bit of the integer is the desired bit for the first axis, and
so forth in increasing order.
Returns:
A value that can be used to index into `target_tensor` and
`available_buffer`, and manipulate only the part of Hilbert space
corresponding to a given bit assignment.
Example:
If `target_tensor` is a 4 qubit tensor and `axes` is `[1, 3]` and
then this method will return the following when given
`little_endian_bits=0b01`:
`(slice(None), 0, slice(None), 1, Ellipsis)`
Therefore the following two lines would be equivalent:
args.target_tensor[args.subspace_index(0b01)] += 1
args.target_tensor[:, 0, :, 1] += 1
"""
return linalg.slice_for_qubits_equal_to(self.axes,
little_endian_bits_int) | ['def', 'subspace_index', '(', 'self', ',', 'little_endian_bits_int', ':', 'int', ')', '->', 'Tuple', '[', 'Union', '[', 'slice', ',', 'int', ',', "'ellipsis'", ']', ',', '...', ']', ':', 'return', 'linalg', '.', 'slice_for_qubits_equal_to', '(', 'self', '.', 'axes', ',', 'little_endian_bits_int', ')'] | An index for the subspace where the target axes equal a value.
Args:
little_endian_bits_int: The desired value of the qubits at the
targeted `axes`, packed into an integer. The least significant
bit of the integer is the desired bit for the first axis, and
so forth in increasing order.
Returns:
A value that can be used to index into `target_tensor` and
`available_buffer`, and manipulate only the part of Hilbert space
corresponding to a given bit assignment.
Example:
If `target_tensor` is a 4 qubit tensor and `axes` is `[1, 3]` and
then this method will return the following when given
`little_endian_bits=0b01`:
`(slice(None), 0, slice(None), 1, Ellipsis)`
Therefore the following two lines would be equivalent:
args.target_tensor[args.subspace_index(0b01)] += 1
args.target_tensor[:, 0, :, 1] += 1 | ['An', 'index', 'for', 'the', 'subspace', 'where', 'the', 'target', 'axes', 'equal', 'a', 'value', '.'] | train | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/protocols/apply_unitary.py#L82-L111 |
8,971 | lrq3000/pyFileFixity | pyFileFixity/lib/brownanrs/ff.py | GF2int.multiply | def multiply(a, b, prim=0x11b, field_charac_full=256, carryless=True):
'''A slow multiply method. This method gives the same results as the
other __mul__ method but without needing precomputed tables,
thus it can be used to generate those tables.
If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations).
This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together.
The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are:
        1- carry-less addition and subtraction (XOR in GF(2^p))
2- modular reduction (to avoid duplicate values in the field) using a prime polynomial
'''
r = 0
a = int(a)
b = int(b)
while b: # while b is not 0
if b & 1: r = r ^ a if carryless else r + a # b is odd, then add the corresponding a to r (the sum of all a's corresponding to odd b's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!).
b = b >> 1 # equivalent to b // 2
a = a << 1 # equivalent to a*2
if prim > 0 and a & field_charac_full: a = a ^ prim # GF modulo: if a >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR).
return GF2int(r) | python | def multiply(a, b, prim=0x11b, field_charac_full=256, carryless=True):
'''A slow multiply method. This method gives the same results as the
other __mul__ method but without needing precomputed tables,
thus it can be used to generate those tables.
If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations).
This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together.
The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are:
        1- carry-less addition and subtraction (XOR in GF(2^p))
2- modular reduction (to avoid duplicate values in the field) using a prime polynomial
'''
r = 0
a = int(a)
b = int(b)
while b: # while b is not 0
if b & 1: r = r ^ a if carryless else r + a # b is odd, then add the corresponding a to r (the sum of all a's corresponding to odd b's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!).
b = b >> 1 # equivalent to b // 2
a = a << 1 # equivalent to a*2
if prim > 0 and a & field_charac_full: a = a ^ prim # GF modulo: if a >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR).
return GF2int(r) | ['def', 'multiply', '(', 'a', ',', 'b', ',', 'prim', '=', '0x11b', ',', 'field_charac_full', '=', '256', ',', 'carryless', '=', 'True', ')', ':', 'r', '=', '0', 'a', '=', 'int', '(', 'a', ')', 'b', '=', 'int', '(', 'b', ')', 'while', 'b', ':', '# while b is not 0', 'if', 'b', '&', '1', ':', 'r', '=', 'r', '^', 'a', 'if', 'carryless', 'else', 'r', '+', 'a', "# b is odd, then add the corresponding a to r (the sum of all a's corresponding to odd b's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!).", 'b', '=', 'b', '>>', '1', '# equivalent to b // 2', 'a', '=', 'a', '<<', '1', '# equivalent to a*2', 'if', 'prim', '>', '0', 'and', 'a', '&', 'field_charac_full', ':', 'a', '=', 'a', '^', 'prim', '# GF modulo: if a >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR).', 'return', 'GF2int', '(', 'r', ')'] | A slow multiply method. This method gives the same results as the
other __mul__ method but without needing precomputed tables,
thus it can be used to generate those tables.
If prim is set to 0 and carryless=False, the function produces the result of a standard multiplication of integers (outside of a finite field, ie, no modular reduction and no carry-less operations).
This procedure is called Russian Peasant Multiplication algorithm, which is just a general algorithm to multiply two integers together.
The only two differences that you need to account for when doing multiplication in a finite field (as opposed to just integers) are:
        1- carry-less addition and subtraction (XOR in GF(2^p))
2- modular reduction (to avoid duplicate values in the field) using a prime polynomial | ['A', 'slow', 'multiply', 'method', '.', 'This', 'method', 'gives', 'the', 'same', 'results', 'as', 'the', 'other', '__mul__', 'method', 'but', 'without', 'needing', 'precomputed', 'tables', 'thus', 'it', 'can', 'be', 'used', 'to', 'generate', 'those', 'tables', '.'] | train | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/brownanrs/ff.py#L265-L287 |
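A self-contained sketch of the same Russian Peasant multiplication in GF(2^8); the helper name gf_mult is illustrative, and the check uses the well-known AES-field fact that 0xCA is the multiplicative inverse of 0x53 under the primitive polynomial 0x11b:

def gf_mult(a, b, prim=0x11b, field_charac_full=256):
    # carry-less Russian Peasant multiplication with modular reduction
    r = 0
    while b:
        if b & 1:
            r ^= a               # carry-less addition is XOR in GF(2^p)
        b >>= 1
        a <<= 1
        if prim > 0 and a & field_charac_full:
            a ^= prim            # reduce a modulo the primitive polynomial
    return r

assert gf_mult(0x53, 0xCA) == 0x01
assert gf_mult(3, 7) == gf_mult(7, 3) == 9  # carry-less, so 3*7 -> 0b11 ^ 0b110 ^ 0b1100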
8,972 | pmelchior/proxmin | proxmin/utils.py | get_step_f | def get_step_f(step_f, lR2, lS2):
"""Update the stepsize of given the primal and dual errors.
See Boyd (2011), section 3.4.1
"""
mu, tau = 10, 2
if lR2 > mu*lS2:
return step_f * tau
elif lS2 > mu*lR2:
return step_f / tau
return step_f | python | def get_step_f(step_f, lR2, lS2):
"""Update the stepsize of given the primal and dual errors.
See Boyd (2011), section 3.4.1
"""
mu, tau = 10, 2
if lR2 > mu*lS2:
return step_f * tau
elif lS2 > mu*lR2:
return step_f / tau
return step_f | ['def', 'get_step_f', '(', 'step_f', ',', 'lR2', ',', 'lS2', ')', ':', 'mu', ',', 'tau', '=', '10', ',', '2', 'if', 'lR2', '>', 'mu', '*', 'lS2', ':', 'return', 'step_f', '*', 'tau', 'elif', 'lS2', '>', 'mu', '*', 'lR2', ':', 'return', 'step_f', '/', 'tau', 'return', 'step_f'] | Update the stepsize of given the primal and dual errors.
See Boyd (2011), section 3.4.1 | ['Update', 'the', 'stepsize', 'of', 'given', 'the', 'primal', 'and', 'dual', 'errors', '.'] | train | https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/utils.py#L300-L310 |
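A quick illustration of the residual-balancing rule above (Boyd et al. 2011, section 3.4.1): the step grows when the primal residual dominates and shrinks when the dual residual dominates; the numbers are illustrative:

def balance_step(step_f, lR2, lS2, mu=10, tau=2):
    # same rule as get_step_f, restated for illustration
    if lR2 > mu * lS2:
        return step_f * tau
    if lS2 > mu * lR2:
        return step_f / tau
    return step_f

print(balance_step(1.0, lR2=100.0, lS2=1.0))  # 2.0, primal residual too large -> increase step
print(balance_step(1.0, lR2=1.0, lS2=100.0))  # 0.5, dual residual too large -> decrease step
print(balance_step(1.0, lR2=5.0, lS2=1.0))    # 1.0, roughly balanced -> unchanged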
8,973 | crunchyroll/ef-open | efopen/ef_aws_resolver.py | EFAwsResolver.elbv2_load_balancer_arn_suffix | def elbv2_load_balancer_arn_suffix(self, lookup, default=None):
"""
Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*`
"""
try:
elb = self._elbv2_load_balancer(lookup)
m = re.search(r'.+?(app\/[^\/]+\/[^\/]+)$', elb['LoadBalancerArn'])
return m.group(1)
except ClientError:
return default | python | def elbv2_load_balancer_arn_suffix(self, lookup, default=None):
"""
Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*`
"""
try:
elb = self._elbv2_load_balancer(lookup)
m = re.search(r'.+?(app\/[^\/]+\/[^\/]+)$', elb['LoadBalancerArn'])
return m.group(1)
except ClientError:
return default | ['def', 'elbv2_load_balancer_arn_suffix', '(', 'self', ',', 'lookup', ',', 'default', '=', 'None', ')', ':', 'try', ':', 'elb', '=', 'self', '.', '_elbv2_load_balancer', '(', 'lookup', ')', 'm', '=', 're', '.', 'search', '(', "r'.+?(app\\/[^\\/]+\\/[^\\/]+)$'", ',', 'elb', '[', "'LoadBalancerArn'", ']', ')', 'return', 'm', '.', 'group', '(', '1', ')', 'except', 'ClientError', ':', 'return', 'default'] | Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*` | ['Args', ':', 'lookup', ':', 'the', 'friendly', 'name', 'of', 'the', 'v2', 'elb', 'to', 'look', 'up', 'default', ':', 'value', 'to', 'return', 'in', 'case', 'of', 'no', 'match', 'Returns', ':', 'The', 'shorthand', 'fragment', 'of', 'the', 'ALB', 's', 'ARN', 'of', 'the', 'form', 'app', '/', '*', '/', '*'] | train | https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_aws_resolver.py#L358-L371 |
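The regex above pulls out the `app/*/*` suffix described in the docstring; a standalone sketch with a made-up ARN (account id and names are illustrative):

import re

arn = ('arn:aws:elasticloadbalancing:us-east-1:123456789012:'
       'loadbalancer/app/my-alb/50dc6c495c0c9188')
m = re.search(r'.+?(app\/[^\/]+\/[^\/]+)$', arn)
print(m.group(1))  # -> app/my-alb/50dc6c495c0c9188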
8,974 | nickmckay/LiPD-utilities | Matlab/bagit.py | Bag.missing_optional_tagfiles | def missing_optional_tagfiles(self):
"""
From v0.97 we need to validate any tagfiles listed
in the optional tagmanifest(s). As there is no mandatory
directory structure for additional tagfiles we can
only check for entries with missing files (not missing
entries for existing files).
"""
for tagfilepath in list(self.tagfile_entries().keys()):
if not os.path.isfile(os.path.join(self.path, tagfilepath)):
yield tagfilepath | python | def missing_optional_tagfiles(self):
"""
From v0.97 we need to validate any tagfiles listed
in the optional tagmanifest(s). As there is no mandatory
directory structure for additional tagfiles we can
only check for entries with missing files (not missing
entries for existing files).
"""
for tagfilepath in list(self.tagfile_entries().keys()):
if not os.path.isfile(os.path.join(self.path, tagfilepath)):
yield tagfilepath | ['def', 'missing_optional_tagfiles', '(', 'self', ')', ':', 'for', 'tagfilepath', 'in', 'list', '(', 'self', '.', 'tagfile_entries', '(', ')', '.', 'keys', '(', ')', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'path', ',', 'tagfilepath', ')', ')', ':', 'yield', 'tagfilepath'] | From v0.97 we need to validate any tagfiles listed
in the optional tagmanifest(s). As there is no mandatory
directory structure for additional tagfiles we can
only check for entries with missing files (not missing
entries for existing files). | ['From', 'v0', '.', '97', 'we', 'need', 'to', 'validate', 'any', 'tagfiles', 'listed', 'in', 'the', 'optional', 'tagmanifest', '(', 's', ')', '.', 'As', 'there', 'is', 'no', 'mandatory', 'directory', 'structure', 'for', 'additional', 'tagfiles', 'we', 'can', 'only', 'check', 'for', 'entries', 'with', 'missing', 'files', '(', 'not', 'missing', 'entries', 'for', 'existing', 'files', ')', '.'] | train | https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Matlab/bagit.py#L325-L335 |
8,975 | fastavro/fastavro | fastavro/_read_py.py | schemaless_reader | def schemaless_reader(fo, writer_schema, reader_schema=None):
"""Reads a single record writen using the
:meth:`~fastavro._write_py.schemaless_writer`
Parameters
----------
fo: file-like
Input stream
writer_schema: dict
Schema used when calling schemaless_writer
reader_schema: dict, optional
If the schema has changed since being written then the new schema can
be given to allow for schema migration
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file.avro', 'rb') as fp:
record = fastavro.schemaless_reader(fp, parsed_schema)
Note: The ``schemaless_reader`` can only read a single record.
"""
if writer_schema == reader_schema:
# No need for the reader schema if they are the same
reader_schema = None
writer_schema = parse_schema(writer_schema)
if reader_schema:
reader_schema = parse_schema(reader_schema)
return read_data(fo, writer_schema, reader_schema) | python | def schemaless_reader(fo, writer_schema, reader_schema=None):
"""Reads a single record writen using the
:meth:`~fastavro._write_py.schemaless_writer`
Parameters
----------
fo: file-like
Input stream
writer_schema: dict
Schema used when calling schemaless_writer
reader_schema: dict, optional
If the schema has changed since being written then the new schema can
be given to allow for schema migration
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file.avro', 'rb') as fp:
record = fastavro.schemaless_reader(fp, parsed_schema)
Note: The ``schemaless_reader`` can only read a single record.
"""
if writer_schema == reader_schema:
# No need for the reader schema if they are the same
reader_schema = None
writer_schema = parse_schema(writer_schema)
if reader_schema:
reader_schema = parse_schema(reader_schema)
return read_data(fo, writer_schema, reader_schema) | ['def', 'schemaless_reader', '(', 'fo', ',', 'writer_schema', ',', 'reader_schema', '=', 'None', ')', ':', 'if', 'writer_schema', '==', 'reader_schema', ':', '# No need for the reader schema if they are the same', 'reader_schema', '=', 'None', 'writer_schema', '=', 'parse_schema', '(', 'writer_schema', ')', 'if', 'reader_schema', ':', 'reader_schema', '=', 'parse_schema', '(', 'reader_schema', ')', 'return', 'read_data', '(', 'fo', ',', 'writer_schema', ',', 'reader_schema', ')'] | Reads a single record writen using the
:meth:`~fastavro._write_py.schemaless_writer`
Parameters
----------
fo: file-like
Input stream
writer_schema: dict
Schema used when calling schemaless_writer
reader_schema: dict, optional
If the schema has changed since being written then the new schema can
be given to allow for schema migration
Example::
parsed_schema = fastavro.parse_schema(schema)
with open('file.avro', 'rb') as fp:
record = fastavro.schemaless_reader(fp, parsed_schema)
Note: The ``schemaless_reader`` can only read a single record. | ['Reads', 'a', 'single', 'record', 'writen', 'using', 'the', ':', 'meth', ':', '~fastavro', '.', '_write_py', '.', 'schemaless_writer'] | train | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_read_py.py#L794-L826 |
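A minimal in-memory round trip pairing the reader above with its companion schemaless_writer; the schema and record are illustrative:

from io import BytesIO
import fastavro

schema = {
    'name': 'Example',
    'type': 'record',
    'fields': [{'name': 'station', 'type': 'string'},
               {'name': 'temp', 'type': 'int'}],
}
parsed_schema = fastavro.parse_schema(schema)

buf = BytesIO()
fastavro.schemaless_writer(buf, parsed_schema, {'station': '011990-99999', 'temp': 22})
buf.seek(0)
print(fastavro.schemaless_reader(buf, parsed_schema))  # {'station': '011990-99999', 'temp': 22}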
8,976 | wandb/client | wandb/vendor/prompt_toolkit/key_binding/registry.py | Registry.add_binding | def add_binding(self, *keys, **kwargs):
"""
Decorator for annotating key bindings.
:param filter: :class:`~prompt_toolkit.filters.CLIFilter` to determine
when this key binding is active.
:param eager: :class:`~prompt_toolkit.filters.CLIFilter` or `bool`.
When True, ignore potential longer matches when this key binding is
hit. E.g. when there is an active eager key binding for Ctrl-X,
execute the handler immediately and ignore the key binding for
Ctrl-X Ctrl-E of which it is a prefix.
:param save_before: Callable that takes an `Event` and returns True if
we should save the current buffer, before handling the event.
(That's the default.)
"""
filter = to_cli_filter(kwargs.pop('filter', True))
eager = to_cli_filter(kwargs.pop('eager', False))
save_before = kwargs.pop('save_before', lambda e: True)
to_cli_filter(kwargs.pop('invalidate_ui', True)) # Deprecated! (ignored.)
assert not kwargs
assert keys
assert all(isinstance(k, (Key, text_type)) for k in keys), \
'Key bindings should consist of Key and string (unicode) instances.'
assert callable(save_before)
if isinstance(filter, Never):
# When a filter is Never, it will always stay disabled, so in that case
# don't bother putting it in the registry. It will slow down every key
# press otherwise.
def decorator(func):
return func
else:
def decorator(func):
self.key_bindings.append(
_Binding(keys, func, filter=filter, eager=eager,
save_before=save_before))
self._clear_cache()
return func
return decorator | python | def add_binding(self, *keys, **kwargs):
"""
Decorator for annotating key bindings.
:param filter: :class:`~prompt_toolkit.filters.CLIFilter` to determine
when this key binding is active.
:param eager: :class:`~prompt_toolkit.filters.CLIFilter` or `bool`.
When True, ignore potential longer matches when this key binding is
hit. E.g. when there is an active eager key binding for Ctrl-X,
execute the handler immediately and ignore the key binding for
Ctrl-X Ctrl-E of which it is a prefix.
:param save_before: Callable that takes an `Event` and returns True if
we should save the current buffer, before handling the event.
(That's the default.)
"""
filter = to_cli_filter(kwargs.pop('filter', True))
eager = to_cli_filter(kwargs.pop('eager', False))
save_before = kwargs.pop('save_before', lambda e: True)
to_cli_filter(kwargs.pop('invalidate_ui', True)) # Deprecated! (ignored.)
assert not kwargs
assert keys
assert all(isinstance(k, (Key, text_type)) for k in keys), \
'Key bindings should consist of Key and string (unicode) instances.'
assert callable(save_before)
if isinstance(filter, Never):
# When a filter is Never, it will always stay disabled, so in that case
# don't bother putting it in the registry. It will slow down every key
# press otherwise.
def decorator(func):
return func
else:
def decorator(func):
self.key_bindings.append(
_Binding(keys, func, filter=filter, eager=eager,
save_before=save_before))
self._clear_cache()
return func
return decorator | ['def', 'add_binding', '(', 'self', ',', '*', 'keys', ',', '*', '*', 'kwargs', ')', ':', 'filter', '=', 'to_cli_filter', '(', 'kwargs', '.', 'pop', '(', "'filter'", ',', 'True', ')', ')', 'eager', '=', 'to_cli_filter', '(', 'kwargs', '.', 'pop', '(', "'eager'", ',', 'False', ')', ')', 'save_before', '=', 'kwargs', '.', 'pop', '(', "'save_before'", ',', 'lambda', 'e', ':', 'True', ')', 'to_cli_filter', '(', 'kwargs', '.', 'pop', '(', "'invalidate_ui'", ',', 'True', ')', ')', '# Deprecated! (ignored.)', 'assert', 'not', 'kwargs', 'assert', 'keys', 'assert', 'all', '(', 'isinstance', '(', 'k', ',', '(', 'Key', ',', 'text_type', ')', ')', 'for', 'k', 'in', 'keys', ')', ',', "'Key bindings should consist of Key and string (unicode) instances.'", 'assert', 'callable', '(', 'save_before', ')', 'if', 'isinstance', '(', 'filter', ',', 'Never', ')', ':', '# When a filter is Never, it will always stay disabled, so in that case', "# don't bother putting it in the registry. It will slow down every key", '# press otherwise.', 'def', 'decorator', '(', 'func', ')', ':', 'return', 'func', 'else', ':', 'def', 'decorator', '(', 'func', ')', ':', 'self', '.', 'key_bindings', '.', 'append', '(', '_Binding', '(', 'keys', ',', 'func', ',', 'filter', '=', 'filter', ',', 'eager', '=', 'eager', ',', 'save_before', '=', 'save_before', ')', ')', 'self', '.', '_clear_cache', '(', ')', 'return', 'func', 'return', 'decorator'] | Decorator for annotating key bindings.
:param filter: :class:`~prompt_toolkit.filters.CLIFilter` to determine
when this key binding is active.
:param eager: :class:`~prompt_toolkit.filters.CLIFilter` or `bool`.
When True, ignore potential longer matches when this key binding is
hit. E.g. when there is an active eager key binding for Ctrl-X,
execute the handler immediately and ignore the key binding for
Ctrl-X Ctrl-E of which it is a prefix.
:param save_before: Callable that takes an `Event` and returns True if
we should save the current buffer, before handling the event.
(That's the default.) | ['Decorator', 'for', 'annotating', 'key', 'bindings', '.'] | train | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/registry.py#L101-L141 |
8,977 | saltstack/salt | salt/modules/zfs.py | exists | def exists(name, **kwargs):
'''
Check if a ZFS filesystem or volume or snapshot exists.
name : string
name of dataset
type : string
also check if dataset is of a certain type, valid choices are:
filesystem, snapshot, volume, bookmark, or all.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' zfs.exists myzpool/mydataset
salt '*' zfs.exists myzpool/myvolume type=volume
'''
## Configure command
# NOTE: initialize the defaults
opts = {}
# NOTE: set extra config from kwargs
if kwargs.get('type', False):
opts['-t'] = kwargs.get('type')
## Check if 'name' of 'type' exists
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='list',
opts=opts,
target=name,
),
python_shell=False,
ignore_retcode=True,
)
return res['retcode'] == 0 | python | def exists(name, **kwargs):
'''
Check if a ZFS filesystem or volume or snapshot exists.
name : string
name of dataset
type : string
also check if dataset is of a certain type, valid choices are:
filesystem, snapshot, volume, bookmark, or all.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' zfs.exists myzpool/mydataset
salt '*' zfs.exists myzpool/myvolume type=volume
'''
## Configure command
# NOTE: initialize the defaults
opts = {}
# NOTE: set extra config from kwargs
if kwargs.get('type', False):
opts['-t'] = kwargs.get('type')
## Check if 'name' of 'type' exists
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='list',
opts=opts,
target=name,
),
python_shell=False,
ignore_retcode=True,
)
return res['retcode'] == 0 | ['def', 'exists', '(', 'name', ',', '*', '*', 'kwargs', ')', ':', '## Configure command', '# NOTE: initialize the defaults', 'opts', '=', '{', '}', '# NOTE: set extra config from kwargs', 'if', 'kwargs', '.', 'get', '(', "'type'", ',', 'False', ')', ':', 'opts', '[', "'-t'", ']', '=', 'kwargs', '.', 'get', '(', "'type'", ')', "## Check if 'name' of 'type' exists", 'res', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', '__utils__', '[', "'zfs.zfs_command'", ']', '(', 'command', '=', "'list'", ',', 'opts', '=', 'opts', ',', 'target', '=', 'name', ',', ')', ',', 'python_shell', '=', 'False', ',', 'ignore_retcode', '=', 'True', ',', ')', 'return', 'res', '[', "'retcode'", ']', '==', '0'] | Check if a ZFS filesystem or volume or snapshot exists.
name : string
name of dataset
type : string
also check if dataset is of a certain type, valid choices are:
filesystem, snapshot, volume, bookmark, or all.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' zfs.exists myzpool/mydataset
salt '*' zfs.exists myzpool/myvolume type=volume | ['Check', 'if', 'a', 'ZFS', 'filesystem', 'or', 'volume', 'or', 'snapshot', 'exists', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zfs.py#L48-L87 |
8,978 | jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _inertia_from_labels | def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia) | python | def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia) | ['def', '_inertia_from_labels', '(', 'X', ',', 'centers', ',', 'labels', ')', ':', 'n_examples', ',', 'n_features', '=', 'X', '.', 'shape', 'inertia', '=', 'np', '.', 'zeros', '(', '(', 'n_examples', ',', ')', ')', 'for', 'ee', 'in', 'range', '(', 'n_examples', ')', ':', 'inertia', '[', 'ee', ']', '=', '1', '-', 'X', '[', 'ee', ',', ':', ']', '.', 'dot', '(', 'centers', '[', 'int', '(', 'labels', '[', 'ee', ']', ')', ',', ':', ']', '.', 'T', ')', 'return', 'np', '.', 'sum', '(', 'inertia', ')'] | Compute inertia with cosine distance using known labels. | ['Compute', 'inertia', 'with', 'cosine', 'distance', 'using', 'known', 'labels', '.'] | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L25-L33 |
8,979 | Erotemic/utool | utool/util_progress.py | progress_str | def progress_str(max_val, lbl='Progress: ', repl=False, approx=False,
backspace=PROGGRESS_BACKSPACE):
r""" makes format string that prints progress: %Xd/MAX_VAL with backspaces
NOTE: \r can be used instead of backspaces. This function is not very
relevant because of that.
"""
# string that displays max value
max_str = six.text_type(max_val)
if approx:
# denote approximate maximum
max_str = '~' + max_str
dnumstr = six.text_type(len(max_str))
# string that displays current progress
cur_str = '%' + dnumstr + 'd'
# If user passed in the label
if repl:
_fmt_str = lbl.replace('<cur_str>', cur_str).replace('<max_str>', max_str)
else:
_fmt_str = lbl + cur_str + '/' + max_str
if backspace:
# put backspace characters into the progress string
# (looks nice on normal terminals)
#nBackspaces = len(_fmt_str) - len(dnumstr) + len(max_str)
#backspaces = '\b' * nBackspaces
#fmt_str = backspaces + _fmt_str
# FIXME: USE CARAGE RETURN INSTEAD OF BACKSPACES
fmt_str = '\r' + _fmt_str
else:
# FIXME: USE CARAGE RETURN INSTEAD OF BACKSPACES
# this looks better on terminals without backspaces
fmt_str = _fmt_str + '\n'
return fmt_str | python | def progress_str(max_val, lbl='Progress: ', repl=False, approx=False,
backspace=PROGGRESS_BACKSPACE):
r""" makes format string that prints progress: %Xd/MAX_VAL with backspaces
NOTE: \r can be used instead of backspaces. This function is not very
relevant because of that.
"""
# string that displays max value
max_str = six.text_type(max_val)
if approx:
# denote approximate maximum
max_str = '~' + max_str
dnumstr = six.text_type(len(max_str))
# string that displays current progress
cur_str = '%' + dnumstr + 'd'
# If user passed in the label
if repl:
_fmt_str = lbl.replace('<cur_str>', cur_str).replace('<max_str>', max_str)
else:
_fmt_str = lbl + cur_str + '/' + max_str
if backspace:
# put backspace characters into the progress string
# (looks nice on normal terminals)
#nBackspaces = len(_fmt_str) - len(dnumstr) + len(max_str)
#backspaces = '\b' * nBackspaces
#fmt_str = backspaces + _fmt_str
# FIXME: USE CARAGE RETURN INSTEAD OF BACKSPACES
fmt_str = '\r' + _fmt_str
else:
# FIXME: USE CARAGE RETURN INSTEAD OF BACKSPACES
# this looks better on terminals without backspaces
fmt_str = _fmt_str + '\n'
return fmt_str | ['def', 'progress_str', '(', 'max_val', ',', 'lbl', '=', "'Progress: '", ',', 'repl', '=', 'False', ',', 'approx', '=', 'False', ',', 'backspace', '=', 'PROGGRESS_BACKSPACE', ')', ':', '# string that displays max value', 'max_str', '=', 'six', '.', 'text_type', '(', 'max_val', ')', 'if', 'approx', ':', '# denote approximate maximum', 'max_str', '=', "'~'", '+', 'max_str', 'dnumstr', '=', 'six', '.', 'text_type', '(', 'len', '(', 'max_str', ')', ')', '# string that displays current progress', 'cur_str', '=', "'%'", '+', 'dnumstr', '+', "'d'", '# If user passed in the label', 'if', 'repl', ':', '_fmt_str', '=', 'lbl', '.', 'replace', '(', "'<cur_str>'", ',', 'cur_str', ')', '.', 'replace', '(', "'<max_str>'", ',', 'max_str', ')', 'else', ':', '_fmt_str', '=', 'lbl', '+', 'cur_str', '+', "'/'", '+', 'max_str', 'if', 'backspace', ':', '# put backspace characters into the progress string', '# (looks nice on normal terminals)', '#nBackspaces = len(_fmt_str) - len(dnumstr) + len(max_str)', "#backspaces = '\\b' * nBackspaces", '#fmt_str = backspaces + _fmt_str', '# FIXME: USE CARAGE RETURN INSTEAD OF BACKSPACES', 'fmt_str', '=', "'\\r'", '+', '_fmt_str', 'else', ':', '# FIXME: USE CARAGE RETURN INSTEAD OF BACKSPACES', '# this looks better on terminals without backspaces', 'fmt_str', '=', '_fmt_str', '+', "'\\n'", 'return', 'fmt_str'] | r""" makes format string that prints progress: %Xd/MAX_VAL with backspaces
NOTE: \r can be used instead of backspaces. This function is not very
relevant because of that. | ['r', 'makes', 'format', 'string', 'that', 'prints', 'progress', ':', '%Xd', '/', 'MAX_VAL', 'with', 'backspaces'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_progress.py#L836-L869 |
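What the returned format string looks like for a maximum of 100 with an explicit backspace flag (a sketch; the import path is assumed from the file location above):

from utool.util_progress import progress_str  # assumed import

fmt = progress_str(100, lbl='Progress: ', backspace=True)
print(repr(fmt))       # '\rProgress: %3d/100'
print(repr(fmt % 42))  # '\rProgress:  42/100'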
8,980 | saltstack/salt | salt/modules/oracle.py | run_query | def run_query(db, query):
'''
Run SQL query and return result
CLI Example:
.. code-block:: bash
salt '*' oracle.run_query my_db "select * from my_table"
'''
if db in [x.keys()[0] for x in show_dbs()]:
conn = _connect(show_dbs(db)[db]['uri'])
else:
log.debug('No uri found in pillars - will try to use oratab')
# if db does not have uri defined in pillars
# or it's not defined in pillars at all parse oratab file
conn = _connect(uri=db)
return conn.cursor().execute(query).fetchall() | python | def run_query(db, query):
'''
Run SQL query and return result
CLI Example:
.. code-block:: bash
salt '*' oracle.run_query my_db "select * from my_table"
'''
if db in [x.keys()[0] for x in show_dbs()]:
conn = _connect(show_dbs(db)[db]['uri'])
else:
log.debug('No uri found in pillars - will try to use oratab')
# if db does not have uri defined in pillars
# or it's not defined in pillars at all parse oratab file
conn = _connect(uri=db)
return conn.cursor().execute(query).fetchall() | ['def', 'run_query', '(', 'db', ',', 'query', ')', ':', 'if', 'db', 'in', '[', 'x', '.', 'keys', '(', ')', '[', '0', ']', 'for', 'x', 'in', 'show_dbs', '(', ')', ']', ':', 'conn', '=', '_connect', '(', 'show_dbs', '(', 'db', ')', '[', 'db', ']', '[', "'uri'", ']', ')', 'else', ':', 'log', '.', 'debug', '(', "'No uri found in pillars - will try to use oratab'", ')', '# if db does not have uri defined in pillars', "# or it's not defined in pillars at all parse oratab file", 'conn', '=', '_connect', '(', 'uri', '=', 'db', ')', 'return', 'conn', '.', 'cursor', '(', ')', '.', 'execute', '(', 'query', ')', '.', 'fetchall', '(', ')'] | Run SQL query and return result
CLI Example:
.. code-block:: bash
salt '*' oracle.run_query my_db "select * from my_table" | ['Run', 'SQL', 'query', 'and', 'return', 'result'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/oracle.py#L170-L187 |
8,981 | confluentinc/confluent-kafka-python | confluent_kafka/avro/serializer/message_serializer.py | MessageSerializer.encode_record_with_schema_id | def encode_record_with_schema_id(self, schema_id, record, is_key=False):
"""
Encode a record with a given schema id. The record must
be a python dictionary.
:param int schema_id: integer ID
:param dict record: An object to serialize
:param bool is_key: If the record is a key
:returns: decoder function
:rtype: func
"""
serialize_err = KeySerializerError if is_key else ValueSerializerError
# use slow avro
if schema_id not in self.id_to_writers:
# get the writer + schema
try:
schema = self.registry_client.get_by_id(schema_id)
if not schema:
raise serialize_err("Schema does not exist")
self.id_to_writers[schema_id] = self._get_encoder_func(schema)
except ClientError:
exc_type, exc_value, exc_traceback = sys.exc_info()
raise serialize_err(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
# get the writer
writer = self.id_to_writers[schema_id]
with ContextStringIO() as outf:
# Write the magic byte and schema ID in network byte order (big endian)
outf.write(struct.pack('>bI', MAGIC_BYTE, schema_id))
# write the record to the rest of the buffer
writer(record, outf)
return outf.getvalue() | python | def encode_record_with_schema_id(self, schema_id, record, is_key=False):
"""
Encode a record with a given schema id. The record must
be a python dictionary.
:param int schema_id: integer ID
:param dict record: An object to serialize
:param bool is_key: If the record is a key
:returns: decoder function
:rtype: func
"""
serialize_err = KeySerializerError if is_key else ValueSerializerError
# use slow avro
if schema_id not in self.id_to_writers:
# get the writer + schema
try:
schema = self.registry_client.get_by_id(schema_id)
if not schema:
raise serialize_err("Schema does not exist")
self.id_to_writers[schema_id] = self._get_encoder_func(schema)
except ClientError:
exc_type, exc_value, exc_traceback = sys.exc_info()
raise serialize_err(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
# get the writer
writer = self.id_to_writers[schema_id]
with ContextStringIO() as outf:
# Write the magic byte and schema ID in network byte order (big endian)
outf.write(struct.pack('>bI', MAGIC_BYTE, schema_id))
# write the record to the rest of the buffer
writer(record, outf)
return outf.getvalue() | ['def', 'encode_record_with_schema_id', '(', 'self', ',', 'schema_id', ',', 'record', ',', 'is_key', '=', 'False', ')', ':', 'serialize_err', '=', 'KeySerializerError', 'if', 'is_key', 'else', 'ValueSerializerError', '# use slow avro', 'if', 'schema_id', 'not', 'in', 'self', '.', 'id_to_writers', ':', '# get the writer + schema', 'try', ':', 'schema', '=', 'self', '.', 'registry_client', '.', 'get_by_id', '(', 'schema_id', ')', 'if', 'not', 'schema', ':', 'raise', 'serialize_err', '(', '"Schema does not exist"', ')', 'self', '.', 'id_to_writers', '[', 'schema_id', ']', '=', 'self', '.', '_get_encoder_func', '(', 'schema', ')', 'except', 'ClientError', ':', 'exc_type', ',', 'exc_value', ',', 'exc_traceback', '=', 'sys', '.', 'exc_info', '(', ')', 'raise', 'serialize_err', '(', 'repr', '(', 'traceback', '.', 'format_exception', '(', 'exc_type', ',', 'exc_value', ',', 'exc_traceback', ')', ')', ')', '# get the writer', 'writer', '=', 'self', '.', 'id_to_writers', '[', 'schema_id', ']', 'with', 'ContextStringIO', '(', ')', 'as', 'outf', ':', '# Write the magic byte and schema ID in network byte order (big endian)', 'outf', '.', 'write', '(', 'struct', '.', 'pack', '(', "'>bI'", ',', 'MAGIC_BYTE', ',', 'schema_id', ')', ')', '# write the record to the rest of the buffer', 'writer', '(', 'record', ',', 'outf', ')', 'return', 'outf', '.', 'getvalue', '(', ')'] | Encode a record with a given schema id. The record must
be a python dictionary.
:param int schema_id: integer ID
:param dict record: An object to serialize
:param bool is_key: If the record is a key
:returns: decoder function
:rtype: func | ['Encode', 'a', 'record', 'with', 'a', 'given', 'schema', 'id', '.', 'The', 'record', 'must', 'be', 'a', 'python', 'dictionary', '.', ':', 'param', 'int', 'schema_id', ':', 'integer', 'ID', ':', 'param', 'dict', 'record', ':', 'An', 'object', 'to', 'serialize', ':', 'param', 'bool', 'is_key', ':', 'If', 'the', 'record', 'is', 'a', 'key', ':', 'returns', ':', 'decoder', 'function', ':', 'rtype', ':', 'func'] | train | https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/serializer/message_serializer.py#L115-L149 |
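The five-byte header written above (magic byte plus big-endian schema id) can be split back off an encoded message with the matching unpack; a small sketch:

import struct

def split_confluent_message(payload):
    # payload: bytes produced by encode_record_with_schema_id
    magic, schema_id = struct.unpack('>bI', payload[:5])
    avro_body = payload[5:]  # binary Avro-encoded record follows the header
    return magic, schema_id, avro_body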
8,982 | PmagPy/PmagPy | dialogs/thellier_interpreter.py | thellier_auto_interpreter.find_close_value | def find_close_value(self, LIST, value):
'''
take a LIST and find the nearest value in LIST to 'value'
'''
diff = inf
for a in LIST:
if abs(value - a) < diff:
diff = abs(value - a)
result = a
return(result) | python | def find_close_value(self, LIST, value):
'''
take a LIST and find the nearest value in LIST to 'value'
'''
diff = inf
for a in LIST:
if abs(value - a) < diff:
diff = abs(value - a)
result = a
return(result) | ['def', 'find_close_value', '(', 'self', ',', 'LIST', ',', 'value', ')', ':', 'diff', '=', 'inf', 'for', 'a', 'in', 'LIST', ':', 'if', 'abs', '(', 'value', '-', 'a', ')', '<', 'diff', ':', 'diff', '=', 'abs', '(', 'value', '-', 'a', ')', 'result', '=', 'a', 'return', '(', 'result', ')'] | take a LIST and find the nearest value in LIST to 'value' | ['take', 'a', 'LIST', 'and', 'find', 'the', 'nearest', 'value', 'in', 'LIST', 'to', 'value'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/thellier_interpreter.py#L1000-L1009 |
8,983 | salbrandi/stressypy | stressypy/cpustresser.py | create_job | def create_job(cpu_width, time_height):
"""
:param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object
"""
shell_command = stress_string.format(cpu_width, time_height)
job = JobBlock(cpu_width, time_height)
job.set_job(subprocess.call, shell_command, shell=True)
return job | python | def create_job(cpu_width, time_height):
"""
:param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object
"""
shell_command = stress_string.format(cpu_width, time_height)
job = JobBlock(cpu_width, time_height)
job.set_job(subprocess.call, shell_command, shell=True)
return job | ['def', 'create_job', '(', 'cpu_width', ',', 'time_height', ')', ':', 'shell_command', '=', 'stress_string', '.', 'format', '(', 'cpu_width', ',', 'time_height', ')', 'job', '=', 'JobBlock', '(', 'cpu_width', ',', 'time_height', ')', 'job', '.', 'set_job', '(', 'subprocess', '.', 'call', ',', 'shell_command', ',', 'shell', '=', 'True', ')', 'return', 'job'] | :param cpu_width: number of cpus
:param time_height: amount of time
:return: the instantiated JobBlock object | [':', 'param', 'cpu_width', ':', 'number', 'of', 'cpus', ':', 'param', 'time_height', ':', 'amount', 'of', 'time', ':', 'return', ':', 'the', 'instantiated', 'JobBlock', 'object'] | train | https://github.com/salbrandi/stressypy/blob/7e2901e131a40f3597921358a1c8647a346bd0cc/stressypy/cpustresser.py#L52-L62 |
8,984 | xapple/plumbing | plumbing/slurm/job.py | JobSLURM.wait_locally | def wait_locally(self):
"""If you have run the query in a non-blocking way, call this method to pause
until the query is finished."""
try: self.thread.join(sys.maxint) # maxint timeout so that we can Ctrl-C them
except KeyboardInterrupt: print "Stopped waiting on job '%s'" % self.kwargs['job_name'] | python | def wait_locally(self):
"""If you have run the query in a non-blocking way, call this method to pause
until the query is finished."""
try: self.thread.join(sys.maxint) # maxint timeout so that we can Ctrl-C them
except KeyboardInterrupt: print "Stopped waiting on job '%s'" % self.kwargs['job_name'] | ['def', 'wait_locally', '(', 'self', ')', ':', 'try', ':', 'self', '.', 'thread', '.', 'join', '(', 'sys', '.', 'maxint', ')', '# maxint timeout so that we can Ctrl-C them', 'except', 'KeyboardInterrupt', ':', 'print', '"Stopped waiting on job \'%s\'"', '%', 'self', '.', 'kwargs', '[', "'job_name'", ']'] | If you have run the query in a non-blocking way, call this method to pause
until the query is finished. | ['If', 'you', 'have', 'run', 'the', 'query', 'in', 'a', 'non', '-', 'blocking', 'way', 'call', 'this', 'method', 'to', 'pause', 'until', 'the', 'query', 'is', 'finished', '.'] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L263-L267 |
8,985 | spacetelescope/drizzlepac | drizzlepac/imageObject.py | baseImageObject.putData | def putData(self,data=None,exten=None):
""" Now that we are removing the data from the object to save memory,
we need something that cleanly puts the data array back into
the object so that we can write out everything together using
something like fits.writeto....this method is an attempt to
make sure that when you add an array back to the .data section
of the hdu it still matches the header information for that
section ( ie. update the bitpix to reflect the datatype of the
array you are adding). The other header stuff is up to you to verify.
Data should be the data array exten is where you want to stick it,
either extension number or a string like 'sci,1'
"""
if data is None:
log.warning("No data supplied")
else:
extnum = _interpretExten(exten)
ext = self._image[extnum]
# update the bitpix to the current datatype, this aint fancy and
# ignores bscale
ext.header['BITPIX'] = _NUMPY_TO_IRAF_DTYPES[data.dtype.name]
ext.data = data | python | def putData(self,data=None,exten=None):
""" Now that we are removing the data from the object to save memory,
we need something that cleanly puts the data array back into
the object so that we can write out everything together using
something like fits.writeto....this method is an attempt to
make sure that when you add an array back to the .data section
of the hdu it still matches the header information for that
section ( ie. update the bitpix to reflect the datatype of the
array you are adding). The other header stuff is up to you to verify.
Data should be the data array exten is where you want to stick it,
either extension number or a string like 'sci,1'
"""
if data is None:
log.warning("No data supplied")
else:
extnum = _interpretExten(exten)
ext = self._image[extnum]
# update the bitpix to the current datatype, this aint fancy and
# ignores bscale
ext.header['BITPIX'] = _NUMPY_TO_IRAF_DTYPES[data.dtype.name]
ext.data = data | ['def', 'putData', '(', 'self', ',', 'data', '=', 'None', ',', 'exten', '=', 'None', ')', ':', 'if', 'data', 'is', 'None', ':', 'log', '.', 'warning', '(', '"No data supplied"', ')', 'else', ':', 'extnum', '=', '_interpretExten', '(', 'exten', ')', 'ext', '=', 'self', '.', '_image', '[', 'extnum', ']', '# update the bitpix to the current datatype, this aint fancy and', '# ignores bscale', 'ext', '.', 'header', '[', "'BITPIX'", ']', '=', '_NUMPY_TO_IRAF_DTYPES', '[', 'data', '.', 'dtype', '.', 'name', ']', 'ext', '.', 'data', '=', 'data'] | Now that we are removing the data from the object to save memory,
we need something that cleanly puts the data array back into
the object so that we can write out everything together using
something like fits.writeto....this method is an attempt to
make sure that when you add an array back to the .data section
of the hdu it still matches the header information for that
section ( ie. update the bitpix to reflect the datatype of the
array you are adding). The other header stuff is up to you to verify.
        Data should be the data array; exten is where you want to stick it,
either extension number or a string like 'sci,1' | ['Now', 'that', 'we', 'are', 'removing', 'the', 'data', 'from', 'the', 'object', 'to', 'save', 'memory', 'we', 'need', 'something', 'that', 'cleanly', 'puts', 'the', 'data', 'array', 'back', 'into', 'the', 'object', 'so', 'that', 'we', 'can', 'write', 'out', 'everything', 'together', 'using', 'something', 'like', 'fits', '.', 'writeto', '....', 'this', 'method', 'is', 'an', 'attempt', 'to', 'make', 'sure', 'that', 'when', 'you', 'add', 'an', 'array', 'back', 'to', 'the', '.', 'data', 'section', 'of', 'the', 'hdu', 'it', 'still', 'matches', 'the', 'header', 'information', 'for', 'that', 'section', '(', 'ie', '.', 'update', 'the', 'bitpix', 'to', 'reflect', 'the', 'datatype', 'of', 'the', 'array', 'you', 'are', 'adding', ')', '.', 'The', 'other', 'header', 'stuff', 'is', 'up', 'to', 'you', 'to', 'verify', '.'] | train | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L222-L244 |
8,986 | pvlib/pvlib-python | pvlib/atmosphere.py | kasten96_lt | def kasten96_lt(airmass_absolute, precipitable_water, aod_bb):
"""
Calculate Linke turbidity factor using Kasten pyrheliometric formula.
Note that broadband aerosol optical depth (AOD) can be approximated by AOD
measured at 700 nm according to Molineaux [4] . Bird and Hulstrom offer an
alternate approximation using AOD measured at 380 nm and 500 nm.
Based on original implementation by Armel Oumbe.
.. warning::
These calculations are only valid for air mass less than 5 atm and
precipitable water less than 5 cm.
Parameters
----------
airmass_absolute : numeric
airmass, pressure corrected in atmospheres
precipitable_water : numeric
precipitable water or total column water vapor in centimeters
aod_bb : numeric
broadband AOD
Returns
-------
lt : numeric
Linke turbidity
See also
--------
bird_hulstrom80_aod_bb
angstrom_aod_at_lambda
References
----------
[1] F. Linke, "Transmissions-Koeffizient und Trubungsfaktor", Beitrage
zur Physik der Atmosphare, Vol 10, pp. 91-103 (1922)
[2] F. Kasten, "A simple parameterization of the pyrheliometric formula for
determining the Linke turbidity factor", Meteorologische Rundschau 33,
pp. 124-127 (1980)
[3] Kasten, "The Linke turbidity factor based on improved values of the
integral Rayleigh optical thickness", Solar Energy, Vol. 56, No. 3,
pp. 239-244 (1996)
:doi:`10.1016/0038-092X(95)00114-7`
[4] B. Molineaux, P. Ineichen, N. O'Neill, "Equivalence of pyrheliometric
and monochromatic aerosol optical depths at a single key wavelength",
Applied Optics Vol. 37, issue 10, 7008-7018 (1998)
:doi:`10.1364/AO.37.007008`
[5] P. Ineichen, "Conversion function between the Linke turbidity and the
atmospheric water vapor and aerosol content", Solar Energy 82,
pp. 1095-1097 (2008)
:doi:`10.1016/j.solener.2008.04.010`
[6] P. Ineichen and R. Perez, "A new airmass independent formulation for
the Linke Turbidity coefficient", Solar Energy, Vol. 73, no. 3, pp. 151-157
(2002)
:doi:`10.1016/S0038-092X(02)00045-2`
"""
# "From numerically integrated spectral simulations done with Modtran
# (Berk, 1989), Molineaux (1998) obtained for the broadband optical depth
# of a clean and dry atmospshere (fictitious atmosphere that comprises only
# the effects of Rayleigh scattering and absorption by the atmosphere gases
# other than the water vapor) the following expression"
# - P. Ineichen (2008)
delta_cda = -0.101 + 0.235 * airmass_absolute ** (-0.16)
# "and the broadband water vapor optical depth where pwat is the integrated
# precipitable water vapor content of the atmosphere expressed in cm and am
# the optical air mass. The precision of these fits is better than 1% when
# compared with Modtran simulations in the range 1 < am < 5 and
# 0 < pwat < 5 cm at sea level" - P. Ineichen (2008)
delta_w = 0.112 * airmass_absolute ** (-0.55) * precipitable_water ** 0.34
# broadband AOD
delta_a = aod_bb
# "Then using the Kasten pyrheliometric formula (1980, 1996), the Linke
# turbidity at am = 2 can be written. The extension of the Linke turbidity
# coefficient to other values of air mass was published by Ineichen and
# Perez (2002)" - P. Ineichen (2008)
lt = -(9.4 + 0.9 * airmass_absolute) * np.log(
np.exp(-airmass_absolute * (delta_cda + delta_w + delta_a))
) / airmass_absolute
# filter out of extrapolated values
return lt | python | def kasten96_lt(airmass_absolute, precipitable_water, aod_bb):
"""
Calculate Linke turbidity factor using Kasten pyrheliometric formula.
Note that broadband aerosol optical depth (AOD) can be approximated by AOD
measured at 700 nm according to Molineaux [4] . Bird and Hulstrom offer an
alternate approximation using AOD measured at 380 nm and 500 nm.
Based on original implementation by Armel Oumbe.
.. warning::
These calculations are only valid for air mass less than 5 atm and
precipitable water less than 5 cm.
Parameters
----------
airmass_absolute : numeric
airmass, pressure corrected in atmospheres
precipitable_water : numeric
precipitable water or total column water vapor in centimeters
aod_bb : numeric
broadband AOD
Returns
-------
lt : numeric
Linke turbidity
See also
--------
bird_hulstrom80_aod_bb
angstrom_aod_at_lambda
References
----------
[1] F. Linke, "Transmissions-Koeffizient und Trubungsfaktor", Beitrage
zur Physik der Atmosphare, Vol 10, pp. 91-103 (1922)
[2] F. Kasten, "A simple parameterization of the pyrheliometric formula for
determining the Linke turbidity factor", Meteorologische Rundschau 33,
pp. 124-127 (1980)
[3] Kasten, "The Linke turbidity factor based on improved values of the
integral Rayleigh optical thickness", Solar Energy, Vol. 56, No. 3,
pp. 239-244 (1996)
:doi:`10.1016/0038-092X(95)00114-7`
[4] B. Molineaux, P. Ineichen, N. O'Neill, "Equivalence of pyrheliometric
and monochromatic aerosol optical depths at a single key wavelength",
Applied Optics Vol. 37, issue 10, 7008-7018 (1998)
:doi:`10.1364/AO.37.007008`
[5] P. Ineichen, "Conversion function between the Linke turbidity and the
atmospheric water vapor and aerosol content", Solar Energy 82,
pp. 1095-1097 (2008)
:doi:`10.1016/j.solener.2008.04.010`
[6] P. Ineichen and R. Perez, "A new airmass independent formulation for
the Linke Turbidity coefficient", Solar Energy, Vol. 73, no. 3, pp. 151-157
(2002)
:doi:`10.1016/S0038-092X(02)00045-2`
"""
# "From numerically integrated spectral simulations done with Modtran
# (Berk, 1989), Molineaux (1998) obtained for the broadband optical depth
# of a clean and dry atmospshere (fictitious atmosphere that comprises only
# the effects of Rayleigh scattering and absorption by the atmosphere gases
# other than the water vapor) the following expression"
# - P. Ineichen (2008)
delta_cda = -0.101 + 0.235 * airmass_absolute ** (-0.16)
# "and the broadband water vapor optical depth where pwat is the integrated
# precipitable water vapor content of the atmosphere expressed in cm and am
# the optical air mass. The precision of these fits is better than 1% when
# compared with Modtran simulations in the range 1 < am < 5 and
# 0 < pwat < 5 cm at sea level" - P. Ineichen (2008)
delta_w = 0.112 * airmass_absolute ** (-0.55) * precipitable_water ** 0.34
# broadband AOD
delta_a = aod_bb
# "Then using the Kasten pyrheliometric formula (1980, 1996), the Linke
# turbidity at am = 2 can be written. The extension of the Linke turbidity
# coefficient to other values of air mass was published by Ineichen and
# Perez (2002)" - P. Ineichen (2008)
lt = -(9.4 + 0.9 * airmass_absolute) * np.log(
np.exp(-airmass_absolute * (delta_cda + delta_w + delta_a))
) / airmass_absolute
# filter out of extrapolated values
return lt | ['def', 'kasten96_lt', '(', 'airmass_absolute', ',', 'precipitable_water', ',', 'aod_bb', ')', ':', '# "From numerically integrated spectral simulations done with Modtran', '# (Berk, 1989), Molineaux (1998) obtained for the broadband optical depth', '# of a clean and dry atmospshere (fictitious atmosphere that comprises only', '# the effects of Rayleigh scattering and absorption by the atmosphere gases', '# other than the water vapor) the following expression"', '# - P. Ineichen (2008)', 'delta_cda', '=', '-', '0.101', '+', '0.235', '*', 'airmass_absolute', '**', '(', '-', '0.16', ')', '# "and the broadband water vapor optical depth where pwat is the integrated', '# precipitable water vapor content of the atmosphere expressed in cm and am', '# the optical air mass. The precision of these fits is better than 1% when', '# compared with Modtran simulations in the range 1 < am < 5 and', '# 0 < pwat < 5 cm at sea level" - P. Ineichen (2008)', 'delta_w', '=', '0.112', '*', 'airmass_absolute', '**', '(', '-', '0.55', ')', '*', 'precipitable_water', '**', '0.34', '# broadband AOD', 'delta_a', '=', 'aod_bb', '# "Then using the Kasten pyrheliometric formula (1980, 1996), the Linke', '# turbidity at am = 2 can be written. The extension of the Linke turbidity', '# coefficient to other values of air mass was published by Ineichen and', '# Perez (2002)" - P. Ineichen (2008)', 'lt', '=', '-', '(', '9.4', '+', '0.9', '*', 'airmass_absolute', ')', '*', 'np', '.', 'log', '(', 'np', '.', 'exp', '(', '-', 'airmass_absolute', '*', '(', 'delta_cda', '+', 'delta_w', '+', 'delta_a', ')', ')', ')', '/', 'airmass_absolute', '# filter out of extrapolated values', 'return', 'lt'] | Calculate Linke turbidity factor using Kasten pyrheliometric formula.
Note that broadband aerosol optical depth (AOD) can be approximated by AOD
measured at 700 nm according to Molineaux [4] . Bird and Hulstrom offer an
alternate approximation using AOD measured at 380 nm and 500 nm.
Based on original implementation by Armel Oumbe.
.. warning::
These calculations are only valid for air mass less than 5 atm and
precipitable water less than 5 cm.
Parameters
----------
airmass_absolute : numeric
airmass, pressure corrected in atmospheres
precipitable_water : numeric
precipitable water or total column water vapor in centimeters
aod_bb : numeric
broadband AOD
Returns
-------
lt : numeric
Linke turbidity
See also
--------
bird_hulstrom80_aod_bb
angstrom_aod_at_lambda
References
----------
[1] F. Linke, "Transmissions-Koeffizient und Trubungsfaktor", Beitrage
zur Physik der Atmosphare, Vol 10, pp. 91-103 (1922)
[2] F. Kasten, "A simple parameterization of the pyrheliometric formula for
determining the Linke turbidity factor", Meteorologische Rundschau 33,
pp. 124-127 (1980)
[3] Kasten, "The Linke turbidity factor based on improved values of the
integral Rayleigh optical thickness", Solar Energy, Vol. 56, No. 3,
pp. 239-244 (1996)
:doi:`10.1016/0038-092X(95)00114-7`
[4] B. Molineaux, P. Ineichen, N. O'Neill, "Equivalence of pyrheliometric
and monochromatic aerosol optical depths at a single key wavelength",
Applied Optics Vol. 37, issue 10, 7008-7018 (1998)
:doi:`10.1364/AO.37.007008`
[5] P. Ineichen, "Conversion function between the Linke turbidity and the
atmospheric water vapor and aerosol content", Solar Energy 82,
pp. 1095-1097 (2008)
:doi:`10.1016/j.solener.2008.04.010`
[6] P. Ineichen and R. Perez, "A new airmass independent formulation for
the Linke Turbidity coefficient", Solar Energy, Vol. 73, no. 3, pp. 151-157
(2002)
:doi:`10.1016/S0038-092X(02)00045-2` | ['Calculate', 'Linke', 'turbidity', 'factor', 'using', 'Kasten', 'pyrheliometric', 'formula', '.'] | train | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/atmosphere.py#L536-L621 |
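A minimal usage sketch for kasten96_lt from the pvlib record above. The input values are illustrative (chosen inside the stated validity range of airmass < 5 and precipitable water < 5 cm), and the import path simply mirrors the pvlib/atmosphere.py module path given in the record.

import numpy as np
from pvlib import atmosphere

# Illustrative inputs: pressure-corrected airmass, precipitable water [cm],
# and broadband aerosol optical depth.
airmass_absolute = np.array([1.0, 1.5, 2.0])
precipitable_water = np.array([0.5, 1.4, 3.0])
aod_bb = 0.1

lt = atmosphere.kasten96_lt(airmass_absolute, precipitable_water, aod_bb)
print(lt)  # one Linke turbidity value per airmass/pwat pair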
8,987 | jobovy/galpy | galpy/util/bovy_coords.py | Rz_to_lambdanu_jac | def Rz_to_lambdanu_jac(R,z,Delta=1.):
"""
NAME:
Rz_to_lambdanu_jac
PURPOSE:
calculate the Jacobian of the cylindrical (R,z) to prolate spheroidal
(lambda,nu) conversion
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
Delta - focal distance that defines the spheroidal coordinate system (default: 1.)
Delta=sqrt(g-a)
OUTPUT:
jacobian d((lambda,nu))/d((R,z))
HISTORY:
2015-02-13 - Written - Trick (MPIA)
"""
discr = (R**2 + z**2 - Delta**2)**2 + (4. * Delta**2 * R**2)
dldR = R * (1. + (R**2 + z**2 + Delta**2) / nu.sqrt(discr))
dndR = R * (1. - (R**2 + z**2 + Delta**2) / nu.sqrt(discr))
dldz = z * (1. + (R**2 + z**2 - Delta**2) / nu.sqrt(discr))
dndz = z * (1. - (R**2 + z**2 - Delta**2) / nu.sqrt(discr))
dim = 1
if isinstance(R,nu.ndarray): dim = len(R)
elif isinstance(z,nu.ndarray): dim = len(z)
jac = nu.zeros((2,2,dim))
jac[0,0,:] = dldR
jac[0,1,:] = dldz
jac[1,0,:] = dndR
jac[1,1,:] = dndz
if dim == 1: return jac[:,:,0]
else: return jac | python | def Rz_to_lambdanu_jac(R,z,Delta=1.):
"""
NAME:
Rz_to_lambdanu_jac
PURPOSE:
calculate the Jacobian of the cylindrical (R,z) to prolate spheroidal
(lambda,nu) conversion
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
Delta - focal distance that defines the spheroidal coordinate system (default: 1.)
Delta=sqrt(g-a)
OUTPUT:
jacobian d((lambda,nu))/d((R,z))
HISTORY:
2015-02-13 - Written - Trick (MPIA)
"""
discr = (R**2 + z**2 - Delta**2)**2 + (4. * Delta**2 * R**2)
dldR = R * (1. + (R**2 + z**2 + Delta**2) / nu.sqrt(discr))
dndR = R * (1. - (R**2 + z**2 + Delta**2) / nu.sqrt(discr))
dldz = z * (1. + (R**2 + z**2 - Delta**2) / nu.sqrt(discr))
dndz = z * (1. - (R**2 + z**2 - Delta**2) / nu.sqrt(discr))
dim = 1
if isinstance(R,nu.ndarray): dim = len(R)
elif isinstance(z,nu.ndarray): dim = len(z)
jac = nu.zeros((2,2,dim))
jac[0,0,:] = dldR
jac[0,1,:] = dldz
jac[1,0,:] = dndR
jac[1,1,:] = dndz
if dim == 1: return jac[:,:,0]
else: return jac | ['def', 'Rz_to_lambdanu_jac', '(', 'R', ',', 'z', ',', 'Delta', '=', '1.', ')', ':', 'discr', '=', '(', 'R', '**', '2', '+', 'z', '**', '2', '-', 'Delta', '**', '2', ')', '**', '2', '+', '(', '4.', '*', 'Delta', '**', '2', '*', 'R', '**', '2', ')', 'dldR', '=', 'R', '*', '(', '1.', '+', '(', 'R', '**', '2', '+', 'z', '**', '2', '+', 'Delta', '**', '2', ')', '/', 'nu', '.', 'sqrt', '(', 'discr', ')', ')', 'dndR', '=', 'R', '*', '(', '1.', '-', '(', 'R', '**', '2', '+', 'z', '**', '2', '+', 'Delta', '**', '2', ')', '/', 'nu', '.', 'sqrt', '(', 'discr', ')', ')', 'dldz', '=', 'z', '*', '(', '1.', '+', '(', 'R', '**', '2', '+', 'z', '**', '2', '-', 'Delta', '**', '2', ')', '/', 'nu', '.', 'sqrt', '(', 'discr', ')', ')', 'dndz', '=', 'z', '*', '(', '1.', '-', '(', 'R', '**', '2', '+', 'z', '**', '2', '-', 'Delta', '**', '2', ')', '/', 'nu', '.', 'sqrt', '(', 'discr', ')', ')', 'dim', '=', '1', 'if', 'isinstance', '(', 'R', ',', 'nu', '.', 'ndarray', ')', ':', 'dim', '=', 'len', '(', 'R', ')', 'elif', 'isinstance', '(', 'z', ',', 'nu', '.', 'ndarray', ')', ':', 'dim', '=', 'len', '(', 'z', ')', 'jac', '=', 'nu', '.', 'zeros', '(', '(', '2', ',', '2', ',', 'dim', ')', ')', 'jac', '[', '0', ',', '0', ',', ':', ']', '=', 'dldR', 'jac', '[', '0', ',', '1', ',', ':', ']', '=', 'dldz', 'jac', '[', '1', ',', '0', ',', ':', ']', '=', 'dndR', 'jac', '[', '1', ',', '1', ',', ':', ']', '=', 'dndz', 'if', 'dim', '==', '1', ':', 'return', 'jac', '[', ':', ',', ':', ',', '0', ']', 'else', ':', 'return', 'jac'] | NAME:
Rz_to_lambdanu_jac
PURPOSE:
calculate the Jacobian of the cylindrical (R,z) to prolate spheroidal
(lambda,nu) conversion
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
Delta - focal distance that defines the spheroidal coordinate system (default: 1.)
Delta=sqrt(g-a)
OUTPUT:
jacobian d((lambda,nu))/d((R,z))
HISTORY:
2015-02-13 - Written - Trick (MPIA) | ['NAME', ':'] | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_coords.py#L2039-L2080 |
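A usage sketch for Rz_to_lambdanu_jac from the galpy record above; the coordinates and focal distance are made-up values, and the import path follows the galpy/util/bovy_coords.py location shown in the record.

import numpy
from galpy.util import bovy_coords

# Jacobian of the (R, z) -> (lambda, nu) conversion at one point,
# with an illustrative focal distance Delta = 0.5.
jac = bovy_coords.Rz_to_lambdanu_jac(1.2, 0.3, Delta=0.5)
print(jac.shape)  # (2, 2) for scalar inputs
print(jac)        # rows: dlambda, dnu; columns: d/dR, d/dz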
8,988 | mrcagney/gtfstk | gtfstk/trips.py | map_trips | def map_trips(
feed: "Feed",
trip_ids: List[str],
color_palette: List[str] = cs.COLORS_SET2,
*,
include_stops: bool = True,
):
"""
Return a Folium map showing the given trips and (optionally)
their stops.
Parameters
----------
feed : Feed
trip_ids : list
IDs of trips in ``feed.trips``
color_palette : list
Palette to use to color the routes. If more routes than colors,
then colors will be recycled.
include_stops : boolean
If ``True``, then include stops in the map
Returns
-------
dictionary
A Folium Map depicting the shapes of the trips.
If ``include_stops``, then include the stops for each trip.
Notes
------
- Requires Folium
"""
import folium as fl
import folium.plugins as fp
# Get routes slice and convert to dictionary
trips = (
feed.trips.loc[lambda x: x["trip_id"].isin(trip_ids)]
.fillna("n/a")
.to_dict(orient="records")
)
# Create colors
n = len(trips)
colors = [color_palette[i % len(color_palette)] for i in range(n)]
# Initialize map
my_map = fl.Map(tiles="cartodbpositron")
# Collect route bounding boxes to set map zoom later
bboxes = []
# Create a feature group for each route and add it to the map
for i, trip in enumerate(trips):
collection = feed.trip_to_geojson(
trip_id=trip["trip_id"], include_stops=include_stops
)
group = fl.FeatureGroup(name="Trip " + trip["trip_id"])
color = colors[i]
for f in collection["features"]:
prop = f["properties"]
# Add stop
if f["geometry"]["type"] == "Point":
lon, lat = f["geometry"]["coordinates"]
fl.CircleMarker(
location=[lat, lon],
radius=8,
fill=True,
color=color,
weight=1,
popup=fl.Popup(hp.make_html(prop)),
).add_to(group)
# Add path
else:
# Path
prop["color"] = color
path = fl.GeoJson(
f,
name=trip,
style_function=lambda x: {
"color": x["properties"]["color"]
},
)
path.add_child(fl.Popup(hp.make_html(prop)))
path.add_to(group)
# Direction arrows, assuming, as GTFS does, that
# trip direction equals LineString direction
fp.PolyLineTextPath(
path,
" \u27A4 ",
repeat=True,
offset=5.5,
attributes={"fill": color, "font-size": "18"},
).add_to(group)
bboxes.append(sg.box(*sg.shape(f["geometry"]).bounds))
group.add_to(my_map)
fl.LayerControl().add_to(my_map)
# Fit map to bounds
bounds = so.unary_union(bboxes).bounds
bounds2 = [bounds[1::-1], bounds[3:1:-1]] # Folium expects this ordering
my_map.fit_bounds(bounds2)
return my_map | python | def map_trips(
feed: "Feed",
trip_ids: List[str],
color_palette: List[str] = cs.COLORS_SET2,
*,
include_stops: bool = True,
):
"""
Return a Folium map showing the given trips and (optionally)
their stops.
Parameters
----------
feed : Feed
trip_ids : list
IDs of trips in ``feed.trips``
color_palette : list
Palette to use to color the routes. If more routes than colors,
then colors will be recycled.
include_stops : boolean
If ``True``, then include stops in the map
Returns
-------
dictionary
A Folium Map depicting the shapes of the trips.
If ``include_stops``, then include the stops for each trip.
Notes
------
- Requires Folium
"""
import folium as fl
import folium.plugins as fp
# Get routes slice and convert to dictionary
trips = (
feed.trips.loc[lambda x: x["trip_id"].isin(trip_ids)]
.fillna("n/a")
.to_dict(orient="records")
)
# Create colors
n = len(trips)
colors = [color_palette[i % len(color_palette)] for i in range(n)]
# Initialize map
my_map = fl.Map(tiles="cartodbpositron")
# Collect route bounding boxes to set map zoom later
bboxes = []
# Create a feature group for each route and add it to the map
for i, trip in enumerate(trips):
collection = feed.trip_to_geojson(
trip_id=trip["trip_id"], include_stops=include_stops
)
group = fl.FeatureGroup(name="Trip " + trip["trip_id"])
color = colors[i]
for f in collection["features"]:
prop = f["properties"]
# Add stop
if f["geometry"]["type"] == "Point":
lon, lat = f["geometry"]["coordinates"]
fl.CircleMarker(
location=[lat, lon],
radius=8,
fill=True,
color=color,
weight=1,
popup=fl.Popup(hp.make_html(prop)),
).add_to(group)
# Add path
else:
# Path
prop["color"] = color
path = fl.GeoJson(
f,
name=trip,
style_function=lambda x: {
"color": x["properties"]["color"]
},
)
path.add_child(fl.Popup(hp.make_html(prop)))
path.add_to(group)
# Direction arrows, assuming, as GTFS does, that
# trip direction equals LineString direction
fp.PolyLineTextPath(
path,
" \u27A4 ",
repeat=True,
offset=5.5,
attributes={"fill": color, "font-size": "18"},
).add_to(group)
bboxes.append(sg.box(*sg.shape(f["geometry"]).bounds))
group.add_to(my_map)
fl.LayerControl().add_to(my_map)
# Fit map to bounds
bounds = so.unary_union(bboxes).bounds
bounds2 = [bounds[1::-1], bounds[3:1:-1]] # Folium expects this ordering
my_map.fit_bounds(bounds2)
return my_map | ['def', 'map_trips', '(', 'feed', ':', '"Feed"', ',', 'trip_ids', ':', 'List', '[', 'str', ']', ',', 'color_palette', ':', 'List', '[', 'str', ']', '=', 'cs', '.', 'COLORS_SET2', ',', '*', ',', 'include_stops', ':', 'bool', '=', 'True', ',', ')', ':', 'import', 'folium', 'as', 'fl', 'import', 'folium', '.', 'plugins', 'as', 'fp', '# Get routes slice and convert to dictionary', 'trips', '=', '(', 'feed', '.', 'trips', '.', 'loc', '[', 'lambda', 'x', ':', 'x', '[', '"trip_id"', ']', '.', 'isin', '(', 'trip_ids', ')', ']', '.', 'fillna', '(', '"n/a"', ')', '.', 'to_dict', '(', 'orient', '=', '"records"', ')', ')', '# Create colors', 'n', '=', 'len', '(', 'trips', ')', 'colors', '=', '[', 'color_palette', '[', 'i', '%', 'len', '(', 'color_palette', ')', ']', 'for', 'i', 'in', 'range', '(', 'n', ')', ']', '# Initialize map', 'my_map', '=', 'fl', '.', 'Map', '(', 'tiles', '=', '"cartodbpositron"', ')', '# Collect route bounding boxes to set map zoom later', 'bboxes', '=', '[', ']', '# Create a feature group for each route and add it to the map', 'for', 'i', ',', 'trip', 'in', 'enumerate', '(', 'trips', ')', ':', 'collection', '=', 'feed', '.', 'trip_to_geojson', '(', 'trip_id', '=', 'trip', '[', '"trip_id"', ']', ',', 'include_stops', '=', 'include_stops', ')', 'group', '=', 'fl', '.', 'FeatureGroup', '(', 'name', '=', '"Trip "', '+', 'trip', '[', '"trip_id"', ']', ')', 'color', '=', 'colors', '[', 'i', ']', 'for', 'f', 'in', 'collection', '[', '"features"', ']', ':', 'prop', '=', 'f', '[', '"properties"', ']', '# Add stop', 'if', 'f', '[', '"geometry"', ']', '[', '"type"', ']', '==', '"Point"', ':', 'lon', ',', 'lat', '=', 'f', '[', '"geometry"', ']', '[', '"coordinates"', ']', 'fl', '.', 'CircleMarker', '(', 'location', '=', '[', 'lat', ',', 'lon', ']', ',', 'radius', '=', '8', ',', 'fill', '=', 'True', ',', 'color', '=', 'color', ',', 'weight', '=', '1', ',', 'popup', '=', 'fl', '.', 'Popup', '(', 'hp', '.', 'make_html', '(', 'prop', ')', ')', ',', ')', '.', 'add_to', '(', 'group', ')', '# Add path', 'else', ':', '# Path', 'prop', '[', '"color"', ']', '=', 'color', 'path', '=', 'fl', '.', 'GeoJson', '(', 'f', ',', 'name', '=', 'trip', ',', 'style_function', '=', 'lambda', 'x', ':', '{', '"color"', ':', 'x', '[', '"properties"', ']', '[', '"color"', ']', '}', ',', ')', 'path', '.', 'add_child', '(', 'fl', '.', 'Popup', '(', 'hp', '.', 'make_html', '(', 'prop', ')', ')', ')', 'path', '.', 'add_to', '(', 'group', ')', '# Direction arrows, assuming, as GTFS does, that', '# trip direction equals LineString direction', 'fp', '.', 'PolyLineTextPath', '(', 'path', ',', '" \\u27A4 "', ',', 'repeat', '=', 'True', ',', 'offset', '=', '5.5', ',', 'attributes', '=', '{', '"fill"', ':', 'color', ',', '"font-size"', ':', '"18"', '}', ',', ')', '.', 'add_to', '(', 'group', ')', 'bboxes', '.', 'append', '(', 'sg', '.', 'box', '(', '*', 'sg', '.', 'shape', '(', 'f', '[', '"geometry"', ']', ')', '.', 'bounds', ')', ')', 'group', '.', 'add_to', '(', 'my_map', ')', 'fl', '.', 'LayerControl', '(', ')', '.', 'add_to', '(', 'my_map', ')', '# Fit map to bounds', 'bounds', '=', 'so', '.', 'unary_union', '(', 'bboxes', ')', '.', 'bounds', 'bounds2', '=', '[', 'bounds', '[', '1', ':', ':', '-', '1', ']', ',', 'bounds', '[', '3', ':', '1', ':', '-', '1', ']', ']', '# Folium expects this ordering', 'my_map', '.', 'fit_bounds', '(', 'bounds2', ')', 'return', 'my_map'] | Return a Folium map showing the given trips and (optionally)
their stops.
Parameters
----------
feed : Feed
trip_ids : list
IDs of trips in ``feed.trips``
color_palette : list
Palette to use to color the routes. If more routes than colors,
then colors will be recycled.
include_stops : boolean
If ``True``, then include stops in the map
Returns
-------
dictionary
A Folium Map depicting the shapes of the trips.
If ``include_stops``, then include the stops for each trip.
Notes
------
- Requires Folium | ['Return', 'a', 'Folium', 'map', 'showing', 'the', 'given', 'trips', 'and', '(', 'optionally', ')', 'their', 'stops', '.'] | train | https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/trips.py#L572-L683 |
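A sketch of how map_trips from the gtfstk record above might be called; the GTFS file path and the choice of trip IDs are placeholders, and folium must be installed as the record's notes state.

import gtfstk as gt
from gtfstk.trips import map_trips

feed = gt.read_gtfs('data/my_gtfs.zip', dist_units='km')   # placeholder path
trip_ids = feed.trips['trip_id'].iloc[:3].tolist()         # first few trips

m = map_trips(feed, trip_ids, include_stops=True)          # returns a folium.Map
m.save('trips.html')                                       # inspect in a browser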
8,989 | saltstack/salt | salt/modules/lxd.py | image_get | def image_get(fingerprint,
remote_addr=None,
cert=None,
key=None,
verify_cert=True,
_raw=False):
''' Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert. This is True by default, but in
most cases you want to set it to False, as LXD normally
uses self-signed certificates.
_raw : False
Return the raw pylxd object or a dict of it?
CLI Examples:
..code-block:: bash
$ salt '*' lxd.image_get <fingerprint>
'''
client = pylxd_client_get(remote_addr, cert, key, verify_cert)
image = None
try:
image = client.images.get(fingerprint)
except pylxd.exceptions.LXDAPIException:
raise SaltInvocationError(
'Image with fingerprint \'{0}\' not found'.format(fingerprint)
)
if _raw:
return image
return _pylxd_model_to_dict(image) | python | def image_get(fingerprint,
remote_addr=None,
cert=None,
key=None,
verify_cert=True,
_raw=False):
''' Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert. This is True by default, but in
most cases you want to set it to False, as LXD normally
uses self-signed certificates.
_raw : False
Return the raw pylxd object or a dict of it?
CLI Examples:
..code-block:: bash
$ salt '*' lxd.image_get <fingerprint>
'''
client = pylxd_client_get(remote_addr, cert, key, verify_cert)
image = None
try:
image = client.images.get(fingerprint)
except pylxd.exceptions.LXDAPIException:
raise SaltInvocationError(
'Image with fingerprint \'{0}\' not found'.format(fingerprint)
)
if _raw:
return image
return _pylxd_model_to_dict(image) | ['def', 'image_get', '(', 'fingerprint', ',', 'remote_addr', '=', 'None', ',', 'cert', '=', 'None', ',', 'key', '=', 'None', ',', 'verify_cert', '=', 'True', ',', '_raw', '=', 'False', ')', ':', 'client', '=', 'pylxd_client_get', '(', 'remote_addr', ',', 'cert', ',', 'key', ',', 'verify_cert', ')', 'image', '=', 'None', 'try', ':', 'image', '=', 'client', '.', 'images', '.', 'get', '(', 'fingerprint', ')', 'except', 'pylxd', '.', 'exceptions', '.', 'LXDAPIException', ':', 'raise', 'SaltInvocationError', '(', "'Image with fingerprint \\'{0}\\' not found'", '.', 'format', '(', 'fingerprint', ')', ')', 'if', '_raw', ':', 'return', 'image', 'return', '_pylxd_model_to_dict', '(', 'image', ')'] | Get an image by its fingerprint
fingerprint :
The fingerprint of the image to retrieve
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert. This is True by default, but in
most cases you want to set it to False, as LXD normally
uses self-signed certificates.
_raw : False
Return the raw pylxd object or a dict of it?
CLI Examples:
..code-block:: bash
$ salt '*' lxd.image_get <fingerprint> | ['Get', 'an', 'image', 'by', 'its', 'fingerprint'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L2542-L2600 |
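Besides the CLI form shown in the record's docstring, the same execution module function can be driven from Python on the Salt master; the minion target, fingerprint, and remote LXD endpoint below are placeholder assumptions.

import salt.client

local = salt.client.LocalClient()
result = local.cmd(
    'lxd-host*',                      # placeholder minion glob
    'lxd.image_get',
    ['a1b2c3d4e5f6'],                 # placeholder image fingerprint
    kwarg={
        'remote_addr': 'https://myserver.lan:8443',
        'cert': '~/.config/lxc/client.crt',
        'key': '~/.config/lxc/client.key',
        'verify_cert': False,
    },
)
print(result)                         # dict of per-minion image metadata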
8,990 | modin-project/modin | modin/pandas/base.py | BasePandasDataset.get_dtype_counts | def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
if hasattr(self, "dtype"):
return pandas.Series({str(self.dtype): 1})
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result | python | def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
if hasattr(self, "dtype"):
return pandas.Series({str(self.dtype): 1})
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result | ['def', 'get_dtype_counts', '(', 'self', ')', ':', 'if', 'hasattr', '(', 'self', ',', '"dtype"', ')', ':', 'return', 'pandas', '.', 'Series', '(', '{', 'str', '(', 'self', '.', 'dtype', ')', ':', '1', '}', ')', 'result', '=', 'self', '.', 'dtypes', '.', 'value_counts', '(', ')', 'result', '.', 'index', '=', 'result', '.', 'index', '.', 'map', '(', 'lambda', 'x', ':', 'str', '(', 'x', ')', ')', 'return', 'result'] | Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object. | ['Get', 'the', 'counts', 'of', 'dtypes', 'in', 'this', 'object', '.', 'Returns', ':', 'The', 'counts', 'of', 'dtypes', 'in', 'this', 'object', '.'] | train | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1264-L1274 |
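A small sketch of get_dtype_counts from the modin record above, using a throwaway DataFrame.

import modin.pandas as pd

df = pd.DataFrame({
    'a': [1, 2, 3],            # int64
    'b': [1.0, 2.0, 3.0],      # float64
    'c': ['x', 'y', 'z'],      # object
})
print(df.get_dtype_counts())   # Series mapping dtype name -> column count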
8,991 | mopidy/mopidy-gmusic | mopidy_gmusic/translator.py | track_to_ref | def track_to_ref(track, with_track_no=False):
"""Convert a mopidy track to a mopidy ref."""
if with_track_no and track.track_no > 0:
name = '%d - ' % track.track_no
else:
name = ''
for artist in track.artists:
if len(name) > 0:
name += ', '
name += artist.name
if (len(name)) > 0:
name += ' - '
name += track.name
return Ref.track(uri=track.uri, name=name) | python | def track_to_ref(track, with_track_no=False):
"""Convert a mopidy track to a mopidy ref."""
if with_track_no and track.track_no > 0:
name = '%d - ' % track.track_no
else:
name = ''
for artist in track.artists:
if len(name) > 0:
name += ', '
name += artist.name
if (len(name)) > 0:
name += ' - '
name += track.name
return Ref.track(uri=track.uri, name=name) | ['def', 'track_to_ref', '(', 'track', ',', 'with_track_no', '=', 'False', ')', ':', 'if', 'with_track_no', 'and', 'track', '.', 'track_no', '>', '0', ':', 'name', '=', "'%d - '", '%', 'track', '.', 'track_no', 'else', ':', 'name', '=', "''", 'for', 'artist', 'in', 'track', '.', 'artists', ':', 'if', 'len', '(', 'name', ')', '>', '0', ':', 'name', '+=', "', '", 'name', '+=', 'artist', '.', 'name', 'if', '(', 'len', '(', 'name', ')', ')', '>', '0', ':', 'name', '+=', "' - '", 'name', '+=', 'track', '.', 'name', 'return', 'Ref', '.', 'track', '(', 'uri', '=', 'track', '.', 'uri', ',', 'name', '=', 'name', ')'] | Convert a mopidy track to a mopidy ref. | ['Convert', 'a', 'mopidy', 'track', 'to', 'a', 'mopidy', 'ref', '.'] | train | https://github.com/mopidy/mopidy-gmusic/blob/bbfe876d2a7e4f0f4f9308193bb988936bdfd5c3/mopidy_gmusic/translator.py#L33-L46 |
8,992 | noobermin/lspreader | lspreader/flds.py | getvector | def getvector(d,s):
'''
Get a vector flds data.
Parameters:
-----------
d -- flds data.
s -- key for the data.
'''
return np.array([d[s+"x"],d[s+"y"],d[s+"z"]]); | python | def getvector(d,s):
'''
Get a vector flds data.
Parameters:
-----------
d -- flds data.
s -- key for the data.
'''
return np.array([d[s+"x"],d[s+"y"],d[s+"z"]]); | ['def', 'getvector', '(', 'd', ',', 's', ')', ':', 'return', 'np', '.', 'array', '(', '[', 'd', '[', 's', '+', '"x"', ']', ',', 'd', '[', 's', '+', '"y"', ']', ',', 'd', '[', 's', '+', '"z"', ']', ']', ')'] | Get a vector flds data.
Parameters:
-----------
d -- flds data.
s -- key for the data. | ['Get', 'a', 'vector', 'flds', 'data', '.'] | train | https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/lspreader/flds.py#L11-L21 |
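A quick sketch of getvector from the lspreader record above, fed with a hand-built flds-style dict rather than a real dump file.

import numpy as np
from lspreader.flds import getvector

d = {'Ex': np.array([1.0, 2.0]),
     'Ey': np.array([0.0, 0.5]),
     'Ez': np.array([3.0, 1.0])}
E = getvector(d, 'E')
print(E.shape)   # (3, N): stacked x, y, z components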
8,993 | log2timeline/dfvfs | examples/source_analyzer.py | SourceAnalyzer._EncodeString | def _EncodeString(self, string):
"""Encodes a string in the preferred encoding.
Returns:
bytes: encoded string.
"""
try:
# Note that encode() will first convert string into a Unicode string
# if necessary.
encoded_string = string.encode(
self._preferred_encoding, errors=self._encode_errors)
except UnicodeEncodeError:
if self._encode_errors == 'strict':
logging.error(
'Unable to properly write output due to encoding error. '
'Switching to error tolerant encoding which can result in '
'non Basic Latin (C0) characters to be replaced with "?" or '
'"\\ufffd".')
self._encode_errors = 'replace'
encoded_string = string.encode(
self._preferred_encoding, errors=self._encode_errors)
return encoded_string | python | def _EncodeString(self, string):
"""Encodes a string in the preferred encoding.
Returns:
bytes: encoded string.
"""
try:
# Note that encode() will first convert string into a Unicode string
# if necessary.
encoded_string = string.encode(
self._preferred_encoding, errors=self._encode_errors)
except UnicodeEncodeError:
if self._encode_errors == 'strict':
logging.error(
'Unable to properly write output due to encoding error. '
'Switching to error tolerant encoding which can result in '
'non Basic Latin (C0) characters to be replaced with "?" or '
'"\\ufffd".')
self._encode_errors = 'replace'
encoded_string = string.encode(
self._preferred_encoding, errors=self._encode_errors)
return encoded_string | ['def', '_EncodeString', '(', 'self', ',', 'string', ')', ':', 'try', ':', '# Note that encode() will first convert string into a Unicode string', '# if necessary.', 'encoded_string', '=', 'string', '.', 'encode', '(', 'self', '.', '_preferred_encoding', ',', 'errors', '=', 'self', '.', '_encode_errors', ')', 'except', 'UnicodeEncodeError', ':', 'if', 'self', '.', '_encode_errors', '==', "'strict'", ':', 'logging', '.', 'error', '(', "'Unable to properly write output due to encoding error. '", "'Switching to error tolerant encoding which can result in '", '\'non Basic Latin (C0) characters to be replaced with "?" or \'', '\'"\\\\ufffd".\'', ')', 'self', '.', '_encode_errors', '=', "'replace'", 'encoded_string', '=', 'string', '.', 'encode', '(', 'self', '.', '_preferred_encoding', ',', 'errors', '=', 'self', '.', '_encode_errors', ')', 'return', 'encoded_string'] | Encodes a string in the preferred encoding.
Returns:
bytes: encoded string. | ['Encodes', 'a', 'string', 'in', 'the', 'preferred', 'encoding', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/examples/source_analyzer.py#L39-L62 |
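The method above is private to the dfvfs example script, so the sketch below re-expresses the same encode-with-fallback pattern as a standalone function instead of calling the class directly; the names are illustrative.

import locale
import logging

preferred_encoding = locale.getpreferredencoding()
encode_errors = 'strict'

def encode_string(string):
    global encode_errors
    try:
        return string.encode(preferred_encoding, errors=encode_errors)
    except UnicodeEncodeError:
        if encode_errors == 'strict':
            logging.error('Encoding error; falling back to replacement characters.')
            encode_errors = 'replace'
        return string.encode(preferred_encoding, errors=encode_errors)

print(encode_string('plain ascii'))
print(encode_string('snowman \u2603'))   # may become b'?' on non-UTF-8 consoles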
8,994 | wummel/linkchecker | third_party/dnspython/dns/rcode.py | to_flags | def to_flags(value):
"""Return a (flags, ednsflags) tuple which encodes the rcode.
@param value: the rcode
@type value: int
@raises ValueError: rcode is < 0 or > 4095
@rtype: (int, int) tuple
"""
if value < 0 or value > 4095:
raise ValueError('rcode must be >= 0 and <= 4095')
v = value & 0xf
ev = long(value & 0xff0) << 20
return (v, ev) | python | def to_flags(value):
"""Return a (flags, ednsflags) tuple which encodes the rcode.
@param value: the rcode
@type value: int
@raises ValueError: rcode is < 0 or > 4095
@rtype: (int, int) tuple
"""
if value < 0 or value > 4095:
raise ValueError('rcode must be >= 0 and <= 4095')
v = value & 0xf
ev = long(value & 0xff0) << 20
return (v, ev) | ['def', 'to_flags', '(', 'value', ')', ':', 'if', 'value', '<', '0', 'or', 'value', '>', '4095', ':', 'raise', 'ValueError', '(', "'rcode must be >= 0 and <= 4095'", ')', 'v', '=', 'value', '&', '0xf', 'ev', '=', 'long', '(', 'value', '&', '0xff0', ')', '<<', '20', 'return', '(', 'v', ',', 'ev', ')'] | Return a (flags, ednsflags) tuple which encodes the rcode.
@param value: the rcode
@type value: int
@raises ValueError: rcode is < 0 or > 4095
@rtype: (int, int) tuple | ['Return', 'a', '(', 'flags', 'ednsflags', ')', 'tuple', 'which', 'encodes', 'the', 'rcode', '.'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/rcode.py#L93-L106 |
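A short sketch of to_flags from the dnspython record above, using the extended rcode BADVERS (16), which needs both the header and EDNS fields; the standard dnspython import name is assumed.

import dns.rcode

flags, ednsflags = dns.rcode.to_flags(dns.rcode.BADVERS)
print(flags, hex(ednsflags))    # low 4 bits, and the high bits shifted into EDNS
# from_flags reassembles the original rcode from the two fields.
print(dns.rcode.from_flags(flags, ednsflags) == dns.rcode.BADVERS)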
8,995 | scanny/python-pptx | pptx/shapes/freeform.py | FreeformBuilder._add_line_segment | def _add_line_segment(self, x, y):
"""Add a |_LineSegment| operation to the drawing sequence."""
self._drawing_operations.append(_LineSegment.new(self, x, y)) | python | def _add_line_segment(self, x, y):
"""Add a |_LineSegment| operation to the drawing sequence."""
self._drawing_operations.append(_LineSegment.new(self, x, y)) | ['def', '_add_line_segment', '(', 'self', ',', 'x', ',', 'y', ')', ':', 'self', '.', '_drawing_operations', '.', 'append', '(', '_LineSegment', '.', 'new', '(', 'self', ',', 'x', ',', 'y', ')', ')'] | Add a |_LineSegment| operation to the drawing sequence. | ['Add', 'a', '|_LineSegment|', 'operation', 'to', 'the', 'drawing', 'sequence', '.'] | train | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/freeform.py#L145-L147 |
8,996 | oasiswork/zimsoap | zimsoap/client.py | ZimbraAdminClient.mk_auth_token | def mk_auth_token(self, account, admin=False, duration=0):
""" Builds an authentification token, using preauth mechanism.
See http://wiki.zimbra.com/wiki/Preauth
:param duration: in seconds defaults to 0, which means "use account
default"
:param account: an account object to be used as a selector
:returns: the auth string
"""
domain = account.get_domain()
try:
preauth_key = self.get_domain(domain)['zimbraPreAuthKey']
except KeyError:
raise DomainHasNoPreAuthKey(domain)
timestamp = int(time.time())*1000
expires = duration*1000
return utils.build_preauth_str(preauth_key, account.name, timestamp,
expires, admin) | python | def mk_auth_token(self, account, admin=False, duration=0):
""" Builds an authentification token, using preauth mechanism.
See http://wiki.zimbra.com/wiki/Preauth
:param duration: in seconds defaults to 0, which means "use account
default"
:param account: an account object to be used as a selector
:returns: the auth string
"""
domain = account.get_domain()
try:
preauth_key = self.get_domain(domain)['zimbraPreAuthKey']
except KeyError:
raise DomainHasNoPreAuthKey(domain)
timestamp = int(time.time())*1000
expires = duration*1000
return utils.build_preauth_str(preauth_key, account.name, timestamp,
expires, admin) | ['def', 'mk_auth_token', '(', 'self', ',', 'account', ',', 'admin', '=', 'False', ',', 'duration', '=', '0', ')', ':', 'domain', '=', 'account', '.', 'get_domain', '(', ')', 'try', ':', 'preauth_key', '=', 'self', '.', 'get_domain', '(', 'domain', ')', '[', "'zimbraPreAuthKey'", ']', 'except', 'KeyError', ':', 'raise', 'DomainHasNoPreAuthKey', '(', 'domain', ')', 'timestamp', '=', 'int', '(', 'time', '.', 'time', '(', ')', ')', '*', '1000', 'expires', '=', 'duration', '*', '1000', 'return', 'utils', '.', 'build_preauth_str', '(', 'preauth_key', ',', 'account', '.', 'name', ',', 'timestamp', ',', 'expires', ',', 'admin', ')'] | Builds an authentification token, using preauth mechanism.
See http://wiki.zimbra.com/wiki/Preauth
:param duration: in seconds defaults to 0, which means "use account
default"
:param account: an account object to be used as a selector
:returns: the auth string | ['Builds', 'an', 'authentification', 'token', 'using', 'preauth', 'mechanism', '.'] | train | https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L1133-L1152 |
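A hedged sketch of mk_auth_token from the zimsoap record above; the admin endpoint, credentials, and account name are placeholders, the constructor and login details are assumptions based on typical zimsoap usage, and the account's domain must have zimbraPreAuthKey set or DomainHasNoPreAuthKey is raised, as the code shows.

from zimsoap.client import ZimbraAdminClient
from zimsoap.zobjects import Account

zc = ZimbraAdminClient('zimbra-admin.example.com', 7071)   # placeholder host/port
zc.login('admin@example.com', 'secret')                    # placeholder credentials

account = Account(name='user@example.com')
token = zc.mk_auth_token(account, admin=False, duration=3600)
print(token)   # preauth string valid for one hour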
8,997 | LogicalDash/LiSE | allegedb/allegedb/window.py | WindowDict.past | def past(self, rev=None):
"""Return a Mapping of items at or before the given revision.
Default revision is the last one looked up.
"""
if rev is not None:
self.seek(rev)
return WindowDictPastView(self._past) | python | def past(self, rev=None):
"""Return a Mapping of items at or before the given revision.
Default revision is the last one looked up.
"""
if rev is not None:
self.seek(rev)
return WindowDictPastView(self._past) | ['def', 'past', '(', 'self', ',', 'rev', '=', 'None', ')', ':', 'if', 'rev', 'is', 'not', 'None', ':', 'self', '.', 'seek', '(', 'rev', ')', 'return', 'WindowDictPastView', '(', 'self', '.', '_past', ')'] | Return a Mapping of items at or before the given revision.
Default revision is the last one looked up. | ['Return', 'a', 'Mapping', 'of', 'items', 'at', 'or', 'before', 'the', 'given', 'revision', '.'] | train | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/window.py#L439-L447 |
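A tiny sketch of the past() view from the allegedb record above; WindowDict is keyed by revision number, the stored values here are arbitrary, and the import path follows the allegedb/window.py location in the record.

from allegedb.window import WindowDict

wd = WindowDict()
wd[0] = 'start'
wd[3] = 'middle'
wd[7] = 'end'

past_view = wd.past(5)          # everything at or before revision 5
print(dict(past_view))          # {0: 'start', 3: 'middle'}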
8,998 | saltstack/salt | salt/modules/xbpspkg.py | _get_version | def _get_version():
'''
Get the xbps version
'''
version_string = __salt__['cmd.run'](
[_check_xbps(), '--version'],
output_loglevel='trace')
if version_string is None:
# Dunno why it would, but...
return False
VERSION_MATCH = re.compile(r'(?:XBPS:[\s]+)([\d.]+)(?:[\s]+.*)')
version_match = VERSION_MATCH.search(version_string)
if not version_match:
return False
return version_match.group(1).split('.') | python | def _get_version():
'''
Get the xbps version
'''
version_string = __salt__['cmd.run'](
[_check_xbps(), '--version'],
output_loglevel='trace')
if version_string is None:
# Dunno why it would, but...
return False
VERSION_MATCH = re.compile(r'(?:XBPS:[\s]+)([\d.]+)(?:[\s]+.*)')
version_match = VERSION_MATCH.search(version_string)
if not version_match:
return False
return version_match.group(1).split('.') | ['def', '_get_version', '(', ')', ':', 'version_string', '=', '__salt__', '[', "'cmd.run'", ']', '(', '[', '_check_xbps', '(', ')', ',', "'--version'", ']', ',', 'output_loglevel', '=', "'trace'", ')', 'if', 'version_string', 'is', 'None', ':', '# Dunno why it would, but...', 'return', 'False', 'VERSION_MATCH', '=', 're', '.', 'compile', '(', "r'(?:XBPS:[\\s]+)([\\d.]+)(?:[\\s]+.*)'", ')', 'version_match', '=', 'VERSION_MATCH', '.', 'search', '(', 'version_string', ')', 'if', 'not', 'version_match', ':', 'return', 'False', 'return', 'version_match', '.', 'group', '(', '1', ')', '.', 'split', '(', "'.'", ')'] | Get the xbps version | ['Get', 'the', 'xbps', 'version'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xbpspkg.py#L51-L67 |
8,999 | inveniosoftware/invenio-oauth2server | invenio_oauth2server/ext.py | verify_oauth_token_and_set_current_user | def verify_oauth_token_and_set_current_user():
"""Verify OAuth token and set current user on request stack.
This function should be used **only** on REST application.
.. code-block:: python
app.before_request(verify_oauth_token_and_set_current_user)
"""
for func in oauth2._before_request_funcs:
func()
if not hasattr(request, 'oauth') or not request.oauth:
scopes = []
try:
valid, req = oauth2.verify_request(scopes)
except ValueError:
abort(400, 'Error trying to decode a non urlencoded string.')
for func in oauth2._after_request_funcs:
valid, req = func(valid, req)
if valid:
request.oauth = req | python | def verify_oauth_token_and_set_current_user():
"""Verify OAuth token and set current user on request stack.
This function should be used **only** on REST application.
.. code-block:: python
app.before_request(verify_oauth_token_and_set_current_user)
"""
for func in oauth2._before_request_funcs:
func()
if not hasattr(request, 'oauth') or not request.oauth:
scopes = []
try:
valid, req = oauth2.verify_request(scopes)
except ValueError:
abort(400, 'Error trying to decode a non urlencoded string.')
for func in oauth2._after_request_funcs:
valid, req = func(valid, req)
if valid:
request.oauth = req | ['def', 'verify_oauth_token_and_set_current_user', '(', ')', ':', 'for', 'func', 'in', 'oauth2', '.', '_before_request_funcs', ':', 'func', '(', ')', 'if', 'not', 'hasattr', '(', 'request', ',', "'oauth'", ')', 'or', 'not', 'request', '.', 'oauth', ':', 'scopes', '=', '[', ']', 'try', ':', 'valid', ',', 'req', '=', 'oauth2', '.', 'verify_request', '(', 'scopes', ')', 'except', 'ValueError', ':', 'abort', '(', '400', ',', "'Error trying to decode a non urlencoded string.'", ')', 'for', 'func', 'in', 'oauth2', '.', '_after_request_funcs', ':', 'valid', ',', 'req', '=', 'func', '(', 'valid', ',', 'req', ')', 'if', 'valid', ':', 'request', '.', 'oauth', '=', 'req'] | Verify OAuth token and set current user on request stack.
This function should be used **only** on REST application.
.. code-block:: python
app.before_request(verify_oauth_token_and_set_current_user) | ['Verify', 'OAuth', 'token', 'and', 'set', 'current', 'user', 'on', 'request', 'stack', '.'] | train | https://github.com/inveniosoftware/invenio-oauth2server/blob/7033d3495c1a2b830e101e43918e92a37bbb49f2/invenio_oauth2server/ext.py#L165-L188 |
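A minimal wiring sketch for the helper in the invenio-oauth2server record above; a real Invenio REST application would also set up the database, accounts, and OAuth providers, which are omitted here.

from flask import Flask
from invenio_oauth2server import InvenioOAuth2Server
from invenio_oauth2server.ext import verify_oauth_token_and_set_current_user

app = Flask(__name__)
InvenioOAuth2Server(app)

# Verify OAuth tokens on every request, as the docstring above prescribes
# for REST-only applications.
app.before_request(verify_oauth_token_and_set_current_user)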