Column schema of the dataset preview (string columns show min – max lengths; class columns show the number of distinct values):

    Unnamed: 0                  int64           0 – 10k
    repository_name             stringlengths   7 – 54
    func_path_in_repository     stringlengths   5 – 223
    func_name                   stringlengths   1 – 134
    whole_func_string           stringlengths   100 – 30.3k
    language                    stringclasses   1 value (python)
    func_code_string            stringlengths   100 – 30.3k (verbatim duplicate of whole_func_string)
    func_code_tokens            stringlengths   138 – 33.2k
    func_documentation_string   stringlengths   1 – 15k
    func_documentation_tokens   stringlengths   5 – 5.14k
    split_name                  stringclasses   1 value (train)
    func_code_url               stringlengths   91 – 315
Row 9,000 · libtcod/python-tcod · tcod/libtcodpy.py · console_set_char
def console_set_char(
    con: tcod.console.Console, x: int, y: int, c: Union[int, str]
) -> None:
    """Change the character at x,y to c, keeping the current colors.

    Args:
        con (Console): Any Console instance.
        x (int): Character x position from the left.
        y (int): Character y position from the top.
        c (Union[int, AnyStr]): Character to draw, can be an integer or string.

    .. deprecated:: 8.4
        Array access performs significantly faster than using this function.
        See :any:`Console.ch`.
    """
    lib.TCOD_console_set_char(_console(con), x, y, _int(c))
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1274-L1289
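The deprecation note points at direct array access instead of this call. A minimal sketch of that replacement, assuming python-tcod's default array order, where Console.ch is a writable array of character codes indexed [y, x]:

    import tcod

    console = tcod.console.Console(80, 50)

    # Deprecated path (assuming the function above is importable):
    # console_set_char(console, x=1, y=2, c='@')

    # Suggested replacement: write the code point straight into the
    # Console.ch array, indexed [y, x] under the default order.
    console.ch[2, 1] = ord('@')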
Row 9,001 · sorgerlab/indra · indra/sources/geneways/find_full_text_sentence.py · FullTextMention.get_sentences
def get_sentences(self, root_element, block_tags):
    """Returns a list of plain-text sentences by iterating through
    XML tags except for those listed in block_tags."""
    sentences = []
    for element in root_element:
        if not self.any_ends_with(block_tags, element.tag):
            # tag not in block_tags
            if element.text is not None and \
                    not re.match(r'^\s*$', element.text):
                sentences.extend(self.sentence_tokenize(element.text))
            sentences.extend(self.get_sentences(element, block_tags))
    f = open('sentence_debug.txt', 'w')
    for s in sentences:
        f.write(s.lower() + '\n')
    f.close()
    return sentences
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/geneways/find_full_text_sentence.py#L33-L49
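A minimal standalone sketch of the same traversal, with the class-specific pieces (sentence_tokenize, any_ends_with, the debug-file write) stripped out; the collect_text helper and the sample XML are made up for illustration:

    import re
    import xml.etree.ElementTree as ET

    def collect_text(root_element, block_tags):
        # Recurse through child elements, skipping any subtree whose tag
        # ends with one of block_tags, and gather non-blank text nodes.
        chunks = []
        for element in root_element:
            if not any(element.tag.endswith(tag) for tag in block_tags):
                if element.text is not None and not re.match(r'^\s*$', element.text):
                    chunks.append(element.text.strip())
                chunks.extend(collect_text(element, block_tags))
        return chunks

    root = ET.fromstring(
        '<article><title>Title</title>'
        '<sec><p>First sentence.</p><table-wrap><p>cell</p></table-wrap></sec></article>'
    )
    print(collect_text(root, ['table-wrap']))  # ['Title', 'First sentence.']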
Row 9,002 · aio-libs/janus · janus/__init__.py · _AsyncQueueProxy.full
def full(self):
    """Return True if there are maxsize items in the queue.

    Note: if the Queue was initialized with maxsize=0 (the default),
    then full() is never True.
    """
    if self._parent._maxsize <= 0:
        return False
    else:
        return self.qsize() >= self._parent._maxsize
https://github.com/aio-libs/janus/blob/8dc80530db1144fbd1dba75d4a1c1a54bb520c21/janus/__init__.py#L373-L382
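A small usage sketch of the maxsize=0 behaviour described in the docstring, assuming a current janus release where Queue() is created inside a running event loop:

    import asyncio
    import janus

    async def main():
        unbounded = janus.Queue()           # maxsize=0: full() is never True
        bounded = janus.Queue(maxsize=2)
        for i in range(3):
            await unbounded.async_q.put(i)
        await bounded.async_q.put(0)
        await bounded.async_q.put(1)
        print(unbounded.async_q.full())     # False, despite 3 queued items
        print(bounded.async_q.full())       # True: qsize() >= maxsize
        unbounded.close()
        bounded.close()
        await unbounded.wait_closed()
        await bounded.wait_closed()

    asyncio.run(main())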
Row 9,003 · rigetti/quantumflow · quantumflow/qubits.py · QubitVector.partial_trace
def partial_trace(self, qubits: Qubits) -> 'QubitVector':
    """Return the partial trace over some subset of qubits"""
    N = self.qubit_nb
    R = self.rank

    if R == 1:
        raise ValueError('Cannot take trace of vector')

    new_qubits: List[Qubit] = list(self.qubits)
    for q in qubits:
        new_qubits.remove(q)
    if not new_qubits:
        raise ValueError('Cannot remove all qubits with partial_trace.')

    indices = [self.qubits.index(qubit) for qubit in qubits]

    subscripts = list(EINSUM_SUBSCRIPTS)[0:N * R]
    for idx in indices:
        for r in range(1, R):
            subscripts[r * N + idx] = subscripts[idx]
    subscript_str = ''.join(subscripts)

    # Only numpy's einsum works with repeated subscripts
    tensor = self.asarray()
    tensor = np.einsum(subscript_str, tensor)

    return QubitVector(tensor, new_qubits)
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/qubits.py#L201-L227
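The trick this method relies on is numpy's implicit einsum mode: a subscript repeated within one operand is summed over, which traces out a qubit when applied to its bra and ket index. A minimal numpy demonstration; the (ket0, ket1, bra0, bra1) index layout below matches the r * N + idx arithmetic above:

    import numpy as np

    # 2-qubit density matrix of |++><++|, reshaped to a rank-4 tensor
    # with indices (ket0, ket1, bra0, bra1); every entry is 1/4.
    rho = np.full((2, 2, 2, 2), 0.25)

    # Repeating 'b' for qubit 1's ket and bra indices sums that diagonal,
    # i.e. traces qubit 1 out; implicit mode outputs the remaining 'ac'.
    reduced = np.einsum('abcb', rho)
    print(reduced)   # [[0.5 0.5], [0.5 0.5]] -> the single-qubit state |+><+|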
Row 9,004 · daskos/mentor · mentor/proxies/scheduler.py · SchedulerDriverProxy.message
def message(self, executor_id, slave_id, message):
    """Sends a message from the framework to one of its executors.

    These messages are best effort; do not expect a framework message to
    be retransmitted in any reliable fashion.
    """
    logging.info('Sends message `{}` to executor `{}` on slave `{}`'.format(
        message, executor_id, slave_id))
    return self.driver.sendFrameworkMessage(encode(executor_id),
                                            encode(slave_id),
                                            message)
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L234-L244
Row 9,005 · bitlabstudio/django-libs · django_libs/templatetags/libs_tags.py · get_range_around
def get_range_around(range_value, current_item, padding):
    """Returns a range of numbers around the given number.

    This is useful for pagination, where you might want to show something
    like this::

        << < ... 4 5 (6) 7 8 .. > >>

    In this example `6` would be the current page and we show 2 items
    around that page (including the page itself).

    Usage::

        {% load libs_tags %}
        {% get_range_around page_obj.paginator.num_pages page_obj.number 5
           as pages %}

    :param range_value: Number of total items in your range (1 indexed)
    :param current_item: The item around which the result should be
      centered (1 indexed)
    :param padding: Number of items to show left and right from the
      current item.

    """
    total_items = 1 + padding * 2
    left_bound = padding
    right_bound = range_value - padding
    if range_value <= total_items:
        range_items = range(1, range_value + 1)
        return {
            'range_items': range_items,
            'left_padding': False,
            'right_padding': False,
        }
    if current_item <= left_bound:
        range_items = range(1, range_value + 1)[:total_items]
        return {
            'range_items': range_items,
            'left_padding': range_items[0] > 1,
            'right_padding': range_items[-1] < range_value,
        }
    if current_item >= right_bound:
        range_items = range(1, range_value + 1)[-total_items:]
        return {
            'range_items': range_items,
            'left_padding': range_items[0] > 1,
            'right_padding': range_items[-1] < range_value,
        }
    range_items = range(current_item - padding, current_item + padding + 1)
    return {
        'range_items': range_items,
        'left_padding': True,
        'right_padding': True,
    }
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/templatetags/libs_tags.py#L364-L420
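A worked example of the branching above (centred window versus clamped left edge), run against the function as defined:

    # Centred window: 10 pages, current page 6, padding 2.
    result = get_range_around(10, 6, 2)
    print(list(result['range_items']))                      # [4, 5, 6, 7, 8]
    print(result['left_padding'], result['right_padding'])  # True True

    # Clamped at the left edge: the window slides right instead of centring.
    result = get_range_around(10, 1, 2)
    print(list(result['range_items']))                      # [1, 2, 3, 4, 5]
    print(result['left_padding'], result['right_padding'])  # False True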
Row 9,006 · workforce-data-initiative/skills-utils · skills_utils/io.py · stream_json_file
def stream_json_file(local_file):
    """Stream a JSON file (in JSON-per-line format)

    Args:
        local_file (file-like object) an open file-handle that contains a
            JSON string on each line

    Yields:
        (dict) JSON objects
    """
    for i, line in enumerate(local_file):
        try:
            data = json.loads(line.decode('utf-8'))
            yield data
        except ValueError as e:
            logging.warning("Skipping line %d due to error: %s", i, e)
            continue
https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/io.py#L6-L21
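Since each line goes through line.decode('utf-8'), the function expects a binary file handle. A small sketch, assuming the function above is in scope together with its json and logging imports:

    import io

    fake_file = io.BytesIO(b'{"a": 1}\n{broken}\n{"b": 2}\n')
    print(list(stream_json_file(fake_file)))
    # -> [{'a': 1}, {'b': 2}]; the malformed middle line is logged and skipped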
Row 9,007 · codeforamerica/three · three/core.py · Three.get
def get(self, *args, **kwargs):
    """Perform a get request."""
    if 'convert' in kwargs:
        conversion = kwargs.pop('convert')
    else:
        conversion = True
    kwargs = self._get_keywords(**kwargs)
    url = self._create_path(*args)
    request = self.session.get(url, params=kwargs)
    content = request.content
    self._request = request
    return self.convert(content, conversion)
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L107-L118
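The four-line extraction of 'convert' is equivalent to dict.pop with a default; a one-line sketch of the same behaviour:

    # Pops 'convert' out of kwargs if present, else defaults to True.
    conversion = kwargs.pop('convert', True)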
Row 9,008 · twitterdev/search-tweets-python · searchtweets/result_stream.py · make_session
def make_session(username=None, password=None, bearer_token=None,
                 extra_headers_dict=None):
    """Creates a Requests Session for use. Accepts a bearer token
    for premium users and will override username and password information
    if present.

    Args:
        username (str): username for the session
        password (str): password for the user
        bearer_token (str): token for a premium API user.
    """

    if password is None and bearer_token is None:
        logger.error("No authentication information provided; "
                     "please check your object")
        raise KeyError

    session = requests.Session()
    session.trust_env = False
    headers = {'Accept-encoding': 'gzip',
               'User-Agent': 'twitterdev-search-tweets-python/' + VERSION}
    if bearer_token:
        logger.info("using bearer token for authentication")
        headers['Authorization'] = "Bearer {}".format(bearer_token)
        session.headers = headers
    else:
        logger.info("using username and password for authentication")
        session.auth = username, password
        session.headers = headers
    if extra_headers_dict:
        headers.update(extra_headers_dict)
    return session
https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/result_stream.py#L31-L61
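A usage sketch with a placeholder token and a hypothetical extra header, assuming the module above is importable. Note the ordering subtlety: extra_headers_dict is applied after session.headers has been assigned, but because it mutates the same headers dict object, the update is still visible on the session:

    session = make_session(bearer_token="MY_TOKEN",               # placeholder
                           extra_headers_dict={"x-custom": "1"})  # hypothetical header
    print(session.headers["Authorization"])   # Bearer MY_TOKEN
    print(session.headers["x-custom"])        # 1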
Row 9,009 · thilux/tvdb_client · tvdb_client/clients/ApiV2Client.py · ApiV2Client.search_series
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
    """Searches for a series in TheTVDB by either its name, imdb_id or
    zap2it_id.

    :param name: the name of the series to look for
    :param imdb_id: the IMDB id of the series to look for
    :param zap2it_id: the zap2it id of the series to look for.
    :return: a python dictionary with either the result of the search or
        an error from TheTVDB.
    """
    arguments = locals()
    optional_parameters = {'name': 'name', 'imdb_id': 'imdbId',
                           'zap2it_id': 'zap2itId'}

    query_string = utils.query_param_string_from_option_args(
        optional_parameters, arguments)

    raw_response = requests_util.run_request(
        'get',
        '%s%s?%s' % (self.API_BASE_URL, '/search/series', query_string),
        headers=self.__get_header_with_auth())

    return self.parse_raw_response(raw_response)
https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L102-L120
Row 9,010 · BerkeleyAutomation/perception · perception/opencv_camera_sensor.py · OpenCVCameraSensor.frames
def frames(self, flush=True):
    """Returns the latest color image from the stream

    Raises:
        Exception if opencv sensor gives ret_val of 0
    """
    self.flush()
    ret_val, frame = self._sensor.read()
    if not ret_val:
        raise Exception("Unable to retrieve frame from OpenCVCameraSensor for id {0}".format(self._device_id))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    if self._upside_down:
        frame = np.flipud(frame).astype(np.uint8)
        frame = np.fliplr(frame).astype(np.uint8)
    return ColorImage(frame)
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/opencv_camera_sensor.py#L36-L49
Row 9,011 · hozn/coilmq · coilmq/engine.py · StompEngine.unbind
def unbind(self):
    """
    Unbinds this connection from queue and topic managers (freeing up
    resources) and resets state.
    """
    self.connected = False
    self.queue_manager.disconnect(self.connection)
    self.topic_manager.disconnect(self.connection)
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/engine.py#L87-L94
Row 9,012 · The-Politico/politico-civic-election-night · electionnight/serializers/votes.py · VotesSerializer.get_fipscode
def get_fipscode(self, obj):
    """County FIPS code"""
    if obj.division.level.name == DivisionLevel.COUNTY:
        return obj.division.code
    return None
https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/votes.py#L63-L67
Row 9,013 · KnowledgeLinks/rdfframework · rdfframework/datamanager/defmanager.py · DefinitionManager.drop_vocab
def drop_vocab(self, vocab_name, **kwargs):
    """Removes the vocab from the definition triplestore

    args:
        vocab_name: the name or uri of the vocab to remove
    """
    vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs)
    return self.drop_file(vocab_dict['filename'], **kwargs)
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datamanager/defmanager.py#L267-L275
Row 9,014 · PGower/PyCanvas · pycanvas/apis/announcement_external_feeds.py · AnnouncementExternalFeedsAPI.delete_external_feed_courses
def delete_external_feed_courses(self, course_id, external_feed_id):
    """
    Delete an external feed.

    Deletes the external feed.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # REQUIRED - PATH - external_feed_id
    """ID"""
    path["external_feed_id"] = external_feed_id

    self.logger.debug(
        "DELETE /api/v1/courses/{course_id}/external_feeds/{external_feed_id} "
        "with query params: {params} and form data: {data}".format(
            params=params, data=data, **path))
    return self.generic_request(
        "DELETE",
        "/api/v1/courses/{course_id}/external_feeds/{external_feed_id}".format(**path),
        data=data, params=params, single_item=True)
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/announcement_external_feeds.py#L117-L136
Row 9,015 · MagicTheGathering/mtg-sdk-python · mtgsdk/querybuilder.py · QueryBuilder.find
def find(self, id):
    """Get a resource by its id

    Args:
        id (string): Resource id

    Returns:
        object: Instance of the resource type
    """
    url = "{}/{}/{}".format(__endpoint__, self.type.RESOURCE, id)
    response = RestClient.get(url)[self.type.RESOURCE[:-1]]
    return self.type(response)
https://github.com/MagicTheGathering/mtg-sdk-python/blob/3d28fe209d72356a559321355d3f7e53ca78a9cc/mtgsdk/querybuilder.py#L19-L29
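The response key is derived by singularizing the collection name with RESOURCE[:-1]; a tiny sketch with a hypothetical resource name and endpoint:

    RESOURCE = 'cards'   # hypothetical value of self.type.RESOURCE
    print('{}/{}/{}'.format('https://api.example', RESOURCE, 42))  # .../cards/42
    print(RESOURCE[:-1])  # 'card' -- the key the response dict is unwrapped with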
Row 9,016 · simse/pymitv · pymitv/discover.py · Discover.scan
def scan(self, stop_on_first=True, base_ip=0):
    """Scans the local network for TVs."""
    tvs = []

    # Check if base_ip has been passed
    if base_ip == 0:
        # Find IP address of computer pymitv is running on
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.connect(("8.8.8.8", 80))
        ip = sock.getsockname()[0]
        sock.close()

        # Get IP and compose a base like 192.168.1.xxx
        ip_parts = ip.split('.')
        base_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2]

    # Loop through every IP and check if TV is alive
    for ip_suffix in range(2, 256):
        ip_check = '{}.{}'.format(base_ip, ip_suffix)
        if self.check_ip(ip_check):
            tvs.append(ip_check)
            if stop_on_first:
                break

    return tvs
https://github.com/simse/pymitv/blob/03213f591d70fbf90ba2b6af372e474c9bfb99f6/pymitv/discover.py#L13-L39
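The local-IP discovery uses a standard trick: connect() on a UDP socket sends no packets, but it makes the OS choose the outbound interface, whose address getsockname() then reports. As a standalone sketch:

    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.connect(("8.8.8.8", 80))   # no packet is actually sent for UDP
    print(sock.getsockname()[0])    # e.g. 192.168.1.42 (machine-dependent)
    sock.close()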
Row 9,017 · jhuapl-boss/intern · intern/service/boss/v1/volume.py · VolumeService_1.get_neuroglancer_link
def get_neuroglancer_link(self, resource, resolution, x_range, y_range,
                          z_range, url_prefix, **kwargs):
    """
    Get a neuroglancer link of the cutout specified from the host
    specified in the remote configuration step.

    Args:
        resource (intern.resource.Resource): Resource compatible with cutout operations.
        resolution (int): 0 indicates native resolution.
        x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
        y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
        z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
        url_prefix (string): Protocol + host such as https://api.theboss.io

    Returns:
        (string): Return neuroglancer link.

    Raises:
        RuntimeError when given invalid resource.
        Other exceptions may be raised depending on the volume service's implementation.
    """
    link = ("https://neuroglancer.theboss.io/#!{'layers':{'"
            + str(resource.name) + "':{'type':'" + resource.type
            + "'_'source':" + "'boss://" + url_prefix + "/"
            + resource.coll_name + "/" + resource.exp_name + "/"
            + resource.name
            + "'}}_'navigation':{'pose':{'position':{'voxelCoordinates':["
            + str(x_range[0]) + "_" + str(y_range[0]) + "_"
            + str(z_range[0]) + "]}}}}")
    return link
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/service/boss/v1/volume.py#L376-L396
Row 9,018 · ahwillia/tensortools · tensortools/ensemble.py · Ensemble.objectives
def objectives(self, rank):
    """Returns objective values of models with specified rank."""
    self._check_rank(rank)
    return [result.obj for result in self.results[rank]]
https://github.com/ahwillia/tensortools/blob/f375633ec621caa96665a56205dcf932590d4a6e/tensortools/ensemble.py#L130-L134
Row 9,019 · juju/theblues · theblues/jimm.py · JIMM.list_models
def list_models(self, macaroons):
    """Get the logged in user's models from the JIMM controller.

    @param macaroons The discharged JIMM macaroons.
    @return The json decoded list of environments.
    """
    return make_request("{}model".format(self.url), timeout=self.timeout,
                        client=self._client, cookies=self.cookies)
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/jimm.py#L31-L38
Row 9,020 · cmbruns/pyopenvr · src/openvr/__init__.py · IVRSystem.getDXGIOutputInfo
def getDXGIOutputInfo(self):
    """
    [D3D10/11 Only]
    Returns the adapter index and output index that the user should pass
    into EnumAdapters and EnumOutputs to create the device and swap chain
    in DX10 and DX11. If an error occurs both indices will be set to -1.
    """
    fn = self.function_table.getDXGIOutputInfo
    pnAdapterIndex = c_int32()
    fn(byref(pnAdapterIndex))
    return pnAdapterIndex.value
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L2706-L2716
Row 9,021 · hearsaycorp/normalize · normalize/property/__init__.py · Property.fullname
def fullname(self):
    """Returns the name of the ``Record`` class this ``Property`` is
    attached to, and attribute name it is attached as."""
    if not self.bound:
        if self.name is not None:
            return "(unbound).%s" % self.name
        else:
            return "(unbound)"
    elif not self.class_():
        classname = "(GC'd class)"
    else:
        classname = self.class_().__name__
    return "%s.%s" % (classname, self.name)
https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/property/__init__.py#L189-L201
Row 9,022 · secdev/scapy · scapy/utils.py · RawPcapWriter.write
def write(self, pkt):
    """
    Writes a Packet or bytes to a pcap file.

    :param pkt: Packet(s) to write (one record for each Packet), or raw
        bytes to write (as one record).
    :type pkt: iterable[Packet], Packet or bytes
    """
    if isinstance(pkt, bytes):
        if not self.header_present:
            self._write_header(pkt)
        self._write_packet(pkt)
    else:
        pkt = pkt.__iter__()
        for p in pkt:
            if not self.header_present:
                self._write_header(p)
            self._write_packet(p)
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/utils.py#L1282-L1299
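PcapWriter, the user-facing subclass of RawPcapWriter in scapy, goes through this write(); a short usage sketch showing both accepted argument shapes and the lazily written header:

    from scapy.all import Ether, IP, PcapWriter

    writer = PcapWriter("out.pcap")                    # header written on first record
    writer.write(Ether() / IP(dst="192.0.2.1"))        # one Packet -> one record
    writer.write([Ether() / IP(dst="192.0.2.2")] * 3)  # iterable -> one record each
    writer.close()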
Row 9,023 · quantumlib/Cirq · cirq/google/sim/xmon_stepper.py · _single_qubit_accumulate_into_scratch
def _single_qubit_accumulate_into_scratch(args: Dict[str, Any]):
    """Accumulates single qubit phase gates into the scratch shards."""
    index = args['indices'][0]
    shard_num = args['shard_num']
    half_turns = args['half_turns']
    num_shard_qubits = args['num_shard_qubits']
    scratch = _scratch_shard(args)

    # ExpZ = exp(-i pi Z half_turns / 2).
    if index >= num_shard_qubits:
        # Acts on prefix qubits.
        sign = 1 - 2 * _kth_bit(shard_num, index - num_shard_qubits)
        scratch -= half_turns * sign
    else:
        # Acts on shard qubits.
        scratch -= half_turns * _pm_vects(args)[index]
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/sim/xmon_stepper.py#L432-L447
Row 9,024 · ooici/elasticpy · elasticpy/query.py · ElasticQuery.bool
def bool(cls, must=None, should=None, must_not=None,
         minimum_number_should_match=None, boost=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html

    A query that matches documents matching boolean combinations of other
    queries. The bool query maps to Lucene BooleanQuery. It is built using
    one or more boolean clauses, each clause with a typed occurrence. The
    occurrence types are:

    'must' - The clause (query) must appear in matching documents.
    'should' - The clause (query) should appear in the matching document.
        In a boolean query with no 'must' clauses, one or more 'should'
        clauses must match a document. The minimum number of 'should'
        clauses to match can be set with the 'minimum_number_should_match'
        parameter.
    'must_not' - The clause (query) must not appear in the matching
        documents. Note that it is not possible to search on documents
        that only consist of 'must_not' clause(s).
    'minimum_number_should_match' - Minimum number of documents that
        should match.
    'boost' - boost value

    > term = ElasticQuery()
    > term.term(user='kimchy')
    > query = ElasticQuery()
    > query.bool(should=term)
    > query.query()
    { 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}}
    '''
    instance = cls(bool={})
    if must is not None:
        instance['bool']['must'] = must
    if should is not None:
        instance['bool']['should'] = should
    if must_not is not None:
        instance['bool']['must_not'] = must_not
    if minimum_number_should_match is not None:
        instance['bool']['minimum_number_should_match'] = \
            minimum_number_should_match
    if boost is not None:
        instance['bool']['boost'] = boost
    return instance
python
def bool(cls, must=None, should=None, must_not=None, minimum_number_should_match=None, boost=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html A query that matches documents matching boolean combinations of other queris. The bool query maps to Lucene BooleanQuery. It is built using one of more boolean clauses, each clause with a typed occurrence. The occurrence types are: 'must' - The clause(query) must appear in matching documents. 'should' - The clause(query) should appear in the matching document. A boolean query with no 'must' clauses, one or more 'should' clauses must match a document. The minimum number of 'should' clauses to match can be set using 'minimum_number_should_match' parameter. 'must_not' - The clause(query) must not appear in the matching documents. Note that it is not possible to search on documents that only consists of a 'must_not' clause(s). 'minimum_number_should_match' - Minimum number of documents that should match 'boost' - boost value > term = ElasticQuery() > term.term(user='kimchy') > query = ElasticQuery() > query.bool(should=term) > query.query() { 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}} ''' instance = cls(bool={}) if must is not None: instance['bool']['must'] = must if should is not None: instance['bool']['should'] = should if must_not is not None: instance['bool']['must_not'] = must_not if minimum_number_should_match is not None: instance['bool']['minimum_number_should_match'] = minimum_number_should_match if boost is not None: instance['bool']['boost'] = boost return instance
['def', 'bool', '(', 'cls', ',', 'must', '=', 'None', ',', 'should', '=', 'None', ',', 'must_not', '=', 'None', ',', 'minimum_number_should_match', '=', 'None', ',', 'boost', '=', 'None', ')', ':', 'instance', '=', 'cls', '(', 'bool', '=', '{', '}', ')', 'if', 'must', 'is', 'not', 'None', ':', 'instance', '[', "'bool'", ']', '[', "'must'", ']', '=', 'must', 'if', 'should', 'is', 'not', 'None', ':', 'instance', '[', "'bool'", ']', '[', "'should'", ']', '=', 'should', 'if', 'must_not', 'is', 'not', 'None', ':', 'instance', '[', "'bool'", ']', '[', "'must_not'", ']', '=', 'must_not', 'if', 'minimum_number_should_match', 'is', 'not', 'None', ':', 'instance', '[', "'bool'", ']', '[', "'minimum_number_should_match'", ']', '=', 'minimum_number_should_match', 'if', 'boost', 'is', 'not', 'None', ':', 'instance', '[', "'bool'", ']', '[', "'boost'", ']', '=', 'boost', 'return', 'instance']
http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html A query that matches documents matching boolean combinations of other queries. The bool query maps to Lucene BooleanQuery. It is built using one or more boolean clauses, each clause with a typed occurrence. The occurrence types are: 'must' - The clause(query) must appear in matching documents. 'should' - The clause(query) should appear in the matching document. A boolean query with no 'must' clauses, one or more 'should' clauses must match a document. The minimum number of 'should' clauses to match can be set using 'minimum_number_should_match' parameter. 'must_not' - The clause(query) must not appear in the matching documents. Note that it is not possible to search on documents that only consist of a 'must_not' clause(s). 'minimum_number_should_match' - Minimum number of documents that should match 'boost' - boost value > term = ElasticQuery() > term.term(user='kimchy') > query = ElasticQuery() > query.bool(should=term) > query.query() { 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}}
['http', ':', '//', 'www', '.', 'elasticsearch', '.', 'org', '/', 'guide', '/', 'reference', '/', 'query', '-', 'dsl', '/', 'bool', '-', 'query', '.', 'html', 'A', 'query', 'that', 'matches', 'documents', 'matching', 'boolean', 'combinations', 'of', 'other', 'queries', '.', 'The', 'bool', 'query', 'maps', 'to', 'Lucene', 'BooleanQuery', '.', 'It', 'is', 'built', 'using', 'one', 'or', 'more', 'boolean', 'clauses', 'each', 'clause', 'with', 'a', 'typed', 'occurrence', '.', 'The', 'occurrence', 'types', 'are', ':', 'must', '-', 'The', 'clause', '(', 'query', ')', 'must', 'appear', 'in', 'matching', 'documents', '.', 'should', '-', 'The', 'clause', '(', 'query', ')', 'should', 'appear', 'in', 'the', 'matching', 'document', '.', 'A', 'boolean', 'query', 'with', 'no', 'must', 'clauses', 'one', 'or', 'more', 'should', 'clauses', 'must', 'match', 'a', 'document', '.', 'The', 'minimum', 'number', 'of', 'should', 'clauses', 'to', 'match', 'can', 'be', 'set', 'using', 'minimum_number_should_match', 'parameter', '.', 'must_not', '-', 'The', 'clause', '(', 'query', ')', 'must', 'not', 'appear', 'in', 'the', 'matching', 'documents', '.', 'Note', 'that', 'it', 'is', 'not', 'possible', 'to', 'search', 'on', 'documents', 'that', 'only', 'consist', 'of', 'a', 'must_not', 'clause', '(', 's', ')', '.']
train
https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/query.py#L101-L132
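A hedged usage sketch for the record above. It assumes the elasticpy package is installed and follows the docstring's own prompt session; whether term() mutates the instance in place is an assumption drawn from that session, and since bool() is written as a constructor-style method returning a new instance (cls(bool={})), capturing its return value is the safer pattern:

    from elasticpy import ElasticQuery

    term = ElasticQuery()
    term.term(user='kimchy')               # per the docstring's own session
    query = ElasticQuery.bool(should=term, minimum_number_should_match=1)
    # Expected shape, per the docstring:
    # {'bool': {'should': {'term': {'user': 'kimchy'}},
    #           'minimum_number_should_match': 1}}
    print(query)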
9,025
bioidiap/gridtk
gridtk/sge.py
JobManagerSGE._queue
def _queue(self, kwargs): """The hard resource_list comes like this: '<qname>=TRUE,mem=128M'. To process it we have to split it twice (',' and then on '='), create a dictionary and extract just the qname""" if not 'hard resource_list' in kwargs: return 'all.q' d = dict([k.split('=') for k in kwargs['hard resource_list'].split(',')]) for k in d: if k[0] == 'q' and d[k] == 'TRUE': return k return 'all.q'
python
def _queue(self, kwargs): """The hard resource_list comes like this: '<qname>=TRUE,mem=128M'. To process it we have to split it twice (',' and then on '='), create a dictionary and extract just the qname""" if not 'hard resource_list' in kwargs: return 'all.q' d = dict([k.split('=') for k in kwargs['hard resource_list'].split(',')]) for k in d: if k[0] == 'q' and d[k] == 'TRUE': return k return 'all.q'
['def', '_queue', '(', 'self', ',', 'kwargs', ')', ':', 'if', 'not', "'hard resource_list'", 'in', 'kwargs', ':', 'return', "'all.q'", 'd', '=', 'dict', '(', '[', 'k', '.', 'split', '(', "'='", ')', 'for', 'k', 'in', 'kwargs', '[', "'hard resource_list'", ']', '.', 'split', '(', "','", ')', ']', ')', 'for', 'k', 'in', 'd', ':', 'if', 'k', '[', '0', ']', '==', "'q'", 'and', 'd', '[', 'k', ']', '==', "'TRUE'", ':', 'return', 'k', 'return', "'all.q'"]
The hard resource_list comes like this: '<qname>=TRUE,mem=128M'. To process it we have to split it twice (',' and then on '='), create a dictionary and extract just the qname
['The', 'hard', 'resource_list', 'comes', 'like', 'this', ':', '<qname', '>', '=', 'TRUE', 'mem', '=', '128M', '.', 'To', 'process', 'it', 'we', 'have', 'to', 'split', 'it', 'twice', '(', 'and', 'then', 'on', '=', ')', 'create', 'a', 'dictionary', 'and', 'extract', 'just', 'the', 'qname']
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/sge.py#L43-L51
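The parsing in JobManagerSGE._queue needs no SGE installation to demonstrate; this standalone sketch repeats the same two-stage split and queue-flag scan:

    def queue_from_resource_list(kwargs):
        # Mirror of JobManagerSGE._queue: split 'k1=v1,k2=v2' on ',' then '=',
        # and return the first key starting with 'q' whose value is TRUE.
        if 'hard resource_list' not in kwargs:
            return 'all.q'
        d = dict(k.split('=') for k in kwargs['hard resource_list'].split(','))
        for k in d:
            if k[0] == 'q' and d[k] == 'TRUE':
                return k
        return 'all.q'

    print(queue_from_resource_list({'hard resource_list': 'q1d=TRUE,mem=128M'}))  # q1d
    print(queue_from_resource_list({}))                                           # all.q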
9,026
sixty-north/asq
asq/extension.py
add_method
def add_method(function, klass, name=None): '''Add an existing function to a class as a method. Note: Consider using the extend decorator as a more readable alternative to using this function directly. Args: function: The function to be added to the class klass. klass: The class to which the new method will be added. name: An optional name for the new method. If omitted or None the original name of the function is used. Returns: The function argument unmodified. Raises: ValueError: If klass already has an attribute with the same name as the extension method. ''' # Should we be using functools.update_wrapper in here? if name is None: name = function_name(function) if hasattr(klass, name): raise ValueError("Cannot replace existing attribute with method " "'{name}'".format(name=name)) setattr(klass, name, function) return function
python
def add_method(function, klass, name=None): '''Add an existing function to a class as a method. Note: Consider using the extend decorator as a more readable alternative to using this function directly. Args: function: The function to be added to the class klass. klass: The class to which the new method will be added. name: An optional name for the new method. If omitted or None the original name of the function is used. Returns: The function argument unmodified. Raises: ValueError: If klass already has an attribute with the same name as the extension method. ''' # Should we be using functools.update_wrapper in here? if name is None: name = function_name(function) if hasattr(klass, name): raise ValueError("Cannot replace existing attribute with method " "'{name}'".format(name=name)) setattr(klass, name, function) return function
['def', 'add_method', '(', 'function', ',', 'klass', ',', 'name', '=', 'None', ')', ':', '# Should we be using functools.update_wrapper in here?\r', 'if', 'name', 'is', 'None', ':', 'name', '=', 'function_name', '(', 'function', ')', 'if', 'hasattr', '(', 'klass', ',', 'name', ')', ':', 'raise', 'ValueError', '(', '"Cannot replace existing attribute with method "', '"\'{name}\'"', '.', 'format', '(', 'name', '=', 'name', ')', ')', 'setattr', '(', 'klass', ',', 'name', ',', 'function', ')', 'return', 'function']
Add an existing function to a class as a method. Note: Consider using the extend decorator as a more readable alternative to using this function directly. Args: function: The function to be added to the class klass. klass: The class to which the new method will be added. name: An optional name for the new method. If omitted or None the original name of the function is used. Returns: The function argument unmodified. Raises: ValueError: If klass already has an attribute with the same name as the extension method.
['Add', 'an', 'existing', 'function', 'to', 'a', 'class', 'as', 'a', 'method', '.', 'Note', ':', 'Consider', 'using', 'the', 'extend', 'decorator', 'as', 'a', 'more', 'readable', 'alternative', 'to', 'using', 'this', 'function', 'directly', '.', 'Args', ':', 'function', ':', 'The', 'function', 'to', 'be', 'added', 'to', 'the', 'class', 'klass', '.', 'klass', ':', 'The', 'class', 'to', 'which', 'the', 'new', 'method', 'will', 'be', 'added', '.', 'name', ':', 'An', 'optional', 'name', 'for', 'the', 'new', 'method', '.', 'If', 'omitted', 'or', 'None', 'the', 'original', 'name', 'of', 'the', 'function', 'is', 'used', '.', 'Returns', ':', 'The', 'function', 'argument', 'unmodified', '.', 'Raises', ':', 'ValueError', ':', 'If', 'klass', 'already', 'has', 'an', 'attribute', 'with', 'the', 'same', 'name', 'as', 'the', 'extension', 'method', '.']
train
https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/extension.py#L8-L36
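A hedged usage sketch, assuming asq is installed and add_method is importable from asq.extension per the record's path; the Bag class and double_items function are illustrative stand-ins:

    from asq.extension import add_method

    class Bag:
        def __init__(self, items):
            self.items = items

    def double_items(self):
        return [x * 2 for x in self.items]

    add_method(double_items, Bag)          # attach under the function's own name
    add_method(double_items, Bag, 'dbl')   # or under an explicit name
    print(Bag([1, 2]).dbl())               # [2, 4]
    # Calling add_method(double_items, Bag) again would raise ValueError,
    # because Bag already has a 'double_items' attribute.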
9,027
ladybug-tools/ladybug
ladybug/datacollection.py
HourlyContinuousCollection._get_analysis_period_subset
def _get_analysis_period_subset(self, a_per): """Return an analysis_period that is always a subset of the Data Collection""" if self.header.analysis_period.is_annual: return a_per new_needed = False n_ap = [a_per.st_month, a_per.st_day, a_per.st_hour, a_per.end_month, a_per.end_day, a_per.end_hour, a_per.timestep, a_per.is_leap_year] if a_per.st_hour < self.header.analysis_period.st_hour: n_ap[2] = self.header.analysis_period.st_hour new_needed = True if a_per.end_hour > self.header.analysis_period.end_hour: n_ap[5] = self.header.analysis_period.end_hour new_needed = True if a_per.st_time.doy < self.header.analysis_period.st_time.doy: n_ap[0] = self.header.analysis_period.st_month n_ap[1] = self.header.analysis_period.st_day new_needed = True if a_per.end_time.doy > self.header.analysis_period.end_time.doy: n_ap[3] = self.header.analysis_period.end_month n_ap[4] = self.header.analysis_period.end_day new_needed = True if new_needed is False: return a_per else: return AnalysisPeriod(*n_ap)
python
def _get_analysis_period_subset(self, a_per): """Return an analysis_period that is always a subset of the Data Collection""" if self.header.analysis_period.is_annual: return a_per new_needed = False n_ap = [a_per.st_month, a_per.st_day, a_per.st_hour, a_per.end_month, a_per.end_day, a_per.end_hour, a_per.timestep, a_per.is_leap_year] if a_per.st_hour < self.header.analysis_period.st_hour: n_ap[2] = self.header.analysis_period.st_hour new_needed = True if a_per.end_hour > self.header.analysis_period.end_hour: n_ap[5] = self.header.analysis_period.end_hour new_needed = True if a_per.st_time.doy < self.header.analysis_period.st_time.doy: n_ap[0] = self.header.analysis_period.st_month n_ap[1] = self.header.analysis_period.st_day new_needed = True if a_per.end_time.doy > self.header.analysis_period.end_time.doy: n_ap[3] = self.header.analysis_period.end_month n_ap[4] = self.header.analysis_period.end_day new_needed = True if new_needed is False: return a_per else: return AnalysisPeriod(*n_ap)
['def', '_get_analysis_period_subset', '(', 'self', ',', 'a_per', ')', ':', 'if', 'self', '.', 'header', '.', 'analysis_period', '.', 'is_annual', ':', 'return', 'a_per', 'new_needed', '=', 'False', 'n_ap', '=', '[', 'a_per', '.', 'st_month', ',', 'a_per', '.', 'st_day', ',', 'a_per', '.', 'st_hour', ',', 'a_per', '.', 'end_month', ',', 'a_per', '.', 'end_day', ',', 'a_per', '.', 'end_hour', ',', 'a_per', '.', 'timestep', ',', 'a_per', '.', 'is_leap_year', ']', 'if', 'a_per', '.', 'st_hour', '<', 'self', '.', 'header', '.', 'analysis_period', '.', 'st_hour', ':', 'n_ap', '[', '2', ']', '=', 'self', '.', 'header', '.', 'analysis_period', '.', 'st_hour', 'new_needed', '=', 'True', 'if', 'a_per', '.', 'end_hour', '>', 'self', '.', 'header', '.', 'analysis_period', '.', 'end_hour', ':', 'n_ap', '[', '5', ']', '=', 'self', '.', 'header', '.', 'analysis_period', '.', 'end_hour', 'new_needed', '=', 'True', 'if', 'a_per', '.', 'st_time', '.', 'doy', '<', 'self', '.', 'header', '.', 'analysis_period', '.', 'st_time', '.', 'doy', ':', 'n_ap', '[', '0', ']', '=', 'self', '.', 'header', '.', 'analysis_period', '.', 'st_month', 'n_ap', '[', '1', ']', '=', 'self', '.', 'header', '.', 'analysis_period', '.', 'st_day', 'new_needed', '=', 'True', 'if', 'a_per', '.', 'end_time', '.', 'doy', '>', 'self', '.', 'header', '.', 'analysis_period', '.', 'end_time', '.', 'doy', ':', 'n_ap', '[', '3', ']', '=', 'self', '.', 'header', '.', 'analysis_period', '.', 'end_month', 'n_ap', '[', '4', ']', '=', 'self', '.', 'header', '.', 'analysis_period', '.', 'end_day', 'new_needed', '=', 'True', 'if', 'new_needed', 'is', 'False', ':', 'return', 'a_per', 'else', ':', 'return', 'AnalysisPeriod', '(', '*', 'n_ap', ')']
Return an analysis_period that is always a subset of the Data Collection
['Return', 'an', 'analysis_period', 'that', 'is', 'always', 'a', 'subset', 'of', 'the', 'Data', 'Collection']
train
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L875-L901
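Stripped of the ladybug-specific objects, the method above is interval clamping: any requested bound that falls outside the collection's own analysis period is pulled back inside it. A minimal standalone illustration of that idea:

    def clamp_period(requested, available):
        # (start, end) pairs; mirrors the bound-by-bound checks above
        start = max(requested[0], available[0])
        end = min(requested[1], available[1])
        return (start, end)

    print(clamp_period((1, 24), (6, 18)))  # (6, 18): both hours clamped
    print(clamp_period((8, 17), (6, 18)))  # (8, 17): already a subset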
9,028
sdispater/orator
orator/orm/builder.py
Builder._apply_scope
def _apply_scope(self, scope, builder): """ Apply a single scope on the given builder instance. :param scope: The scope to apply :type scope: callable or Scope :param builder: The builder to apply the scope to :type builder: Builder """ if callable(scope): scope(builder) elif isinstance(scope, Scope): scope.apply(builder, self.get_model())
python
def _apply_scope(self, scope, builder): """ Apply a single scope on the given builder instance. :param scope: The scope to apply :type scope: callable or Scope :param builder: The builder to apply the scope to :type builder: Builder """ if callable(scope): scope(builder) elif isinstance(scope, Scope): scope.apply(builder, self.get_model())
['def', '_apply_scope', '(', 'self', ',', 'scope', ',', 'builder', ')', ':', 'if', 'callable', '(', 'scope', ')', ':', 'scope', '(', 'builder', ')', 'elif', 'isinstance', '(', 'scope', ',', 'Scope', ')', ':', 'scope', '.', 'apply', '(', 'builder', ',', 'self', '.', 'get_model', '(', ')', ')']
Apply a single scope on the given builder instance. :param scope: The scope to apply :type scope: callable or Scope :param builder: The builder to apply the scope to :type builder: Builder
['Apply', 'a', 'single', 'scope', 'on', 'the', 'given', 'builder', 'instance', '.']
train
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/builder.py#L986-L999
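The dispatch in Builder._apply_scope reduces to a callable-versus-object check; this standalone sketch shows both paths with a minimal stand-in Scope class:

    class Scope:
        def apply(self, builder, model):
            print('Scope.apply on', builder, 'for', model)

    def apply_scope(scope, builder, model):
        # Mirror of Builder._apply_scope: plain callables are invoked,
        # Scope instances get apply(builder, model).
        if callable(scope):
            scope(builder)
        elif isinstance(scope, Scope):
            scope.apply(builder, model)

    apply_scope(lambda b: print('callable scope on', b), 'builder', 'User')
    apply_scope(Scope(), 'builder', 'User')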
9,029
santoshphilip/eppy
eppy/modeleditor.py
zoneheight
def zoneheight(idf, zonename, debug=False): """zone height""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR'] roofs = [s for s in zone_surfs if s.Surface_Type.upper() == 'ROOF'] if floors == [] or roofs == []: height = zone_height_min2max(idf, zonename) else: height = zone_floor2roofheight(idf, zonename) return height
python
def zoneheight(idf, zonename, debug=False): """zone height""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR'] roofs = [s for s in zone_surfs if s.Surface_Type.upper() == 'ROOF'] if floors == [] or roofs == []: height = zone_height_min2max(idf, zonename) else: height = zone_floor2roofheight(idf, zonename) return height
['def', 'zoneheight', '(', 'idf', ',', 'zonename', ',', 'debug', '=', 'False', ')', ':', 'zone', '=', 'idf', '.', 'getobject', '(', "'ZONE'", ',', 'zonename', ')', 'surfs', '=', 'idf', '.', 'idfobjects', '[', "'BuildingSurface:Detailed'", '.', 'upper', '(', ')', ']', 'zone_surfs', '=', '[', 's', 'for', 's', 'in', 'surfs', 'if', 's', '.', 'Zone_Name', '==', 'zone', '.', 'Name', ']', 'floors', '=', '[', 's', 'for', 's', 'in', 'zone_surfs', 'if', 's', '.', 'Surface_Type', '.', 'upper', '(', ')', '==', "'FLOOR'", ']', 'roofs', '=', '[', 's', 'for', 's', 'in', 'zone_surfs', 'if', 's', '.', 'Surface_Type', '.', 'upper', '(', ')', '==', "'ROOF'", ']', 'if', 'floors', '==', '[', ']', 'or', 'roofs', '==', '[', ']', ':', 'height', '=', 'zone_height_min2max', '(', 'idf', ',', 'zonename', ')', 'else', ':', 'height', '=', 'zone_floor2roofheight', '(', 'idf', ',', 'zonename', ')', 'return', 'height']
zone height
['zone', 'height']
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L450-L461
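A hedged usage sketch for zoneheight; the IDD and model file paths and the zone name are hypothetical placeholders, and eppy requires an EnergyPlus IDD file to be registered before any IDF can be read:

    from eppy.modeleditor import IDF, zoneheight  # zoneheight lives in modeleditor per the record

    IDF.setiddname('Energy+.idd')   # hypothetical path to the EnergyPlus IDD
    idf = IDF('model.idf')          # hypothetical model file
    # Falls back to min-to-max height when the zone lacks FLOOR/ROOF surfaces:
    print(zoneheight(idf, 'ZONE ONE'))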
9,030
ssato/python-anyconfig
src/anyconfig/utils.py
expand_paths
def expand_paths(paths, marker='*'): """ :param paths: A glob path pattern string or pathlib.Path object holding such path, or a list consisting of path strings or glob path pattern strings or pathlib.Path object holding such ones, or file objects :param marker: Glob marker character or string, e.g. '*' :return: List of path strings >>> expand_paths([]) [] >>> expand_paths("/usr/lib/a/b.conf /etc/a/b.conf /run/a/b.conf".split()) ['/usr/lib/a/b.conf', '/etc/a/b.conf', '/run/a/b.conf'] >>> paths_s = os.path.join(os.path.dirname(__file__), "u*.py") >>> ref = sglob(paths_s) >>> assert expand_paths(paths_s) == ref >>> ref = ["/etc/a.conf"] + ref >>> assert expand_paths(["/etc/a.conf", paths_s]) == ref >>> strm = anyconfig.compat.StringIO() >>> assert expand_paths(["/etc/a.conf", strm]) == ["/etc/a.conf", strm] """ if is_path(paths) and marker in paths: return sglob(paths) if is_path_obj(paths) and marker in paths.as_posix(): # TBD: Is it better to return [p :: pathlib.Path] instead? return [normpath(p) for p in sglob(paths.as_posix())] return list(_expand_paths_itr(paths, marker=marker))
python
def expand_paths(paths, marker='*'): """ :param paths: A glob path pattern string or pathlib.Path object holding such path, or a list consisting of path strings or glob path pattern strings or pathlib.Path object holding such ones, or file objects :param marker: Glob marker character or string, e.g. '*' :return: List of path strings >>> expand_paths([]) [] >>> expand_paths("/usr/lib/a/b.conf /etc/a/b.conf /run/a/b.conf".split()) ['/usr/lib/a/b.conf', '/etc/a/b.conf', '/run/a/b.conf'] >>> paths_s = os.path.join(os.path.dirname(__file__), "u*.py") >>> ref = sglob(paths_s) >>> assert expand_paths(paths_s) == ref >>> ref = ["/etc/a.conf"] + ref >>> assert expand_paths(["/etc/a.conf", paths_s]) == ref >>> strm = anyconfig.compat.StringIO() >>> assert expand_paths(["/etc/a.conf", strm]) == ["/etc/a.conf", strm] """ if is_path(paths) and marker in paths: return sglob(paths) if is_path_obj(paths) and marker in paths.as_posix(): # TBD: Is it better to return [p :: pathlib.Path] instead? return [normpath(p) for p in sglob(paths.as_posix())] return list(_expand_paths_itr(paths, marker=marker))
['def', 'expand_paths', '(', 'paths', ',', 'marker', '=', "'*'", ')', ':', 'if', 'is_path', '(', 'paths', ')', 'and', 'marker', 'in', 'paths', ':', 'return', 'sglob', '(', 'paths', ')', 'if', 'is_path_obj', '(', 'paths', ')', 'and', 'marker', 'in', 'paths', '.', 'as_posix', '(', ')', ':', '# TBD: Is it better to return [p :: pathlib.Path] instead?', 'return', '[', 'normpath', '(', 'p', ')', 'for', 'p', 'in', 'sglob', '(', 'paths', '.', 'as_posix', '(', ')', ')', ']', 'return', 'list', '(', '_expand_paths_itr', '(', 'paths', ',', 'marker', '=', 'marker', ')', ')']
:param paths: A glob path pattern string or pathlib.Path object holding such path, or a list consisting of path strings or glob path pattern strings or pathlib.Path object holding such ones, or file objects :param marker: Glob marker character or string, e.g. '*' :return: List of path strings >>> expand_paths([]) [] >>> expand_paths("/usr/lib/a/b.conf /etc/a/b.conf /run/a/b.conf".split()) ['/usr/lib/a/b.conf', '/etc/a/b.conf', '/run/a/b.conf'] >>> paths_s = os.path.join(os.path.dirname(__file__), "u*.py") >>> ref = sglob(paths_s) >>> assert expand_paths(paths_s) == ref >>> ref = ["/etc/a.conf"] + ref >>> assert expand_paths(["/etc/a.conf", paths_s]) == ref >>> strm = anyconfig.compat.StringIO() >>> assert expand_paths(["/etc/a.conf", strm]) == ["/etc/a.conf", strm]
[':', 'param', 'paths', ':', 'A', 'glob', 'path', 'pattern', 'string', 'or', 'pathlib', '.', 'Path', 'object', 'holding', 'such', 'path', 'or', 'a', 'list', 'consisting', 'of', 'path', 'strings', 'or', 'glob', 'path', 'pattern', 'strings', 'or', 'pathlib', '.', 'Path', 'object', 'holding', 'such', 'ones', 'or', 'file', 'objects', ':', 'param', 'marker', ':', 'Glob', 'marker', 'character', 'or', 'string', 'e', '.', 'g', '.', '*']
train
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/utils.py#L345-L374
9,031
caseyjlaw/rtpipe
rtpipe/calpipe.py
pipe.set_fluxinfo
def set_fluxinfo(self): """ Uses list of known flux calibrators (with models in CASA) to find full name given in scan. """ knowncals = ['3C286', '3C48', '3C147', '3C138'] # find scans with knowncals in the name sourcenames = [self.sources[source]['source'] for source in self.sources] calsources = [cal for src in sourcenames for cal in knowncals if cal in src] calsources_full = [src for src in sourcenames for cal in knowncals if cal in src] if len(calsources): # if cal found, set band name from first spw self.band = self.sdm['Receiver'][0].frequencyBand.split('_')[1] if len(calsources) > 1: print 'Found multiple flux calibrators:', calsources self.fluxname = calsources[0] self.fluxname_full = calsources_full[0] print 'Set flux calibrator to %s and band to %s.' % (self.fluxname_full, self.band) else: self.fluxname = '' self.fluxname_full = '' self.band = ''
python
def set_fluxinfo(self): """ Uses list of known flux calibrators (with models in CASA) to find full name given in scan. """ knowncals = ['3C286', '3C48', '3C147', '3C138'] # find scans with knowncals in the name sourcenames = [self.sources[source]['source'] for source in self.sources] calsources = [cal for src in sourcenames for cal in knowncals if cal in src] calsources_full = [src for src in sourcenames for cal in knowncals if cal in src] if len(calsources): # if cal found, set band name from first spw self.band = self.sdm['Receiver'][0].frequencyBand.split('_')[1] if len(calsources) > 1: print 'Found multiple flux calibrators:', calsources self.fluxname = calsources[0] self.fluxname_full = calsources_full[0] print 'Set flux calibrator to %s and band to %s.' % (self.fluxname_full, self.band) else: self.fluxname = '' self.fluxname_full = '' self.band = ''
['def', 'set_fluxinfo', '(', 'self', ')', ':', 'knowncals', '=', '[', "'3C286'", ',', "'3C48'", ',', "'3C147'", ',', "'3C138'", ']', '# find scans with knowncals in the name', 'sourcenames', '=', '[', 'self', '.', 'sources', '[', 'source', ']', '[', "'source'", ']', 'for', 'source', 'in', 'self', '.', 'sources', ']', 'calsources', '=', '[', 'cal', 'for', 'src', 'in', 'sourcenames', 'for', 'cal', 'in', 'knowncals', 'if', 'cal', 'in', 'src', ']', 'calsources_full', '=', '[', 'src', 'for', 'src', 'in', 'sourcenames', 'for', 'cal', 'in', 'knowncals', 'if', 'cal', 'in', 'src', ']', 'if', 'len', '(', 'calsources', ')', ':', '# if cal found, set band name from first spw', 'self', '.', 'band', '=', 'self', '.', 'sdm', '[', "'Receiver'", ']', '[', '0', ']', '.', 'frequencyBand', '.', 'split', '(', "'_'", ')', '[', '1', ']', 'if', 'len', '(', 'calsources', ')', '>', '1', ':', 'print', "'Found multiple flux calibrators:'", ',', 'calsources', 'self', '.', 'fluxname', '=', 'calsources', '[', '0', ']', 'self', '.', 'fluxname_full', '=', 'calsources_full', '[', '0', ']', 'print', "'Set flux calibrator to %s and band to %s.'", '%', '(', 'self', '.', 'fluxname_full', ',', 'self', '.', 'band', ')', 'else', ':', 'self', '.', 'fluxname', '=', "''", 'self', '.', 'fluxname_full', '=', "''", 'self', '.', 'band', '=', "''"]
Uses list of known flux calibrators (with models in CASA) to find full name given in scan.
['Uses', 'list', 'of', 'known', 'flux', 'calibrators', '(', 'with', 'models', 'in', 'CASA', ')', 'to', 'find', 'full', 'name', 'given', 'in', 'scan', '.']
train
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/calpipe.py#L78-L100
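The record above is Python 2 code (bare print statements). Its core matching step is a pair of list comprehensions that can be demonstrated standalone; the source names here are hypothetical:

    # Mirror of the calibrator matching in set_fluxinfo: find known flux
    # calibrators as substrings of the observed source names.
    knowncals = ['3C286', '3C48', '3C147', '3C138']
    sourcenames = ['J1331+3030=3C286', 'TARGET_FIELD']

    calsources = [cal for src in sourcenames for cal in knowncals if cal in src]
    calsources_full = [src for src in sourcenames for cal in knowncals if cal in src]
    print(calsources)       # ['3C286']
    print(calsources_full)  # ['J1331+3030=3C286']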
9,032
hyperledger/indy-sdk
wrappers/python/indy/ledger.py
build_attrib_request
async def build_attrib_request(submitter_did: str, target_did: str, xhash: Optional[str], raw: Optional[str], enc: Optional[str]) -> str: """ Builds an ATTRIB request. Request to add attribute to a NYM record. :param submitter_did: DID of the submitter stored in secured Wallet. :param target_did: Target DID as base58-encoded string for 16 or 32 bit DID value. :param xhash: (Optional) Hash of attribute data. :param raw: (Optional) Json, where key is attribute name and value is attribute value. :param enc: (Optional) Encrypted value attribute data. :return: Request result as json. """ logger = logging.getLogger(__name__) logger.debug("build_attrib_request: >>> submitter_did: %r, target_did: %r, hash: %r, raw: %r, enc: %r", submitter_did, target_did, xhash, raw, enc) if not hasattr(build_attrib_request, "cb"): logger.debug("build_attrib_request: Creating callback") build_attrib_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) c_submitter_did = c_char_p(submitter_did.encode('utf-8')) c_target_did = c_char_p(target_did.encode('utf-8')) c_hash = c_char_p(xhash.encode('utf-8')) if xhash is not None else None c_raw = c_char_p(raw.encode('utf-8')) if raw is not None else None c_enc = c_char_p(enc.encode('utf-8')) if enc is not None else None request_json = await do_call('indy_build_attrib_request', c_submitter_did, c_target_did, c_hash, c_raw, c_enc, build_attrib_request.cb) res = request_json.decode() logger.debug("build_attrib_request: <<< res: %r", res) return res
python
async def build_attrib_request(submitter_did: str, target_did: str, xhash: Optional[str], raw: Optional[str], enc: Optional[str]) -> str: """ Builds an ATTRIB request. Request to add attribute to a NYM record. :param submitter_did: DID of the submitter stored in secured Wallet. :param target_did: Target DID as base58-encoded string for 16 or 32 bit DID value. :param xhash: (Optional) Hash of attribute data. :param raw: (Optional) Json, where key is attribute name and value is attribute value. :param enc: (Optional) Encrypted value attribute data. :return: Request result as json. """ logger = logging.getLogger(__name__) logger.debug("build_attrib_request: >>> submitter_did: %r, target_did: %r, hash: %r, raw: %r, enc: %r", submitter_did, target_did, xhash, raw, enc) if not hasattr(build_attrib_request, "cb"): logger.debug("build_attrib_request: Creating callback") build_attrib_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) c_submitter_did = c_char_p(submitter_did.encode('utf-8')) c_target_did = c_char_p(target_did.encode('utf-8')) c_hash = c_char_p(xhash.encode('utf-8')) if xhash is not None else None c_raw = c_char_p(raw.encode('utf-8')) if raw is not None else None c_enc = c_char_p(enc.encode('utf-8')) if enc is not None else None request_json = await do_call('indy_build_attrib_request', c_submitter_did, c_target_did, c_hash, c_raw, c_enc, build_attrib_request.cb) res = request_json.decode() logger.debug("build_attrib_request: <<< res: %r", res) return res
['async', 'def', 'build_attrib_request', '(', 'submitter_did', ':', 'str', ',', 'target_did', ':', 'str', ',', 'xhash', ':', 'Optional', '[', 'str', ']', ',', 'raw', ':', 'Optional', '[', 'str', ']', ',', 'enc', ':', 'Optional', '[', 'str', ']', ')', '->', 'str', ':', 'logger', '=', 'logging', '.', 'getLogger', '(', '__name__', ')', 'logger', '.', 'debug', '(', '"build_attrib_request: >>> submitter_did: %r, target_did: %r, hash: %r, raw: %r, enc: %r"', ',', 'submitter_did', ',', 'target_did', ',', 'xhash', ',', 'raw', ',', 'enc', ')', 'if', 'not', 'hasattr', '(', 'build_attrib_request', ',', '"cb"', ')', ':', 'logger', '.', 'debug', '(', '"build_attrib_request: Creating callback"', ')', 'build_attrib_request', '.', 'cb', '=', 'create_cb', '(', 'CFUNCTYPE', '(', 'None', ',', 'c_int32', ',', 'c_int32', ',', 'c_char_p', ')', ')', 'c_submitter_did', '=', 'c_char_p', '(', 'submitter_did', '.', 'encode', '(', "'utf-8'", ')', ')', 'c_target_did', '=', 'c_char_p', '(', 'target_did', '.', 'encode', '(', "'utf-8'", ')', ')', 'c_hash', '=', 'c_char_p', '(', 'xhash', '.', 'encode', '(', "'utf-8'", ')', ')', 'if', 'xhash', 'is', 'not', 'None', 'else', 'None', 'c_raw', '=', 'c_char_p', '(', 'raw', '.', 'encode', '(', "'utf-8'", ')', ')', 'if', 'raw', 'is', 'not', 'None', 'else', 'None', 'c_enc', '=', 'c_char_p', '(', 'enc', '.', 'encode', '(', "'utf-8'", ')', ')', 'if', 'enc', 'is', 'not', 'None', 'else', 'None', 'request_json', '=', 'await', 'do_call', '(', "'indy_build_attrib_request'", ',', 'c_submitter_did', ',', 'c_target_did', ',', 'c_hash', ',', 'c_raw', ',', 'c_enc', ',', 'build_attrib_request', '.', 'cb', ')', 'res', '=', 'request_json', '.', 'decode', '(', ')', 'logger', '.', 'debug', '(', '"build_attrib_request: <<< res: %r"', ',', 'res', ')', 'return', 'res']
Builds an ATTRIB request. Request to add attribute to a NYM record. :param submitter_did: DID of the submitter stored in secured Wallet. :param target_did: Target DID as base58-encoded string for 16 or 32 bit DID value. :param xhash: (Optional) Hash of attribute data. :param raw: (Optional) Json, where key is attribute name and value is attribute value. :param enc: (Optional) Encrypted value attribute data. :return: Request result as json.
['Builds', 'an', 'ATTRIB', 'request', '.', 'Request', 'to', 'add', 'attribute', 'to', 'a', 'NYM', 'record', '.']
train
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/ledger.py#L302-L346
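A hedged usage sketch; the DIDs and the raw attribute payload are hypothetical placeholders, and running it requires the libindy native library that the Python wrapper binds to:

    import asyncio
    import json

    from indy import ledger

    async def main():
        request = await ledger.build_attrib_request(
            submitter_did='Th7MpTaRZVRYnPiabds81Y',   # hypothetical DID
            target_did='Th7MpTaRZVRYnPiabds81Y',
            xhash=None,
            raw=json.dumps({'endpoint': {'ha': '127.0.0.1:5555'}}),
            enc=None)
        print(request)

    asyncio.run(main())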
9,033
ampl/amplpy
amplpy/ampl.py
AMPL.getData
def getData(self, *statements): """ Get the data corresponding to the display statements. The statements can be AMPL expressions, or entities. It captures the equivalent of the command: .. code-block:: ampl display ds1, ..., dsn; where ds1, ..., dsn are the ``displayStatements`` with which the function is called. As only one DataFrame is returned, the operation will fail if the results of the display statements cannot be indexed over the same set. As a result, any attempt to get data from more than one set, or to get data for multiple parameters with a different number of indexing sets will fail. Args: statements: The display statements to be fetched. Raises: RuntimeError: if the AMPL visualization command does not succeed for one of the reasons listed above. Returns: DataFrame capturing the output of the display command in tabular form. """ # FIXME: only works for the first statement. return lock_and_call( lambda: DataFrame._fromDataFrameRef( self._impl.getData(list(statements), len(statements)) ), self._lock )
python
def getData(self, *statements): """ Get the data corresponding to the display statements. The statements can be AMPL expressions, or entities. It captures the equivalent of the command: .. code-block:: ampl display ds1, ..., dsn; where ds1, ..., dsn are the ``displayStatements`` with which the function is called. As only one DataFrame is returned, the operation will fail if the results of the display statements cannot be indexed over the same set. As a result, any attempt to get data from more than one set, or to get data for multiple parameters with a different number of indexing sets will fail. Args: statements: The display statements to be fetched. Raises: RuntimeError: if the AMPL visualization command does not succeed for one of the reasons listed above. Returns: DataFrame capturing the output of the display command in tabular form. """ # FIXME: only works for the first statement. return lock_and_call( lambda: DataFrame._fromDataFrameRef( self._impl.getData(list(statements), len(statements)) ), self._lock )
['def', 'getData', '(', 'self', ',', '*', 'statements', ')', ':', '# FIXME: only works for the first statement.', 'return', 'lock_and_call', '(', 'lambda', ':', 'DataFrame', '.', '_fromDataFrameRef', '(', 'self', '.', '_impl', '.', 'getData', '(', 'list', '(', 'statements', ')', ',', 'len', '(', 'statements', ')', ')', ')', ',', 'self', '.', '_lock', ')']
Get the data corresponding to the display statements. The statements can be AMPL expressions, or entities. It captures the equivalent of the command: .. code-block:: ampl display ds1, ..., dsn; where ds1, ..., dsn are the ``displayStatements`` with which the function is called. As only one DataFrame is returned, the operation will fail if the results of the display statements cannot be indexed over the same set. As a result, any attempt to get data from more than one set, or to get data for multiple parameters with a different number of indexing sets will fail. Args: statements: The display statements to be fetched. Raises: RuntimeError: if the AMPL visualization command does not succeed for one of the reasons listed above. Returns: DataFrame capturing the output of the display command in tabular form.
['Get', 'the', 'data', 'corresponding', 'to', 'the', 'display', 'statements', '.', 'The', 'statements', 'can', 'be', 'AMPL', 'expressions', 'or', 'entities', '.', 'It', 'captures', 'the', 'equivalent', 'of', 'the', 'command', ':']
train
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/ampl.py#L118-L154
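A hedged usage sketch for getData; it assumes a local AMPL installation and feeds a throwaway two-element model so the display statement has something to index over:

    from amplpy import AMPL

    ampl = AMPL()
    ampl.eval('set S := {1, 2}; param cost{S} := 10;')  # hypothetical model
    df = ampl.getData('cost')   # one display statement -> one DataFrame
    print(df)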
9,034
robinandeer/puzzle
puzzle/cli/delete.py
delete
def delete(ctx, family_id, individual_id, root): """ Delete a case or individual from the database. If no database was found run puzzle init first. """ root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle") if os.path.isfile(root): logger.error("'root' can't be a file") ctx.abort() logger.info("Root directory is: {}".format(root)) db_path = os.path.join(root, 'puzzle_db.sqlite3') logger.info("db path is: {}".format(db_path)) if not os.path.exists(db_path): logger.warn("database not initialized, run 'puzzle init'") ctx.abort() store = SqlStore(db_path) if family_id: case_obj = store.case(case_id=family_id) if case_obj is None: logger.warning("Family {0} does not exist in database" .format(family_id)) ctx.abort() store.delete_case(case_obj) elif individual_id: ind_obj = store.individual(ind_id=individual_id) if ind_obj.ind_id != individual_id: logger.warning("Individual {0} does not exist in database" .format(individual_id)) ctx.abort() store.delete_individual(ind_obj) else: logger.warning("Please provide a family or individual id") ctx.abort()
python
def delete(ctx, family_id, individual_id, root): """ Delete a case or individual from the database. If no database was found run puzzle init first. """ root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle") if os.path.isfile(root): logger.error("'root' can't be a file") ctx.abort() logger.info("Root directory is: {}".format(root)) db_path = os.path.join(root, 'puzzle_db.sqlite3') logger.info("db path is: {}".format(db_path)) if not os.path.exists(db_path): logger.warn("database not initialized, run 'puzzle init'") ctx.abort() store = SqlStore(db_path) if family_id: case_obj = store.case(case_id=family_id) if case_obj is None: logger.warning("Family {0} does not exist in database" .format(family_id)) ctx.abort() store.delete_case(case_obj) elif individual_id: ind_obj = store.individual(ind_id=individual_id) if ind_obj.ind_id != individual_id: logger.warning("Individual {0} does not exist in database" .format(individual_id)) ctx.abort() store.delete_individual(ind_obj) else: logger.warning("Please provide a family or individual id") ctx.abort()
['def', 'delete', '(', 'ctx', ',', 'family_id', ',', 'individual_id', ',', 'root', ')', ':', 'root', '=', 'root', 'or', 'ctx', '.', 'obj', '.', 'get', '(', "'root'", ')', 'or', 'os', '.', 'path', '.', 'expanduser', '(', '"~/.puzzle"', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'root', ')', ':', 'logger', '.', 'error', '(', '"\'root\' can\'t be a file"', ')', 'ctx', '.', 'abort', '(', ')', 'logger', '.', 'info', '(', '"Root directory is: {}"', '.', 'format', '(', 'root', ')', ')', 'db_path', '=', 'os', '.', 'path', '.', 'join', '(', 'root', ',', "'puzzle_db.sqlite3'", ')', 'logger', '.', 'info', '(', '"db path is: {}"', '.', 'format', '(', 'db_path', ')', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'db_path', ')', ':', 'logger', '.', 'warn', '(', '"database not initialized, run \'puzzle init\'"', ')', 'ctx', '.', 'abort', '(', ')', 'store', '=', 'SqlStore', '(', 'db_path', ')', 'if', 'family_id', ':', 'case_obj', '=', 'store', '.', 'case', '(', 'case_id', '=', 'family_id', ')', 'if', 'case_obj', 'is', 'None', ':', 'logger', '.', 'warning', '(', '"Family {0} does not exist in database"', '.', 'format', '(', 'family_id', ')', ')', 'ctx', '.', 'abort', '(', ')', 'store', '.', 'delete_case', '(', 'case_obj', ')', 'elif', 'individual_id', ':', 'ind_obj', '=', 'store', '.', 'individual', '(', 'ind_id', '=', 'individual_id', ')', 'if', 'ind_obj', '.', 'ind_id', '!=', 'individual_id', ':', 'logger', '.', 'warning', '(', '"Individual {0} does not exist in database"', '.', 'format', '(', 'individual_id', ')', ')', 'ctx', '.', 'abort', '(', ')', 'store', '.', 'delete_individual', '(', 'ind_obj', ')', 'else', ':', 'logger', '.', 'warning', '(', '"Please provide a family or individual id"', ')', 'ctx', '.', 'abort', '(', ')']
Delete a case or individual from the database. If no database was found run puzzle init first.
['Delete', 'a', 'case', 'or', 'individual', 'from', 'the', 'database', '.']
train
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/cli/delete.py#L18-L57
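Before touching the database, delete() resolves the puzzle root and refuses to run against a missing or malformed setup; that guard logic is small enough to demonstrate standalone:

    import os

    def resolve_db_path(root=None):
        # Mirror of the guards in delete(): default the root, reject files,
        # and require an already-initialized puzzle_db.sqlite3.
        root = root or os.path.expanduser('~/.puzzle')
        if os.path.isfile(root):
            raise SystemExit("'root' can't be a file")
        db_path = os.path.join(root, 'puzzle_db.sqlite3')
        if not os.path.exists(db_path):
            raise SystemExit("database not initialized, run 'puzzle init'")
        return db_path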
9,035
benhoff/pluginmanager
pluginmanager/file_manager.py
FileManager.set_plugin_filepaths
def set_plugin_filepaths(self, filepaths, except_blacklisted=True): """ Sets `filepaths` to the `self.plugin_filepaths`. Recommend passing in absolute filepaths. Method will attempt to convert to absolute paths if they are not already. `filepaths` can be a single object or an iterable. If `except_blacklisted` is `True`, all `filepaths` that have been blacklisted will not be set. """ filepaths = util.to_absolute_paths(filepaths) if except_blacklisted: filepaths = util.remove_from_set(filepaths, self.blacklisted_filepaths) self.plugin_filepaths = filepaths
python
def set_plugin_filepaths(self, filepaths, except_blacklisted=True): """ Sets `filepaths` to the `self.plugin_filepaths`. Recommend passing in absolute filepaths. Method will attempt to convert to absolute paths if they are not already. `filepaths` can be a single object or an iterable. If `except_blacklisted` is `True`, all `filepaths` that have been blacklisted will not be set. """ filepaths = util.to_absolute_paths(filepaths) if except_blacklisted: filepaths = util.remove_from_set(filepaths, self.blacklisted_filepaths) self.plugin_filepaths = filepaths
['def', 'set_plugin_filepaths', '(', 'self', ',', 'filepaths', ',', 'except_blacklisted', '=', 'True', ')', ':', 'filepaths', '=', 'util', '.', 'to_absolute_paths', '(', 'filepaths', ')', 'if', 'except_blacklisted', ':', 'filepaths', '=', 'util', '.', 'remove_from_set', '(', 'filepaths', ',', 'self', '.', 'blacklisted_filepaths', ')', 'self', '.', 'plugin_filepaths', '=', 'filepaths']
Sets `filepaths` to the `self.plugin_filepaths`. Recommend passing in absolute filepaths. Method will attempt to convert to absolute paths if they are not already. `filepaths` can be a single object or an iterable. If `except_blacklisted` is `True`, all `filepaths` that have been blacklisted will not be set.
['Sets', 'filepaths', 'to', 'the', 'self', '.', 'plugin_filepaths', '.', 'Recommend', 'passing', 'in', 'absolute', 'filepaths', '.', 'Method', 'will', 'attempt', 'to', 'convert', 'to', 'absolute', 'paths', 'if', 'they', 'are', 'not', 'already', '.']
train
https://github.com/benhoff/pluginmanager/blob/a8a184f9ebfbb521703492cb88c1dbda4cd04c06/pluginmanager/file_manager.py#L93-L109
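A hedged usage sketch; it assumes pluginmanager is installed, that FileManager() constructs with default arguments, and that the relative path given here exists so it can be made absolute:

    from pluginmanager.file_manager import FileManager  # module path per the record

    fm = FileManager()
    fm.set_plugin_filepaths('plugins/a.py')   # a single path is accepted too
    print(fm.plugin_filepaths)                # absolute form of plugins/a.py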
9,036
contentful/contentful-management.py
contentful_management/client_proxy.py
ClientProxy.find
def find(self, resource_id, query=None, **kwargs): """Gets a single resource.""" if query is None: query = {} return self.client._get( self._url(resource_id), query, **kwargs )
python
def find(self, resource_id, query=None, **kwargs): """Gets a single resource.""" if query is None: query = {} return self.client._get( self._url(resource_id), query, **kwargs )
['def', 'find', '(', 'self', ',', 'resource_id', ',', 'query', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'query', 'is', 'None', ':', 'query', '=', '{', '}', 'return', 'self', '.', 'client', '.', '_get', '(', 'self', '.', '_url', '(', 'resource_id', ')', ',', 'query', ',', '*', '*', 'kwargs', ')']
Gets a single resource.
['Gets', 'a', 'single', 'resource', '.']
train
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/client_proxy.py#L45-L54
9,037
openstax/cnx-easybake
cnxeasybake/oven.py
Oven.counter_style
def counter_style(self, val, style): """Return counter value in given style.""" if style == 'decimal-leading-zero': if val < 10: valstr = "0{}".format(val) else: valstr = str(val) elif style == 'lower-roman': valstr = _to_roman(val).lower() elif style == 'upper-roman': valstr = _to_roman(val) elif style == 'lower-latin' or style == 'lower-alpha': if 1 <= val <= 26: valstr = chr(val + 96) else: log(WARN, 'Counter out of range for latin (must be 1...26)') valstr = str(val) elif style == 'upper-latin' or style == 'upper-alpha': if 1 <= val <= 26: valstr = chr(val + 64) else: log(WARN, 'Counter out of range for latin (must be 1...26)') valstr = str(val) elif style == 'decimal': valstr = str(val) else: log(WARN, u"ERROR: Counter numbering not supported for" u" list type {}. Using decimal.".format( style).encode('utf-8')) valstr = str(val) return valstr
python
def counter_style(self, val, style): """Return counter value in given style.""" if style == 'decimal-leading-zero': if val < 10: valstr = "0{}".format(val) else: valstr = str(val) elif style == 'lower-roman': valstr = _to_roman(val).lower() elif style == 'upper-roman': valstr = _to_roman(val) elif style == 'lower-latin' or style == 'lower-alpha': if 1 <= val <= 26: valstr = chr(val + 96) else: log(WARN, 'Counter out of range for latin (must be 1...26)') valstr = str(val) elif style == 'upper-latin' or style == 'upper-alpha': if 1 <= val <= 26: valstr = chr(val + 64) else: log(WARN, 'Counter out of range for latin (must be 1...26)') valstr = str(val) elif style == 'decimal': valstr = str(val) else: log(WARN, u"ERROR: Counter numbering not supported for" u" list type {}. Using decimal.".format( style).encode('utf-8')) valstr = str(val) return valstr
['def', 'counter_style', '(', 'self', ',', 'val', ',', 'style', ')', ':', 'if', 'style', '==', "'decimal-leading-zero'", ':', 'if', 'val', '<', '10', ':', 'valstr', '=', '"0{}"', '.', 'format', '(', 'val', ')', 'else', ':', 'valstr', '=', 'str', '(', 'val', ')', 'elif', 'style', '==', "'lower-roman'", ':', 'valstr', '=', '_to_roman', '(', 'val', ')', '.', 'lower', '(', ')', 'elif', 'style', '==', "'upper-roman'", ':', 'valstr', '=', '_to_roman', '(', 'val', ')', 'elif', 'style', '==', "'lower-latin'", 'or', 'style', '==', "'lower-alpha'", ':', 'if', '1', '<=', 'val', '<=', '26', ':', 'valstr', '=', 'chr', '(', 'val', '+', '96', ')', 'else', ':', 'log', '(', 'WARN', ',', "'Counter out of range for latin (must be 1...26)'", ')', 'valstr', '=', 'str', '(', 'val', ')', 'elif', 'style', '==', "'upper-latin'", 'or', 'style', '==', "'upper-alpha'", ':', 'if', '1', '<=', 'val', '<=', '26', ':', 'valstr', '=', 'chr', '(', 'val', '+', '64', ')', 'else', ':', 'log', '(', 'WARN', ',', "'Counter out of range for latin (must be 1...26)'", ')', 'valstr', '=', 'str', '(', 'val', ')', 'elif', 'style', '==', "'decimal'", ':', 'valstr', '=', 'str', '(', 'val', ')', 'else', ':', 'log', '(', 'WARN', ',', 'u"ERROR: Counter numbering not supported for"', 'u" list type {}. Using decimal."', '.', 'format', '(', 'style', ')', '.', 'encode', '(', "'utf-8'", ')', ')', 'valstr', '=', 'str', '(', 'val', ')', 'return', 'valstr']
Return counter value in given style.
['Return', 'counter', 'value', 'in', 'given', 'style', '.']
train
https://github.com/openstax/cnx-easybake/blob/f8edf018fb7499f6f18af0145c326b93a737a782/cnxeasybake/oven.py#L677-L707
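Constructing a full Oven is beside the point here, so this standalone sketch reproduces the latin and leading-zero conversions that counter_style applies:

    def to_latin(val, upper=False):
        # Mirror of the lower/upper-latin branches: chr offsets 96 and 64,
        # falling back to the decimal string outside 1..26.
        base = 64 if upper else 96
        return chr(val + base) if 1 <= val <= 26 else str(val)

    print(to_latin(3))              # 'c'
    print(to_latin(3, upper=True))  # 'C'
    print('0{}'.format(7))          # '07' -- decimal-leading-zero for val < 10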
9,038
gbiggs/rtctree
rtctree/manager.py
Manager.unload_module
def unload_module(self, path): '''Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError ''' with self._mutex: if self._obj.unload_module(path) != RTC.RTC_OK: raise FailedToUnloadModuleError(path)
python
def unload_module(self, path): '''Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError ''' with self._mutex: if self._obj.unload_module(path) != RTC.RTC_OK: raise FailedToUnloadModuleError(path)
['def', 'unload_module', '(', 'self', ',', 'path', ')', ':', 'with', 'self', '.', '_mutex', ':', 'if', 'self', '.', '_obj', '.', 'unload_module', '(', 'path', ')', '!=', 'RTC', '.', 'RTC_OK', ':', 'raise', 'FailedToUnloadModuleError', '(', 'path', ')']
Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError
['Unload', 'a', 'loaded', 'shared', 'library', '.']
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/manager.py#L236-L248
9,039
clld/cdstarcat
src/cdstarcat/__main__.py
create
def create(args): """ cdstarcat create PATH Create objects in CDSTAR specified by PATH. When PATH is a file, a single object (possibly with multiple bitstreams) is created; When PATH is a directory, an object will be created for each file in the directory (recursing into subdirectories). """ with _catalog(args) as cat: for fname, created, obj in cat.create(args.args[0], {}): args.log.info('{0} -> {1} object {2.id}'.format( fname, 'new' if created else 'existing', obj))
python
def create(args): """ cdstarcat create PATH Create objects in CDSTAR specified by PATH. When PATH is a file, a single object (possibly with multiple bitstreams) is created; When PATH is a directory, an object will be created for each file in the directory (recursing into subdirectories). """ with _catalog(args) as cat: for fname, created, obj in cat.create(args.args[0], {}): args.log.info('{0} -> {1} object {2.id}'.format( fname, 'new' if created else 'existing', obj))
['def', 'create', '(', 'args', ')', ':', 'with', '_catalog', '(', 'args', ')', 'as', 'cat', ':', 'for', 'fname', ',', 'created', ',', 'obj', 'in', 'cat', '.', 'create', '(', 'args', '.', 'args', '[', '0', ']', ',', '{', '}', ')', ':', 'args', '.', 'log', '.', 'info', '(', "'{0} -> {1} object {2.id}'", '.', 'format', '(', 'fname', ',', "'new'", 'if', 'created', 'else', "'existing'", ',', 'obj', ')', ')']
cdstarcat create PATH Create objects in CDSTAR specified by PATH. When PATH is a file, a single object (possibly with multiple bitstreams) is created; When PATH is a directory, an object will be created for each file in the directory (recursing into subdirectories).
['cdstarcat', 'create', 'PATH']
train
https://github.com/clld/cdstarcat/blob/41f33f59cdde5e30835d2f3accf2d1fbe5332cab/src/cdstarcat/__main__.py#L95-L107
9,040
thomasdelaet/python-velbus
velbus/connections/serial.py
VelbusUSBConnection.feed_parser
def feed_parser(self, data): """Parse received message.""" assert isinstance(data, bytes) self.controller.feed_parser(data)
python
def feed_parser(self, data): """Parse received message.""" assert isinstance(data, bytes) self.controller.feed_parser(data)
['def', 'feed_parser', '(', 'self', ',', 'data', ')', ':', 'assert', 'isinstance', '(', 'data', ',', 'bytes', ')', 'self', '.', 'controller', '.', 'feed_parser', '(', 'data', ')']
Parse received message.
['Parse', 'received', 'message', '.']
train
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/connections/serial.py#L87-L90
9,041
Damgaard/PyImgur
pyimgur/__init__.py
Imgur.get_gallery_album
def get_gallery_album(self, id): """ Return the gallery album matching the id. Note that an album's id is different from its id as a gallery album. This makes it possible to remove an album from the gallery and set its privacy setting to secret, without compromising its secrecy. """ url = self._base_url + "/3/gallery/album/{0}".format(id) resp = self._send_request(url) return Gallery_album(resp, self)
python
def get_gallery_album(self, id): """ Return the gallery album matching the id. Note that an album's id is different from its id as a gallery album. This makes it possible to remove an album from the gallery and set its privacy setting to secret, without compromising its secrecy. """ url = self._base_url + "/3/gallery/album/{0}".format(id) resp = self._send_request(url) return Gallery_album(resp, self)
['def', 'get_gallery_album', '(', 'self', ',', 'id', ')', ':', 'url', '=', 'self', '.', '_base_url', '+', '"/3/gallery/album/{0}"', '.', 'format', '(', 'id', ')', 'resp', '=', 'self', '.', '_send_request', '(', 'url', ')', 'return', 'Gallery_album', '(', 'resp', ',', 'self', ')']
Return the gallery album matching the id. Note that an album's id is different from its id as a gallery album. This makes it possible to remove an album from the gallery and set its privacy setting to secret, without compromising its secrecy.
['Return', 'the', 'gallery', 'album', 'matching', 'the', 'id', '.']
train
https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L965-L975
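A hedged usage sketch; the client id and gallery-album id are hypothetical placeholders, and the attribute read at the end assumes Gallery_album exposes the usual album fields:

    import pyimgur

    im = pyimgur.Imgur('YOUR_CLIENT_ID')   # hypothetical credential
    album = im.get_gallery_album('lDRB2')  # hypothetical gallery-album id
    print(album.title)                     # assumption: albums carry a title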
9,042
cloudant/python-cloudant
src/cloudant/database.py
CouchDatabase.create
def create(self, throw_on_exists=False): """ Creates a database defined by the current database object, if it does not already exist and raises a CloudantException if the operation fails. If the database already exists then this method call is a no-op. :param bool throw_on_exists: Boolean flag dictating whether or not to throw a CloudantDatabaseException when attempting to create a database that already exists. :returns: The database object """ if not throw_on_exists and self.exists(): return self resp = self.r_session.put(self.database_url, params={ 'partitioned': TYPE_CONVERTERS.get(bool)(self._partitioned) }) if resp.status_code == 201 or resp.status_code == 202: return self raise CloudantDatabaseException( resp.status_code, self.database_url, resp.text )
python
def create(self, throw_on_exists=False): """ Creates a database defined by the current database object, if it does not already exist and raises a CloudantException if the operation fails. If the database already exists then this method call is a no-op. :param bool throw_on_exists: Boolean flag dictating whether or not to throw a CloudantDatabaseException when attempting to create a database that already exists. :returns: The database object """ if not throw_on_exists and self.exists(): return self resp = self.r_session.put(self.database_url, params={ 'partitioned': TYPE_CONVERTERS.get(bool)(self._partitioned) }) if resp.status_code == 201 or resp.status_code == 202: return self raise CloudantDatabaseException( resp.status_code, self.database_url, resp.text )
['def', 'create', '(', 'self', ',', 'throw_on_exists', '=', 'False', ')', ':', 'if', 'not', 'throw_on_exists', 'and', 'self', '.', 'exists', '(', ')', ':', 'return', 'self', 'resp', '=', 'self', '.', 'r_session', '.', 'put', '(', 'self', '.', 'database_url', ',', 'params', '=', '{', "'partitioned'", ':', 'TYPE_CONVERTERS', '.', 'get', '(', 'bool', ')', '(', 'self', '.', '_partitioned', ')', '}', ')', 'if', 'resp', '.', 'status_code', '==', '201', 'or', 'resp', '.', 'status_code', '==', '202', ':', 'return', 'self', 'raise', 'CloudantDatabaseException', '(', 'resp', '.', 'status_code', ',', 'self', '.', 'database_url', ',', 'resp', '.', 'text', ')']
Creates a database defined by the current database object, if it does not already exist and raises a CloudantException if the operation fails. If the database already exists then this method call is a no-op. :param bool throw_on_exists: Boolean flag dictating whether or not to throw a CloudantDatabaseException when attempting to create a database that already exists. :returns: The database object
['Creates', 'a', 'database', 'defined', 'by', 'the', 'current', 'database', 'object', 'if', 'it', 'does', 'not', 'already', 'exist', 'and', 'raises', 'a', 'CloudantException', 'if', 'the', 'operation', 'fails', '.', 'If', 'the', 'database', 'already', 'exists', 'then', 'this', 'method', 'call', 'is', 'a', 'no', '-', 'op', '.']
train
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L409-L432
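A hedged usage sketch; the credentials, URL, and database name are hypothetical, and it assumes a reachable CouchDB or Cloudant instance:

    from cloudant.client import CouchDB
    from cloudant.database import CouchDatabase

    client = CouchDB('user', 'pass', url='http://127.0.0.1:5984', connect=True)
    db = CouchDatabase(client, 'example-db')
    db.create()                       # creates, or silently no-ops if it exists
    db.create(throw_on_exists=True)   # now raises CloudantDatabaseException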
9,043
moralrecordings/mrcrowbar
mrcrowbar/utils.py
BitReader.get_bits
def get_bits( self, count ): """Get an integer containing the next [count] bits from the source.""" result = 0 for i in range( count ): if self.bits_remaining <= 0: self._fill_buffer() if self.bits_reverse: bit = (1 if (self.current_bits & (0x80 << 8*(self.bytes_to_cache-1))) else 0) self.current_bits <<= 1 self.current_bits &= 0xff else: bit = (self.current_bits & 1) self.current_bits >>= 1 self.bits_remaining -= 1 if self.output_reverse: result <<= 1 result |= bit else: result |= bit << i return result
python
def get_bits( self, count ): """Get an integer containing the next [count] bits from the source.""" result = 0 for i in range( count ): if self.bits_remaining <= 0: self._fill_buffer() if self.bits_reverse: bit = (1 if (self.current_bits & (0x80 << 8*(self.bytes_to_cache-1))) else 0) self.current_bits <<= 1 self.current_bits &= 0xff else: bit = (self.current_bits & 1) self.current_bits >>= 1 self.bits_remaining -= 1 if self.output_reverse: result <<= 1 result |= bit else: result |= bit << i return result
['def', 'get_bits', '(', 'self', ',', 'count', ')', ':', 'result', '=', '0', 'for', 'i', 'in', 'range', '(', 'count', ')', ':', 'if', 'self', '.', 'bits_remaining', '<=', '0', ':', 'self', '.', '_fill_buffer', '(', ')', 'if', 'self', '.', 'bits_reverse', ':', 'bit', '=', '(', '1', 'if', '(', 'self', '.', 'current_bits', '&', '(', '0x80', '<<', '8', '*', '(', 'self', '.', 'bytes_to_cache', '-', '1', ')', ')', ')', 'else', '0', ')', 'self', '.', 'current_bits', '<<=', '1', 'self', '.', 'current_bits', '&=', '0xff', 'else', ':', 'bit', '=', '(', 'self', '.', 'current_bits', '&', '1', ')', 'self', '.', 'current_bits', '>>=', '1', 'self', '.', 'bits_remaining', '-=', '1', 'if', 'self', '.', 'output_reverse', ':', 'result', '<<=', '1', 'result', '|=', 'bit', 'else', ':', 'result', '|=', 'bit', '<<', 'i', 'return', 'result']
Get an integer containing the next [count] bits from the source.
['Get', 'an', 'integer', 'containing', 'the', 'next', '[', 'count', ']', 'bits', 'from', 'the', 'source', '.']
train
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L545-L566
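The default path of get_bits peels bits off the low end of each byte and packs them back in read order. Since the record does not show BitReader's constructor, this closure-based sketch reproduces only the default branch (bits_reverse and output_reverse both False):

    def bit_reader(data):
        # Mirrors the bits_reverse=False, output_reverse=False branch above.
        state = {'byte': 0, 'remaining': 0, 'pos': 0}

        def get_bits(count):
            result = 0
            for i in range(count):
                if state['remaining'] <= 0:
                    state['byte'] = data[state['pos']]
                    state['pos'] += 1
                    state['remaining'] = 8
                result |= (state['byte'] & 1) << i
                state['byte'] >>= 1
                state['remaining'] -= 1
            return result

        return get_bits

    get_bits = bit_reader(b'\xb5')      # 0xb5 == 0b10110101
    print(get_bits(4), get_bits(4))     # 5 11 -> low nibble, then high nibble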
9,044
yyuu/botornado
boto/iam/connection.py
IAMConnection.update_group
def update_group(self, group_name, new_group_name=None, new_path=None): """ Updates name and/or path of the specified group. :type group_name: string :param group_name: The name of the new group :type new_group_name: string :param new_group_name: If provided, the name of the group will be changed to this name. :type new_path: string :param new_path: If provided, the path of the group will be changed to this path. """ params = {'GroupName' : group_name} if new_group_name: params['NewGroupName'] = new_group_name if new_path: params['NewPath'] = new_path return self.get_response('UpdateGroup', params)
python
def update_group(self, group_name, new_group_name=None, new_path=None): """ Updates name and/or path of the specified group. :type group_name: string :param group_name: The name of the new group :type new_group_name: string :param new_group_name: If provided, the name of the group will be changed to this name. :type new_path: string :param new_path: If provided, the path of the group will be changed to this path. """ params = {'GroupName' : group_name} if new_group_name: params['NewGroupName'] = new_group_name if new_path: params['NewPath'] = new_path return self.get_response('UpdateGroup', params)
['def', 'update_group', '(', 'self', ',', 'group_name', ',', 'new_group_name', '=', 'None', ',', 'new_path', '=', 'None', ')', ':', 'params', '=', '{', "'GroupName'", ':', 'group_name', '}', 'if', 'new_group_name', ':', 'params', '[', "'NewGroupName'", ']', '=', 'new_group_name', 'if', 'new_path', ':', 'params', '[', "'NewPath'", ']', '=', 'new_path', 'return', 'self', '.', 'get_response', '(', "'UpdateGroup'", ',', 'params', ')']
Updates name and/or path of the specified group. :type group_name: string :param group_name: The name of the new group :type new_group_name: string :param new_group_name: If provided, the name of the group will be changed to this name. :type new_path: string :param new_path: If provided, the path of the group will be changed to this path.
['Updates', 'name', 'and', '/', 'or', 'path', 'of', 'the', 'specified', 'group', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/iam/connection.py#L157-L178
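A hedged usage sketch; it assumes AWS credentials in the environment and uses hypothetical group names (this is the legacy boto 2 API, per the repository):

    import boto

    iam = boto.connect_iam()
    iam.update_group('devs', new_group_name='developers', new_path='/teams/')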
9,045
peterdemin/pip-compile-multi
pipcompilemulti/environment.py
Environment.replace_header
def replace_header(self, header_text): """Replace pip-compile header with custom text""" with open(self.outfile, 'rt') as fp: _, body = self.split_header(fp) with open(self.outfile, 'wt') as fp: fp.write(header_text) fp.writelines(body)
python
def replace_header(self, header_text): """Replace pip-compile header with custom text""" with open(self.outfile, 'rt') as fp: _, body = self.split_header(fp) with open(self.outfile, 'wt') as fp: fp.write(header_text) fp.writelines(body)
['def', 'replace_header', '(', 'self', ',', 'header_text', ')', ':', 'with', 'open', '(', 'self', '.', 'outfile', ',', "'rt'", ')', 'as', 'fp', ':', '_', ',', 'body', '=', 'self', '.', 'split_header', '(', 'fp', ')', 'with', 'open', '(', 'self', '.', 'outfile', ',', "'wt'", ')', 'as', 'fp', ':', 'fp', '.', 'write', '(', 'header_text', ')', 'fp', '.', 'writelines', '(', 'body', ')']
Replace pip-compile header with custom text
['Replace', 'pip', '-', 'compile', 'header', 'with', 'custom', 'text']
train
https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L197-L203
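The split-then-rewrite pattern above can be restated standalone; the split_header helper below is an illustrative stand-in for the class method of the same name, assuming the header is the run of leading '#' comment lines:

    def split_header(lines):
        """Split leading '#' comment lines from the pinned requirements."""
        lines = list(lines)
        header_len = 0
        for line in lines:
            if not line.startswith('#'):
                break
            header_len += 1
        return lines[:header_len], lines[header_len:]

    def replace_header(path, header_text):
        """Rewrite the file, swapping the old header for header_text."""
        with open(path, 'rt') as fp:
            _, body = split_header(fp)
        with open(path, 'wt') as fp:
            fp.write(header_text)
            fp.writelines(body)

    # replace_header('requirements.txt', '# Generated by CI -- do not edit\n')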
9,046
pjuren/pyokit
src/pyokit/io/repeatmaskerAlignments.py
repeat_masker_alignment_iterator
def repeat_masker_alignment_iterator(fn, index_friendly=True, verbose=False): """ Iterator for repeat masker alignment files; yields multiple alignment objects. Iterate over a file/stream of full repeat alignments in the repeatmasker format. Briefly, this format is as follows: each record (alignment) begins with a header line (see _rm_parse_header_line documentation for details of header format), followed by the alignment itself (example below) and finally a set of key-value meta-data pairs. The actual alignment looks like this:: chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41 ii v -- v i i v C MER5B#DNA/hAT 10 CCCCAGAGATTCTGATTTAATTGGTCTGGGGT 42 chr1 42 GACTG 47 v C MER5B#DNA/hAT 43 CACTG 48 The 'C' indicates that it's the reverse complement of the consensus. The central string gives information about matches; "-" indicates an insertion/deletion, "i" a transition (G<->A, C<->T) and "v" a transversion (all other substitutions). :param fn: filename or stream-like object to read from. :param index_friendly: if True, we will ensure the file/stream position is before the start of the record when we yield it; this requires the ability to seek within the stream though, so if iterating over a stream without that ability, you'll have to set this to false. Further, this will disable buffering for the file, to ensure file.tell() behaves correctly, so a performance hit will be incurred. :param verbose: if true, output progress messages to stderr. """ # step 1 -- build our iterator for the stream.. try: fh = open(fn) except (TypeError): fh = fn iterable = fh if index_friendly: iterable = iter(fh.readline, '') # build progress indicator, if we want one and we're able to if verbose: try: m_fn = ": " + fh.name except TypeError: m_fn = "" try: current = fh.tell() fh.seek(0, 2) total_progress = fh.tell() fh.seek(current) pind = ProgressIndicator(totalToDo=total_progress, messagePrefix="completed", messageSuffix="of processing repeat-masker " "alignment file" + m_fn) except IOError: pind = None old_fh_pos = None new_fh_pos = fh.tell() s1 = None s2 = None s1_name = None s2_name = None s1_start = None s1_end = None s2_start = None s2_end = None meta_data = None alignment_line_counter = 0 alig_l_space = 0 prev_seq_len = 0 rev_comp_match = None remaining_repeat = None remaining_genomic = None for line in iterable: if verbose and pind is not None: pind.done = fh.tell() pind.showProgress() if index_friendly: old_fh_pos = new_fh_pos new_fh_pos = fh.tell() line = line.rstrip() if line.lstrip() == "" and alignment_line_counter % 3 != 1: continue s_pres_split = re.split(r'(\s+)', line) parts = [x for x in s_pres_split if not (x.isspace() or x == "")] n = len(parts) for i in REPEATMASKER_FIELDS_TO_TRIM: if n >= i + 1: parts[i] = parts[i].strip() # decide what to do with this line -- is it a header line, part of the # alignment or a meta-data key-value line if alignment_line_counter % 3 == 1: if (REPEATMASKER_VALIDATE_MUTATIONS and not _rm_is_valid_annotation_line(line)): raise IOError("invalid mutation line: " + line) l_space = _rm_compute_leading_space(s_pres_split) - alig_l_space pad_right = prev_seq_len - (l_space + len(line.strip())) meta_data[ANNOTATION_KEY] += ((' ' * l_space) + line.strip() + (' ' * pad_right)) alignment_line_counter += 1 elif _rm_is_header_line(parts, n): if not (s1 is None and s2 is None and meta_data is None): if ANNOTATION_KEY in meta_data: meta_data[ANNOTATION_KEY] = meta_data[ANNOTATION_KEY].rstrip() if index_friendly: fh.seek(old_fh_pos) ss1 = Sequence(s1_name, s1, s1_start, s1_end, "+", remaining_genomic) s2s = "-" if rev_comp_match else "+" ss2 = Sequence(s2_name, s2, s2_start, s2_end, s2s, remaining_repeat) yield PairwiseAlignment(ss1, ss2, meta_data) if index_friendly: fh.seek(new_fh_pos) meta_data = {} s1 = "" s2 = "" s1_name, s2_name = _rm_get_names_from_header(parts) s1_start, s1_end = _rm_get_reference_coords_from_header(parts) s2_start, s2_end = _rm_get_repeat_coords_from_header(parts) rev_comp_match = _rm_is_reverse_comp_match(parts) remaining_repeat = _rm_get_remaining_repeat_from_header(parts) remaining_genomic = _rm_get_remaining_genomic_from_header(parts) _rm_parse_header_line(parts, meta_data) alignment_line_counter = 0 elif _rm_is_alignment_line(parts, s1_name, s2_name): alignment_line_counter += 1 name, seq = _rm_extract_sequence_and_name(parts, s1_name, s2_name) if name == s1_name: s1 += seq elif name == s2_name: s2 += seq alig_l_space = _rm_compute_leading_space_alig(s_pres_split, seq) prev_seq_len = len(seq) else: k, v = _rm_parse_meta_line(parts) meta_data[k] = v if index_friendly: fh.seek(old_fh_pos) ss1 = Sequence(s1_name, s1, s1_start, s1_end, "+", remaining_genomic) s2s = "-" if rev_comp_match else "+" ss2 = Sequence(s2_name, s2, s2_start, s2_end, s2s, remaining_repeat) yield PairwiseAlignment(ss1, ss2, meta_data) if index_friendly: fh.seek(new_fh_pos)
python
def repeat_masker_alignment_iterator(fn, index_friendly=True, verbose=False): """ Iterator for repeat masker alignment files; yields multiple alignment objects. Iterate over a file/stream of full repeat alignments in the repeatmasker format. Briefly, this format is as follows: each record (alignment) begins with a header line (see _rm_parse_header_line documentation for details of header format), followed by the alignment itself (example below) and finally a set of key-value meta-data pairs. The actual alignment looks like this:: chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41 ii v -- v i i v C MER5B#DNA/hAT 10 CCCCAGAGATTCTGATTTAATTGGTCTGGGGT 42 chr1 42 GACTG 47 v C MER5B#DNA/hAT 43 CACTG 48 The 'C' indicates that it's the reverse complement of the consensus. The central string gives information about matches; "-" indicates an insertion/deletion, "i" a transition (G<->A, C<->T) and "v" a transversion (all other substitutions). :param fn: filename or stream-like object to read from. :param index_friendly: if True, we will ensure the file/stream position is before the start of the record when we yield it; this requires the ability to seek within the stream though, so if iterating over a stream without that ability, you'll have to set this to false. Further, this will disable buffering for the file, to ensure file.tell() behaves correctly, so a performance hit will be incurred. :param verbose: if true, output progress messages to stderr. """ # step 1 -- build our iterator for the stream.. try: fh = open(fn) except (TypeError): fh = fn iterable = fh if index_friendly: iterable = iter(fh.readline, '') # build progress indicator, if we want one and we're able to if verbose: try: m_fn = ": " + fh.name except TypeError: m_fn = "" try: current = fh.tell() fh.seek(0, 2) total_progress = fh.tell() fh.seek(current) pind = ProgressIndicator(totalToDo=total_progress, messagePrefix="completed", messageSuffix="of processing repeat-masker " "alignment file" + m_fn) except IOError: pind = None old_fh_pos = None new_fh_pos = fh.tell() s1 = None s2 = None s1_name = None s2_name = None s1_start = None s1_end = None s2_start = None s2_end = None meta_data = None alignment_line_counter = 0 alig_l_space = 0 prev_seq_len = 0 rev_comp_match = None remaining_repeat = None remaining_genomic = None for line in iterable: if verbose and pind is not None: pind.done = fh.tell() pind.showProgress() if index_friendly: old_fh_pos = new_fh_pos new_fh_pos = fh.tell() line = line.rstrip() if line.lstrip() == "" and alignment_line_counter % 3 != 1: continue s_pres_split = re.split(r'(\s+)', line) parts = [x for x in s_pres_split if not (x.isspace() or x == "")] n = len(parts) for i in REPEATMASKER_FIELDS_TO_TRIM: if n >= i + 1: parts[i] = parts[i].strip() # decide what to do with this line -- is it a header line, part of the # alignment or a meta-data key-value line if alignment_line_counter % 3 == 1: if (REPEATMASKER_VALIDATE_MUTATIONS and not _rm_is_valid_annotation_line(line)): raise IOError("invalid mutation line: " + line) l_space = _rm_compute_leading_space(s_pres_split) - alig_l_space pad_right = prev_seq_len - (l_space + len(line.strip())) meta_data[ANNOTATION_KEY] += ((' ' * l_space) + line.strip() + (' ' * pad_right)) alignment_line_counter += 1 elif _rm_is_header_line(parts, n): if not (s1 is None and s2 is None and meta_data is None): if ANNOTATION_KEY in meta_data: meta_data[ANNOTATION_KEY] = meta_data[ANNOTATION_KEY].rstrip() if index_friendly: fh.seek(old_fh_pos) ss1 = Sequence(s1_name, s1, s1_start, s1_end, "+", remaining_genomic) s2s = "-" if rev_comp_match else "+" ss2 = Sequence(s2_name, s2, s2_start, s2_end, s2s, remaining_repeat) yield PairwiseAlignment(ss1, ss2, meta_data) if index_friendly: fh.seek(new_fh_pos) meta_data = {} s1 = "" s2 = "" s1_name, s2_name = _rm_get_names_from_header(parts) s1_start, s1_end = _rm_get_reference_coords_from_header(parts) s2_start, s2_end = _rm_get_repeat_coords_from_header(parts) rev_comp_match = _rm_is_reverse_comp_match(parts) remaining_repeat = _rm_get_remaining_repeat_from_header(parts) remaining_genomic = _rm_get_remaining_genomic_from_header(parts) _rm_parse_header_line(parts, meta_data) alignment_line_counter = 0 elif _rm_is_alignment_line(parts, s1_name, s2_name): alignment_line_counter += 1 name, seq = _rm_extract_sequence_and_name(parts, s1_name, s2_name) if name == s1_name: s1 += seq elif name == s2_name: s2 += seq alig_l_space = _rm_compute_leading_space_alig(s_pres_split, seq) prev_seq_len = len(seq) else: k, v = _rm_parse_meta_line(parts) meta_data[k] = v if index_friendly: fh.seek(old_fh_pos) ss1 = Sequence(s1_name, s1, s1_start, s1_end, "+", remaining_genomic) s2s = "-" if rev_comp_match else "+" ss2 = Sequence(s2_name, s2, s2_start, s2_end, s2s, remaining_repeat) yield PairwiseAlignment(ss1, ss2, meta_data) if index_friendly: fh.seek(new_fh_pos)
['def', 'repeat_masker_alignment_iterator', '(', 'fn', ',', 'index_friendly', '=', 'True', ',', 'verbose', '=', 'False', ')', ':', '# step 1 -- build our iterator for the stream..', 'try', ':', 'fh', '=', 'open', '(', 'fn', ')', 'except', '(', 'TypeError', ')', ':', 'fh', '=', 'fn', 'iterable', '=', 'fh', 'if', 'index_friendly', ':', 'iterable', '=', 'iter', '(', 'fh', '.', 'readline', ',', "''", ')', "# build progress indicator, if we want one and we're able to", 'if', 'verbose', ':', 'try', ':', 'm_fn', '=', '": "', '+', 'fh', '.', 'name', 'except', 'TypeError', ':', 'm_fn', '=', '""', 'try', ':', 'current', '=', 'fh', '.', 'tell', '(', ')', 'fh', '.', 'seek', '(', '0', ',', '2', ')', 'total_progress', '=', 'fh', '.', 'tell', '(', ')', 'fh', '.', 'seek', '(', 'current', ')', 'pind', '=', 'ProgressIndicator', '(', 'totalToDo', '=', 'total_progress', ',', 'messagePrefix', '=', '"completed"', ',', 'messageSuffix', '=', '"of processing repeat-masker "', '"alignment file"', '+', 'm_fn', ')', 'except', 'IOError', ':', 'pind', '=', 'None', 'old_fh_pos', '=', 'None', 'new_fh_pos', '=', 'fh', '.', 'tell', '(', ')', 's1', '=', 'None', 's2', '=', 'None', 's1_name', '=', 'None', 's2_name', '=', 'None', 's1_start', '=', 'None', 's1_end', '=', 'None', 's2_start', '=', 'None', 's2_end', '=', 'None', 'meta_data', '=', 'None', 'alignment_line_counter', '=', '0', 'alig_l_space', '=', '0', 'prev_seq_len', '=', '0', 'rev_comp_match', '=', 'None', 'remaining_repeat', '=', 'None', 'remaining_genomic', '=', 'None', 'for', 'line', 'in', 'iterable', ':', 'if', 'verbose', 'and', 'pind', 'is', 'not', 'None', ':', 'pind', '.', 'done', '=', 'fh', '.', 'tell', '(', ')', 'pind', '.', 'showProgress', '(', ')', 'if', 'index_friendly', ':', 'old_fh_pos', '=', 'new_fh_pos', 'new_fh_pos', '=', 'fh', '.', 'tell', '(', ')', 'line', '=', 'line', '.', 'rstrip', '(', ')', 'if', 'line', '.', 'lstrip', '(', ')', '==', '""', 'and', 'alignment_line_counter', '%', '3', '!=', '1', ':', 'continue', 's_pres_split', '=', 're', '.', 'split', '(', "r'(\\s+)'", ',', 'line', ')', 'parts', '=', '[', 'x', 'for', 'x', 'in', 's_pres_split', 'if', 'not', '(', 'x', '.', 'isspace', '(', ')', 'or', 'x', '==', '""', ')', ']', 'n', '=', 'len', '(', 'parts', ')', 'for', 'i', 'in', 'REPEATMASKER_FIELDS_TO_TRIM', ':', 'if', 'n', '>=', 'i', '+', '1', ':', 'parts', '[', 'i', ']', '=', 'parts', '[', 'i', ']', '.', 'strip', '(', ')', '# decide what to do with this line -- is it a header line, part of the', '# alignment or a meta-data key-value line', 'if', 'alignment_line_counter', '%', '3', '==', '1', ':', 'if', '(', 'REPEATMASKER_VALIDATE_MUTATIONS', 'and', 'not', '_rm_is_valid_annotation_line', '(', 'line', ')', ')', ':', 'raise', 'IOError', '(', '"invalid mutation line: "', '+', 'line', ')', 'l_space', '=', '_rm_compute_leading_space', '(', 's_pres_split', ')', '-', 'alig_l_space', 'pad_right', '=', 'prev_seq_len', '-', '(', 'l_space', '+', 'len', '(', 'line', '.', 'strip', '(', ')', ')', ')', 'meta_data', '[', 'ANNOTATION_KEY', ']', '+=', '(', '(', "' '", '*', 'l_space', ')', '+', 'line', '.', 'strip', '(', ')', '+', '(', "' '", '*', 'pad_right', ')', ')', 'alignment_line_counter', '+=', '1', 'elif', '_rm_is_header_line', '(', 'parts', ',', 'n', ')', ':', 'if', 'not', '(', 's1', 'is', 'None', 'and', 's2', 'is', 'None', 'and', 'meta_data', 'is', 'None', ')', ':', 'if', 'ANNOTATION_KEY', 'in', 'meta_data', ':', 'meta_data', '[', 'ANNOTATION_KEY', ']', '=', 'meta_data', '[', 'ANNOTATION_KEY', ']', '.', 'rstrip', '(', ')', 'if', 'index_friendly', ':', 'fh', '.', 'seek', '(', 'old_fh_pos', ')', 'ss1', '=', 'Sequence', '(', 's1_name', ',', 's1', ',', 's1_start', ',', 's1_end', ',', '"+"', ',', 'remaining_genomic', ')', 's2s', '=', '"-"', 'if', 'rev_comp_match', 'else', '"+"', 'ss2', '=', 'Sequence', '(', 's2_name', ',', 's2', ',', 's2_start', ',', 's2_end', ',', 's2s', ',', 'remaining_repeat', ')', 'yield', 'PairwiseAlignment', '(', 'ss1', ',', 'ss2', ',', 'meta_data', ')', 'if', 'index_friendly', ':', 'fh', '.', 'seek', '(', 'new_fh_pos', ')', 'meta_data', '=', '{', '}', 's1', '=', '""', 's2', '=', '""', 's1_name', ',', 's2_name', '=', '_rm_get_names_from_header', '(', 'parts', ')', 's1_start', ',', 's1_end', '=', '_rm_get_reference_coords_from_header', '(', 'parts', ')', 's2_start', ',', 's2_end', '=', '_rm_get_repeat_coords_from_header', '(', 'parts', ')', 'rev_comp_match', '=', '_rm_is_reverse_comp_match', '(', 'parts', ')', 'remaining_repeat', '=', '_rm_get_remaining_repeat_from_header', '(', 'parts', ')', 'remaining_genomic', '=', '_rm_get_remaining_genomic_from_header', '(', 'parts', ')', '_rm_parse_header_line', '(', 'parts', ',', 'meta_data', ')', 'alignment_line_counter', '=', '0', 'elif', '_rm_is_alignment_line', '(', 'parts', ',', 's1_name', ',', 's2_name', ')', ':', 'alignment_line_counter', '+=', '1', 'name', ',', 'seq', '=', '_rm_extract_sequence_and_name', '(', 'parts', ',', 's1_name', ',', 's2_name', ')', 'if', 'name', '==', 's1_name', ':', 's1', '+=', 'seq', 'elif', 'name', '==', 's2_name', ':', 's2', '+=', 'seq', 'alig_l_space', '=', '_rm_compute_leading_space_alig', '(', 's_pres_split', ',', 'seq', ')', 'prev_seq_len', '=', 'len', '(', 'seq', ')', 'else', ':', 'k', ',', 'v', '=', '_rm_parse_meta_line', '(', 'parts', ')', 'meta_data', '[', 'k', ']', '=', 'v', 'if', 'index_friendly', ':', 'fh', '.', 'seek', '(', 'old_fh_pos', ')', 'ss1', '=', 'Sequence', '(', 's1_name', ',', 's1', ',', 's1_start', ',', 's1_end', ',', '"+"', ',', 'remaining_genomic', ')', 's2s', '=', '"-"', 'if', 'rev_comp_match', 'else', '"+"', 'ss2', '=', 'Sequence', '(', 's2_name', ',', 's2', ',', 's2_start', ',', 's2_end', ',', 's2s', ',', 'remaining_repeat', ')', 'yield', 'PairwiseAlignment', '(', 'ss1', ',', 'ss2', ',', 'meta_data', ')', 'if', 'index_friendly', ':', 'fh', '.', 'seek', '(', 'new_fh_pos', ')']
Iterator for repeat masker alignment files; yields multiple alignment objects. Iterate over a file/stream of full repeat alignments in the repeatmasker format. Briefly, this format is as follows: each record (alignment) begins with a header line (see _rm_parse_header_line documentation for details of header format), followed by the alignment itself (example below) and finally a set of key-value meta-data pairs. The actual alignment looks like this:: chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41 ii v -- v i i v C MER5B#DNA/hAT 10 CCCCAGAGATTCTGATTTAATTGGTCTGGGGT 42 chr1 42 GACTG 47 v C MER5B#DNA/hAT 43 CACTG 48 The 'C' indicates that it's the reverse complement of the consensus. The central string gives information about matches; "-" indicates an insertion/deletion, "i" a transition (G<->A, C<->T) and "v" a transversion (all other substitutions). :param fn: filename or stream-like object to read from. :param index_friendly: if True, we will ensure the file/stream position is before the start of the record when we yield it; this requires the ability to seek within the stream though, so if iterating over a stream without that ability, you'll have to set this to false. Further, this will disable buffering for the file, to ensure file.tell() behaves correctly, so a performance hit will be incurred. :param verbose: if true, output progress messages to stderr.
['Iterator', 'for', 'repeat', 'masker', 'alignment', 'files', ';', 'yields', 'multiple', 'alignment', 'objects', '.']
train
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/repeatmaskerAlignments.py#L600-L756
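A usage sketch for the iterator above, assuming the import path mirrors src/pyokit/io/repeatmaskerAlignments.py in the repository (the filename below is made up):

    from pyokit.io.repeatmaskerAlignments import repeat_masker_alignment_iterator

    # index_friendly rewinds the handle before each yield, which disables
    # buffering but keeps file offsets usable for building an index.
    count = 0
    for alignment in repeat_masker_alignment_iterator('hg38.fa.align', verbose=True):
        count += 1
    print(count, 'alignments parsed')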
9,047
softlayer/softlayer-python
SoftLayer/CLI/virt/capacity/list.py
cli
def cli(env): """List Reserved Capacity groups.""" manager = CapacityManager(env.client) result = manager.list() table = formatting.Table( ["ID", "Name", "Capacity", "Flavor", "Location", "Created"], title="Reserved Capacity" ) for r_c in result: occupied_string = "#" * int(r_c.get('occupiedInstanceCount', 0)) available_string = "-" * int(r_c.get('availableInstanceCount', 0)) try: flavor = r_c['instances'][0]['billingItem']['description'] # cost = float(r_c['instances'][0]['billingItem']['hourlyRecurringFee']) except KeyError: flavor = "Unknown Billing Item" location = r_c['backendRouter']['hostname'] capacity = "%s%s" % (occupied_string, available_string) table.add_row([r_c['id'], r_c['name'], capacity, flavor, location, r_c['createDate']]) env.fout(table)
python
def cli(env): """List Reserved Capacity groups.""" manager = CapacityManager(env.client) result = manager.list() table = formatting.Table( ["ID", "Name", "Capacity", "Flavor", "Location", "Created"], title="Reserved Capacity" ) for r_c in result: occupied_string = "#" * int(r_c.get('occupiedInstanceCount', 0)) available_string = "-" * int(r_c.get('availableInstanceCount', 0)) try: flavor = r_c['instances'][0]['billingItem']['description'] # cost = float(r_c['instances'][0]['billingItem']['hourlyRecurringFee']) except KeyError: flavor = "Unknown Billing Item" location = r_c['backendRouter']['hostname'] capacity = "%s%s" % (occupied_string, available_string) table.add_row([r_c['id'], r_c['name'], capacity, flavor, location, r_c['createDate']]) env.fout(table)
['def', 'cli', '(', 'env', ')', ':', 'manager', '=', 'CapacityManager', '(', 'env', '.', 'client', ')', 'result', '=', 'manager', '.', 'list', '(', ')', 'table', '=', 'formatting', '.', 'Table', '(', '[', '"ID"', ',', '"Name"', ',', '"Capacity"', ',', '"Flavor"', ',', '"Location"', ',', '"Created"', ']', ',', 'title', '=', '"Reserved Capacity"', ')', 'for', 'r_c', 'in', 'result', ':', 'occupied_string', '=', '"#"', '*', 'int', '(', 'r_c', '.', 'get', '(', "'occupiedInstanceCount'", ',', '0', ')', ')', 'available_string', '=', '"-"', '*', 'int', '(', 'r_c', '.', 'get', '(', "'availableInstanceCount'", ',', '0', ')', ')', 'try', ':', 'flavor', '=', 'r_c', '[', "'instances'", ']', '[', '0', ']', '[', "'billingItem'", ']', '[', "'description'", ']', "# cost = float(r_c['instances'][0]['billingItem']['hourlyRecurringFee'])", 'except', 'KeyError', ':', 'flavor', '=', '"Unknown Billing Item"', 'location', '=', 'r_c', '[', "'backendRouter'", ']', '[', "'hostname'", ']', 'capacity', '=', '"%s%s"', '%', '(', 'occupied_string', ',', 'available_string', ')', 'table', '.', 'add_row', '(', '[', 'r_c', '[', "'id'", ']', ',', 'r_c', '[', "'name'", ']', ',', 'capacity', ',', 'flavor', ',', 'location', ',', 'r_c', '[', "'createDate'", ']', ']', ')', 'env', '.', 'fout', '(', 'table', ')']
List Reserved Capacity groups.
['List', 'Reserved', 'Capacity', 'groups', '.']
train
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/virt/capacity/list.py#L12-L32
9,048
bunq/sdk_python
bunq/sdk/json/adapters.py
AnchoredObjectModelAdapter._get_object_class
def _get_object_class(cls, class_name): """ :type class_name: str :rtype: core.BunqModel """ class_name = class_name.lstrip(cls.__STRING_FORMAT_UNDERSCORE) if class_name in cls._override_field_map: class_name = cls._override_field_map[class_name] try: return getattr(endpoint, class_name) except AttributeError: pass try: return getattr(object_, class_name) except AttributeError: pass raise BunqException(cls._ERROR_MODEL_NOT_FOUND.format(class_name))
python
def _get_object_class(cls, class_name): """ :type class_name: str :rtype: core.BunqModel """ class_name = class_name.lstrip(cls.__STRING_FORMAT_UNDERSCORE) if class_name in cls._override_field_map: class_name = cls._override_field_map[class_name] try: return getattr(endpoint, class_name) except AttributeError: pass try: return getattr(object_, class_name) except AttributeError: pass raise BunqException(cls._ERROR_MODEL_NOT_FOUND.format(class_name))
['def', '_get_object_class', '(', 'cls', ',', 'class_name', ')', ':', 'class_name', '=', 'class_name', '.', 'lstrip', '(', 'cls', '.', '__STRING_FORMAT_UNDERSCORE', ')', 'if', 'class_name', 'in', 'cls', '.', '_override_field_map', ':', 'class_name', '=', 'cls', '.', '_override_field_map', '[', 'class_name', ']', 'try', ':', 'return', 'getattr', '(', 'endpoint', ',', 'class_name', ')', 'except', 'AttributeError', ':', 'pass', 'try', ':', 'return', 'getattr', '(', 'object_', ',', 'class_name', ')', 'except', 'AttributeError', ':', 'pass', 'raise', 'BunqException', '(', 'cls', '.', '_ERROR_MODEL_NOT_FOUND', '.', 'format', '(', 'class_name', ')', ')']
:type class_name: str :rtype: core.BunqModel
[':', 'type', 'class_name', ':', 'str', ':', 'rtype', ':', 'core', '.', 'BunqModel']
train
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/json/adapters.py#L55-L76
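The lookup pattern in _get_object_class, trying one class name against several modules and failing loudly, generalizes to a small standalone helper (the module paths in the trailing comment are assumptions about the bunq SDK layout; everything else is illustrative):

    import importlib

    def resolve_class(class_name, module_names, overrides=None):
        """Return the first attribute named class_name found in module_names."""
        class_name = class_name.lstrip('_')
        class_name = (overrides or {}).get(class_name, class_name)
        for module_name in module_names:
            module = importlib.import_module(module_name)
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
        raise LookupError('model not found: {}'.format(class_name))

    # e.g. resolve_class('Payment', ['bunq.sdk.model.generated.endpoint',
    #                                'bunq.sdk.model.generated.object_'])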
9,049
globocom/GloboNetworkAPI-client-python
networkapiclient/Ip.py
Ip.get_available_ip6_for_vip
def get_available_ip6_for_vip(self, id_evip, name): """ Get and save a available IP in the network ipv6 for vip request :param id_evip: Vip environment identifier. Integer value and greater than zero. :param name: Ip description :return: Dictionary with the following structure: :: {'ip': {'bloco1':<bloco1>, 'bloco2':<bloco2>, 'bloco3':<bloco3>, 'bloco4':<bloco4>, 'bloco5':<bloco5>, 'bloco6':<bloco6>, 'bloco7':<bloco7>, 'bloco8':<bloco8>, 'id':<id>, 'networkipv6':<networkipv6>, 'description':<description>}} :raise IpNotAvailableError: Network dont have available IP for vip environment. :raise EnvironmentVipNotFoundError: Vip environment not registered. :raise UserNotAuthorizedError: User dont have permission to perform operation. :raise InvalidParameterError: Vip environment identifier is none or invalid. :raise XMLError: Networkapi failed to generate the XML response. :raise DataBaseError: Networkapi failed to access the database. """ if not is_valid_int_param(id_evip): raise InvalidParameterError( u'Vip environment identifier is invalid or was not informed.') url = 'ip/availableip6/vip/' + str(id_evip) + "/" ip_map = dict() ip_map['id_evip'] = id_evip ip_map['name'] = name code, xml = self.submit({'ip_map': ip_map}, 'POST', url) return self.response(code, xml)
python
def get_available_ip6_for_vip(self, id_evip, name): """ Get and save a available IP in the network ipv6 for vip request :param id_evip: Vip environment identifier. Integer value and greater than zero. :param name: Ip description :return: Dictionary with the following structure: :: {'ip': {'bloco1':<bloco1>, 'bloco2':<bloco2>, 'bloco3':<bloco3>, 'bloco4':<bloco4>, 'bloco5':<bloco5>, 'bloco6':<bloco6>, 'bloco7':<bloco7>, 'bloco8':<bloco8>, 'id':<id>, 'networkipv6':<networkipv6>, 'description':<description>}} :raise IpNotAvailableError: Network dont have available IP for vip environment. :raise EnvironmentVipNotFoundError: Vip environment not registered. :raise UserNotAuthorizedError: User dont have permission to perform operation. :raise InvalidParameterError: Vip environment identifier is none or invalid. :raise XMLError: Networkapi failed to generate the XML response. :raise DataBaseError: Networkapi failed to access the database. """ if not is_valid_int_param(id_evip): raise InvalidParameterError( u'Vip environment identifier is invalid or was not informed.') url = 'ip/availableip6/vip/' + str(id_evip) + "/" ip_map = dict() ip_map['id_evip'] = id_evip ip_map['name'] = name code, xml = self.submit({'ip_map': ip_map}, 'POST', url) return self.response(code, xml)
['def', 'get_available_ip6_for_vip', '(', 'self', ',', 'id_evip', ',', 'name', ')', ':', 'if', 'not', 'is_valid_int_param', '(', 'id_evip', ')', ':', 'raise', 'InvalidParameterError', '(', "u'Vip environment identifier is invalid or was not informed.'", ')', 'url', '=', "'ip/availableip6/vip/'", '+', 'str', '(', 'id_evip', ')', '+', '"/"', 'ip_map', '=', 'dict', '(', ')', 'ip_map', '[', "'id_evip'", ']', '=', 'id_evip', 'ip_map', '[', "'name'", ']', '=', 'name', 'code', ',', 'xml', '=', 'self', '.', 'submit', '(', '{', "'ip_map'", ':', 'ip_map', '}', ',', "'POST'", ',', 'url', ')', 'return', 'self', '.', 'response', '(', 'code', ',', 'xml', ')']
Get and save an available IP in the ipv6 network for a vip request :param id_evip: Vip environment identifier. Integer value and greater than zero. :param name: Ip description :return: Dictionary with the following structure: :: {'ip': {'bloco1':<bloco1>, 'bloco2':<bloco2>, 'bloco3':<bloco3>, 'bloco4':<bloco4>, 'bloco5':<bloco5>, 'bloco6':<bloco6>, 'bloco7':<bloco7>, 'bloco8':<bloco8>, 'id':<id>, 'networkipv6':<networkipv6>, 'description':<description>}} :raise IpNotAvailableError: Network doesn't have an available IP for the vip environment. :raise EnvironmentVipNotFoundError: Vip environment not registered. :raise UserNotAuthorizedError: User doesn't have permission to perform the operation. :raise InvalidParameterError: Vip environment identifier is none or invalid. :raise XMLError: Networkapi failed to generate the XML response. :raise DataBaseError: Networkapi failed to access the database.
['Get', 'and', 'save', 'an', 'available', 'IP', 'in', 'the', 'ipv6', 'network', 'for', 'a', 'vip', 'request']
train
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ip.py#L306-L350
9,050
Pegase745/sqlalchemy-datatables
examples/pyramid_tut/pyramid_tut/views.py
data
def data(request): """Return server side data.""" columns = [ ColumnDT(User.id), ColumnDT(User.name), ColumnDT(Address.description), ColumnDT(func.strftime("%d-%m-%Y", User.birthday)), ColumnDT(User.age) ] query = DBSession.query().select_from(User).join(Address).filter( Address.id > 4) rowTable = DataTables(request.GET, query, columns) return rowTable.output_result()
python
def data(request): """Return server side data.""" columns = [ ColumnDT(User.id), ColumnDT(User.name), ColumnDT(Address.description), ColumnDT(func.strftime("%d-%m-%Y", User.birthday)), ColumnDT(User.age) ] query = DBSession.query().select_from(User).join(Address).filter( Address.id > 4) rowTable = DataTables(request.GET, query, columns) return rowTable.output_result()
['def', 'data', '(', 'request', ')', ':', 'columns', '=', '[', 'ColumnDT', '(', 'User', '.', 'id', ')', ',', 'ColumnDT', '(', 'User', '.', 'name', ')', ',', 'ColumnDT', '(', 'Address', '.', 'description', ')', ',', 'ColumnDT', '(', 'func', '.', 'strftime', '(', '"%d-%m-%Y"', ',', 'User', '.', 'birthday', ')', ')', ',', 'ColumnDT', '(', 'User', '.', 'age', ')', ']', 'query', '=', 'DBSession', '.', 'query', '(', ')', '.', 'select_from', '(', 'User', ')', '.', 'join', '(', 'Address', ')', '.', 'filter', '(', 'Address', '.', 'id', '>', '4', ')', 'rowTable', '=', 'DataTables', '(', 'request', '.', 'GET', ',', 'query', ',', 'columns', ')', 'return', 'rowTable', '.', 'output_result', '(', ')']
Return server side data.
['Return', 'server', 'side', 'data', '.']
train
https://github.com/Pegase745/sqlalchemy-datatables/blob/049ab5f98f20ad37926fe86d5528da0c91cd462d/examples/pyramid_tut/pyramid_tut/views.py#L64-L79
9,051
ssalentin/plip
plip/modules/detection.py
water_bridges
def water_bridges(bs_hba, lig_hba, bs_hbd, lig_hbd, water): """Find water-bridged hydrogen bonds between ligand and protein. For now only considers bridges of first degree.""" data = namedtuple('waterbridge', 'a a_orig_idx atype d d_orig_idx dtype h water water_orig_idx distance_aw ' 'distance_dw d_angle w_angle type resnr restype reschain resnr_l restype_l reschain_l protisdon') pairings = [] # First find all acceptor-water pairs with distance within d # and all donor-water pairs with distance within d and angle greater theta lig_aw, prot_aw, lig_dw, prot_hw = [], [], [], [] for w in water: for acc1 in lig_hba: dist = euclidean3d(acc1.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: lig_aw.append((acc1, w, dist)) for acc2 in bs_hba: dist = euclidean3d(acc2.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: prot_aw.append((acc2, w, dist)) for don1 in lig_hbd: dist = euclidean3d(don1.d.coords, w.oxy.coords) d_angle = vecangle(vector(don1.h.coords, don1.d.coords), vector(don1.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: lig_dw.append((don1, w, dist, d_angle)) for don2 in bs_hbd: dist = euclidean3d(don2.d.coords, w.oxy.coords) d_angle = vecangle(vector(don2.h.coords, don2.d.coords), vector(don2.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: prot_hw.append((don2, w, dist, d_angle)) for l, p in itertools.product(lig_aw, prot_hw): acc, wl, distance_aw = l don, wd, distance_dw, d_angle = p if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(don.d), whichchain(don.d), whichrestype(don.d) resnr_l, reschain_l, restype_l = whichresnumber(acc.a_orig_atom), whichchain( acc.a_orig_atom), whichrestype(acc.a_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protisdon=True) pairings.append(contact) for p, l in itertools.product(prot_aw, lig_dw): acc, wl, distance_aw = p don, wd, distance_dw, d_angle = l if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(acc.a), whichchain(acc.a), whichrestype(acc.a) resnr_l, reschain_l, restype_l = whichresnumber(don.d_orig_atom), whichchain( don.d_orig_atom), whichrestype(don.d_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, reschain_l=reschain_l, resnr_l=resnr_l, protisdon=False) pairings.append(contact) return filter_contacts(pairings)
python
def water_bridges(bs_hba, lig_hba, bs_hbd, lig_hbd, water): """Find water-bridged hydrogen bonds between ligand and protein. For now only considers bridges of first degree.""" data = namedtuple('waterbridge', 'a a_orig_idx atype d d_orig_idx dtype h water water_orig_idx distance_aw ' 'distance_dw d_angle w_angle type resnr restype reschain resnr_l restype_l reschain_l protisdon') pairings = [] # First find all acceptor-water pairs with distance within d # and all donor-water pairs with distance within d and angle greater theta lig_aw, prot_aw, lig_dw, prot_hw = [], [], [], [] for w in water: for acc1 in lig_hba: dist = euclidean3d(acc1.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: lig_aw.append((acc1, w, dist)) for acc2 in bs_hba: dist = euclidean3d(acc2.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: prot_aw.append((acc2, w, dist)) for don1 in lig_hbd: dist = euclidean3d(don1.d.coords, w.oxy.coords) d_angle = vecangle(vector(don1.h.coords, don1.d.coords), vector(don1.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: lig_dw.append((don1, w, dist, d_angle)) for don2 in bs_hbd: dist = euclidean3d(don2.d.coords, w.oxy.coords) d_angle = vecangle(vector(don2.h.coords, don2.d.coords), vector(don2.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: prot_hw.append((don2, w, dist, d_angle)) for l, p in itertools.product(lig_aw, prot_hw): acc, wl, distance_aw = l don, wd, distance_dw, d_angle = p if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(don.d), whichchain(don.d), whichrestype(don.d) resnr_l, reschain_l, restype_l = whichresnumber(acc.a_orig_atom), whichchain( acc.a_orig_atom), whichrestype(acc.a_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protisdon=True) pairings.append(contact) for p, l in itertools.product(prot_aw, lig_dw): acc, wl, distance_aw = p don, wd, distance_dw, d_angle = l if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(acc.a), whichchain(acc.a), whichrestype(acc.a) resnr_l, reschain_l, restype_l = whichresnumber(don.d_orig_atom), whichchain( don.d_orig_atom), whichrestype(don.d_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, reschain_l=reschain_l, resnr_l=resnr_l, protisdon=False) pairings.append(contact) return filter_contacts(pairings)
['def', 'water_bridges', '(', 'bs_hba', ',', 'lig_hba', ',', 'bs_hbd', ',', 'lig_hbd', ',', 'water', ')', ':', 'data', '=', 'namedtuple', '(', "'waterbridge'", ',', "'a a_orig_idx atype d d_orig_idx dtype h water water_orig_idx distance_aw '", "'distance_dw d_angle w_angle type resnr restype reschain resnr_l restype_l reschain_l protisdon'", ')', 'pairings', '=', '[', ']', '# First find all acceptor-water pairs with distance within d', '# and all donor-water pairs with distance within d and angle greater theta', 'lig_aw', ',', 'prot_aw', ',', 'lig_dw', ',', 'prot_hw', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'for', 'w', 'in', 'water', ':', 'for', 'acc1', 'in', 'lig_hba', ':', 'dist', '=', 'euclidean3d', '(', 'acc1', '.', 'a', '.', 'coords', ',', 'w', '.', 'oxy', '.', 'coords', ')', 'if', 'config', '.', 'WATER_BRIDGE_MINDIST', '<=', 'dist', '<=', 'config', '.', 'WATER_BRIDGE_MAXDIST', ':', 'lig_aw', '.', 'append', '(', '(', 'acc1', ',', 'w', ',', 'dist', ')', ')', 'for', 'acc2', 'in', 'bs_hba', ':', 'dist', '=', 'euclidean3d', '(', 'acc2', '.', 'a', '.', 'coords', ',', 'w', '.', 'oxy', '.', 'coords', ')', 'if', 'config', '.', 'WATER_BRIDGE_MINDIST', '<=', 'dist', '<=', 'config', '.', 'WATER_BRIDGE_MAXDIST', ':', 'prot_aw', '.', 'append', '(', '(', 'acc2', ',', 'w', ',', 'dist', ')', ')', 'for', 'don1', 'in', 'lig_hbd', ':', 'dist', '=', 'euclidean3d', '(', 'don1', '.', 'd', '.', 'coords', ',', 'w', '.', 'oxy', '.', 'coords', ')', 'd_angle', '=', 'vecangle', '(', 'vector', '(', 'don1', '.', 'h', '.', 'coords', ',', 'don1', '.', 'd', '.', 'coords', ')', ',', 'vector', '(', 'don1', '.', 'h', '.', 'coords', ',', 'w', '.', 'oxy', '.', 'coords', ')', ')', 'if', 'config', '.', 'WATER_BRIDGE_MINDIST', '<=', 'dist', '<=', 'config', '.', 'WATER_BRIDGE_MAXDIST', 'and', 'd_angle', '>', 'config', '.', 'WATER_BRIDGE_THETA_MIN', ':', 'lig_dw', '.', 'append', '(', '(', 'don1', ',', 'w', ',', 'dist', ',', 'd_angle', ')', ')', 'for', 'don2', 'in', 'bs_hbd', ':', 'dist', '=', 'euclidean3d', '(', 'don2', '.', 'd', '.', 'coords', ',', 'w', '.', 'oxy', '.', 'coords', ')', 'd_angle', '=', 'vecangle', '(', 'vector', '(', 'don2', '.', 'h', '.', 'coords', ',', 'don2', '.', 'd', '.', 'coords', ')', ',', 'vector', '(', 'don2', '.', 'h', '.', 'coords', ',', 'w', '.', 'oxy', '.', 'coords', ')', ')', 'if', 'config', '.', 'WATER_BRIDGE_MINDIST', '<=', 'dist', '<=', 'config', '.', 'WATER_BRIDGE_MAXDIST', 'and', 'd_angle', '>', 'config', '.', 'WATER_BRIDGE_THETA_MIN', ':', 'prot_hw', '.', 'append', '(', '(', 'don2', ',', 'w', ',', 'dist', ',', 'd_angle', ')', ')', 'for', 'l', ',', 'p', 'in', 'itertools', '.', 'product', '(', 'lig_aw', ',', 'prot_hw', ')', ':', 'acc', ',', 'wl', ',', 'distance_aw', '=', 'l', 'don', ',', 'wd', ',', 'distance_dw', ',', 'd_angle', '=', 'p', 'if', 'not', 'wl', '.', 'oxy', '==', 'wd', '.', 'oxy', ':', 'continue', '# Same water molecule and angle within omega', 'w_angle', '=', 'vecangle', '(', 'vector', '(', 'acc', '.', 'a', '.', 'coords', ',', 'wl', '.', 'oxy', '.', 'coords', ')', ',', 'vector', '(', 'wl', '.', 'oxy', '.', 'coords', ',', 'don', '.', 'h', '.', 'coords', ')', ')', 'if', 'not', 'config', '.', 'WATER_BRIDGE_OMEGA_MIN', '<', 'w_angle', '<', 'config', '.', 'WATER_BRIDGE_OMEGA_MAX', ':', 'continue', 'resnr', ',', 'reschain', ',', 'restype', '=', 'whichresnumber', '(', 'don', '.', 'd', ')', ',', 'whichchain', '(', 'don', '.', 'd', ')', ',', 'whichrestype', '(', 'don', '.', 'd', ')', 'resnr_l', ',', 'reschain_l', ',', 'restype_l', '=', 'whichresnumber', '(', 'acc', '.', 'a_orig_atom', ')', ',', 'whichchain', '(', 'acc', '.', 'a_orig_atom', ')', ',', 'whichrestype', '(', 'acc', '.', 'a_orig_atom', ')', 'contact', '=', 'data', '(', 'a', '=', 'acc', '.', 'a', ',', 'a_orig_idx', '=', 'acc', '.', 'a_orig_idx', ',', 'atype', '=', 'acc', '.', 'a', '.', 'type', ',', 'd', '=', 'don', '.', 'd', ',', 'd_orig_idx', '=', 'don', '.', 'd_orig_idx', ',', 'dtype', '=', 'don', '.', 'd', '.', 'type', ',', 'h', '=', 'don', '.', 'h', ',', 'water', '=', 'wl', '.', 'oxy', ',', 'water_orig_idx', '=', 'wl', '.', 'oxy_orig_idx', ',', 'distance_aw', '=', 'distance_aw', ',', 'distance_dw', '=', 'distance_dw', ',', 'd_angle', '=', 'd_angle', ',', 'w_angle', '=', 'w_angle', ',', 'type', '=', "'first_deg'", ',', 'resnr', '=', 'resnr', ',', 'restype', '=', 'restype', ',', 'reschain', '=', 'reschain', ',', 'restype_l', '=', 'restype_l', ',', 'resnr_l', '=', 'resnr_l', ',', 'reschain_l', '=', 'reschain_l', ',', 'protisdon', '=', 'True', ')', 'pairings', '.', 'append', '(', 'contact', ')', 'for', 'p', ',', 'l', 'in', 'itertools', '.', 'product', '(', 'prot_aw', ',', 'lig_dw', ')', ':', 'acc', ',', 'wl', ',', 'distance_aw', '=', 'p', 'don', ',', 'wd', ',', 'distance_dw', ',', 'd_angle', '=', 'l', 'if', 'not', 'wl', '.', 'oxy', '==', 'wd', '.', 'oxy', ':', 'continue', '# Same water molecule and angle within omega', 'w_angle', '=', 'vecangle', '(', 'vector', '(', 'acc', '.', 'a', '.', 'coords', ',', 'wl', '.', 'oxy', '.', 'coords', ')', ',', 'vector', '(', 'wl', '.', 'oxy', '.', 'coords', ',', 'don', '.', 'h', '.', 'coords', ')', ')', 'if', 'not', 'config', '.', 'WATER_BRIDGE_OMEGA_MIN', '<', 'w_angle', '<', 'config', '.', 'WATER_BRIDGE_OMEGA_MAX', ':', 'continue', 'resnr', ',', 'reschain', ',', 'restype', '=', 'whichresnumber', '(', 'acc', '.', 'a', ')', ',', 'whichchain', '(', 'acc', '.', 'a', ')', ',', 'whichrestype', '(', 'acc', '.', 'a', ')', 'resnr_l', ',', 'reschain_l', ',', 'restype_l', '=', 'whichresnumber', '(', 'don', '.', 'd_orig_atom', ')', ',', 'whichchain', '(', 'don', '.', 'd_orig_atom', ')', ',', 'whichrestype', '(', 'don', '.', 'd_orig_atom', ')', 'contact', '=', 'data', '(', 'a', '=', 'acc', '.', 'a', ',', 'a_orig_idx', '=', 'acc', '.', 'a_orig_idx', ',', 'atype', '=', 'acc', '.', 'a', '.', 'type', ',', 'd', '=', 'don', '.', 'd', ',', 'd_orig_idx', '=', 'don', '.', 'd_orig_idx', ',', 'dtype', '=', 'don', '.', 'd', '.', 'type', ',', 'h', '=', 'don', '.', 'h', ',', 'water', '=', 'wl', '.', 'oxy', ',', 'water_orig_idx', '=', 'wl', '.', 'oxy_orig_idx', ',', 'distance_aw', '=', 'distance_aw', ',', 'distance_dw', '=', 'distance_dw', ',', 'd_angle', '=', 'd_angle', ',', 'w_angle', '=', 'w_angle', ',', 'type', '=', "'first_deg'", ',', 'resnr', '=', 'resnr', ',', 'restype', '=', 'restype', ',', 'reschain', '=', 'reschain', ',', 'restype_l', '=', 'restype_l', ',', 'reschain_l', '=', 'reschain_l', ',', 'resnr_l', '=', 'resnr_l', ',', 'protisdon', '=', 'False', ')', 'pairings', '.', 'append', '(', 'contact', ')', 'return', 'filter_contacts', '(', 'pairings', ')']
Find water-bridged hydrogen bonds between ligand and protein. For now only considers bridges of first degree.
['Find', 'water', '-', 'bridged', 'hydrogen', 'bonds', 'between', 'ligand', 'and', 'protein', '.', 'For', 'now', 'only', 'considers', 'bridges', 'of', 'first', 'degree', '.']
train
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L263-L330
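The per-pair geometry in water_bridges reduces to a distance window plus angle thresholds; a self-contained numpy sketch of just the donor-to-water test (the threshold values below are placeholders, not necessarily PLIP's config defaults):

    import numpy as np

    def vecangle(v1, v2):
        """Angle between two 3D vectors, in degrees."""
        cosang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
        return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))

    def donor_water_ok(d, h, w, mindist=2.5, maxdist=4.0, theta_min=100.0):
        """D..W distance window plus the D-H..W angle threshold."""
        d, h, w = map(np.asarray, (d, h, w))
        dist = np.linalg.norm(d - w)
        d_angle = vecangle(d - h, w - h)   # angle at the hydrogen
        return mindist <= dist <= maxdist and d_angle > theta_min

    print(donor_water_ok([0, 0, 0], [0, 0, 1], [0, 0.5, 2.8]))   # True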
9,052
cds-astro/mocpy
mocpy/interval_set.py
IntervalSet.merge
def merge(a_intervals, b_intervals, op): """ Merge two lists of intervals according to the boolean function op ``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals). This operation keeps the resulting interval set consistent. Parameters ---------- a_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as a N x 2 numpy array b_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as a N x 2 numpy array op : `function` Lambda function taking two params and returning the result of the operation between these two params. Exemple : lambda in_a, in_b: in_a and in_b describes the intersection of ``a_intervals`` and ``b_intervals`` whereas lambda in_a, in_b: in_a or in_b describes the union of ``a_intervals`` and ``b_intervals``. Returns ------- array : `numpy.ndarray` a N x 2 numpy containing intervals resulting from the op between ``a_intervals`` and ``b_intervals``. """ a_endpoints = a_intervals.flatten().tolist() b_endpoints = b_intervals.flatten().tolist() sentinel = max(a_endpoints[-1], b_endpoints[-1]) + 1 a_endpoints += [sentinel] b_endpoints += [sentinel] a_index = 0 b_index = 0 res = [] scan = min(a_endpoints[0], b_endpoints[0]) while scan < sentinel: in_a = not ((scan < a_endpoints[a_index]) ^ (a_index % 2)) in_b = not ((scan < b_endpoints[b_index]) ^ (b_index % 2)) in_res = op(in_a, in_b) if in_res ^ (len(res) % 2): res += [scan] if scan == a_endpoints[a_index]: a_index += 1 if scan == b_endpoints[b_index]: b_index += 1 scan = min(a_endpoints[a_index], b_endpoints[b_index]) return np.asarray(res).reshape((-1, 2))
python
def merge(a_intervals, b_intervals, op): """ Merge two lists of intervals according to the boolean function op ``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals). This operation keeps the resulting interval set consistent. Parameters ---------- a_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as a N x 2 numpy array b_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as a N x 2 numpy array op : `function` Lambda function taking two params and returning the result of the operation between these two params. Exemple : lambda in_a, in_b: in_a and in_b describes the intersection of ``a_intervals`` and ``b_intervals`` whereas lambda in_a, in_b: in_a or in_b describes the union of ``a_intervals`` and ``b_intervals``. Returns ------- array : `numpy.ndarray` a N x 2 numpy containing intervals resulting from the op between ``a_intervals`` and ``b_intervals``. """ a_endpoints = a_intervals.flatten().tolist() b_endpoints = b_intervals.flatten().tolist() sentinel = max(a_endpoints[-1], b_endpoints[-1]) + 1 a_endpoints += [sentinel] b_endpoints += [sentinel] a_index = 0 b_index = 0 res = [] scan = min(a_endpoints[0], b_endpoints[0]) while scan < sentinel: in_a = not ((scan < a_endpoints[a_index]) ^ (a_index % 2)) in_b = not ((scan < b_endpoints[b_index]) ^ (b_index % 2)) in_res = op(in_a, in_b) if in_res ^ (len(res) % 2): res += [scan] if scan == a_endpoints[a_index]: a_index += 1 if scan == b_endpoints[b_index]: b_index += 1 scan = min(a_endpoints[a_index], b_endpoints[b_index]) return np.asarray(res).reshape((-1, 2))
['def', 'merge', '(', 'a_intervals', ',', 'b_intervals', ',', 'op', ')', ':', 'a_endpoints', '=', 'a_intervals', '.', 'flatten', '(', ')', '.', 'tolist', '(', ')', 'b_endpoints', '=', 'b_intervals', '.', 'flatten', '(', ')', '.', 'tolist', '(', ')', 'sentinel', '=', 'max', '(', 'a_endpoints', '[', '-', '1', ']', ',', 'b_endpoints', '[', '-', '1', ']', ')', '+', '1', 'a_endpoints', '+=', '[', 'sentinel', ']', 'b_endpoints', '+=', '[', 'sentinel', ']', 'a_index', '=', '0', 'b_index', '=', '0', 'res', '=', '[', ']', 'scan', '=', 'min', '(', 'a_endpoints', '[', '0', ']', ',', 'b_endpoints', '[', '0', ']', ')', 'while', 'scan', '<', 'sentinel', ':', 'in_a', '=', 'not', '(', '(', 'scan', '<', 'a_endpoints', '[', 'a_index', ']', ')', '^', '(', 'a_index', '%', '2', ')', ')', 'in_b', '=', 'not', '(', '(', 'scan', '<', 'b_endpoints', '[', 'b_index', ']', ')', '^', '(', 'b_index', '%', '2', ')', ')', 'in_res', '=', 'op', '(', 'in_a', ',', 'in_b', ')', 'if', 'in_res', '^', '(', 'len', '(', 'res', ')', '%', '2', ')', ':', 'res', '+=', '[', 'scan', ']', 'if', 'scan', '==', 'a_endpoints', '[', 'a_index', ']', ':', 'a_index', '+=', '1', 'if', 'scan', '==', 'b_endpoints', '[', 'b_index', ']', ':', 'b_index', '+=', '1', 'scan', '=', 'min', '(', 'a_endpoints', '[', 'a_index', ']', ',', 'b_endpoints', '[', 'b_index', ']', ')', 'return', 'np', '.', 'asarray', '(', 'res', ')', '.', 'reshape', '(', '(', '-', '1', ',', '2', ')', ')']
Merge two lists of intervals according to the boolean function op. ``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals). This operation keeps the resulting interval set consistent. Parameters ---------- a_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as an N x 2 numpy array b_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as an N x 2 numpy array op : `function` Lambda function taking two params and returning the result of the operation between these two params. Example: lambda in_a, in_b: in_a and in_b describes the intersection of ``a_intervals`` and ``b_intervals`` whereas lambda in_a, in_b: in_a or in_b describes the union of ``a_intervals`` and ``b_intervals``. Returns ------- array : `numpy.ndarray` an N x 2 numpy array containing intervals resulting from the op between ``a_intervals`` and ``b_intervals``.
['Merge', 'two', 'lists', 'of', 'intervals', 'according', 'to', 'the', 'boolean', 'function', 'op']
train
https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/interval_set.py#L304-L357
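A usage sketch for merge above; the import path follows this record's file layout, and the expected outputs were traced through the sweep by hand:

    import numpy as np
    from mocpy.interval_set import IntervalSet

    a = np.array([[0, 5], [10, 15]])
    b = np.array([[3, 12]])

    # Union: the bridging interval [3, 12) glues everything into one run.
    print(IntervalSet.merge(a, b, lambda in_a, in_b: in_a or in_b))
    # -> [[ 0 15]]

    # Intersection of the same inputs.
    print(IntervalSet.merge(a, b, lambda in_a, in_b: in_a and in_b))
    # -> [[ 3  5]
    #     [10 12]]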
9,053
prompt-toolkit/pyvim
pyvim/commands/commands.py
buffer_list
def buffer_list(editor): """ List all buffers. """ def handler(): wa = editor.window_arrangement for info in wa.list_open_buffers(): char = '%' if info.is_active else '' eb = info.editor_buffer print(' %3i %-2s %-20s line %i' % ( info.index, char, eb.location, (eb.buffer.document.cursor_position_row + 1))) six.moves.input('\nPress ENTER to continue...') run_in_terminal(handler)
python
def buffer_list(editor): """ List all buffers. """ def handler(): wa = editor.window_arrangement for info in wa.list_open_buffers(): char = '%' if info.is_active else '' eb = info.editor_buffer print(' %3i %-2s %-20s line %i' % ( info.index, char, eb.location, (eb.buffer.document.cursor_position_row + 1))) six.moves.input('\nPress ENTER to continue...') run_in_terminal(handler)
['def', 'buffer_list', '(', 'editor', ')', ':', 'def', 'handler', '(', ')', ':', 'wa', '=', 'editor', '.', 'window_arrangement', 'for', 'info', 'in', 'wa', '.', 'list_open_buffers', '(', ')', ':', 'char', '=', "'%'", 'if', 'info', '.', 'is_active', 'else', "''", 'eb', '=', 'info', '.', 'editor_buffer', 'print', '(', "' %3i %-2s %-20s line %i'", '%', '(', 'info', '.', 'index', ',', 'char', ',', 'eb', '.', 'location', ',', '(', 'eb', '.', 'buffer', '.', 'document', '.', 'cursor_position_row', '+', '1', ')', ')', ')', 'six', '.', 'moves', '.', 'input', '(', "'\\nPress ENTER to continue...'", ')', 'run_in_terminal', '(', 'handler', ')']
List all buffers.
['List', 'all', 'buffers', '.']
train
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/commands/commands.py#L212-L224
9,054
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewprofile.py
XViewProfile.fromString
def fromString(strdata): """ Generates profile data from the input string data. :param strdata | <str> :return <XViewProfile> """ if strdata: try: xprofile = ElementTree.fromstring(nativestring(strdata)) except ExpatError as err: logger.exception(str(err)) return XViewProfile() return XViewProfile.fromXml(xprofile) logger.warning('Blank profile data provided.') return XViewProfile()
python
def fromString(strdata): """ Generates profile data from the input string data. :param strdata | <str> :return <XViewProfile> """ if strdata: try: xprofile = ElementTree.fromstring(nativestring(strdata)) except ExpatError as err: logger.exception(str(err)) return XViewProfile() return XViewProfile.fromXml(xprofile) logger.warning('Blank profile data provided.') return XViewProfile()
['def', 'fromString', '(', 'strdata', ')', ':', 'if', 'strdata', ':', 'try', ':', 'xprofile', '=', 'ElementTree', '.', 'fromstring', '(', 'nativestring', '(', 'strdata', ')', ')', 'except', 'ExpatError', 'as', 'err', ':', 'logger', '.', 'exception', '(', 'str', '(', 'err', ')', ')', 'return', 'XViewProfile', '(', ')', 'return', 'XViewProfile', '.', 'fromXml', '(', 'xprofile', ')', 'logger', '.', 'warning', '(', "'Blank profile data provided.'", ')', 'return', 'XViewProfile', '(', ')']
Generates profile data from the input string data. :param strdata | <str> :return <XViewProfile>
['Generates', 'profile', 'data', 'from', 'the', 'input', 'string', 'data', '.', ':', 'param', 'strdata', '|', '<str', '>', ':', 'return', '<XViewProfile', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofile.py#L493-L511
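The parse-or-fall-back pattern in fromString, restated with Python 3 exception syntax as a self-contained sketch (XViewProfile and nativestring are the record's own names; the function below is a generic stand-in):

    import logging
    from xml.etree import ElementTree
    from xml.parsers.expat import ExpatError

    logger = logging.getLogger(__name__)

    def parse_profile(strdata, default=None):
        """Parse XML profile data; return default on blank or malformed input."""
        if not strdata:
            logger.warning('Blank profile data provided.')
            return default
        try:
            return ElementTree.fromstring(strdata)
        except (ExpatError, ElementTree.ParseError) as err:
            logger.exception(str(err))
            return default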
9,055
gregmuellegger/django-autofixture
autofixture/__init__.py
create
def create(model, count, *args, **kwargs): ''' Create *count* instances of *model* using the either an appropiate autofixture that was :ref:`registry <registry>` or fall back to the default:class:`AutoFixture` class. *model* can be a model class or its string representation (e.g. ``"app.ModelClass"``). All positional and keyword arguments are passed to the autofixture constructor. It is demonstrated in the example below which will create ten superusers:: import autofixture admins = autofixture.create('auth.User', 10, field_values={'is_superuser': True}) .. note:: See :ref:`AutoFixture` for more information. :func:`create` will return a list of the created objects. ''' from .compat import get_model if isinstance(model, string_types): model = get_model(*model.split('.', 1)) if model in REGISTRY: autofixture_class = REGISTRY[model] else: autofixture_class = AutoFixture # Get keyword arguments that the create_one method accepts and pass them # into create_one instead of AutoFixture.__init__ argnames = set(getargnames(autofixture_class.create_one)) argnames -= set(['self']) create_kwargs = {} for argname in argnames: if argname in kwargs: create_kwargs[argname] = kwargs.pop(argname) autofixture = autofixture_class(model, *args, **kwargs) return autofixture.create(count, **create_kwargs)
python
def create(model, count, *args, **kwargs): ''' Create *count* instances of *model* using the either an appropiate autofixture that was :ref:`registry <registry>` or fall back to the default:class:`AutoFixture` class. *model* can be a model class or its string representation (e.g. ``"app.ModelClass"``). All positional and keyword arguments are passed to the autofixture constructor. It is demonstrated in the example below which will create ten superusers:: import autofixture admins = autofixture.create('auth.User', 10, field_values={'is_superuser': True}) .. note:: See :ref:`AutoFixture` for more information. :func:`create` will return a list of the created objects. ''' from .compat import get_model if isinstance(model, string_types): model = get_model(*model.split('.', 1)) if model in REGISTRY: autofixture_class = REGISTRY[model] else: autofixture_class = AutoFixture # Get keyword arguments that the create_one method accepts and pass them # into create_one instead of AutoFixture.__init__ argnames = set(getargnames(autofixture_class.create_one)) argnames -= set(['self']) create_kwargs = {} for argname in argnames: if argname in kwargs: create_kwargs[argname] = kwargs.pop(argname) autofixture = autofixture_class(model, *args, **kwargs) return autofixture.create(count, **create_kwargs)
['def', 'create', '(', 'model', ',', 'count', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'from', '.', 'compat', 'import', 'get_model', 'if', 'isinstance', '(', 'model', ',', 'string_types', ')', ':', 'model', '=', 'get_model', '(', '*', 'model', '.', 'split', '(', "'.'", ',', '1', ')', ')', 'if', 'model', 'in', 'REGISTRY', ':', 'autofixture_class', '=', 'REGISTRY', '[', 'model', ']', 'else', ':', 'autofixture_class', '=', 'AutoFixture', '# Get keyword arguments that the create_one method accepts and pass them', '# into create_one instead of AutoFixture.__init__', 'argnames', '=', 'set', '(', 'getargnames', '(', 'autofixture_class', '.', 'create_one', ')', ')', 'argnames', '-=', 'set', '(', '[', "'self'", ']', ')', 'create_kwargs', '=', '{', '}', 'for', 'argname', 'in', 'argnames', ':', 'if', 'argname', 'in', 'kwargs', ':', 'create_kwargs', '[', 'argname', ']', '=', 'kwargs', '.', 'pop', '(', 'argname', ')', 'autofixture', '=', 'autofixture_class', '(', 'model', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'autofixture', '.', 'create', '(', 'count', ',', '*', '*', 'create_kwargs', ')']
Create *count* instances of *model* using the either an appropiate
    autofixture that was :ref:`registry <registry>` or fall back to the
    default:class:`AutoFixture` class. *model* can be a model class or its
    string representation (e.g. ``"app.ModelClass"``).

    All positional and keyword arguments are passed to the autofixture
    constructor. It is demonstrated in the example below which will create
    ten superusers::

        import autofixture

        admins = autofixture.create('auth.User', 10, field_values={'is_superuser': True})

    .. note:: See :ref:`AutoFixture` for more information.

    :func:`create` will return a list of the created objects.
['Create', '*', 'count', '*', 'instances', 'of', '*', 'model', '*', 'using', 'the', 'either', 'an', 'appropiate', 'autofixture', 'that', 'was', ':', 'ref', ':', 'registry', '<registry', '>', 'or', 'fall', 'back', 'to', 'the', 'default', ':', 'class', ':', 'AutoFixture', 'class', '.', '*', 'model', '*', 'can', 'be', 'a', 'model', 'class', 'or', 'its', 'string', 'representation', '(', 'e', '.', 'g', '.', 'app', '.', 'ModelClass', ')', '.']
train
https://github.com/gregmuellegger/django-autofixture/blob/0b696fd3a06747459981e4269aff427676f84ae0/autofixture/__init__.py#L101-L136
9,056
saltstack/salt
salt/modules/solarisipspkg.py
get_fmri
def get_fmri(name, **kwargs):
    '''
    Returns FMRI from partial name.
    Returns empty string ('') if not found.
    In case of multiple match, the function returns list of all matched
    packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.get_fmri bash
    '''
    if name.startswith('pkg://'):
        # already full fmri
        return name
    cmd = ['/bin/pkg', 'list', '-aHv', name]
    # there can be more packages matching the name
    lines = __salt__['cmd.run_stdout'](cmd).splitlines()
    if not lines:
        # empty string = package not found
        return ''
    ret = []
    for line in lines:
        ret.append(_ips_get_pkgname(line))
    return ret
python
def get_fmri(name, **kwargs):
    '''
    Returns FMRI from partial name.
    Returns empty string ('') if not found.
    In case of multiple match, the function returns list of all matched
    packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.get_fmri bash
    '''
    if name.startswith('pkg://'):
        # already full fmri
        return name
    cmd = ['/bin/pkg', 'list', '-aHv', name]
    # there can be more packages matching the name
    lines = __salt__['cmd.run_stdout'](cmd).splitlines()
    if not lines:
        # empty string = package not found
        return ''
    ret = []
    for line in lines:
        ret.append(_ips_get_pkgname(line))
    return ret
['def', 'get_fmri', '(', 'name', ',', '*', '*', 'kwargs', ')', ':', 'if', 'name', '.', 'startswith', '(', "'pkg://'", ')', ':', '# already full fmri', 'return', 'name', 'cmd', '=', '[', "'/bin/pkg'", ',', "'list'", ',', "'-aHv'", ',', 'name', ']', '# there can be more packages matching the name', 'lines', '=', '__salt__', '[', "'cmd.run_stdout'", ']', '(', 'cmd', ')', '.', 'splitlines', '(', ')', 'if', 'not', 'lines', ':', '# empty string = package not found', 'return', "''", 'ret', '=', '[', ']', 'for', 'line', 'in', 'lines', ':', 'ret', '.', 'append', '(', '_ips_get_pkgname', '(', 'line', ')', ')', 'return', 'ret']
Returns FMRI from partial name.
    Returns empty string ('') if not found.
    In case of multiple match, the function returns list of all matched
    packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.get_fmri bash
['Returns', 'FMRI', 'from', 'partial', 'name', '.', 'Returns', 'empty', 'string', '(', ')', 'if', 'not', 'found', '.', 'In', 'case', 'of', 'multiple', 'match', 'the', 'function', 'returns', 'list', 'of', 'all', 'matched', 'packages', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarisipspkg.py#L394-L418
9,057
evhub/coconut
coconut/command/util.py
interpret
def interpret(code, in_vars):
    """Try to evaluate the given code, otherwise execute it."""
    try:
        result = eval(code, in_vars)
    except SyntaxError:
        pass  # exec code outside of exception context
    else:
        if result is not None:
            print(ascii(result))
        return  # don't also exec code
    exec_func(code, in_vars)
python
def interpret(code, in_vars):
    """Try to evaluate the given code, otherwise execute it."""
    try:
        result = eval(code, in_vars)
    except SyntaxError:
        pass  # exec code outside of exception context
    else:
        if result is not None:
            print(ascii(result))
        return  # don't also exec code
    exec_func(code, in_vars)
['def', 'interpret', '(', 'code', ',', 'in_vars', ')', ':', 'try', ':', 'result', '=', 'eval', '(', 'code', ',', 'in_vars', ')', 'except', 'SyntaxError', ':', 'pass', '# exec code outside of exception context', 'else', ':', 'if', 'result', 'is', 'not', 'None', ':', 'print', '(', 'ascii', '(', 'result', ')', ')', 'return', "# don't also exec code", 'exec_func', '(', 'code', ',', 'in_vars', ')']
Try to evaluate the given code, otherwise execute it.
['Try', 'to', 'evaluate', 'the', 'given', 'code', 'otherwise', 'execute', 'it', '.']
train
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/command/util.py#L172-L182
9,058
phareous/insteonlocal
insteonlocal/Hub.py
Hub.brightness_to_hex
def brightness_to_hex(self, level):
        """Convert numeric brightness percentage into hex for insteon"""
        level_int = int(level)
        new_int = int((level_int * 255)/100)
        new_level = format(new_int, '02X')
        self.logger.debug("brightness_to_hex: %s to %s", level, str(new_level))
        return str(new_level)
python
def brightness_to_hex(self, level):
        """Convert numeric brightness percentage into hex for insteon"""
        level_int = int(level)
        new_int = int((level_int * 255)/100)
        new_level = format(new_int, '02X')
        self.logger.debug("brightness_to_hex: %s to %s", level, str(new_level))
        return str(new_level)
['def', 'brightness_to_hex', '(', 'self', ',', 'level', ')', ':', 'level_int', '=', 'int', '(', 'level', ')', 'new_int', '=', 'int', '(', '(', 'level_int', '*', '255', ')', '/', '100', ')', 'new_level', '=', 'format', '(', 'new_int', ',', "'02X'", ')', 'self', '.', 'logger', '.', 'debug', '(', '"brightness_to_hex: %s to %s"', ',', 'level', ',', 'str', '(', 'new_level', ')', ')', 'return', 'str', '(', 'new_level', ')']
Convert numeric brightness percentage into hex for insteon
['Convert', 'numeric', 'brightness', 'percentage', 'into', 'hex', 'for', 'insteon']
train
https://github.com/phareous/insteonlocal/blob/a4544a17d143fb285852cb873e862c270d55dd00/insteonlocal/Hub.py#L77-L83
9,059
hydpy-dev/hydpy
hydpy/core/timetools.py
Date.to_array
def to_array(self):
        """Return a 1-dimensional |numpy| |numpy.ndarray| with six entries
        defining the actual date (year, month, day, hour, minute, second).

        >>> from hydpy import Date
        >>> Date('1992-10-8 15:15:42').to_array()
        array([ 1992., 10., 8., 15., 15., 42.])

        .. note::

           The date defined by the returned |numpy.ndarray| does not
           include any time zone information and corresponds to
           |Options.utcoffset|, which defaults to UTC+01:00.
        """
        return numpy.array([self.year, self.month, self.day, self.hour,
                            self.minute, self.second], dtype=float)
python
def to_array(self):
        """Return a 1-dimensional |numpy| |numpy.ndarray| with six entries
        defining the actual date (year, month, day, hour, minute, second).

        >>> from hydpy import Date
        >>> Date('1992-10-8 15:15:42').to_array()
        array([ 1992., 10., 8., 15., 15., 42.])

        .. note::

           The date defined by the returned |numpy.ndarray| does not
           include any time zone information and corresponds to
           |Options.utcoffset|, which defaults to UTC+01:00.
        """
        return numpy.array([self.year, self.month, self.day, self.hour,
                            self.minute, self.second], dtype=float)
['def', 'to_array', '(', 'self', ')', ':', 'return', 'numpy', '.', 'array', '(', '[', 'self', '.', 'year', ',', 'self', '.', 'month', ',', 'self', '.', 'day', ',', 'self', '.', 'hour', ',', 'self', '.', 'minute', ',', 'self', '.', 'second', ']', ',', 'dtype', '=', 'float', ')']
Return a 1-dimensional |numpy| |numpy.ndarray| with six entries
        defining the actual date (year, month, day, hour, minute, second).

        >>> from hydpy import Date
        >>> Date('1992-10-8 15:15:42').to_array()
        array([ 1992., 10., 8., 15., 15., 42.])

        .. note::

           The date defined by the returned |numpy.ndarray| does not
           include any time zone information and corresponds to
           |Options.utcoffset|, which defaults to UTC+01:00.
['Return', 'a', '1', '-', 'dimensional', '|numpy|', '|numpy', '.', 'ndarray|', 'with', 'six', 'entries', 'defining', 'the', 'actual', 'date', '(', 'year', 'month', 'day', 'hour', 'minute', 'second', ')', '.']
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/timetools.py#L283-L298
9,060
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/reftrack/__init__.py
group_content
def group_content(content, namespace, grpname, grpnodetype):
    """Group the given content in the given namespace under a node of type
    grpnodetype with the name grpname

    :param content: the nodes to group
    :type content: :class:`list`
    :param namespace: the namespace to use
    :type namespace: str | None
    :param grpname: the name of the new grpnode
    :type grpname: str
    :param grpnodetype: the nodetype for the grpnode
    :type grpnodetype: str
    :returns: the created group node
    :rtype: str
    :raises: None
    """
    with common.preserve_namespace(namespace):
        grpnode = cmds.createNode(grpnodetype, name=grpname)  # create grp node
        cmds.group(content, uag=grpnode)  # group content
        return grpnode
python
def group_content(content, namespace, grpname, grpnodetype):
    """Group the given content in the given namespace under a node of type
    grpnodetype with the name grpname

    :param content: the nodes to group
    :type content: :class:`list`
    :param namespace: the namespace to use
    :type namespace: str | None
    :param grpname: the name of the new grpnode
    :type grpname: str
    :param grpnodetype: the nodetype for the grpnode
    :type grpnodetype: str
    :returns: the created group node
    :rtype: str
    :raises: None
    """
    with common.preserve_namespace(namespace):
        grpnode = cmds.createNode(grpnodetype, name=grpname)  # create grp node
        cmds.group(content, uag=grpnode)  # group content
        return grpnode
['def', 'group_content', '(', 'content', ',', 'namespace', ',', 'grpname', ',', 'grpnodetype', ')', ':', 'with', 'common', '.', 'preserve_namespace', '(', 'namespace', ')', ':', 'grpnode', '=', 'cmds', '.', 'createNode', '(', 'grpnodetype', ',', 'name', '=', 'grpname', ')', '# create grp node', 'cmds', '.', 'group', '(', 'content', ',', 'uag', '=', 'grpnode', ')', '# group content', 'return', 'grpnode']
Group the given content in the given namespace under a node of type
    grpnodetype with the name grpname

    :param content: the nodes to group
    :type content: :class:`list`
    :param namespace: the namespace to use
    :type namespace: str | None
    :param grpname: the name of the new grpnode
    :type grpname: str
    :param grpnodetype: the nodetype for the grpnode
    :type grpnodetype: str
    :returns: the created group node
    :rtype: str
    :raises: None
['Group', 'the', 'given', 'content', 'in', 'the', 'given', 'namespace', 'under', 'a', 'node', 'of', 'type', 'grpnodetype', 'with', 'the', 'name', 'grpname']
train
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/reftrack/__init__.py#L38-L57
9,061
mcs07/PubChemPy
pubchempy.py
Compound.to_dict
def to_dict(self, properties=None):
        """Return a dictionary containing Compound data. Optionally specify a list of the desired properties.

        synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
        because they each require an extra request.
        """
        if not properties:
            skip = {'aids', 'sids', 'synonyms'}
            properties = [p for p in dir(Compound) if isinstance(getattr(Compound, p), property) and p not in skip]
        return {p: [i.to_dict() for i in getattr(self, p)] if p in {'atoms', 'bonds'} else getattr(self, p)
                for p in properties}
python
def to_dict(self, properties=None):
        """Return a dictionary containing Compound data. Optionally specify a list of the desired properties.

        synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
        because they each require an extra request.
        """
        if not properties:
            skip = {'aids', 'sids', 'synonyms'}
            properties = [p for p in dir(Compound) if isinstance(getattr(Compound, p), property) and p not in skip]
        return {p: [i.to_dict() for i in getattr(self, p)] if p in {'atoms', 'bonds'} else getattr(self, p)
                for p in properties}
['def', 'to_dict', '(', 'self', ',', 'properties', '=', 'None', ')', ':', 'if', 'not', 'properties', ':', 'skip', '=', '{', "'aids'", ',', "'sids'", ',', "'synonyms'", '}', 'properties', '=', '[', 'p', 'for', 'p', 'in', 'dir', '(', 'Compound', ')', 'if', 'isinstance', '(', 'getattr', '(', 'Compound', ',', 'p', ')', ',', 'property', ')', 'and', 'p', 'not', 'in', 'skip', ']', 'return', '{', 'p', ':', '[', 'i', '.', 'to_dict', '(', ')', 'for', 'i', 'in', 'getattr', '(', 'self', ',', 'p', ')', ']', 'if', 'p', 'in', '{', "'atoms'", ',', "'bonds'", '}', 'else', 'getattr', '(', 'self', ',', 'p', ')', 'for', 'p', 'in', 'properties', '}']
Return a dictionary containing Compound data. Optionally specify a list of the desired properties.

        synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is
        because they each require an extra request.
['Return', 'a', 'dictionary', 'containing', 'Compound', 'data', '.', 'Optionally', 'specify', 'a', 'list', 'of', 'the', 'desired', 'properties', '.']
train
https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L735-L744
9,062
hayd/pep8radius
pep8radius/shell.py
shell_out_ignore_exitcode
def shell_out_ignore_exitcode(cmd, stderr=STDOUT, cwd=None):
    """Same as shell_out but doesn't raise if the cmd exits badly."""
    try:
        return shell_out(cmd, stderr=stderr, cwd=cwd)
    except CalledProcessError as c:
        return _clean_output(c.output)
python
def shell_out_ignore_exitcode(cmd, stderr=STDOUT, cwd=None):
    """Same as shell_out but doesn't raise if the cmd exits badly."""
    try:
        return shell_out(cmd, stderr=stderr, cwd=cwd)
    except CalledProcessError as c:
        return _clean_output(c.output)
['def', 'shell_out_ignore_exitcode', '(', 'cmd', ',', 'stderr', '=', 'STDOUT', ',', 'cwd', '=', 'None', ')', ':', 'try', ':', 'return', 'shell_out', '(', 'cmd', ',', 'stderr', '=', 'stderr', ',', 'cwd', '=', 'cwd', ')', 'except', 'CalledProcessError', 'as', 'c', ':', 'return', '_clean_output', '(', 'c', '.', 'output', ')']
Same as shell_out but doesn't raise if the cmd exits badly.
['Same', 'as', 'shell_out', 'but', 'doesn', 't', 'raise', 'if', 'the', 'cmd', 'exits', 'badly', '.']
train
https://github.com/hayd/pep8radius/blob/0c1d14835d390f7feeb602f35a768e52ce306a0a/pep8radius/shell.py#L62-L67
9,063
dw/mitogen
mitogen/compat/pkgutil.py
find_loader
def find_loader(fullname):
    """Find a PEP 302 "loader" object for fullname

    If fullname contains dots, path must be the containing package's
    __path__. Returns None if the module cannot be found or imported.
    This function uses iter_importers(), and is thus subject to the same
    limitations regarding platform-specific special import locations such
    as the Windows registry.
    """
    for importer in iter_importers(fullname):
        loader = importer.find_module(fullname)
        if loader is not None:
            return loader
    return None
python
def find_loader(fullname):
    """Find a PEP 302 "loader" object for fullname

    If fullname contains dots, path must be the containing package's
    __path__. Returns None if the module cannot be found or imported.
    This function uses iter_importers(), and is thus subject to the same
    limitations regarding platform-specific special import locations such
    as the Windows registry.
    """
    for importer in iter_importers(fullname):
        loader = importer.find_module(fullname)
        if loader is not None:
            return loader
    return None
['def', 'find_loader', '(', 'fullname', ')', ':', 'for', 'importer', 'in', 'iter_importers', '(', 'fullname', ')', ':', 'loader', '=', 'importer', '.', 'find_module', '(', 'fullname', ')', 'if', 'loader', 'is', 'not', 'None', ':', 'return', 'loader', 'return', 'None']
Find a PEP 302 "loader" object for fullname

    If fullname contains dots, path must be the containing package's
    __path__. Returns None if the module cannot be found or imported.
    This function uses iter_importers(), and is thus subject to the same
    limitations regarding platform-specific special import locations such
    as the Windows registry.
['Find', 'a', 'PEP', '302', 'loader', 'object', 'for', 'fullname']
train
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/compat/pkgutil.py#L468-L481
9,064
summa-tx/riemann
riemann/tx/sprout.py
SproutTx.sighash_all
def sighash_all(self, index=0, script=None,
                    prevout_value=None, anyone_can_pay=False):
        '''
        SproutTx, int, byte-like, byte-like, bool -> bytearray
        Sighashes suck
        Generates the hash to be signed with SIGHASH_ALL
        https://en.bitcoin.it/wiki/OP_CHECKSIG#Hashtype_SIGHASH_ALL_.28default.29
        '''
        if riemann.network.FORKID is not None:
            return self._sighash_forkid(index=index,
                                        script=script,
                                        prevout_value=prevout_value,
                                        sighash_type=shared.SIGHASH_ALL,
                                        anyone_can_pay=anyone_can_pay)

        copy_tx = self._sighash_prep(index=index, script=script)
        if anyone_can_pay:
            return self._sighash_anyone_can_pay(
                index=index, copy_tx=copy_tx,
                sighash_type=shared.SIGHASH_ALL)

        return self._sighash_final_hashing(copy_tx, shared.SIGHASH_ALL)
python
def sighash_all(self, index=0, script=None,
                    prevout_value=None, anyone_can_pay=False):
        '''
        SproutTx, int, byte-like, byte-like, bool -> bytearray
        Sighashes suck
        Generates the hash to be signed with SIGHASH_ALL
        https://en.bitcoin.it/wiki/OP_CHECKSIG#Hashtype_SIGHASH_ALL_.28default.29
        '''
        if riemann.network.FORKID is not None:
            return self._sighash_forkid(index=index,
                                        script=script,
                                        prevout_value=prevout_value,
                                        sighash_type=shared.SIGHASH_ALL,
                                        anyone_can_pay=anyone_can_pay)

        copy_tx = self._sighash_prep(index=index, script=script)
        if anyone_can_pay:
            return self._sighash_anyone_can_pay(
                index=index, copy_tx=copy_tx,
                sighash_type=shared.SIGHASH_ALL)

        return self._sighash_final_hashing(copy_tx, shared.SIGHASH_ALL)
['def', 'sighash_all', '(', 'self', ',', 'index', '=', '0', ',', 'script', '=', 'None', ',', 'prevout_value', '=', 'None', ',', 'anyone_can_pay', '=', 'False', ')', ':', 'if', 'riemann', '.', 'network', '.', 'FORKID', 'is', 'not', 'None', ':', 'return', 'self', '.', '_sighash_forkid', '(', 'index', '=', 'index', ',', 'script', '=', 'script', ',', 'prevout_value', '=', 'prevout_value', ',', 'sighash_type', '=', 'shared', '.', 'SIGHASH_ALL', ',', 'anyone_can_pay', '=', 'anyone_can_pay', ')', 'copy_tx', '=', 'self', '.', '_sighash_prep', '(', 'index', '=', 'index', ',', 'script', '=', 'script', ')', 'if', 'anyone_can_pay', ':', 'return', 'self', '.', '_sighash_anyone_can_pay', '(', 'index', '=', 'index', ',', 'copy_tx', '=', 'copy_tx', ',', 'sighash_type', '=', 'shared', '.', 'SIGHASH_ALL', ')', 'return', 'self', '.', '_sighash_final_hashing', '(', 'copy_tx', ',', 'shared', '.', 'SIGHASH_ALL', ')']
SproutTx, int, byte-like, byte-like, bool -> bytearray
        Sighashes suck
        Generates the hash to be signed with SIGHASH_ALL
        https://en.bitcoin.it/wiki/OP_CHECKSIG#Hashtype_SIGHASH_ALL_.28default.29
['SproutTx', 'int', 'byte', '-', 'like', 'byte', '-', 'like', 'bool', '-', '>', 'bytearray', 'Sighashes', 'suck', 'Generates', 'the', 'hash', 'to', 'be', 'signed', 'with', 'SIGHASH_ALL', 'https', ':', '//', 'en', '.', 'bitcoin', '.', 'it', '/', 'wiki', '/', 'OP_CHECKSIG#Hashtype_SIGHASH_ALL_', '.', '28default', '.', '29']
train
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sprout.py#L242-L263
9,065
SBRG/ssbio
ssbio/core/protein.py
Protein.get_homology_models
def get_homology_models(self):
        """DictList: Return a DictList of all homology models in self.structures"""
        # TODO: change to a property?
        if self.representative_structure:
            return DictList(x for x in self.structures
                            if not x.is_experimental and x.id != self.representative_structure.id)
        else:
            return DictList(x for x in self.structures if not x.is_experimental)
python
def get_homology_models(self):
        """DictList: Return a DictList of all homology models in self.structures"""
        # TODO: change to a property?
        if self.representative_structure:
            return DictList(x for x in self.structures
                            if not x.is_experimental and x.id != self.representative_structure.id)
        else:
            return DictList(x for x in self.structures if not x.is_experimental)
['def', 'get_homology_models', '(', 'self', ')', ':', '# TODO: change to a property?', 'if', 'self', '.', 'representative_structure', ':', 'return', 'DictList', '(', 'x', 'for', 'x', 'in', 'self', '.', 'structures', 'if', 'not', 'x', '.', 'is_experimental', 'and', 'x', '.', 'id', '!=', 'self', '.', 'representative_structure', '.', 'id', ')', 'else', ':', 'return', 'DictList', '(', 'x', 'for', 'x', 'in', 'self', '.', 'structures', 'if', 'not', 'x', '.', 'is_experimental', ')']
DictList: Return a DictList of all homology models in self.structures
['DictList', ':', 'Return', 'a', 'DictList', 'of', 'all', 'homology', 'models', 'in', 'self', '.', 'structures']
train
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/core/protein.py#L251-L257
9,066
SHTOOLS/SHTOOLS
pyshtools/shclasses/shwindow.py
SHWindow.from_cap
def from_cap(cls, theta, lwin, clat=None, clon=None, nwin=None,
                 theta_degrees=True, coord_degrees=True, dj_matrix=None,
                 weights=None):
        """
        Construct spherical cap localization windows.

        Usage
        -----
        x = SHWindow.from_cap(theta, lwin, [clat, clon, nwin, theta_degrees,
                              coord_degrees, dj_matrix, weights])

        Returns
        -------
        x : SHWindow class instance

        Parameters
        ----------
        theta : float
            Angular radius of the spherical cap localization domain (default
            in degrees).
        lwin : int
            Spherical harmonic bandwidth of the localization windows.
        clat, clon : float, optional, default = None
            Latitude and longitude of the center of the rotated spherical cap
            localization windows (default in degrees).
        nwin : int, optional, default (lwin+1)**2
            Number of localization windows.
        theta_degrees : bool, optional, default = True
            True if theta is in degrees.
        coord_degrees : bool, optional, default = True
            True if clat and clon are in degrees.
        dj_matrix : ndarray, optional, default = None
            The djpi2 rotation matrix computed by a call to djpi2.
        weights : ndarray, optional, default = None
            Taper weights used with the multitaper spectral analyses.
        """
        if theta_degrees:
            tapers, eigenvalues, taper_order = _shtools.SHReturnTapers(
                _np.radians(theta), lwin)
        else:
            tapers, eigenvalues, taper_order = _shtools.SHReturnTapers(
                theta, lwin)

        return SHWindowCap(theta, tapers, eigenvalues, taper_order,
                           clat, clon, nwin, theta_degrees, coord_degrees,
                           dj_matrix, weights, copy=False)
python
def from_cap(cls, theta, lwin, clat=None, clon=None, nwin=None,
                 theta_degrees=True, coord_degrees=True, dj_matrix=None,
                 weights=None):
        """
        Construct spherical cap localization windows.

        Usage
        -----
        x = SHWindow.from_cap(theta, lwin, [clat, clon, nwin, theta_degrees,
                              coord_degrees, dj_matrix, weights])

        Returns
        -------
        x : SHWindow class instance

        Parameters
        ----------
        theta : float
            Angular radius of the spherical cap localization domain (default
            in degrees).
        lwin : int
            Spherical harmonic bandwidth of the localization windows.
        clat, clon : float, optional, default = None
            Latitude and longitude of the center of the rotated spherical cap
            localization windows (default in degrees).
        nwin : int, optional, default (lwin+1)**2
            Number of localization windows.
        theta_degrees : bool, optional, default = True
            True if theta is in degrees.
        coord_degrees : bool, optional, default = True
            True if clat and clon are in degrees.
        dj_matrix : ndarray, optional, default = None
            The djpi2 rotation matrix computed by a call to djpi2.
        weights : ndarray, optional, default = None
            Taper weights used with the multitaper spectral analyses.
        """
        if theta_degrees:
            tapers, eigenvalues, taper_order = _shtools.SHReturnTapers(
                _np.radians(theta), lwin)
        else:
            tapers, eigenvalues, taper_order = _shtools.SHReturnTapers(
                theta, lwin)

        return SHWindowCap(theta, tapers, eigenvalues, taper_order,
                           clat, clon, nwin, theta_degrees, coord_degrees,
                           dj_matrix, weights, copy=False)
['def', 'from_cap', '(', 'cls', ',', 'theta', ',', 'lwin', ',', 'clat', '=', 'None', ',', 'clon', '=', 'None', ',', 'nwin', '=', 'None', ',', 'theta_degrees', '=', 'True', ',', 'coord_degrees', '=', 'True', ',', 'dj_matrix', '=', 'None', ',', 'weights', '=', 'None', ')', ':', 'if', 'theta_degrees', ':', 'tapers', ',', 'eigenvalues', ',', 'taper_order', '=', '_shtools', '.', 'SHReturnTapers', '(', '_np', '.', 'radians', '(', 'theta', ')', ',', 'lwin', ')', 'else', ':', 'tapers', ',', 'eigenvalues', ',', 'taper_order', '=', '_shtools', '.', 'SHReturnTapers', '(', 'theta', ',', 'lwin', ')', 'return', 'SHWindowCap', '(', 'theta', ',', 'tapers', ',', 'eigenvalues', ',', 'taper_order', ',', 'clat', ',', 'clon', ',', 'nwin', ',', 'theta_degrees', ',', 'coord_degrees', ',', 'dj_matrix', ',', 'weights', ',', 'copy', '=', 'False', ')']
Construct spherical cap localization windows.

        Usage
        -----
        x = SHWindow.from_cap(theta, lwin, [clat, clon, nwin, theta_degrees,
                              coord_degrees, dj_matrix, weights])

        Returns
        -------
        x : SHWindow class instance

        Parameters
        ----------
        theta : float
            Angular radius of the spherical cap localization domain (default
            in degrees).
        lwin : int
            Spherical harmonic bandwidth of the localization windows.
        clat, clon : float, optional, default = None
            Latitude and longitude of the center of the rotated spherical cap
            localization windows (default in degrees).
        nwin : int, optional, default (lwin+1)**2
            Number of localization windows.
        theta_degrees : bool, optional, default = True
            True if theta is in degrees.
        coord_degrees : bool, optional, default = True
            True if clat and clon are in degrees.
        dj_matrix : ndarray, optional, default = None
            The djpi2 rotation matrix computed by a call to djpi2.
        weights : ndarray, optional, default = None
            Taper weights used with the multitaper spectral analyses.
['Construct', 'spherical', 'cap', 'localization', 'windows', '.']
train
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shwindow.py#L119-L164
9,067
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
Task.clean_output_files
def clean_output_files(self, follow_parents=True):
        """
        This method is called when the task reaches S_OK. It removes all the
        output files produced by the task that are not needed by its children
        as well as the output files produced by its parents if no other node
        needs them.

        Args:
            follow_parents: If true, the output files of the parents nodes
                will be removed if possible.

        Return:
            list with the absolute paths of the files that have been removed.
        """
        paths = []
        if self.status != self.S_OK:
            logger.warning("Calling task.clean_output_files on a task whose status != S_OK")

        # Remove all files in tmpdir.
        self.tmpdir.clean()

        # Find the file extensions that should be preserved since these files are still
        # needed by the children who haven't reached S_OK
        except_exts = set()
        for child in self.get_children():
            if child.status == self.S_OK: continue
            # Find the position of self in child.deps and add the extensions.
            i = [dep.node for dep in child.deps].index(self)
            except_exts.update(child.deps[i].exts)

        # Remove the files in the outdir of the task but keep except_exts.
        exts = self.gc.exts.difference(except_exts)
        #print("Will remove its extensions: ", exts)
        paths += self.outdir.remove_exts(exts)
        if not follow_parents: return paths

        # Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
        for parent in self.get_parents():
            # Here we build a dictionary file extension --> list of child nodes requiring this file from parent
            # e.g {"WFK": [node1, node2]}
            ext2nodes = collections.defaultdict(list)
            for child in parent.get_children():
                if child.status == child.S_OK: continue
                i = [d.node for d in child.deps].index(parent)
                for ext in child.deps[i].exts:
                    ext2nodes[ext].append(child)

            # Remove extension only if no node depends on it!
            except_exts = [k for k, lst in ext2nodes.items() if lst]
            exts = self.gc.exts.difference(except_exts)
            #print("%s removes extensions %s from parent node %s" % (self, exts, parent))
            paths += parent.outdir.remove_exts(exts)

        self.history.info("Removed files: %s" % paths)
        return paths
python
def clean_output_files(self, follow_parents=True):
        """
        This method is called when the task reaches S_OK. It removes all the
        output files produced by the task that are not needed by its children
        as well as the output files produced by its parents if no other node
        needs them.

        Args:
            follow_parents: If true, the output files of the parents nodes
                will be removed if possible.

        Return:
            list with the absolute paths of the files that have been removed.
        """
        paths = []
        if self.status != self.S_OK:
            logger.warning("Calling task.clean_output_files on a task whose status != S_OK")

        # Remove all files in tmpdir.
        self.tmpdir.clean()

        # Find the file extensions that should be preserved since these files are still
        # needed by the children who haven't reached S_OK
        except_exts = set()
        for child in self.get_children():
            if child.status == self.S_OK: continue
            # Find the position of self in child.deps and add the extensions.
            i = [dep.node for dep in child.deps].index(self)
            except_exts.update(child.deps[i].exts)

        # Remove the files in the outdir of the task but keep except_exts.
        exts = self.gc.exts.difference(except_exts)
        #print("Will remove its extensions: ", exts)
        paths += self.outdir.remove_exts(exts)
        if not follow_parents: return paths

        # Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
        for parent in self.get_parents():
            # Here we build a dictionary file extension --> list of child nodes requiring this file from parent
            # e.g {"WFK": [node1, node2]}
            ext2nodes = collections.defaultdict(list)
            for child in parent.get_children():
                if child.status == child.S_OK: continue
                i = [d.node for d in child.deps].index(parent)
                for ext in child.deps[i].exts:
                    ext2nodes[ext].append(child)

            # Remove extension only if no node depends on it!
            except_exts = [k for k, lst in ext2nodes.items() if lst]
            exts = self.gc.exts.difference(except_exts)
            #print("%s removes extensions %s from parent node %s" % (self, exts, parent))
            paths += parent.outdir.remove_exts(exts)

        self.history.info("Removed files: %s" % paths)
        return paths
['def', 'clean_output_files', '(', 'self', ',', 'follow_parents', '=', 'True', ')', ':', 'paths', '=', '[', ']', 'if', 'self', '.', 'status', '!=', 'self', '.', 'S_OK', ':', 'logger', '.', 'warning', '(', '"Calling task.clean_output_files on a task whose status != S_OK"', ')', '# Remove all files in tmpdir.', 'self', '.', 'tmpdir', '.', 'clean', '(', ')', '# Find the file extensions that should be preserved since these files are still', "# needed by the children who haven't reached S_OK", 'except_exts', '=', 'set', '(', ')', 'for', 'child', 'in', 'self', '.', 'get_children', '(', ')', ':', 'if', 'child', '.', 'status', '==', 'self', '.', 'S_OK', ':', 'continue', '# Find the position of self in child.deps and add the extensions.', 'i', '=', '[', 'dep', '.', 'node', 'for', 'dep', 'in', 'child', '.', 'deps', ']', '.', 'index', '(', 'self', ')', 'except_exts', '.', 'update', '(', 'child', '.', 'deps', '[', 'i', ']', '.', 'exts', ')', '# Remove the files in the outdir of the task but keep except_exts.', 'exts', '=', 'self', '.', 'gc', '.', 'exts', '.', 'difference', '(', 'except_exts', ')', '#print("Will remove its extensions: ", exts)', 'paths', '+=', 'self', '.', 'outdir', '.', 'remove_exts', '(', 'exts', ')', 'if', 'not', 'follow_parents', ':', 'return', 'paths', '# Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.', 'for', 'parent', 'in', 'self', '.', 'get_parents', '(', ')', ':', '# Here we build a dictionary file extension --> list of child nodes requiring this file from parent', '# e.g {"WFK": [node1, node2]}', 'ext2nodes', '=', 'collections', '.', 'defaultdict', '(', 'list', ')', 'for', 'child', 'in', 'parent', '.', 'get_children', '(', ')', ':', 'if', 'child', '.', 'status', '==', 'child', '.', 'S_OK', ':', 'continue', 'i', '=', '[', 'd', '.', 'node', 'for', 'd', 'in', 'child', '.', 'deps', ']', '.', 'index', '(', 'parent', ')', 'for', 'ext', 'in', 'child', '.', 'deps', '[', 'i', ']', '.', 'exts', ':', 'ext2nodes', '[', 'ext', ']', '.', 'append', '(', 'child', ')', '# Remove extension only if no node depends on it!', 'except_exts', '=', '[', 'k', 'for', 'k', ',', 'lst', 'in', 'ext2nodes', '.', 'items', '(', ')', 'if', 'lst', ']', 'exts', '=', 'self', '.', 'gc', '.', 'exts', '.', 'difference', '(', 'except_exts', ')', '#print("%s removes extensions %s from parent node %s" % (self, exts, parent))', 'paths', '+=', 'parent', '.', 'outdir', '.', 'remove_exts', '(', 'exts', ')', 'self', '.', 'history', '.', 'info', '(', '"Removed files: %s"', '%', 'paths', ')', 'return', 'paths']
This method is called when the task reaches S_OK. It removes all the
        output files produced by the task that are not needed by its children
        as well as the output files produced by its parents if no other node
        needs them.

        Args:
            follow_parents: If true, the output files of the parents nodes
                will be removed if possible.

        Return:
            list with the absolute paths of the files that have been removed.
['This', 'method', 'is', 'called', 'when', 'the', 'task', 'reaches', 'S_OK', '.', 'It', 'removes', 'all', 'the', 'output', 'files', 'produced', 'by', 'the', 'task', 'that', 'are', 'not', 'needed', 'by', 'its', 'children', 'as', 'well', 'as', 'the', 'output', 'files', 'produced', 'by', 'its', 'parents', 'if', 'no', 'other', 'node', 'needs', 'them', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L2335-L2388
9,068
reiinakano/scikit-plot
scikitplot/decomposition.py
plot_pca_component_variance
def plot_pca_component_variance(clf, title='PCA Component Explained Variances',
                                target_explained_variance=0.75, ax=None,
                                figsize=None, title_fontsize="large",
                                text_fontsize="medium"):
    """Plots PCA components' explained variance ratios. (new in v0.2.2)

    Args:
        clf: PCA instance that has the ``explained_variance_ratio_``
            attribute.

        title (string, optional): Title of the generated plot. Defaults to
            "PCA Component Explained Variances"

        target_explained_variance (float, optional): Looks for the minimum
            number of principal components that satisfies this value and
            emphasizes it on the plot. Defaults to 0.75

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> pca = PCA(random_state=1)
        >>> pca.fit(X)
        >>> skplt.decomposition.plot_pca_component_variance(pca)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_pca_component_variance.png
           :align: center
           :alt: PCA Component variances
    """
    if not hasattr(clf, 'explained_variance_ratio_'):
        raise TypeError('"clf" does not have explained_variance_ratio_ '
                        'attribute. Has the PCA been fitted?')

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)

    cumulative_sum_ratios = np.cumsum(clf.explained_variance_ratio_)

    # Magic code for figuring out closest value to target_explained_variance
    idx = np.searchsorted(cumulative_sum_ratios, target_explained_variance)

    ax.plot(range(len(clf.explained_variance_ratio_) + 1),
            np.concatenate(([0], np.cumsum(clf.explained_variance_ratio_))),
            '*-')
    ax.grid(True)
    ax.set_xlabel('First n principal components', fontsize=text_fontsize)
    ax.set_ylabel('Explained variance ratio of first n components',
                  fontsize=text_fontsize)
    ax.set_ylim([-0.02, 1.02])
    if idx < len(cumulative_sum_ratios):
        ax.plot(idx+1, cumulative_sum_ratios[idx], 'ro',
                label='{0:0.3f} Explained variance ratio for '
                'first {1} components'.format(cumulative_sum_ratios[idx],
                                              idx+1),
                markersize=4, markeredgewidth=4)
        ax.axhline(cumulative_sum_ratios[idx],
                   linestyle=':', lw=3, color='black')
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc="best", fontsize=text_fontsize)
    return ax
python
def plot_pca_component_variance(clf, title='PCA Component Explained Variances',
                                target_explained_variance=0.75, ax=None,
                                figsize=None, title_fontsize="large",
                                text_fontsize="medium"):
    """Plots PCA components' explained variance ratios. (new in v0.2.2)

    Args:
        clf: PCA instance that has the ``explained_variance_ratio_``
            attribute.

        title (string, optional): Title of the generated plot. Defaults to
            "PCA Component Explained Variances"

        target_explained_variance (float, optional): Looks for the minimum
            number of principal components that satisfies this value and
            emphasizes it on the plot. Defaults to 0.75

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> pca = PCA(random_state=1)
        >>> pca.fit(X)
        >>> skplt.decomposition.plot_pca_component_variance(pca)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_pca_component_variance.png
           :align: center
           :alt: PCA Component variances
    """
    if not hasattr(clf, 'explained_variance_ratio_'):
        raise TypeError('"clf" does not have explained_variance_ratio_ '
                        'attribute. Has the PCA been fitted?')

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)

    cumulative_sum_ratios = np.cumsum(clf.explained_variance_ratio_)

    # Magic code for figuring out closest value to target_explained_variance
    idx = np.searchsorted(cumulative_sum_ratios, target_explained_variance)

    ax.plot(range(len(clf.explained_variance_ratio_) + 1),
            np.concatenate(([0], np.cumsum(clf.explained_variance_ratio_))),
            '*-')
    ax.grid(True)
    ax.set_xlabel('First n principal components', fontsize=text_fontsize)
    ax.set_ylabel('Explained variance ratio of first n components',
                  fontsize=text_fontsize)
    ax.set_ylim([-0.02, 1.02])
    if idx < len(cumulative_sum_ratios):
        ax.plot(idx+1, cumulative_sum_ratios[idx], 'ro',
                label='{0:0.3f} Explained variance ratio for '
                'first {1} components'.format(cumulative_sum_ratios[idx],
                                              idx+1),
                markersize=4, markeredgewidth=4)
        ax.axhline(cumulative_sum_ratios[idx],
                   linestyle=':', lw=3, color='black')
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc="best", fontsize=text_fontsize)
    return ax
['def', 'plot_pca_component_variance', '(', 'clf', ',', 'title', '=', "'PCA Component Explained Variances'", ',', 'target_explained_variance', '=', '0.75', ',', 'ax', '=', 'None', ',', 'figsize', '=', 'None', ',', 'title_fontsize', '=', '"large"', ',', 'text_fontsize', '=', '"medium"', ')', ':', 'if', 'not', 'hasattr', '(', 'clf', ',', "'explained_variance_ratio_'", ')', ':', 'raise', 'TypeError', '(', '\'"clf" does not have explained_variance_ratio_ \'', "'attribute. Has the PCA been fitted?'", ')', 'if', 'ax', 'is', 'None', ':', 'fig', ',', 'ax', '=', 'plt', '.', 'subplots', '(', '1', ',', '1', ',', 'figsize', '=', 'figsize', ')', 'ax', '.', 'set_title', '(', 'title', ',', 'fontsize', '=', 'title_fontsize', ')', 'cumulative_sum_ratios', '=', 'np', '.', 'cumsum', '(', 'clf', '.', 'explained_variance_ratio_', ')', '# Magic code for figuring out closest value to target_explained_variance', 'idx', '=', 'np', '.', 'searchsorted', '(', 'cumulative_sum_ratios', ',', 'target_explained_variance', ')', 'ax', '.', 'plot', '(', 'range', '(', 'len', '(', 'clf', '.', 'explained_variance_ratio_', ')', '+', '1', ')', ',', 'np', '.', 'concatenate', '(', '(', '[', '0', ']', ',', 'np', '.', 'cumsum', '(', 'clf', '.', 'explained_variance_ratio_', ')', ')', ')', ',', "'*-'", ')', 'ax', '.', 'grid', '(', 'True', ')', 'ax', '.', 'set_xlabel', '(', "'First n principal components'", ',', 'fontsize', '=', 'text_fontsize', ')', 'ax', '.', 'set_ylabel', '(', "'Explained variance ratio of first n components'", ',', 'fontsize', '=', 'text_fontsize', ')', 'ax', '.', 'set_ylim', '(', '[', '-', '0.02', ',', '1.02', ']', ')', 'if', 'idx', '<', 'len', '(', 'cumulative_sum_ratios', ')', ':', 'ax', '.', 'plot', '(', 'idx', '+', '1', ',', 'cumulative_sum_ratios', '[', 'idx', ']', ',', "'ro'", ',', 'label', '=', "'{0:0.3f} Explained variance ratio for '", "'first {1} components'", '.', 'format', '(', 'cumulative_sum_ratios', '[', 'idx', ']', ',', 'idx', '+', '1', ')', ',', 'markersize', '=', '4', ',', 'markeredgewidth', '=', '4', ')', 'ax', '.', 'axhline', '(', 'cumulative_sum_ratios', '[', 'idx', ']', ',', 'linestyle', '=', "':'", ',', 'lw', '=', '3', ',', 'color', '=', "'black'", ')', 'ax', '.', 'tick_params', '(', 'labelsize', '=', 'text_fontsize', ')', 'ax', '.', 'legend', '(', 'loc', '=', '"best"', ',', 'fontsize', '=', 'text_fontsize', ')', 'return', 'ax']
Plots PCA components' explained variance ratios. (new in v0.2.2)

    Args:
        clf: PCA instance that has the ``explained_variance_ratio_``
            attribute.

        title (string, optional): Title of the generated plot. Defaults to
            "PCA Component Explained Variances"

        target_explained_variance (float, optional): Looks for the minimum
            number of principal components that satisfies this value and
            emphasizes it on the plot. Defaults to 0.75

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> pca = PCA(random_state=1)
        >>> pca.fit(X)
        >>> skplt.decomposition.plot_pca_component_variance(pca)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_pca_component_variance.png
           :align: center
           :alt: PCA Component variances
['Plots', 'PCA', 'components', 'explained', 'variance', 'ratios', '.', '(', 'new', 'in', 'v0', '.', '2', '.', '2', ')']
train
https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/decomposition.py#L15-L94
9,069
ConsenSys/mythril-classic
mythril/ethereum/interface/leveldb/client.py
LevelDBReader._get_account
def _get_account(self, address):
        """Get account by address.

        :param address:
        :return:
        """
        state = self._get_head_state()
        account_address = binascii.a2b_hex(utils.remove_0x_head(address))
        return state.get_and_cache_account(account_address)
python
def _get_account(self, address):
        """Get account by address.

        :param address:
        :return:
        """
        state = self._get_head_state()
        account_address = binascii.a2b_hex(utils.remove_0x_head(address))
        return state.get_and_cache_account(account_address)
['def', '_get_account', '(', 'self', ',', 'address', ')', ':', 'state', '=', 'self', '.', '_get_head_state', '(', ')', 'account_address', '=', 'binascii', '.', 'a2b_hex', '(', 'utils', '.', 'remove_0x_head', '(', 'address', ')', ')', 'return', 'state', '.', 'get_and_cache_account', '(', 'account_address', ')']
Get account by address.

        :param address:
        :return:
['Get', 'account', 'by', 'address', '.']
train
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/ethereum/interface/leveldb/client.py#L68-L76
9,070
google/python_portpicker
src/portserver.py
_PortServerRequestHandler._handle_port_request
def _handle_port_request(self, client_data, writer):
        """Given a port request body, parse it and respond appropriately.

        Args:
            client_data: The request bytes from the client.
            writer: The asyncio Writer for the response to be written to.
        """
        try:
            pid = int(client_data)
        except ValueError as error:
            self._client_request_errors += 1
            log.warning('Could not parse request: %s', error)
            return

        log.info('Request on behalf of pid %d.', pid)
        log.info('cmdline: %s', _get_process_command_line(pid))

        if not _should_allocate_port(pid):
            self._denied_allocations += 1
            return

        port = self._port_pool.get_port_for_process(pid)
        if port > 0:
            self._total_allocations += 1
            writer.write('{:d}\n'.format(port).encode('utf-8'))
            log.debug('Allocated port %d to pid %d', port, pid)
        else:
            self._denied_allocations += 1
python
def _handle_port_request(self, client_data, writer):
        """Given a port request body, parse it and respond appropriately.

        Args:
            client_data: The request bytes from the client.
            writer: The asyncio Writer for the response to be written to.
        """
        try:
            pid = int(client_data)
        except ValueError as error:
            self._client_request_errors += 1
            log.warning('Could not parse request: %s', error)
            return

        log.info('Request on behalf of pid %d.', pid)
        log.info('cmdline: %s', _get_process_command_line(pid))

        if not _should_allocate_port(pid):
            self._denied_allocations += 1
            return

        port = self._port_pool.get_port_for_process(pid)
        if port > 0:
            self._total_allocations += 1
            writer.write('{:d}\n'.format(port).encode('utf-8'))
            log.debug('Allocated port %d to pid %d', port, pid)
        else:
            self._denied_allocations += 1
['def', '_handle_port_request', '(', 'self', ',', 'client_data', ',', 'writer', ')', ':', 'try', ':', 'pid', '=', 'int', '(', 'client_data', ')', 'except', 'ValueError', 'as', 'error', ':', 'self', '.', '_client_request_errors', '+=', '1', 'log', '.', 'warning', '(', "'Could not parse request: %s'", ',', 'error', ')', 'return', 'log', '.', 'info', '(', "'Request on behalf of pid %d.'", ',', 'pid', ')', 'log', '.', 'info', '(', "'cmdline: %s'", ',', '_get_process_command_line', '(', 'pid', ')', ')', 'if', 'not', '_should_allocate_port', '(', 'pid', ')', ':', 'self', '.', '_denied_allocations', '+=', '1', 'return', 'port', '=', 'self', '.', '_port_pool', '.', 'get_port_for_process', '(', 'pid', ')', 'if', 'port', '>', '0', ':', 'self', '.', '_total_allocations', '+=', '1', 'writer', '.', 'write', '(', "'{:d}\\n'", '.', 'format', '(', 'port', ')', '.', 'encode', '(', "'utf-8'", ')', ')', 'log', '.', 'debug', '(', "'Allocated port %d to pid %d'", ',', 'port', ',', 'pid', ')', 'else', ':', 'self', '.', '_denied_allocations', '+=', '1']
Given a port request body, parse it and respond appropriately.

        Args:
            client_data: The request bytes from the client.
            writer: The asyncio Writer for the response to be written to.
['Given', 'a', 'port', 'request', 'body', 'parse', 'it', 'and', 'respond', 'appropriately', '.']
train
https://github.com/google/python_portpicker/blob/f737189ea7a2d4b97048a2f4e37609e293b03546/src/portserver.py#L236-L263
9,071
google/apitools
apitools/base/py/base_api.py
BaseApiService.__ConstructQueryParams
def __ConstructQueryParams(self, query_params, request, global_params):
        """Construct a dictionary of query parameters for this request."""
        # First, handle the global params.
        global_params = self.__CombineGlobalParams(
            global_params, self.__client.global_params)
        global_param_names = util.MapParamNames(
            [x.name for x in self.__client.params_type.all_fields()],
            self.__client.params_type)
        global_params_type = type(global_params)
        query_info = dict(
            (param, self.__FinalUrlValue(getattr(global_params, param),
                                         getattr(global_params_type, param)))
            for param in global_param_names)
        # Next, add the query params.
        query_param_names = util.MapParamNames(query_params, type(request))
        request_type = type(request)
        query_info.update(
            (param,
             self.__FinalUrlValue(getattr(request, param, None),
                                  getattr(request_type, param)))
            for param in query_param_names)
        query_info = dict((k, v) for k, v in query_info.items()
                          if v is not None)
        query_info = self.__EncodePrettyPrint(query_info)
        query_info = util.MapRequestParams(query_info, type(request))
        return query_info
python
def __ConstructQueryParams(self, query_params, request, global_params):
        """Construct a dictionary of query parameters for this request."""
        # First, handle the global params.
        global_params = self.__CombineGlobalParams(
            global_params, self.__client.global_params)
        global_param_names = util.MapParamNames(
            [x.name for x in self.__client.params_type.all_fields()],
            self.__client.params_type)
        global_params_type = type(global_params)
        query_info = dict(
            (param, self.__FinalUrlValue(getattr(global_params, param),
                                         getattr(global_params_type, param)))
            for param in global_param_names)
        # Next, add the query params.
        query_param_names = util.MapParamNames(query_params, type(request))
        request_type = type(request)
        query_info.update(
            (param,
             self.__FinalUrlValue(getattr(request, param, None),
                                  getattr(request_type, param)))
            for param in query_param_names)
        query_info = dict((k, v) for k, v in query_info.items()
                          if v is not None)
        query_info = self.__EncodePrettyPrint(query_info)
        query_info = util.MapRequestParams(query_info, type(request))
        return query_info
['def', '__ConstructQueryParams', '(', 'self', ',', 'query_params', ',', 'request', ',', 'global_params', ')', ':', '# First, handle the global params.', 'global_params', '=', 'self', '.', '__CombineGlobalParams', '(', 'global_params', ',', 'self', '.', '__client', '.', 'global_params', ')', 'global_param_names', '=', 'util', '.', 'MapParamNames', '(', '[', 'x', '.', 'name', 'for', 'x', 'in', 'self', '.', '__client', '.', 'params_type', '.', 'all_fields', '(', ')', ']', ',', 'self', '.', '__client', '.', 'params_type', ')', 'global_params_type', '=', 'type', '(', 'global_params', ')', 'query_info', '=', 'dict', '(', '(', 'param', ',', 'self', '.', '__FinalUrlValue', '(', 'getattr', '(', 'global_params', ',', 'param', ')', ',', 'getattr', '(', 'global_params_type', ',', 'param', ')', ')', ')', 'for', 'param', 'in', 'global_param_names', ')', '# Next, add the query params.', 'query_param_names', '=', 'util', '.', 'MapParamNames', '(', 'query_params', ',', 'type', '(', 'request', ')', ')', 'request_type', '=', 'type', '(', 'request', ')', 'query_info', '.', 'update', '(', '(', 'param', ',', 'self', '.', '__FinalUrlValue', '(', 'getattr', '(', 'request', ',', 'param', ',', 'None', ')', ',', 'getattr', '(', 'request_type', ',', 'param', ')', ')', ')', 'for', 'param', 'in', 'query_param_names', ')', 'query_info', '=', 'dict', '(', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'query_info', '.', 'items', '(', ')', 'if', 'v', 'is', 'not', 'None', ')', 'query_info', '=', 'self', '.', '__EncodePrettyPrint', '(', 'query_info', ')', 'query_info', '=', 'util', '.', 'MapRequestParams', '(', 'query_info', ',', 'type', '(', 'request', ')', ')', 'return', 'query_info']
Construct a dictionary of query parameters for this request.
['Construct', 'a', 'dictionary', 'of', 'query', 'parameters', 'for', 'this', 'request', '.']
train
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/base_api.py#L547-L573
9,072
jilljenn/tryalgo
tryalgo/dfs.py
dfs_tree
def dfs_tree(graph, start=0):
    """DFS, build DFS tree in unweighted graph

    :param graph: directed graph in listlist or listdict format
    :param int start: source vertex
    :returns: precedence table
    :complexity: `O(|V|+|E|)`
    """
    to_visit = [start]
    prec = [None] * len(graph)
    while to_visit:              # an empty queue equals False
        node = to_visit.pop()
        for neighbor in graph[node]:
            if prec[neighbor] is None:
                prec[neighbor] = node
                to_visit.append(neighbor)
    return prec
python
def dfs_tree(graph, start=0):
    """DFS, build DFS tree in unweighted graph

    :param graph: directed graph in listlist or listdict format
    :param int start: source vertex
    :returns: precedence table
    :complexity: `O(|V|+|E|)`
    """
    to_visit = [start]
    prec = [None] * len(graph)
    while to_visit:              # an empty queue equals False
        node = to_visit.pop()
        for neighbor in graph[node]:
            if prec[neighbor] is None:
                prec[neighbor] = node
                to_visit.append(neighbor)
    return prec
['def', 'dfs_tree', '(', 'graph', ',', 'start', '=', '0', ')', ':', 'to_visit', '=', '[', 'start', ']', 'prec', '=', '[', 'None', ']', '*', 'len', '(', 'graph', ')', 'while', 'to_visit', ':', '# an empty queue equals False', 'node', '=', 'to_visit', '.', 'pop', '(', ')', 'for', 'neighbor', 'in', 'graph', '[', 'node', ']', ':', 'if', 'prec', '[', 'neighbor', ']', 'is', 'None', ':', 'prec', '[', 'neighbor', ']', '=', 'node', 'to_visit', '.', 'append', '(', 'neighbor', ')', 'return', 'prec']
DFS, build DFS tree in unweighted graph

    :param graph: directed graph in listlist or listdict format
    :param int start: source vertex
    :returns: precedence table
    :complexity: `O(|V|+|E|)`
['DFS', 'build', 'DFS', 'tree', 'in', 'unweighted', 'graph']
train
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/dfs.py#L46-L62
9,073
Microsoft/nni
examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py
CustomerTuner.generate_new_id
def generate_new_id(self):
        """
        generate new id and event hook for new Individual
        """
        self.events.append(Event())
        indiv_id = self.indiv_counter
        self.indiv_counter += 1
        return indiv_id
python
def generate_new_id(self):
        """
        generate new id and event hook for new Individual
        """
        self.events.append(Event())
        indiv_id = self.indiv_counter
        self.indiv_counter += 1
        return indiv_id
['def', 'generate_new_id', '(', 'self', ')', ':', 'self', '.', 'events', '.', 'append', '(', 'Event', '(', ')', ')', 'indiv_id', '=', 'self', '.', 'indiv_counter', 'self', '.', 'indiv_counter', '+=', '1', 'return', 'indiv_id']
generate new id and event hook for new Individual
['generate', 'new', 'id', 'and', 'event', 'hook', 'for', 'new', 'Individual']
train
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/tuners/weight_sharing/ga_customer_tuner/customer_tuner.py#L84-L91
9,074
salesking/salesking_python_sdk
salesking/utils/loaders.py
import_schema_to_json
def import_schema_to_json(name, store_it=False):
    """
    loads the given schema name from the local filesystem
    and puts it into a store if it is not in there yet

    :param name:
    :param store_it: if set to True, stores the contents
    :return:
    """
    schema_file = u"%s.json" % name
    file_path = os.path.join(SCHEMA_ROOT, schema_file)
    log.debug(u"trying to load %s " % file_path)
    schema = None
    try:
        schema_file = open(file_path, "r").read()
    except IOError, e:
        log.error(u"file not found %s" % e)
        msg = "Could not find schema file. %s" % file_path
        raise SalesKingException("SCHEMA_NOT_FOUND", msg)
    schema = json.loads(schema_file)
    if schema is None:
        msg = "loading failed foo %s" % name
        raise SalesKingException("SCHEMA_NOT_FOUND", msg)
    return schema
python
def import_schema_to_json(name, store_it=False):
    """
    loads the given schema name from the local filesystem
    and puts it into a store if it is not in there yet

    :param name:
    :param store_it: if set to True, stores the contents
    :return:
    """
    schema_file = u"%s.json" % name
    file_path = os.path.join(SCHEMA_ROOT, schema_file)
    log.debug(u"trying to load %s " % file_path)
    schema = None
    try:
        schema_file = open(file_path, "r").read()
    except IOError, e:
        log.error(u"file not found %s" % e)
        msg = "Could not find schema file. %s" % file_path
        raise SalesKingException("SCHEMA_NOT_FOUND", msg)
    schema = json.loads(schema_file)
    if schema is None:
        msg = "loading failed foo %s" % name
        raise SalesKingException("SCHEMA_NOT_FOUND", msg)
    return schema
['def', 'import_schema_to_json', '(', 'name', ',', 'store_it', '=', 'False', ')', ':', 'schema_file', '=', 'u"%s.json"', '%', 'name', 'file_path', '=', 'os', '.', 'path', '.', 'join', '(', 'SCHEMA_ROOT', ',', 'schema_file', ')', 'log', '.', 'debug', '(', 'u"trying to load %s "', '%', 'file_path', ')', 'schema', '=', 'None', 'try', ':', 'schema_file', '=', 'open', '(', 'file_path', ',', '"r"', ')', '.', 'read', '(', ')', 'except', 'IOError', ',', 'e', ':', 'log', '.', 'error', '(', 'u"file not found %s"', '%', 'e', ')', 'msg', '=', '"Could not find schema file. %s"', '%', 'file_path', 'raise', 'SalesKingException', '(', '"SCHEMA_NOT_FOUND"', ',', 'msg', ')', 'schema', '=', 'json', '.', 'loads', '(', 'schema_file', ')', 'if', 'schema', 'is', 'None', ':', 'msg', '=', '"loading failed foo %s"', '%', 'name', 'raise', 'SalesKingException', '(', '"SCHEMA_NOT_FOUND"', ',', 'msg', ')', 'return', 'schema']
loads the given schema name from the local filesystem
and puts it into a store if it is not in there yet
:param name:
:param store_it: if set to True, stores the contents
:return:
['loads', 'the', 'given', 'schema', 'name', 'from', 'the', 'local', 'filesystem', 'and', 'puts', 'it', 'into', 'a', 'store', 'if', 'it', 'is', 'not', 'in', 'there', 'yet', ':', 'param', 'name', ':', ':', 'param', 'store_it', ':', 'if', 'set', 'to', 'True', 'stores', 'the', 'contents', ':', 'return', ':']
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/utils/loaders.py#L159-L185
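The function above is Python 2 (note the `except IOError, e:` syntax). A minimal Python 3 sketch of the same load-and-fail-loudly pattern; SCHEMA_ROOT and the RuntimeError are stand-ins for illustration, not the SDK's own names:

import json
import os

SCHEMA_ROOT = "schemas"  # assumed location of the *.json schema files

def load_schema(name):
    # Build the path, read the file, and raise a clear error if anything fails.
    file_path = os.path.join(SCHEMA_ROOT, "%s.json" % name)
    try:
        with open(file_path, "r") as fh:
            return json.load(fh)
    except (OSError, json.JSONDecodeError) as exc:
        raise RuntimeError("Could not load schema %s: %s" % (file_path, exc))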
9,075
wummel/linkchecker
linkcheck/logger/html.py
HtmlLogger.end_output
def end_output (self, **kwargs):
    """Write end of checking info as HTML."""
    if self.has_part("stats"):
        self.write_stats()
    if self.has_part("outro"):
        self.write_outro()
    self.close_fileoutput()
python
def end_output (self, **kwargs):
    """Write end of checking info as HTML."""
    if self.has_part("stats"):
        self.write_stats()
    if self.has_part("outro"):
        self.write_outro()
    self.close_fileoutput()
['def', 'end_output', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', 'self', '.', 'has_part', '(', '"stats"', ')', ':', 'self', '.', 'write_stats', '(', ')', 'if', 'self', '.', 'has_part', '(', '"outro"', ')', ':', 'self', '.', 'write_outro', '(', ')', 'self', '.', 'close_fileoutput', '(', ')']
Write end of checking info as HTML.
['Write', 'end', 'of', 'checking', 'info', 'as', 'HTML', '.']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/html.py#L331-L337
9,076
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
BlobstoreZipInputReader.next
def next(self):
    """Returns the next input from this input reader as (ZipInfo, opener)
    tuple.

    Returns:
      The next input from this input reader, in the form of a 2-tuple.
      The first element of the tuple is a zipfile.ZipInfo object.
      The second element of the tuple is a zero-argument function that, when
      called, returns the complete body of the file.
    """
    if not self._zip:
        self._zip = zipfile.ZipFile(self._reader(self._blob_key))
        # Get a list of entries, reversed so we can pop entries off in order
        self._entries = self._zip.infolist()[self._start_index:self._end_index]
        self._entries.reverse()
    if not self._entries:
        raise StopIteration()
    entry = self._entries.pop()
    self._start_index += 1
    return (entry, lambda: self._read(entry))
python
def next(self):
    """Returns the next input from this input reader as (ZipInfo, opener)
    tuple.

    Returns:
      The next input from this input reader, in the form of a 2-tuple.
      The first element of the tuple is a zipfile.ZipInfo object.
      The second element of the tuple is a zero-argument function that, when
      called, returns the complete body of the file.
    """
    if not self._zip:
        self._zip = zipfile.ZipFile(self._reader(self._blob_key))
        # Get a list of entries, reversed so we can pop entries off in order
        self._entries = self._zip.infolist()[self._start_index:self._end_index]
        self._entries.reverse()
    if not self._entries:
        raise StopIteration()
    entry = self._entries.pop()
    self._start_index += 1
    return (entry, lambda: self._read(entry))
['def', 'next', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_zip', ':', 'self', '.', '_zip', '=', 'zipfile', '.', 'ZipFile', '(', 'self', '.', '_reader', '(', 'self', '.', '_blob_key', ')', ')', '# Get a list of entries, reversed so we can pop entries off in order', 'self', '.', '_entries', '=', 'self', '.', '_zip', '.', 'infolist', '(', ')', '[', 'self', '.', '_start_index', ':', 'self', '.', '_end_index', ']', 'self', '.', '_entries', '.', 'reverse', '(', ')', 'if', 'not', 'self', '.', '_entries', ':', 'raise', 'StopIteration', '(', ')', 'entry', '=', 'self', '.', '_entries', '.', 'pop', '(', ')', 'self', '.', '_start_index', '+=', '1', 'return', '(', 'entry', ',', 'lambda', ':', 'self', '.', '_read', '(', 'entry', ')', ')']
Returns the next input from this input reader as (ZipInfo, opener) tuple.

Returns:
  The next input from this input reader, in the form of a 2-tuple.
  The first element of the tuple is a zipfile.ZipInfo object.
  The second element of the tuple is a zero-argument function that, when
  called, returns the complete body of the file.
['Returns', 'the', 'next', 'input', 'from', 'this', 'input', 'reader', 'as', '(', 'ZipInfo', 'opener', ')', 'tuple', '.']
train
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L1476-L1494
9,077
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
simxGetInMessageInfo
def simxGetInMessageInfo(clientID, infoType):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    info = ct.c_int()
    return c_GetInMessageInfo(clientID, infoType, ct.byref(info)), info.value
python
def simxGetInMessageInfo(clientID, infoType):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    info = ct.c_int()
    return c_GetInMessageInfo(clientID, infoType, ct.byref(info)), info.value
['def', 'simxGetInMessageInfo', '(', 'clientID', ',', 'infoType', ')', ':', 'info', '=', 'ct', '.', 'c_int', '(', ')', 'return', 'c_GetInMessageInfo', '(', 'clientID', ',', 'infoType', ',', 'ct', '.', 'byref', '(', 'info', ')', ')', ',', 'info', '.', 'value']
Please have a look at the function description/documentation in the V-REP user manual
['Please', 'have', 'a', 'look', 'at', 'the', 'function', 'description', '/', 'documentation', 'in', 'the', 'V', '-', 'REP', 'user', 'manual']
train
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L1189-L1194
9,078
googleapis/google-cloud-python
core/google/cloud/operation.py
_compute_type_url
def _compute_type_url(klass, prefix=_GOOGLE_APIS_PREFIX):
    """Compute a type URL for a klass.

    :type klass: type
    :param klass: class to be used as a factory for the given type

    :type prefix: str
    :param prefix: URL prefix for the type

    :rtype: str
    :returns: the URL, prefixed as appropriate
    """
    name = klass.DESCRIPTOR.full_name
    return "%s/%s" % (prefix, name)
python
def _compute_type_url(klass, prefix=_GOOGLE_APIS_PREFIX):
    """Compute a type URL for a klass.

    :type klass: type
    :param klass: class to be used as a factory for the given type

    :type prefix: str
    :param prefix: URL prefix for the type

    :rtype: str
    :returns: the URL, prefixed as appropriate
    """
    name = klass.DESCRIPTOR.full_name
    return "%s/%s" % (prefix, name)
['def', '_compute_type_url', '(', 'klass', ',', 'prefix', '=', '_GOOGLE_APIS_PREFIX', ')', ':', 'name', '=', 'klass', '.', 'DESCRIPTOR', '.', 'full_name', 'return', '"%s/%s"', '%', '(', 'prefix', ',', 'name', ')']
Compute a type URL for a klass.

:type klass: type
:param klass: class to be used as a factory for the given type

:type prefix: str
:param prefix: URL prefix for the type

:rtype: str
:returns: the URL, prefixed as appropriate
['Compute', 'a', 'type', 'URL', 'for', 'a', 'klass', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/core/google/cloud/operation.py#L26-L39
9,079
nutechsoftware/alarmdecoder
alarmdecoder/decoder.py
AlarmDecoder._update_zone_bypass_status
def _update_zone_bypass_status(self, message=None, status=None, zone=None):
    """
    Uses the provided message to update the zone bypass state.

    :param message: message to use to update
    :type message: :py:class:`~alarmdecoder.messages.Message`
    :param status: bypass status, overrides message bits.
    :type status: bool
    :param zone: zone associated with bypass event
    :type zone: int

    :returns: dictionary {Zone:True|False,...}
        Zone can be None if LRR CID Bypass checking is disabled
        or we do not know what zones but know something is bypassed.
    """
    bypass_status = status
    if isinstance(message, Message):
        bypass_status = message.zone_bypassed

    if bypass_status is None:
        return

    old_bypass_status = self._bypass_status.get(zone, None)

    if bypass_status != old_bypass_status:
        if bypass_status == False and zone is None:
            self._bypass_status = {}
        else:
            self._bypass_status[zone] = bypass_status

        if old_bypass_status is not None or message is None or (old_bypass_status is None and bypass_status is True):
            self.on_bypass(status=bypass_status, zone=zone)

    return bypass_status
python
def _update_zone_bypass_status(self, message=None, status=None, zone=None):
    """
    Uses the provided message to update the zone bypass state.

    :param message: message to use to update
    :type message: :py:class:`~alarmdecoder.messages.Message`
    :param status: bypass status, overrides message bits.
    :type status: bool
    :param zone: zone associated with bypass event
    :type zone: int

    :returns: dictionary {Zone:True|False,...}
        Zone can be None if LRR CID Bypass checking is disabled
        or we do not know what zones but know something is bypassed.
    """
    bypass_status = status
    if isinstance(message, Message):
        bypass_status = message.zone_bypassed

    if bypass_status is None:
        return

    old_bypass_status = self._bypass_status.get(zone, None)

    if bypass_status != old_bypass_status:
        if bypass_status == False and zone is None:
            self._bypass_status = {}
        else:
            self._bypass_status[zone] = bypass_status

        if old_bypass_status is not None or message is None or (old_bypass_status is None and bypass_status is True):
            self.on_bypass(status=bypass_status, zone=zone)

    return bypass_status
['def', '_update_zone_bypass_status', '(', 'self', ',', 'message', '=', 'None', ',', 'status', '=', 'None', ',', 'zone', '=', 'None', ')', ':', 'bypass_status', '=', 'status', 'if', 'isinstance', '(', 'message', ',', 'Message', ')', ':', 'bypass_status', '=', 'message', '.', 'zone_bypassed', 'if', 'bypass_status', 'is', 'None', ':', 'return', 'old_bypass_status', '=', 'self', '.', '_bypass_status', '.', 'get', '(', 'zone', ',', 'None', ')', 'if', 'bypass_status', '!=', 'old_bypass_status', ':', 'if', 'bypass_status', '==', 'False', 'and', 'zone', 'is', 'None', ':', 'self', '.', '_bypass_status', '=', '{', '}', 'else', ':', 'self', '.', '_bypass_status', '[', 'zone', ']', '=', 'bypass_status', 'if', 'old_bypass_status', 'is', 'not', 'None', 'or', 'message', 'is', 'None', 'or', '(', 'old_bypass_status', 'is', 'None', 'and', 'bypass_status', 'is', 'True', ')', ':', 'self', '.', 'on_bypass', '(', 'status', '=', 'bypass_status', ',', 'zone', '=', 'zone', ')', 'return', 'bypass_status']
Uses the provided message to update the zone bypass state.

:param message: message to use to update
:type message: :py:class:`~alarmdecoder.messages.Message`
:param status: bypass status, overrides message bits.
:type status: bool
:param zone: zone associated with bypass event
:type zone: int

:returns: dictionary {Zone:True|False,...}
    Zone can be None if LRR CID Bypass checking is disabled
    or we do not know what zones but know something is bypassed.
['Uses', 'the', 'provided', 'message', 'to', 'update', 'the', 'zone', 'bypass', 'state', '.']
train
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/decoder.py#L713-L746
9,080
wandb/client
wandb/vendor/prompt_toolkit/key_binding/input_processor.py
KeyPressEvent.append_to_arg_count
def append_to_arg_count(self, data):
    """
    Add digit to the input argument.

    :param data: the typed digit as string
    """
    assert data in '-0123456789'
    current = self._arg

    if data == '-':
        assert current is None or current == '-'
        result = data
    elif current is None:
        result = data
    else:
        result = "%s%s" % (current, data)

    self.input_processor.arg = result
python
def append_to_arg_count(self, data):
    """
    Add digit to the input argument.

    :param data: the typed digit as string
    """
    assert data in '-0123456789'
    current = self._arg

    if data == '-':
        assert current is None or current == '-'
        result = data
    elif current is None:
        result = data
    else:
        result = "%s%s" % (current, data)

    self.input_processor.arg = result
['def', 'append_to_arg_count', '(', 'self', ',', 'data', ')', ':', 'assert', 'data', 'in', "'-0123456789'", 'current', '=', 'self', '.', '_arg', 'if', 'data', '==', "'-'", ':', 'assert', 'current', 'is', 'None', 'or', 'current', '==', "'-'", 'result', '=', 'data', 'elif', 'current', 'is', 'None', ':', 'result', '=', 'data', 'else', ':', 'result', '=', '"%s%s"', '%', '(', 'current', ',', 'data', ')', 'self', '.', 'input_processor', '.', 'arg', '=', 'result']
Add digit to the input argument.

:param data: the typed digit as string
['Add', 'digit', 'to', 'the', 'input', 'argument', '.']
train
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/input_processor.py#L355-L372
9,081
edelbluth/blackred
src/blackred/blackred.py
create_salt
def create_salt(length: int=128) -> bytes:
    """
    Create a new salt

    :param int length: How many bytes should the salt be long?
    :return: The salt
    :rtype: bytes
    """
    return b''.join(bytes([SystemRandom().randint(0, 255)]) for _ in range(length))
python
def create_salt(length: int=128) -> bytes:
    """
    Create a new salt

    :param int length: How many bytes should the salt be long?
    :return: The salt
    :rtype: bytes
    """
    return b''.join(bytes([SystemRandom().randint(0, 255)]) for _ in range(length))
['def', 'create_salt', '(', 'length', ':', 'int', '=', '128', ')', '->', 'bytes', ':', 'return', "b''", '.', 'join', '(', 'bytes', '(', '[', 'SystemRandom', '(', ')', '.', 'randint', '(', '0', ',', '255', ')', ']', ')', 'for', '_', 'in', 'range', '(', 'length', ')', ')']
Create a new salt

:param int length: How many bytes should the salt be long?
:return: The salt
:rtype: bytes
['Create', 'a', 'new', 'salt']
train
https://github.com/edelbluth/blackred/blob/57a655e4d4eca60ce16e7b338079355049a87b49/src/blackred/blackred.py#L28-L36
9,082
ransford/sllurp
sllurp/llrp.py
LLRPClient.get_tx_power
def get_tx_power(self, tx_power):
    """Validates tx_power against self.tx_power_table

    @param tx_power: index into the self.tx_power_table list; if tx_power
        is 0 then the max power from self.tx_power_table
    @return: a dict {antenna: (tx_power_index, power_dbm)} from
        self.tx_power_table
    @raise: LLRPError if the requested index is out of range
    """
    if not self.tx_power_table:
        logger.warn('get_tx_power(): tx_power_table is empty!')
        return {}

    logger.debug('requested tx_power: %s', tx_power)
    min_power = self.tx_power_table.index(min(self.tx_power_table))
    max_power = self.tx_power_table.index(max(self.tx_power_table))

    ret = {}
    for antid, tx_power in tx_power.items():
        if tx_power == 0:
            # tx_power = 0 means max power
            max_power_dbm = max(self.tx_power_table)
            tx_power = self.tx_power_table.index(max_power_dbm)
            ret[antid] = (tx_power, max_power_dbm)

        try:
            power_dbm = self.tx_power_table[tx_power]
            ret[antid] = (tx_power, power_dbm)
        except IndexError:
            raise LLRPError('Invalid tx_power for antenna {}: '
                            'requested={}, min_available={}, '
                            'max_available={}'.format(
                                antid, self.tx_power, min_power, max_power))
    return ret
python
def get_tx_power(self, tx_power):
    """Validates tx_power against self.tx_power_table

    @param tx_power: index into the self.tx_power_table list; if tx_power
        is 0 then the max power from self.tx_power_table
    @return: a dict {antenna: (tx_power_index, power_dbm)} from
        self.tx_power_table
    @raise: LLRPError if the requested index is out of range
    """
    if not self.tx_power_table:
        logger.warn('get_tx_power(): tx_power_table is empty!')
        return {}

    logger.debug('requested tx_power: %s', tx_power)
    min_power = self.tx_power_table.index(min(self.tx_power_table))
    max_power = self.tx_power_table.index(max(self.tx_power_table))

    ret = {}
    for antid, tx_power in tx_power.items():
        if tx_power == 0:
            # tx_power = 0 means max power
            max_power_dbm = max(self.tx_power_table)
            tx_power = self.tx_power_table.index(max_power_dbm)
            ret[antid] = (tx_power, max_power_dbm)

        try:
            power_dbm = self.tx_power_table[tx_power]
            ret[antid] = (tx_power, power_dbm)
        except IndexError:
            raise LLRPError('Invalid tx_power for antenna {}: '
                            'requested={}, min_available={}, '
                            'max_available={}'.format(
                                antid, self.tx_power, min_power, max_power))
    return ret
['def', 'get_tx_power', '(', 'self', ',', 'tx_power', ')', ':', 'if', 'not', 'self', '.', 'tx_power_table', ':', 'logger', '.', 'warn', '(', "'get_tx_power(): tx_power_table is empty!'", ')', 'return', '{', '}', 'logger', '.', 'debug', '(', "'requested tx_power: %s'", ',', 'tx_power', ')', 'min_power', '=', 'self', '.', 'tx_power_table', '.', 'index', '(', 'min', '(', 'self', '.', 'tx_power_table', ')', ')', 'max_power', '=', 'self', '.', 'tx_power_table', '.', 'index', '(', 'max', '(', 'self', '.', 'tx_power_table', ')', ')', 'ret', '=', '{', '}', 'for', 'antid', ',', 'tx_power', 'in', 'tx_power', '.', 'items', '(', ')', ':', 'if', 'tx_power', '==', '0', ':', '# tx_power = 0 means max power', 'max_power_dbm', '=', 'max', '(', 'self', '.', 'tx_power_table', ')', 'tx_power', '=', 'self', '.', 'tx_power_table', '.', 'index', '(', 'max_power_dbm', ')', 'ret', '[', 'antid', ']', '=', '(', 'tx_power', ',', 'max_power_dbm', ')', 'try', ':', 'power_dbm', '=', 'self', '.', 'tx_power_table', '[', 'tx_power', ']', 'ret', '[', 'antid', ']', '=', '(', 'tx_power', ',', 'power_dbm', ')', 'except', 'IndexError', ':', 'raise', 'LLRPError', '(', "'Invalid tx_power for antenna {}: '", "'requested={}, min_available={}, '", "'max_available={}'", '.', 'format', '(', 'antid', ',', 'self', '.', 'tx_power', ',', 'min_power', ',', 'max_power', ')', ')', 'return', 'ret']
Validates tx_power against self.tx_power_table

@param tx_power: index into the self.tx_power_table list; if tx_power
    is 0 then the max power from self.tx_power_table
@return: a dict {antenna: (tx_power_index, power_dbm)} from
    self.tx_power_table
@raise: LLRPError if the requested index is out of range
['Validates', 'tx_power', 'against', 'self', '.', 'tx_power_table']
train
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1165-L1199
9,083
ThreatConnect-Inc/tcex
tcex/tcex_notification_v2.py
TcExNotificationV2.send
def send(self, message):
    """Send our message

    Args:
        message (str): The message to be sent.

    Returns:
        requests.models.Response: The response from the request.
    """
    body = {
        'notificationType': self._notification_type,
        'priority': self._priority,
        'isOrganization': self._is_organization,
        'message': message,
    }

    if self._recipients:
        body['recipients'] = self._recipients

    self._tcex.log.debug('notification body: {}'.format(json.dumps(body)))

    # create our tcex resource
    resource = resource = self._tcex.resource('Notification')
    resource.http_method = 'POST'
    resource.body = json.dumps(body)

    results = resource.request()  # do the request

    if results.get('response').status_code == 200:
        # everything worked
        response = results.get('response').json()
    elif results.get('response').status_code == 400:
        # failed..but known... user doesn't exist
        # just return and let calling app handle it
        err = 'Failed to send notification ({})'.format(results.get('response').text)
        self._tcex.log.error(err)
        response = results.get('response').json()
    else:
        # somekind of unknown error...raise
        err = 'Failed to send notification ({})'.format(results.get('response').text)
        self._tcex.log.error(err)
        raise RuntimeError(err)

    return response
python
def send(self, message):
    """Send our message

    Args:
        message (str): The message to be sent.

    Returns:
        requests.models.Response: The response from the request.
    """
    body = {
        'notificationType': self._notification_type,
        'priority': self._priority,
        'isOrganization': self._is_organization,
        'message': message,
    }

    if self._recipients:
        body['recipients'] = self._recipients

    self._tcex.log.debug('notification body: {}'.format(json.dumps(body)))

    # create our tcex resource
    resource = resource = self._tcex.resource('Notification')
    resource.http_method = 'POST'
    resource.body = json.dumps(body)

    results = resource.request()  # do the request

    if results.get('response').status_code == 200:
        # everything worked
        response = results.get('response').json()
    elif results.get('response').status_code == 400:
        # failed..but known... user doesn't exist
        # just return and let calling app handle it
        err = 'Failed to send notification ({})'.format(results.get('response').text)
        self._tcex.log.error(err)
        response = results.get('response').json()
    else:
        # somekind of unknown error...raise
        err = 'Failed to send notification ({})'.format(results.get('response').text)
        self._tcex.log.error(err)
        raise RuntimeError(err)

    return response
['def', 'send', '(', 'self', ',', 'message', ')', ':', 'body', '=', '{', "'notificationType'", ':', 'self', '.', '_notification_type', ',', "'priority'", ':', 'self', '.', '_priority', ',', "'isOrganization'", ':', 'self', '.', '_is_organization', ',', "'message'", ':', 'message', ',', '}', 'if', 'self', '.', '_recipients', ':', 'body', '[', "'recipients'", ']', '=', 'self', '.', '_recipients', 'self', '.', '_tcex', '.', 'log', '.', 'debug', '(', "'notification body: {}'", '.', 'format', '(', 'json', '.', 'dumps', '(', 'body', ')', ')', ')', '# create our tcex resource', 'resource', '=', 'resource', '=', 'self', '.', '_tcex', '.', 'resource', '(', "'Notification'", ')', 'resource', '.', 'http_method', '=', "'POST'", 'resource', '.', 'body', '=', 'json', '.', 'dumps', '(', 'body', ')', 'results', '=', 'resource', '.', 'request', '(', ')', '# do the request', 'if', 'results', '.', 'get', '(', "'response'", ')', '.', 'status_code', '==', '200', ':', '# everything worked', 'response', '=', 'results', '.', 'get', '(', "'response'", ')', '.', 'json', '(', ')', 'elif', 'results', '.', 'get', '(', "'response'", ')', '.', 'status_code', '==', '400', ':', "# failed..but known... user doesn't exist", '# just return and let calling app handle it', 'err', '=', "'Failed to send notification ({})'", '.', 'format', '(', 'results', '.', 'get', '(', "'response'", ')', '.', 'text', ')', 'self', '.', '_tcex', '.', 'log', '.', 'error', '(', 'err', ')', 'response', '=', 'results', '.', 'get', '(', "'response'", ')', '.', 'json', '(', ')', 'else', ':', '# somekind of unknown error...raise', 'err', '=', "'Failed to send notification ({})'", '.', 'format', '(', 'results', '.', 'get', '(', "'response'", ')', '.', 'text', ')', 'self', '.', '_tcex', '.', 'log', '.', 'error', '(', 'err', ')', 'raise', 'RuntimeError', '(', 'err', ')', 'return', 'response']
Send our message

Args:
    message (str): The message to be sent.

Returns:
    requests.models.Response: The response from the request.
['Send', 'our', 'message']
train
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_notification_v2.py#L63-L105
9,084
google/python-adb
adb/adb_commands.py
AdbCommands.Push
def Push(self, source_file, device_filename, mtime='0', timeout_ms=None,
         progress_callback=None, st_mode=None):
    """Push a file or directory to the device.

    Args:
      source_file: Either a filename, a directory or file-like object to push
                   to the device.
      device_filename: Destination on the device to write to.
      mtime: Optional, modification time to set on the file.
      timeout_ms: Expected timeout for any part of the push.
      st_mode: stat mode for filename
      progress_callback: callback method that accepts filename, bytes_written
                         and total_bytes, total_bytes will be -1 for file-like
                         objects
    """
    if isinstance(source_file, str):
        if os.path.isdir(source_file):
            self.Shell("mkdir " + device_filename)
            for f in os.listdir(source_file):
                self.Push(os.path.join(source_file, f),
                          device_filename + '/' + f,
                          progress_callback=progress_callback)
            return
        source_file = open(source_file, "rb")

    with source_file:
        connection = self.protocol_handler.Open(
            self._handle, destination=b'sync:', timeout_ms=timeout_ms)
        kwargs = {}
        if st_mode is not None:
            kwargs['st_mode'] = st_mode
        self.filesync_handler.Push(connection, source_file, device_filename,
                                   mtime=int(mtime),
                                   progress_callback=progress_callback,
                                   **kwargs)
    connection.Close()
python
def Push(self, source_file, device_filename, mtime='0', timeout_ms=None,
         progress_callback=None, st_mode=None):
    """Push a file or directory to the device.

    Args:
      source_file: Either a filename, a directory or file-like object to push
                   to the device.
      device_filename: Destination on the device to write to.
      mtime: Optional, modification time to set on the file.
      timeout_ms: Expected timeout for any part of the push.
      st_mode: stat mode for filename
      progress_callback: callback method that accepts filename, bytes_written
                         and total_bytes, total_bytes will be -1 for file-like
                         objects
    """
    if isinstance(source_file, str):
        if os.path.isdir(source_file):
            self.Shell("mkdir " + device_filename)
            for f in os.listdir(source_file):
                self.Push(os.path.join(source_file, f),
                          device_filename + '/' + f,
                          progress_callback=progress_callback)
            return
        source_file = open(source_file, "rb")

    with source_file:
        connection = self.protocol_handler.Open(
            self._handle, destination=b'sync:', timeout_ms=timeout_ms)
        kwargs = {}
        if st_mode is not None:
            kwargs['st_mode'] = st_mode
        self.filesync_handler.Push(connection, source_file, device_filename,
                                   mtime=int(mtime),
                                   progress_callback=progress_callback,
                                   **kwargs)
    connection.Close()
['def', 'Push', '(', 'self', ',', 'source_file', ',', 'device_filename', ',', 'mtime', '=', "'0'", ',', 'timeout_ms', '=', 'None', ',', 'progress_callback', '=', 'None', ',', 'st_mode', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'source_file', ',', 'str', ')', ':', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'source_file', ')', ':', 'self', '.', 'Shell', '(', '"mkdir "', '+', 'device_filename', ')', 'for', 'f', 'in', 'os', '.', 'listdir', '(', 'source_file', ')', ':', 'self', '.', 'Push', '(', 'os', '.', 'path', '.', 'join', '(', 'source_file', ',', 'f', ')', ',', 'device_filename', '+', "'/'", '+', 'f', ',', 'progress_callback', '=', 'progress_callback', ')', 'return', 'source_file', '=', 'open', '(', 'source_file', ',', '"rb"', ')', 'with', 'source_file', ':', 'connection', '=', 'self', '.', 'protocol_handler', '.', 'Open', '(', 'self', '.', '_handle', ',', 'destination', '=', "b'sync:'", ',', 'timeout_ms', '=', 'timeout_ms', ')', 'kwargs', '=', '{', '}', 'if', 'st_mode', 'is', 'not', 'None', ':', 'kwargs', '[', "'st_mode'", ']', '=', 'st_mode', 'self', '.', 'filesync_handler', '.', 'Push', '(', 'connection', ',', 'source_file', ',', 'device_filename', ',', 'mtime', '=', 'int', '(', 'mtime', ')', ',', 'progress_callback', '=', 'progress_callback', ',', '*', '*', 'kwargs', ')', 'connection', '.', 'Close', '(', ')']
Push a file or directory to the device.

Args:
  source_file: Either a filename, a directory or file-like object to push to
               the device.
  device_filename: Destination on the device to write to.
  mtime: Optional, modification time to set on the file.
  timeout_ms: Expected timeout for any part of the push.
  st_mode: stat mode for filename
  progress_callback: callback method that accepts filename, bytes_written and
                     total_bytes, total_bytes will be -1 for file-like objects
['Push', 'a', 'file', 'or', 'directory', 'to', 'the', 'device', '.']
train
https://github.com/google/python-adb/blob/d9b94b2dda555c14674c19806debb8449c0e9652/adb/adb_commands.py#L250-L281
9,085
StanfordVL/robosuite
robosuite/devices/keyboard.py
Keyboard.on_press
def on_press(self, window, key, scancode, action, mods):
    """
    Key handler for key presses.
    """

    # controls for moving position
    if key == glfw.KEY_W:
        self.pos[0] -= self._pos_step  # dec x
    elif key == glfw.KEY_S:
        self.pos[0] += self._pos_step  # inc x
    elif key == glfw.KEY_A:
        self.pos[1] -= self._pos_step  # dec y
    elif key == glfw.KEY_D:
        self.pos[1] += self._pos_step  # inc y
    elif key == glfw.KEY_F:
        self.pos[2] -= self._pos_step  # dec z
    elif key == glfw.KEY_R:
        self.pos[2] += self._pos_step  # inc z

    # controls for moving orientation
    elif key == glfw.KEY_Z:
        drot = rotation_matrix(angle=0.1, direction=[1., 0., 0.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates x
    elif key == glfw.KEY_X:
        drot = rotation_matrix(angle=-0.1, direction=[1., 0., 0.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates x
    elif key == glfw.KEY_T:
        drot = rotation_matrix(angle=0.1, direction=[0., 1., 0.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates y
    elif key == glfw.KEY_G:
        drot = rotation_matrix(angle=-0.1, direction=[0., 1., 0.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates y
    elif key == glfw.KEY_C:
        drot = rotation_matrix(angle=0.1, direction=[0., 0., 1.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates z
    elif key == glfw.KEY_V:
        drot = rotation_matrix(angle=-0.1, direction=[0., 0., 1.])[:3, :3]
        self.rotation = self.rotation.dot(drot)
python
def on_press(self, window, key, scancode, action, mods):
    """
    Key handler for key presses.
    """

    # controls for moving position
    if key == glfw.KEY_W:
        self.pos[0] -= self._pos_step  # dec x
    elif key == glfw.KEY_S:
        self.pos[0] += self._pos_step  # inc x
    elif key == glfw.KEY_A:
        self.pos[1] -= self._pos_step  # dec y
    elif key == glfw.KEY_D:
        self.pos[1] += self._pos_step  # inc y
    elif key == glfw.KEY_F:
        self.pos[2] -= self._pos_step  # dec z
    elif key == glfw.KEY_R:
        self.pos[2] += self._pos_step  # inc z

    # controls for moving orientation
    elif key == glfw.KEY_Z:
        drot = rotation_matrix(angle=0.1, direction=[1., 0., 0.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates x
    elif key == glfw.KEY_X:
        drot = rotation_matrix(angle=-0.1, direction=[1., 0., 0.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates x
    elif key == glfw.KEY_T:
        drot = rotation_matrix(angle=0.1, direction=[0., 1., 0.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates y
    elif key == glfw.KEY_G:
        drot = rotation_matrix(angle=-0.1, direction=[0., 1., 0.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates y
    elif key == glfw.KEY_C:
        drot = rotation_matrix(angle=0.1, direction=[0., 0., 1.])[:3, :3]
        self.rotation = self.rotation.dot(drot)  # rotates z
    elif key == glfw.KEY_V:
        drot = rotation_matrix(angle=-0.1, direction=[0., 0., 1.])[:3, :3]
        self.rotation = self.rotation.dot(drot)
['def', 'on_press', '(', 'self', ',', 'window', ',', 'key', ',', 'scancode', ',', 'action', ',', 'mods', ')', ':', '# controls for moving position', 'if', 'key', '==', 'glfw', '.', 'KEY_W', ':', 'self', '.', 'pos', '[', '0', ']', '-=', 'self', '.', '_pos_step', '# dec x', 'elif', 'key', '==', 'glfw', '.', 'KEY_S', ':', 'self', '.', 'pos', '[', '0', ']', '+=', 'self', '.', '_pos_step', '# inc x', 'elif', 'key', '==', 'glfw', '.', 'KEY_A', ':', 'self', '.', 'pos', '[', '1', ']', '-=', 'self', '.', '_pos_step', '# dec y', 'elif', 'key', '==', 'glfw', '.', 'KEY_D', ':', 'self', '.', 'pos', '[', '1', ']', '+=', 'self', '.', '_pos_step', '# inc y', 'elif', 'key', '==', 'glfw', '.', 'KEY_F', ':', 'self', '.', 'pos', '[', '2', ']', '-=', 'self', '.', '_pos_step', '# dec z', 'elif', 'key', '==', 'glfw', '.', 'KEY_R', ':', 'self', '.', 'pos', '[', '2', ']', '+=', 'self', '.', '_pos_step', '# inc z', '# controls for moving orientation', 'elif', 'key', '==', 'glfw', '.', 'KEY_Z', ':', 'drot', '=', 'rotation_matrix', '(', 'angle', '=', '0.1', ',', 'direction', '=', '[', '1.', ',', '0.', ',', '0.', ']', ')', '[', ':', '3', ',', ':', '3', ']', 'self', '.', 'rotation', '=', 'self', '.', 'rotation', '.', 'dot', '(', 'drot', ')', '# rotates x', 'elif', 'key', '==', 'glfw', '.', 'KEY_X', ':', 'drot', '=', 'rotation_matrix', '(', 'angle', '=', '-', '0.1', ',', 'direction', '=', '[', '1.', ',', '0.', ',', '0.', ']', ')', '[', ':', '3', ',', ':', '3', ']', 'self', '.', 'rotation', '=', 'self', '.', 'rotation', '.', 'dot', '(', 'drot', ')', '# rotates x', 'elif', 'key', '==', 'glfw', '.', 'KEY_T', ':', 'drot', '=', 'rotation_matrix', '(', 'angle', '=', '0.1', ',', 'direction', '=', '[', '0.', ',', '1.', ',', '0.', ']', ')', '[', ':', '3', ',', ':', '3', ']', 'self', '.', 'rotation', '=', 'self', '.', 'rotation', '.', 'dot', '(', 'drot', ')', '# rotates y', 'elif', 'key', '==', 'glfw', '.', 'KEY_G', ':', 'drot', '=', 'rotation_matrix', '(', 'angle', '=', '-', '0.1', ',', 'direction', '=', '[', '0.', ',', '1.', ',', '0.', ']', ')', '[', ':', '3', ',', ':', '3', ']', 'self', '.', 'rotation', '=', 'self', '.', 'rotation', '.', 'dot', '(', 'drot', ')', '# rotates y', 'elif', 'key', '==', 'glfw', '.', 'KEY_C', ':', 'drot', '=', 'rotation_matrix', '(', 'angle', '=', '0.1', ',', 'direction', '=', '[', '0.', ',', '0.', ',', '1.', ']', ')', '[', ':', '3', ',', ':', '3', ']', 'self', '.', 'rotation', '=', 'self', '.', 'rotation', '.', 'dot', '(', 'drot', ')', '# rotates z', 'elif', 'key', '==', 'glfw', '.', 'KEY_V', ':', 'drot', '=', 'rotation_matrix', '(', 'angle', '=', '-', '0.1', ',', 'direction', '=', '[', '0.', ',', '0.', ',', '1.', ']', ')', '[', ':', '3', ',', ':', '3', ']', 'self', '.', 'rotation', '=', 'self', '.', 'rotation', '.', 'dot', '(', 'drot', ')']
Key handler for key presses.
['Key', 'handler', 'for', 'key', 'presses', '.']
train
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/devices/keyboard.py#L76-L113
9,086
galaxyproject/gravity
gravity/config_manager.py
ConfigManager.get_registered_configs
def get_registered_configs(self, instances=None):
    """
    Return the persisted values of all config files registered with the
    config manager.
    """
    configs = self.state.get('config_files', {})
    if instances is not None:
        for config_file, config in configs.items():
            if config['instance_name'] not in instances:
                configs.pop(config_file)
    return configs
python
def get_registered_configs(self, instances=None):
    """
    Return the persisted values of all config files registered with the
    config manager.
    """
    configs = self.state.get('config_files', {})
    if instances is not None:
        for config_file, config in configs.items():
            if config['instance_name'] not in instances:
                configs.pop(config_file)
    return configs
['def', 'get_registered_configs', '(', 'self', ',', 'instances', '=', 'None', ')', ':', 'configs', '=', 'self', '.', 'state', '.', 'get', '(', "'config_files'", ',', '{', '}', ')', 'if', 'instances', 'is', 'not', 'None', ':', 'for', 'config_file', ',', 'config', 'in', 'configs', '.', 'items', '(', ')', ':', 'if', 'config', '[', "'instance_name'", ']', 'not', 'in', 'instances', ':', 'configs', '.', 'pop', '(', 'config_file', ')', 'return', 'configs']
Return the persisted values of all config files registered with the config manager.
['Return', 'the', 'persisted', 'values', 'of', 'all', 'config', 'files', 'registered', 'with', 'the', 'config', 'manager', '.']
train
https://github.com/galaxyproject/gravity/blob/2f792497fc60874f881c9ef74a5905a286a9ce3e/gravity/config_manager.py#L311-L319
9,087
ronaldguillen/wave
wave/views.py
RestView.handle_exception
def handle_exception(self, exc):
    """
    Handle any exception that occurs, by returning an appropriate response,
    or re-raising the error.
    """
    if isinstance(exc, (exceptions.NotAuthenticated,
                        exceptions.AuthenticationFailed)):
        # WWW-Authenticate header for 401 responses, else coerce to 403
        auth_header = self.get_authenticate_header(self.request)

        if auth_header:
            exc.auth_header = auth_header
        else:
            exc.status_code = status.HTTP_403_FORBIDDEN

    exception_handler = self.settings.EXCEPTION_HANDLER

    context = self.get_exception_handler_context()
    response = exception_handler(exc, context)

    if response is None:
        raise

    response.exception = True
    return response
python
def handle_exception(self, exc):
    """
    Handle any exception that occurs, by returning an appropriate response,
    or re-raising the error.
    """
    if isinstance(exc, (exceptions.NotAuthenticated,
                        exceptions.AuthenticationFailed)):
        # WWW-Authenticate header for 401 responses, else coerce to 403
        auth_header = self.get_authenticate_header(self.request)

        if auth_header:
            exc.auth_header = auth_header
        else:
            exc.status_code = status.HTTP_403_FORBIDDEN

    exception_handler = self.settings.EXCEPTION_HANDLER

    context = self.get_exception_handler_context()
    response = exception_handler(exc, context)

    if response is None:
        raise

    response.exception = True
    return response
['def', 'handle_exception', '(', 'self', ',', 'exc', ')', ':', 'if', 'isinstance', '(', 'exc', ',', '(', 'exceptions', '.', 'NotAuthenticated', ',', 'exceptions', '.', 'AuthenticationFailed', ')', ')', ':', '# WWW-Authenticate header for 401 responses, else coerce to 403', 'auth_header', '=', 'self', '.', 'get_authenticate_header', '(', 'self', '.', 'request', ')', 'if', 'auth_header', ':', 'exc', '.', 'auth_header', '=', 'auth_header', 'else', ':', 'exc', '.', 'status_code', '=', 'status', '.', 'HTTP_403_FORBIDDEN', 'exception_handler', '=', 'self', '.', 'settings', '.', 'EXCEPTION_HANDLER', 'context', '=', 'self', '.', 'get_exception_handler_context', '(', ')', 'response', '=', 'exception_handler', '(', 'exc', ',', 'context', ')', 'if', 'response', 'is', 'None', ':', 'raise', 'response', '.', 'exception', '=', 'True', 'return', 'response']
Handle any exception that occurs, by returning an appropriate response, or re-raising the error.
['Handle', 'any', 'exception', 'that', 'occurs', 'by', 'returning', 'an', 'appropriate', 'response', 'or', 're', '-', 'raising', 'the', 'error', '.']
train
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/views.py#L413-L437
9,088
merll/docker-fabric
dockerfabric/tasks.py
list_networks
def list_networks(full_ids=False):
    """
    Lists networks on the Docker remote host, similar to ``docker network ls``.

    :param full_ids: Shows the full network ids. When ``False`` (default) only
      shows the first 12 characters.
    :type full_ids: bool
    """
    networks = docker_fabric().networks()
    _format_output_table(networks, NETWORK_COLUMNS, full_ids)
python
def list_networks(full_ids=False):
    """
    Lists networks on the Docker remote host, similar to ``docker network ls``.

    :param full_ids: Shows the full network ids. When ``False`` (default) only
      shows the first 12 characters.
    :type full_ids: bool
    """
    networks = docker_fabric().networks()
    _format_output_table(networks, NETWORK_COLUMNS, full_ids)
['def', 'list_networks', '(', 'full_ids', '=', 'False', ')', ':', 'networks', '=', 'docker_fabric', '(', ')', '.', 'networks', '(', ')', '_format_output_table', '(', 'networks', ',', 'NETWORK_COLUMNS', ',', 'full_ids', ')']
Lists networks on the Docker remote host, similar to ``docker network ls``.

:param full_ids: Shows the full network ids. When ``False`` (default) only
  shows the first 12 characters.
:type full_ids: bool
['Lists', 'networks', 'on', 'the', 'Docker', 'remote', 'host', 'similar', 'to', 'docker', 'network', 'ls', '.']
train
https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/tasks.py#L148-L156
9,089
cloudbase/python-hnvclient
hnv/common/utils.py
_HNVClient.update_resource
def update_resource(self, path, data, if_match=None):
    """Update the required resource."""
    response = self._http_request(resource=path, method="PUT", body=data,
                                  if_match=if_match)
    try:
        return response.json()
    except ValueError:
        raise exception.ServiceException("Invalid service response.")
python
def update_resource(self, path, data, if_match=None):
    """Update the required resource."""
    response = self._http_request(resource=path, method="PUT", body=data,
                                  if_match=if_match)
    try:
        return response.json()
    except ValueError:
        raise exception.ServiceException("Invalid service response.")
['def', 'update_resource', '(', 'self', ',', 'path', ',', 'data', ',', 'if_match', '=', 'None', ')', ':', 'response', '=', 'self', '.', '_http_request', '(', 'resource', '=', 'path', ',', 'method', '=', '"PUT"', ',', 'body', '=', 'data', ',', 'if_match', '=', 'if_match', ')', 'try', ':', 'return', 'response', '.', 'json', '(', ')', 'except', 'ValueError', ':', 'raise', 'exception', '.', 'ServiceException', '(', '"Invalid service response."', ')']
Update the required resource.
['Update', 'the', 'required', 'resource', '.']
train
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/common/utils.py#L172-L179
9,090
ekiro/haps
haps/container.py
inject
def inject(fun: Callable) -> Callable:
    """
    A decorator for injection dependencies into functions/methods, based
    on their type annotations.

    .. code-block:: python

        class SomeClass:
            @inject
            def __init__(self, my_dep: DepType) -> None:
                self.my_dep = my_dep

    .. important::
        On the opposite to :class:`~haps.Inject`, dependency is injected
        at the moment of method invocation. In case of decorating
        `__init__`, dependency is injected when `SomeClass` instance is
        created.

    :param fun: callable with annotated parameters
    :return: decorated callable
    """
    sig = inspect.signature(fun)

    injectables: Dict[str, Any] = {}
    for name, param in sig.parameters.items():
        type_ = param.annotation
        if name == 'self':
            continue
        else:
            injectables[name] = type_

    @wraps(fun)
    def _inner(*args, **kwargs):
        container = Container()
        for n, t in injectables.items():
            if n not in kwargs:
                kwargs[n] = container.get_object(t)

        return fun(*args, **kwargs)

    return _inner
python
def inject(fun: Callable) -> Callable:
    """
    A decorator for injection dependencies into functions/methods, based
    on their type annotations.

    .. code-block:: python

        class SomeClass:
            @inject
            def __init__(self, my_dep: DepType) -> None:
                self.my_dep = my_dep

    .. important::
        On the opposite to :class:`~haps.Inject`, dependency is injected
        at the moment of method invocation. In case of decorating
        `__init__`, dependency is injected when `SomeClass` instance is
        created.

    :param fun: callable with annotated parameters
    :return: decorated callable
    """
    sig = inspect.signature(fun)

    injectables: Dict[str, Any] = {}
    for name, param in sig.parameters.items():
        type_ = param.annotation
        if name == 'self':
            continue
        else:
            injectables[name] = type_

    @wraps(fun)
    def _inner(*args, **kwargs):
        container = Container()
        for n, t in injectables.items():
            if n not in kwargs:
                kwargs[n] = container.get_object(t)

        return fun(*args, **kwargs)

    return _inner
['def', 'inject', '(', 'fun', ':', 'Callable', ')', '->', 'Callable', ':', 'sig', '=', 'inspect', '.', 'signature', '(', 'fun', ')', 'injectables', ':', 'Dict', '[', 'str', ',', 'Any', ']', '=', '{', '}', 'for', 'name', ',', 'param', 'in', 'sig', '.', 'parameters', '.', 'items', '(', ')', ':', 'type_', '=', 'param', '.', 'annotation', 'if', 'name', '==', "'self'", ':', 'continue', 'else', ':', 'injectables', '[', 'name', ']', '=', 'type_', '@', 'wraps', '(', 'fun', ')', 'def', '_inner', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'container', '=', 'Container', '(', ')', 'for', 'n', ',', 't', 'in', 'injectables', '.', 'items', '(', ')', ':', 'if', 'n', 'not', 'in', 'kwargs', ':', 'kwargs', '[', 'n', ']', '=', 'container', '.', 'get_object', '(', 't', ')', 'return', 'fun', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', '_inner']
A decorator for injection dependencies into functions/methods, based
on their type annotations.

.. code-block:: python

    class SomeClass:
        @inject
        def __init__(self, my_dep: DepType) -> None:
            self.my_dep = my_dep

.. important::
    On the opposite to :class:`~haps.Inject`, dependency is injected
    at the moment of method invocation. In case of decorating
    `__init__`, dependency is injected when `SomeClass` instance is
    created.

:param fun: callable with annotated parameters
:return: decorated callable
['A', 'decorator', 'for', 'injection', 'dependencies', 'into', 'functions', '/', 'methods', 'based', 'on', 'their', 'type', 'annotations', '.']
train
https://github.com/ekiro/haps/blob/64b6746187e44dadb23b842607d294e03c30a0be/haps/container.py#L280-L320
9,091
Opentrons/opentrons
api/src/opentrons/config/__init__.py
_load_with_overrides
def _load_with_overrides(base) -> Dict[str, str]:
    """ Load an config or write its defaults """
    should_write = False
    overrides = _get_environ_overrides()
    try:
        index = json.load((base/_CONFIG_FILENAME).open())
    except (OSError, json.JSONDecodeError) as e:
        sys.stderr.write("Error loading config from {}: {}\nRewriting...\n"
                         .format(str(base), e))
        should_write = True
        index = generate_config_index(overrides)

    for key in CONFIG_ELEMENTS:
        if key.name not in index:
            sys.stderr.write(
                f"New config index key {key.name}={key.default}"
                "\nRewriting...\n")
            if key.kind in (ConfigElementType.DIR, ConfigElementType.FILE):
                index[key.name] = base/key.default
            else:
                index[key.name] = key.default
            should_write = True

    if should_write:
        try:
            write_config(index, path=base)
        except Exception as e:
            sys.stderr.write(
                "Error writing config to {}: {}\nProceeding memory-only\n"
                .format(str(base), e))
    index.update(overrides)
    return index
python
def _load_with_overrides(base) -> Dict[str, str]:
    """ Load an config or write its defaults """
    should_write = False
    overrides = _get_environ_overrides()
    try:
        index = json.load((base/_CONFIG_FILENAME).open())
    except (OSError, json.JSONDecodeError) as e:
        sys.stderr.write("Error loading config from {}: {}\nRewriting...\n"
                         .format(str(base), e))
        should_write = True
        index = generate_config_index(overrides)

    for key in CONFIG_ELEMENTS:
        if key.name not in index:
            sys.stderr.write(
                f"New config index key {key.name}={key.default}"
                "\nRewriting...\n")
            if key.kind in (ConfigElementType.DIR, ConfigElementType.FILE):
                index[key.name] = base/key.default
            else:
                index[key.name] = key.default
            should_write = True

    if should_write:
        try:
            write_config(index, path=base)
        except Exception as e:
            sys.stderr.write(
                "Error writing config to {}: {}\nProceeding memory-only\n"
                .format(str(base), e))
    index.update(overrides)
    return index
['def', '_load_with_overrides', '(', 'base', ')', '->', 'Dict', '[', 'str', ',', 'str', ']', ':', 'should_write', '=', 'False', 'overrides', '=', '_get_environ_overrides', '(', ')', 'try', ':', 'index', '=', 'json', '.', 'load', '(', '(', 'base', '/', '_CONFIG_FILENAME', ')', '.', 'open', '(', ')', ')', 'except', '(', 'OSError', ',', 'json', '.', 'JSONDecodeError', ')', 'as', 'e', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"Error loading config from {}: {}\\nRewriting...\\n"', '.', 'format', '(', 'str', '(', 'base', ')', ',', 'e', ')', ')', 'should_write', '=', 'True', 'index', '=', 'generate_config_index', '(', 'overrides', ')', 'for', 'key', 'in', 'CONFIG_ELEMENTS', ':', 'if', 'key', '.', 'name', 'not', 'in', 'index', ':', 'sys', '.', 'stderr', '.', 'write', '(', 'f"New config index key {key.name}={key.default}"', '"\\nRewriting...\\n"', ')', 'if', 'key', '.', 'kind', 'in', '(', 'ConfigElementType', '.', 'DIR', ',', 'ConfigElementType', '.', 'FILE', ')', ':', 'index', '[', 'key', '.', 'name', ']', '=', 'base', '/', 'key', '.', 'default', 'else', ':', 'index', '[', 'key', '.', 'name', ']', '=', 'key', '.', 'default', 'should_write', '=', 'True', 'if', 'should_write', ':', 'try', ':', 'write_config', '(', 'index', ',', 'path', '=', 'base', ')', 'except', 'Exception', 'as', 'e', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"Error writing config to {}: {}\\nProceeding memory-only\\n"', '.', 'format', '(', 'str', '(', 'base', ')', ',', 'e', ')', ')', 'index', '.', 'update', '(', 'overrides', ')', 'return', 'index']
Load an config or write its defaults
['Load', 'an', 'config', 'or', 'write', 'its', 'defaults']
train
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/config/__init__.py#L193-L224
9,092
wummel/linkchecker
linkcheck/HtmlParser/__init__.py
set_encoding
def set_encoding (parsobj, attrs):
    """
    Set document encoding for the HTML parser according to the <meta>
    tag attribute information.

    @param attrs: attributes of a <meta> HTML tag
    @type attrs: dict
    @return: None
    """
    charset = attrs.get_true('charset', u'')
    if charset:
        # <meta charset="utf-8">
        # eg. in http://cn.dolphin-browser.com/activity/Dolphinjump
        charset = charset.encode('ascii', 'ignore').lower()
    elif attrs.get_true('http-equiv', u'').lower() == u"content-type":
        # <meta http-equiv="content-type" content="text/html;charset="utf-8">
        charset = attrs.get_true('content', u'')
        charset = charset.encode('ascii', 'ignore').lower()
        charset = get_ctype_charset(charset)
    if charset and charset in SUPPORTED_CHARSETS:
        parsobj.encoding = charset
python
def set_encoding (parsobj, attrs):
    """
    Set document encoding for the HTML parser according to the <meta>
    tag attribute information.

    @param attrs: attributes of a <meta> HTML tag
    @type attrs: dict
    @return: None
    """
    charset = attrs.get_true('charset', u'')
    if charset:
        # <meta charset="utf-8">
        # eg. in http://cn.dolphin-browser.com/activity/Dolphinjump
        charset = charset.encode('ascii', 'ignore').lower()
    elif attrs.get_true('http-equiv', u'').lower() == u"content-type":
        # <meta http-equiv="content-type" content="text/html;charset="utf-8">
        charset = attrs.get_true('content', u'')
        charset = charset.encode('ascii', 'ignore').lower()
        charset = get_ctype_charset(charset)
    if charset and charset in SUPPORTED_CHARSETS:
        parsobj.encoding = charset
['def', 'set_encoding', '(', 'parsobj', ',', 'attrs', ')', ':', 'charset', '=', 'attrs', '.', 'get_true', '(', "'charset'", ',', "u''", ')', 'if', 'charset', ':', '# <meta charset="utf-8">', '# eg. in http://cn.dolphin-browser.com/activity/Dolphinjump', 'charset', '=', 'charset', '.', 'encode', '(', "'ascii'", ',', "'ignore'", ')', '.', 'lower', '(', ')', 'elif', 'attrs', '.', 'get_true', '(', "'http-equiv'", ',', "u''", ')', '.', 'lower', '(', ')', '==', 'u"content-type"', ':', '# <meta http-equiv="content-type" content="text/html;charset="utf-8">', 'charset', '=', 'attrs', '.', 'get_true', '(', "'content'", ',', "u''", ')', 'charset', '=', 'charset', '.', 'encode', '(', "'ascii'", ',', "'ignore'", ')', '.', 'lower', '(', ')', 'charset', '=', 'get_ctype_charset', '(', 'charset', ')', 'if', 'charset', 'and', 'charset', 'in', 'SUPPORTED_CHARSETS', ':', 'parsobj', '.', 'encoding', '=', 'charset']
Set document encoding for the HTML parser according to the <meta>
tag attribute information.

@param attrs: attributes of a <meta> HTML tag
@type attrs: dict
@return: None
['Set', 'document', 'encoding', 'for', 'the', 'HTML', 'parser', 'according', 'to', 'the', '<meta', '>', 'tag', 'attribute', 'information', '.']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/HtmlParser/__init__.py#L218-L238
9,093
pandas-dev/pandas
pandas/core/frame.py
DataFrame.corr
def corr(self, method='pearson', min_periods=1):
    """
    Compute pairwise correlation of columns, excluding NA/null values.

    Parameters
    ----------
    method : {'pearson', 'kendall', 'spearman'} or callable
        * pearson : standard correlation coefficient
        * kendall : Kendall Tau correlation coefficient
        * spearman : Spearman rank correlation
        * callable: callable with input two 1d ndarrays
            and returning a float. Note that the returned matrix from corr
            will have 1 along the diagonals and will be symmetric
            regardless of the callable's behavior
        .. versionadded:: 0.24.0

    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result. Currently only available for Pearson
        and Spearman correlation.

    Returns
    -------
    DataFrame
        Correlation matrix.

    See Also
    --------
    DataFrame.corrwith
    Series.corr

    Examples
    --------
    >>> def histogram_intersection(a, b):
    ...     v = np.minimum(a, b).sum().round(decimals=1)
    ...     return v
    >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
    ...                   columns=['dogs', 'cats'])
    >>> df.corr(method=histogram_intersection)
          dogs  cats
    dogs   1.0   0.3
    cats   0.3   1.0
    """
    numeric_df = self._get_numeric_data()
    cols = numeric_df.columns
    idx = cols.copy()
    mat = numeric_df.values

    if method == 'pearson':
        correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
    elif method == 'spearman':
        correl = libalgos.nancorr_spearman(ensure_float64(mat),
                                           minp=min_periods)
    elif method == 'kendall' or callable(method):
        if min_periods is None:
            min_periods = 1
        mat = ensure_float64(mat).T
        corrf = nanops.get_corr_func(method)
        K = len(cols)
        correl = np.empty((K, K), dtype=float)
        mask = np.isfinite(mat)
        for i, ac in enumerate(mat):
            for j, bc in enumerate(mat):
                if i > j:
                    continue

                valid = mask[i] & mask[j]
                if valid.sum() < min_periods:
                    c = np.nan
                elif i == j:
                    c = 1.
                elif not valid.all():
                    c = corrf(ac[valid], bc[valid])
                else:
                    c = corrf(ac, bc)
                correl[i, j] = c
                correl[j, i] = c
    else:
        raise ValueError("method must be either 'pearson', "
                         "'spearman', 'kendall', or a callable, "
                         "'{method}' was supplied".format(method=method))

    return self._constructor(correl, index=idx, columns=cols)
python
def corr(self, method='pearson', min_periods=1):
    """
    Compute pairwise correlation of columns, excluding NA/null values.

    Parameters
    ----------
    method : {'pearson', 'kendall', 'spearman'} or callable
        * pearson : standard correlation coefficient
        * kendall : Kendall Tau correlation coefficient
        * spearman : Spearman rank correlation
        * callable: callable with input two 1d ndarrays
            and returning a float. Note that the returned matrix from corr
            will have 1 along the diagonals and will be symmetric
            regardless of the callable's behavior
        .. versionadded:: 0.24.0

    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result. Currently only available for Pearson
        and Spearman correlation.

    Returns
    -------
    DataFrame
        Correlation matrix.

    See Also
    --------
    DataFrame.corrwith
    Series.corr

    Examples
    --------
    >>> def histogram_intersection(a, b):
    ...     v = np.minimum(a, b).sum().round(decimals=1)
    ...     return v
    >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
    ...                   columns=['dogs', 'cats'])
    >>> df.corr(method=histogram_intersection)
          dogs  cats
    dogs   1.0   0.3
    cats   0.3   1.0
    """
    numeric_df = self._get_numeric_data()
    cols = numeric_df.columns
    idx = cols.copy()
    mat = numeric_df.values

    if method == 'pearson':
        correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
    elif method == 'spearman':
        correl = libalgos.nancorr_spearman(ensure_float64(mat),
                                           minp=min_periods)
    elif method == 'kendall' or callable(method):
        if min_periods is None:
            min_periods = 1
        mat = ensure_float64(mat).T
        corrf = nanops.get_corr_func(method)
        K = len(cols)
        correl = np.empty((K, K), dtype=float)
        mask = np.isfinite(mat)
        for i, ac in enumerate(mat):
            for j, bc in enumerate(mat):
                if i > j:
                    continue

                valid = mask[i] & mask[j]
                if valid.sum() < min_periods:
                    c = np.nan
                elif i == j:
                    c = 1.
                elif not valid.all():
                    c = corrf(ac[valid], bc[valid])
                else:
                    c = corrf(ac, bc)
                correl[i, j] = c
                correl[j, i] = c
    else:
        raise ValueError("method must be either 'pearson', "
                         "'spearman', 'kendall', or a callable, "
                         "'{method}' was supplied".format(method=method))

    return self._constructor(correl, index=idx, columns=cols)
['def', 'corr', '(', 'self', ',', 'method', '=', "'pearson'", ',', 'min_periods', '=', '1', ')', ':', 'numeric_df', '=', 'self', '.', '_get_numeric_data', '(', ')', 'cols', '=', 'numeric_df', '.', 'columns', 'idx', '=', 'cols', '.', 'copy', '(', ')', 'mat', '=', 'numeric_df', '.', 'values', 'if', 'method', '==', "'pearson'", ':', 'correl', '=', 'libalgos', '.', 'nancorr', '(', 'ensure_float64', '(', 'mat', ')', ',', 'minp', '=', 'min_periods', ')', 'elif', 'method', '==', "'spearman'", ':', 'correl', '=', 'libalgos', '.', 'nancorr_spearman', '(', 'ensure_float64', '(', 'mat', ')', ',', 'minp', '=', 'min_periods', ')', 'elif', 'method', '==', "'kendall'", 'or', 'callable', '(', 'method', ')', ':', 'if', 'min_periods', 'is', 'None', ':', 'min_periods', '=', '1', 'mat', '=', 'ensure_float64', '(', 'mat', ')', '.', 'T', 'corrf', '=', 'nanops', '.', 'get_corr_func', '(', 'method', ')', 'K', '=', 'len', '(', 'cols', ')', 'correl', '=', 'np', '.', 'empty', '(', '(', 'K', ',', 'K', ')', ',', 'dtype', '=', 'float', ')', 'mask', '=', 'np', '.', 'isfinite', '(', 'mat', ')', 'for', 'i', ',', 'ac', 'in', 'enumerate', '(', 'mat', ')', ':', 'for', 'j', ',', 'bc', 'in', 'enumerate', '(', 'mat', ')', ':', 'if', 'i', '>', 'j', ':', 'continue', 'valid', '=', 'mask', '[', 'i', ']', '&', 'mask', '[', 'j', ']', 'if', 'valid', '.', 'sum', '(', ')', '<', 'min_periods', ':', 'c', '=', 'np', '.', 'nan', 'elif', 'i', '==', 'j', ':', 'c', '=', '1.', 'elif', 'not', 'valid', '.', 'all', '(', ')', ':', 'c', '=', 'corrf', '(', 'ac', '[', 'valid', ']', ',', 'bc', '[', 'valid', ']', ')', 'else', ':', 'c', '=', 'corrf', '(', 'ac', ',', 'bc', ')', 'correl', '[', 'i', ',', 'j', ']', '=', 'c', 'correl', '[', 'j', ',', 'i', ']', '=', 'c', 'else', ':', 'raise', 'ValueError', '(', '"method must be either \'pearson\', "', '"\'spearman\', \'kendall\', or a callable, "', '"\'{method}\' was supplied"', '.', 'format', '(', 'method', '=', 'method', ')', ')', 'return', 'self', '.', '_constructor', '(', 'correl', ',', 'index', '=', 'idx', ',', 'columns', '=', 'cols', ')']
Compute pairwise correlation of columns, excluding NA/null values.

Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
    * pearson : standard correlation coefficient
    * kendall : Kendall Tau correlation coefficient
    * spearman : Spearman rank correlation
    * callable: callable with input two 1d ndarrays
        and returning a float. Note that the returned matrix from corr
        will have 1 along the diagonals and will be symmetric
        regardless of the callable's behavior
    .. versionadded:: 0.24.0

min_periods : int, optional
    Minimum number of observations required per pair of columns
    to have a valid result. Currently only available for Pearson
    and Spearman correlation.

Returns
-------
DataFrame
    Correlation matrix.

See Also
--------
DataFrame.corrwith
Series.corr

Examples
--------
>>> def histogram_intersection(a, b):
...     v = np.minimum(a, b).sum().round(decimals=1)
...     return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
...                   columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
      dogs  cats
dogs   1.0   0.3
cats   0.3   1.0
['Compute', 'pairwise', 'correlation', 'of', 'columns', 'excluding', 'NA', '/', 'null', 'values', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7033-L7115
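The docstring above only exercises the callable form; here is a small hedged sketch of the string methods and `min_periods` (data invented for illustration):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"dogs": [0.2, 0.0, 0.6, 0.2],
                   "cats": [0.3, 0.6, np.nan, 0.1]})

# NaNs are excluded pairwise, so dogs/cats is computed from the 3 complete rows.
print(df.corr(method="spearman"))

# Pairs (including cats/cats) with fewer than 4 non-null observations
# come back as NaN when min_periods=4.
print(df.corr(min_periods=4))
```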
9,094
mailund/statusbar
statusbar/__init__.py
ProgressBar.format_summary
def format_summary(self): """Generate a summary string for the progress bar.""" chunks = [chunk.format_chunk_summary() for chunk in self._progress_chunks] return "/".join(chunks)
python
def format_summary(self): """Generate a summary string for the progress bar.""" chunks = [chunk.format_chunk_summary() for chunk in self._progress_chunks] return "/".join(chunks)
['def', 'format_summary', '(', 'self', ')', ':', 'chunks', '=', '[', 'chunk', '.', 'format_chunk_summary', '(', ')', 'for', 'chunk', 'in', 'self', '.', '_progress_chunks', ']', 'return', '"/"', '.', 'join', '(', 'chunks', ')']
Generate a summary string for the progress bar.
['Generate', 'a', 'summary', 'string', 'for', 'the', 'progress', 'bar', '.']
train
https://github.com/mailund/statusbar/blob/e42ac88cdaae281d47318dd8dcf156bfff2a7b2a/statusbar/__init__.py#L107-L111
9,095
django-danceschool/django-danceschool
danceschool/core/views.py
EventRegistrationSelectView.get_queryset
def get_queryset(self): ''' Recent events are listed in link form. ''' return Event.objects.filter( Q(startTime__gte=timezone.now() - timedelta(days=90)) & ( Q(series__isnull=False) | Q(publicevent__isnull=False) ) ).annotate(count=Count('eventregistration')).annotate(**self.get_annotations()).exclude( Q(count=0) & Q(status__in=[ Event.RegStatus.hidden, Event.RegStatus.regHidden, Event.RegStatus.disabled ]) )
python
def get_queryset(self): ''' Recent events are listed in link form. ''' return Event.objects.filter( Q(startTime__gte=timezone.now() - timedelta(days=90)) & ( Q(series__isnull=False) | Q(publicevent__isnull=False) ) ).annotate(count=Count('eventregistration')).annotate(**self.get_annotations()).exclude( Q(count=0) & Q(status__in=[ Event.RegStatus.hidden, Event.RegStatus.regHidden, Event.RegStatus.disabled ]) )
['def', 'get_queryset', '(', 'self', ')', ':', 'return', 'Event', '.', 'objects', '.', 'filter', '(', 'Q', '(', 'startTime__gte', '=', 'timezone', '.', 'now', '(', ')', '-', 'timedelta', '(', 'days', '=', '90', ')', ')', '&', '(', 'Q', '(', 'series__isnull', '=', 'False', ')', '|', 'Q', '(', 'publicevent__isnull', '=', 'False', ')', ')', ')', '.', 'annotate', '(', 'count', '=', 'Count', '(', "'eventregistration'", ')', ')', '.', 'annotate', '(', '*', '*', 'self', '.', 'get_annotations', '(', ')', ')', '.', 'exclude', '(', 'Q', '(', 'count', '=', '0', ')', '&', 'Q', '(', 'status__in', '=', '[', 'Event', '.', 'RegStatus', '.', 'hidden', ',', 'Event', '.', 'RegStatus', '.', 'regHidden', ',', 'Event', '.', 'RegStatus', '.', 'disabled', ']', ')', ')']
Recent events are listed in link form.
['Recent', 'events', 'are', 'listed', 'in', 'link', 'form', '.']
train
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/views.py#L53-L64
9,096
Alignak-monitoring/alignak
alignak/objects/schedulingitem.py
SchedulingItem.check_and_set_unreachability
def check_and_set_unreachability(self, hosts, services): """ Check if all dependencies are down; if yes, set this object as unreachable. todo: this function does not care about execution_failure_criteria! :param hosts: hosts objects, used to get object in act_depend_of :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get object in act_depend_of :type services: alignak.objects.service.Services :return: None """ parent_is_down = [] for (dep_id, _, _, _) in self.act_depend_of: if dep_id in hosts: dep = hosts[dep_id] else: dep = services[dep_id] if dep.state in ['d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN', 'x', 'UNREACHABLE']: parent_is_down.append(True) else: parent_is_down.append(False) if False in parent_is_down: return # all parents down self.set_unreachable()
python
def check_and_set_unreachability(self, hosts, services): """ Check if all dependencies are down; if yes, set this object as unreachable. todo: this function does not care about execution_failure_criteria! :param hosts: hosts objects, used to get object in act_depend_of :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get object in act_depend_of :type services: alignak.objects.service.Services :return: None """ parent_is_down = [] for (dep_id, _, _, _) in self.act_depend_of: if dep_id in hosts: dep = hosts[dep_id] else: dep = services[dep_id] if dep.state in ['d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN', 'x', 'UNREACHABLE']: parent_is_down.append(True) else: parent_is_down.append(False) if False in parent_is_down: return # all parents down self.set_unreachable()
['def', 'check_and_set_unreachability', '(', 'self', ',', 'hosts', ',', 'services', ')', ':', 'parent_is_down', '=', '[', ']', 'for', '(', 'dep_id', ',', '_', ',', '_', ',', '_', ')', 'in', 'self', '.', 'act_depend_of', ':', 'if', 'dep_id', 'in', 'hosts', ':', 'dep', '=', 'hosts', '[', 'dep_id', ']', 'else', ':', 'dep', '=', 'services', '[', 'dep_id', ']', 'if', 'dep', '.', 'state', 'in', '[', "'d'", ',', "'DOWN'", ',', "'c'", ',', "'CRITICAL'", ',', "'u'", ',', "'UNKNOWN'", ',', "'x'", ',', "'UNREACHABLE'", ']', ':', 'parent_is_down', '.', 'append', '(', 'True', ')', 'else', ':', 'parent_is_down', '.', 'append', '(', 'False', ')', 'if', 'False', 'in', 'parent_is_down', ':', 'return', '# all parents down', 'self', '.', 'set_unreachable', '(', ')']
Check if all dependencies are down; if yes, set this object as unreachable. todo: this function does not care about execution_failure_criteria! :param hosts: hosts objects, used to get object in act_depend_of :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get object in act_depend_of :type services: alignak.objects.service.Services :return: None
['Check', 'if', 'all', 'dependencies', 'are', 'down', 'if', 'yes', 'set', 'this', 'object', 'as', 'unreachable', '.']
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1016-L1042
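The loop above reduces to an `all()` over the dependency states; a minimal standalone sketch of the same decision rule (the state strings come from the code above, the helper name is mine). Note that, as written, an empty `act_depend_of` also falls through to `set_unreachable()`, because `False in []` is false:

```python
DOWN_STATES = {'d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN', 'x', 'UNREACHABLE'}

def should_mark_unreachable(parent_states):
    # Mirrors the loop above: unreachable only when every parent is down.
    # Like the original, an empty list of parents also returns True.
    return all(state in DOWN_STATES for state in parent_states)

assert should_mark_unreachable(['DOWN', 'c'])
assert not should_mark_unreachable(['DOWN', 'OK'])
assert should_mark_unreachable([])  # the edge case noted above
```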
9,097
FreekKalter/geoselect
geoselect.py
convert_to_decimal
def convert_to_decimal(string): """ Decode the exif-gps format into a decimal point. '[51, 4, 1234/34]' -> 51.076748366 """ number_or_fraction = '(?:\d{1,2}) | (?:\d{1,10} / \d{1,10})' m = re.compile('''\[?\s? # opening bracket \d{{1,2}}\s?,\s? # first number {0} \s?,\s? # second number (can be a fraction) {0} \s? # third number (can be a fraction) \]?\s? # closing bracket '''.format(number_or_fraction), re.VERBOSE) if not m.match(string): raise ValueError h, m, s = re.sub('\[|\]', '', string).split(', ') result = int(h) if '/' in m: m = m.split('/') result += int(m[0]) * 1.0 / int(m[1]) / 60 else: result += int(m) * 1.0 / 60 if '/' in s: s = s.split('/') result += int(s[0]) * 1.0 / int(s[1]) / 3600 else: result += int(s) * 1.0 / 3600 return result
python
def convert_to_decimal(string): """ Decode the exif-gps format into a decimal point. '[51, 4, 1234/34]' -> 51.076748366 """ number_or_fraction = '(?:\d{1,2}) | (?:\d{1,10} / \d{1,10})' m = re.compile('''\[?\s? # opening bracket \d{{1,2}}\s?,\s? # first number {0} \s?,\s? # second number (can be a fraction) {0} \s? # third number (can be a fraction) \]?\s? # closing bracket '''.format(number_or_fraction), re.VERBOSE) if not m.match(string): raise ValueError h, m, s = re.sub('\[|\]', '', string).split(', ') result = int(h) if '/' in m: m = m.split('/') result += int(m[0]) * 1.0 / int(m[1]) / 60 else: result += int(m) * 1.0 / 60 if '/' in s: s = s.split('/') result += int(s[0]) * 1.0 / int(s[1]) / 3600 else: result += int(s) * 1.0 / 3600 return result
['def', 'convert_to_decimal', '(', 'string', ')', ':', 'number_or_fraction', '=', "'(?:\\d{1,2}) | (?:\\d{1,10} / \\d{1,10})'", 'm', '=', 're', '.', 'compile', '(', "'''\\[?\\s? # opening bracket\r\n \\d{{1,2}}\\s?,\\s? # first number\r\n {0} \\s?,\\s? # second number (can be a fraction)\r\n {0} \\s? # third number (can be a fraction)\r\n \\]?\\s? # closing bracket\r\n '''", '.', 'format', '(', 'number_or_fraction', ')', ',', 're', '.', 'VERBOSE', ')', 'if', 'not', 'm', '.', 'match', '(', 'string', ')', ':', 'raise', 'ValueError', 'h', ',', 'm', ',', 's', '=', 're', '.', 'sub', '(', "'\\[|\\]'", ',', "''", ',', 'string', ')', '.', 'split', '(', "', '", ')', 'result', '=', 'int', '(', 'h', ')', 'if', "'/'", 'in', 'm', ':', 'm', '=', 'm', '.', 'split', '(', "'/'", ')', 'result', '+=', 'int', '(', 'm', '[', '0', ']', ')', '*', '1.0', '/', 'int', '(', 'm', '[', '1', ']', ')', '/', '60', 'else', ':', 'result', '+=', 'int', '(', 'm', ')', '*', '1.0', '/', '60', 'if', "'/'", 'in', 's', ':', 's', '=', 's', '.', 'split', '(', "'/'", ')', 'result', '+=', 'int', '(', 's', '[', '0', ']', ')', '*', '1.0', '/', 'int', '(', 's', '[', '1', ']', ')', '/', '3600', 'else', ':', 'result', '+=', 'int', '(', 's', ')', '*', '1.0', '/', '3600', 'return', 'result']
Decode the exif-gps format into a decimal point. '[51, 4, 1234/34]' -> 51.076748366
['Decode', 'the', 'exif', '-', 'gps', 'format', 'into', 'a', 'decimal', 'point', '.', '[', '51', '4', '1234', '/', '34', ']', '-', '>', '51', '.', '076748366']
train
https://github.com/FreekKalter/geoselect/blob/2c6ab869d50215eba21dce3e1770bcf8ecdb41f3/geoselect.py#L63-L90
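A quick worked check of the DMS formula `decimal = degrees + minutes/60 + seconds/3600` against the docstring example, using exact fractions (the `fractions` module is only for the demonstration):

```python
from fractions import Fraction

# '[51, 4, 1234/34]' -> 51 degrees, 4 minutes, 1234/34 seconds.
d, m, s = 51, Fraction(4), Fraction(1234, 34)
decimal = d + m / 60 + s / 3600
print(float(decimal))  # 51.07674836601307, i.e. 51.076748366 to nine decimals
```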
9,098
ioos/compliance-checker
compliance_checker/cf/cf.py
CFBaseCheck._find_cf_standard_name_table
def _find_cf_standard_name_table(self, ds): ''' Parse out the `standard_name_vocabulary` attribute and download that version of the cf standard name table. If the standard name table has already been downloaded, use the cached version. Modifies `_std_names` attribute to store standard names. Returns True if the file exists and False if it fails to download. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: bool ''' # Get the standard name vocab standard_name_vocabulary = getattr(ds, 'standard_name_vocabulary', '') # Try to parse this attribute to get version version = None try: if 'cf standard name table' in standard_name_vocabulary.lower(): version = [s.strip('(').strip(')').strip('v').strip(',') for s in standard_name_vocabulary.split()] # This assumes that table version number won't start with 0. version = [s for s in version if s.isdigit() and len(s) <= 2 and not s.startswith('0')] if len(version) > 1: return False else: version = version[0] else: # Can't parse the attribute, use the packaged version return False # usually raised from .lower() with an incompatible (non-string) # data type except AttributeError: warn("Cannot convert standard name table to lowercase. This can " "occur if a non-string standard_name_vocabulary global " "attribute is supplied") return False if version.startswith('v'): # i.e 'v34' -> '34' drop the v version = version[1:] # If the packaged version is what we're after, then we're good if version == self._std_names._version: print("Using packaged standard name table v{0}".format(version), file=sys.stderr) return False # Try to download the version specified try: data_directory = util.create_cached_data_dir() location = os.path.join(data_directory, 'cf-standard-name-table-test-{0}.xml'.format(version)) # Did we already download this before? if not os.path.isfile(location): util.download_cf_standard_name_table(version, location) print("Using downloaded standard name table v{0}".format(version), file=sys.stderr) else: print("Using cached standard name table v{0} from {1}".format(version, location), file=sys.stderr) self._std_names = util.StandardNameTable(location) return True except Exception as e: # There was an error downloading the CF table. That's ok, we'll just use the packaged version warn("Problem fetching standard name table:\n{0}\n" "Using packaged v{1}".format(e, self._std_names._version)) return False
python
def _find_cf_standard_name_table(self, ds): ''' Parse out the `standard_name_vocabulary` attribute and download that version of the cf standard name table. If the standard name table has already been downloaded, use the cached version. Modifies `_std_names` attribute to store standard names. Returns True if the file exists and False if it fails to download. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: bool ''' # Get the standard name vocab standard_name_vocabulary = getattr(ds, 'standard_name_vocabulary', '') # Try to parse this attribute to get version version = None try: if 'cf standard name table' in standard_name_vocabulary.lower(): version = [s.strip('(').strip(')').strip('v').strip(',') for s in standard_name_vocabulary.split()] # This assumes that table version number won't start with 0. version = [s for s in version if s.isdigit() and len(s) <= 2 and not s.startswith('0')] if len(version) > 1: return False else: version = version[0] else: # Can't parse the attribute, use the packaged version return False # usually raised from .lower() with an incompatible (non-string) # data type except AttributeError: warn("Cannot convert standard name table to lowercase. This can " "occur if a non-string standard_name_vocabulary global " "attribute is supplied") return False if version.startswith('v'): # i.e 'v34' -> '34' drop the v version = version[1:] # If the packaged version is what we're after, then we're good if version == self._std_names._version: print("Using packaged standard name table v{0}".format(version), file=sys.stderr) return False # Try to download the version specified try: data_directory = util.create_cached_data_dir() location = os.path.join(data_directory, 'cf-standard-name-table-test-{0}.xml'.format(version)) # Did we already download this before? if not os.path.isfile(location): util.download_cf_standard_name_table(version, location) print("Using downloaded standard name table v{0}".format(version), file=sys.stderr) else: print("Using cached standard name table v{0} from {1}".format(version, location), file=sys.stderr) self._std_names = util.StandardNameTable(location) return True except Exception as e: # There was an error downloading the CF table. That's ok, we'll just use the packaged version warn("Problem fetching standard name table:\n{0}\n" "Using packaged v{1}".format(e, self._std_names._version)) return False
['def', '_find_cf_standard_name_table', '(', 'self', ',', 'ds', ')', ':', '# Get the standard name vocab', 'standard_name_vocabulary', '=', 'getattr', '(', 'ds', ',', "'standard_name_vocabulary'", ',', "''", ')', '# Try to parse this attribute to get version', 'version', '=', 'None', 'try', ':', 'if', "'cf standard name table'", 'in', 'standard_name_vocabulary', '.', 'lower', '(', ')', ':', 'version', '=', '[', 's', '.', 'strip', '(', "'('", ')', '.', 'strip', '(', "')'", ')', '.', 'strip', '(', "'v'", ')', '.', 'strip', '(', "','", ')', 'for', 's', 'in', 'standard_name_vocabulary', '.', 'split', '(', ')', ']', "# This assumes that table version number won't start with 0.", 'version', '=', '[', 's', 'for', 's', 'in', 'version', 'if', 's', '.', 'isdigit', '(', ')', 'and', 'len', '(', 's', ')', '<=', '2', 'and', 'not', 's', '.', 'startswith', '(', "'0'", ')', ']', 'if', 'len', '(', 'version', ')', '>', '1', ':', 'return', 'False', 'else', ':', 'version', '=', 'version', '[', '0', ']', 'else', ':', "# Can't parse the attribute, use the packaged version", 'return', 'False', '# usually raised from .lower() with an incompatible (non-string)', '# data type', 'except', 'AttributeError', ':', 'warn', '(', '"Cannot convert standard name table to lowercase. This can "', '"occur if a non-string standard_name_vocabulary global "', '"attribute is supplied"', ')', 'return', 'False', 'if', 'version', '.', 'startswith', '(', "'v'", ')', ':', "# i.e 'v34' -> '34' drop the v", 'version', '=', 'version', '[', '1', ':', ']', "# If the packaged version is what we're after, then we're good", 'if', 'version', '==', 'self', '.', '_std_names', '.', '_version', ':', 'print', '(', '"Using packaged standard name table v{0}"', '.', 'format', '(', 'version', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'return', 'False', '# Try to download the version specified', 'try', ':', 'data_directory', '=', 'util', '.', 'create_cached_data_dir', '(', ')', 'location', '=', 'os', '.', 'path', '.', 'join', '(', 'data_directory', ',', "'cf-standard-name-table-test-{0}.xml'", '.', 'format', '(', 'version', ')', ')', '# Did we already download this before?', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'location', ')', ':', 'util', '.', 'download_cf_standard_name_table', '(', 'version', ',', 'location', ')', 'print', '(', '"Using downloaded standard name table v{0}"', '.', 'format', '(', 'version', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'else', ':', 'print', '(', '"Using cached standard name table v{0} from {1}"', '.', 'format', '(', 'version', ',', 'location', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'self', '.', '_std_names', '=', 'util', '.', 'StandardNameTable', '(', 'location', ')', 'return', 'True', 'except', 'Exception', 'as', 'e', ':', "# There was an error downloading the CF table. That's ok, we'll just use the packaged version", 'warn', '(', '"Problem fetching standard name table:\\n{0}\\n"', '"Using packaged v{1}"', '.', 'format', '(', 'e', ',', 'self', '.', '_std_names', '.', '_version', ')', ')', 'return', 'False']
Parse out the `standard_name_vocabulary` attribute and download that version of the cf standard name table. If the standard name table has already been downloaded, use the cached version. Modifies `_std_names` attribute to store standard names. Returns True if the file exists and False if it fails to download. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: bool
['Parse', 'out', 'the', 'standard_name_vocabulary', 'attribute', 'and', 'download', 'that', 'version', 'of', 'the', 'cf', 'standard', 'name', 'table', '.', 'If', 'the', 'standard', 'name', 'table', 'has', 'already', 'been', 'downloaded', 'use', 'the', 'cached', 'version', '.', 'Modifies', '_std_names', 'attribute', 'to', 'store', 'standard', 'names', '.', 'Returns', 'True', 'if', 'the', 'file', 'exists', 'and', 'False', 'if', 'it', 'fails', 'to', 'download', '.']
train
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L154-L215
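The version-sniffing heuristic above is easy to exercise in isolation; a hedged sketch, assuming the attribute follows the usual "CF Standard Name Table (vNN, ...)" wording (the helper name is mine, and the condensed `strip('()v,')` stands in for the chained `strip()` calls, so corner cases may differ):

```python
def parse_cf_table_version(vocab):
    # Return the table version as a string, or None when the attribute
    # is absent or ambiguous -- mirroring the early-return branches above.
    if 'cf standard name table' not in vocab.lower():
        return None
    tokens = [t.strip('()v,') for t in vocab.split()]
    hits = [t for t in tokens
            if t.isdigit() and len(t) <= 2 and not t.startswith('0')]
    return hits[0] if len(hits) == 1 else None

print(parse_cf_table_version("CF Standard Name Table (v34, 2016)"))  # 34
print(parse_cf_table_version("NODC vocabulary"))                     # None
```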
9,099
secdev/scapy
scapy/layers/tls/crypto/prf.py
PRF.compute_verify_data
def compute_verify_data(self, con_end, read_or_write, handshake_msg, master_secret): """ Return verify_data based on handshake messages, connection end, master secret, and read_or_write position. See RFC 5246, section 7.4.9. Every TLS 1.2 cipher suite has a verify_data of length 12. Note also: "This PRF with the SHA-256 hash function is used for all cipher suites defined in this document and in TLS documents published prior to this document when TLS 1.2 is negotiated." Cipher suites using SHA-384 were defined later on. """ if self.tls_version < 0x0300: return None elif self.tls_version == 0x0300: if read_or_write == "write": d = {"client": b"CLNT", "server": b"SRVR"} else: d = {"client": b"SRVR", "server": b"CLNT"} label = d[con_end] sslv3_md5_pad1 = b"\x36" * 48 sslv3_md5_pad2 = b"\x5c" * 48 sslv3_sha1_pad1 = b"\x36" * 40 sslv3_sha1_pad2 = b"\x5c" * 40 md5 = _tls_hash_algs["MD5"]() sha1 = _tls_hash_algs["SHA"]() md5_hash = md5.digest(master_secret + sslv3_md5_pad2 + md5.digest(handshake_msg + label + master_secret + sslv3_md5_pad1)) sha1_hash = sha1.digest(master_secret + sslv3_sha1_pad2 + sha1.digest(handshake_msg + label + master_secret + sslv3_sha1_pad1)) # noqa: E501 verify_data = md5_hash + sha1_hash else: if read_or_write == "write": d = {"client": "client", "server": "server"} else: d = {"client": "server", "server": "client"} label = ("%s finished" % d[con_end]).encode() if self.tls_version <= 0x0302: s1 = _tls_hash_algs["MD5"]().digest(handshake_msg) s2 = _tls_hash_algs["SHA"]().digest(handshake_msg) verify_data = self.prf(master_secret, label, s1 + s2, 12) else: if self.hash_name in ["MD5", "SHA"]: h = _tls_hash_algs["SHA256"]() else: h = _tls_hash_algs[self.hash_name]() s = h.digest(handshake_msg) verify_data = self.prf(master_secret, label, s, 12) return verify_data
python
def compute_verify_data(self, con_end, read_or_write, handshake_msg, master_secret): """ Return verify_data based on handshake messages, connection end, master secret, and read_or_write position. See RFC 5246, section 7.4.9. Every TLS 1.2 cipher suite has a verify_data of length 12. Note also: "This PRF with the SHA-256 hash function is used for all cipher suites defined in this document and in TLS documents published prior to this document when TLS 1.2 is negotiated." Cipher suites using SHA-384 were defined later on. """ if self.tls_version < 0x0300: return None elif self.tls_version == 0x0300: if read_or_write == "write": d = {"client": b"CLNT", "server": b"SRVR"} else: d = {"client": b"SRVR", "server": b"CLNT"} label = d[con_end] sslv3_md5_pad1 = b"\x36" * 48 sslv3_md5_pad2 = b"\x5c" * 48 sslv3_sha1_pad1 = b"\x36" * 40 sslv3_sha1_pad2 = b"\x5c" * 40 md5 = _tls_hash_algs["MD5"]() sha1 = _tls_hash_algs["SHA"]() md5_hash = md5.digest(master_secret + sslv3_md5_pad2 + md5.digest(handshake_msg + label + master_secret + sslv3_md5_pad1)) sha1_hash = sha1.digest(master_secret + sslv3_sha1_pad2 + sha1.digest(handshake_msg + label + master_secret + sslv3_sha1_pad1)) # noqa: E501 verify_data = md5_hash + sha1_hash else: if read_or_write == "write": d = {"client": "client", "server": "server"} else: d = {"client": "server", "server": "client"} label = ("%s finished" % d[con_end]).encode() if self.tls_version <= 0x0302: s1 = _tls_hash_algs["MD5"]().digest(handshake_msg) s2 = _tls_hash_algs["SHA"]().digest(handshake_msg) verify_data = self.prf(master_secret, label, s1 + s2, 12) else: if self.hash_name in ["MD5", "SHA"]: h = _tls_hash_algs["SHA256"]() else: h = _tls_hash_algs[self.hash_name]() s = h.digest(handshake_msg) verify_data = self.prf(master_secret, label, s, 12) return verify_data
['def', 'compute_verify_data', '(', 'self', ',', 'con_end', ',', 'read_or_write', ',', 'handshake_msg', ',', 'master_secret', ')', ':', 'if', 'self', '.', 'tls_version', '<', '0x0300', ':', 'return', 'None', 'elif', 'self', '.', 'tls_version', '==', '0x0300', ':', 'if', 'read_or_write', '==', '"write"', ':', 'd', '=', '{', '"client"', ':', 'b"CLNT"', ',', '"server"', ':', 'b"SRVR"', '}', 'else', ':', 'd', '=', '{', '"client"', ':', 'b"SRVR"', ',', '"server"', ':', 'b"CLNT"', '}', 'label', '=', 'd', '[', 'con_end', ']', 'sslv3_md5_pad1', '=', 'b"\\x36"', '*', '48', 'sslv3_md5_pad2', '=', 'b"\\x5c"', '*', '48', 'sslv3_sha1_pad1', '=', 'b"\\x36"', '*', '40', 'sslv3_sha1_pad2', '=', 'b"\\x5c"', '*', '40', 'md5', '=', '_tls_hash_algs', '[', '"MD5"', ']', '(', ')', 'sha1', '=', '_tls_hash_algs', '[', '"SHA"', ']', '(', ')', 'md5_hash', '=', 'md5', '.', 'digest', '(', 'master_secret', '+', 'sslv3_md5_pad2', '+', 'md5', '.', 'digest', '(', 'handshake_msg', '+', 'label', '+', 'master_secret', '+', 'sslv3_md5_pad1', ')', ')', 'sha1_hash', '=', 'sha1', '.', 'digest', '(', 'master_secret', '+', 'sslv3_sha1_pad2', '+', 'sha1', '.', 'digest', '(', 'handshake_msg', '+', 'label', '+', 'master_secret', '+', 'sslv3_sha1_pad1', ')', ')', '# noqa: E501', 'verify_data', '=', 'md5_hash', '+', 'sha1_hash', 'else', ':', 'if', 'read_or_write', '==', '"write"', ':', 'd', '=', '{', '"client"', ':', '"client"', ',', '"server"', ':', '"server"', '}', 'else', ':', 'd', '=', '{', '"client"', ':', '"server"', ',', '"server"', ':', '"client"', '}', 'label', '=', '(', '"%s finished"', '%', 'd', '[', 'con_end', ']', ')', '.', 'encode', '(', ')', 'if', 'self', '.', 'tls_version', '<=', '0x0302', ':', 's1', '=', '_tls_hash_algs', '[', '"MD5"', ']', '(', ')', '.', 'digest', '(', 'handshake_msg', ')', 's2', '=', '_tls_hash_algs', '[', '"SHA"', ']', '(', ')', '.', 'digest', '(', 'handshake_msg', ')', 'verify_data', '=', 'self', '.', 'prf', '(', 'master_secret', ',', 'label', ',', 's1', '+', 's2', ',', '12', ')', 'else', ':', 'if', 'self', '.', 'hash_name', 'in', '[', '"MD5"', ',', '"SHA"', ']', ':', 'h', '=', '_tls_hash_algs', '[', '"SHA256"', ']', '(', ')', 'else', ':', 'h', '=', '_tls_hash_algs', '[', 'self', '.', 'hash_name', ']', '(', ')', 's', '=', 'h', '.', 'digest', '(', 'handshake_msg', ')', 'verify_data', '=', 'self', '.', 'prf', '(', 'master_secret', ',', 'label', ',', 's', ',', '12', ')', 'return', 'verify_data']
Return verify_data based on handshake messages, connection end, master secret, and read_or_write position. See RFC 5246, section 7.4.9. Every TLS 1.2 cipher suite has a verify_data of length 12. Note also: "This PRF with the SHA-256 hash function is used for all cipher suites defined in this document and in TLS documents published prior to this document when TLS 1.2 is negotiated." Cipher suites using SHA-384 were defined later on.
['Return', 'verify_data', 'based', 'on', 'handshake', 'messages', 'connection', 'end', 'master', 'secret', 'and', 'read_or_write', 'position', '.', 'See', 'RFC', '5246', 'section', '7', '.', '4', '.', '9', '.']
train
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/crypto/prf.py#L236-L294
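For the common TLS 1.2 branch (SHA-256 suites), the same verify_data can be reproduced with just the standard library; a hedged sketch of RFC 5246's P_SHA256 expansion (function names are mine, and scapy's _tls_hash_algs plumbing is not used):

```python
import hashlib
import hmac

def p_sha256(secret, seed, length):
    # P_hash from RFC 5246, section 5: A(0) = seed, A(i) = HMAC(secret, A(i-1)),
    # output = HMAC(secret, A(1) + seed) + HMAC(secret, A(2) + seed) + ...
    out, a = b"", seed
    while len(out) < length:
        a = hmac.new(secret, a, hashlib.sha256).digest()
        out += hmac.new(secret, a + seed, hashlib.sha256).digest()
    return out[:length]

def tls12_verify_data(master_secret, handshake_msgs, con_end="client"):
    # verify_data = PRF(master_secret, "<side> finished",
    #                   Hash(handshake_messages))[:12]
    label = ("%s finished" % con_end).encode()
    seed = hashlib.sha256(handshake_msgs).digest()
    return p_sha256(master_secret, label + seed, 12)

print(tls12_verify_data(b"\x00" * 48, b"dummy handshake bytes").hex())
```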