Dataset schema:

  repo              string (length 7 to 54)
  path              string (length 4 to 192)
  url               string (length 87 to 284)
  code              string (length 78 to 104k)
  code_tokens       sequence
  docstring         string (length 1 to 46.9k)
  docstring_tokens  sequence
  language          string (1 distinct value)
  partition         string (3 distinct values)
saltstack/salt
salt/cloud/clouds/digitalocean.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/digitalocean.py#L1342-L1353
def _get_full_output(node, for_output=False):
    '''
    Helper function for _list_nodes to loop through all node information.

    Returns a dictionary containing the full information of a node.
    '''
    ret = {}
    for item in six.iterkeys(node):
        value = node[item]
        if value is not None and for_output:
            value = six.text_type(value)
        ret[item] = value
    return ret
[ "def", "_get_full_output", "(", "node", ",", "for_output", "=", "False", ")", ":", "ret", "=", "{", "}", "for", "item", "in", "six", ".", "iterkeys", "(", "node", ")", ":", "value", "=", "node", "[", "item", "]", "if", "value", "is", "not", "None", "and", "for_output", ":", "value", "=", "six", ".", "text_type", "(", "value", ")", "ret", "[", "item", "]", "=", "value", "return", "ret" ]
Helper function for _list_nodes to loop through all node information. Returns a dictionary containing the full information of a node.
[ "Helper", "function", "for", "_list_nodes", "to", "loop", "through", "all", "node", "information", ".", "Returns", "a", "dictionary", "containing", "the", "full", "information", "of", "a", "node", "." ]
python
train
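The helper above targets both Python 2 and 3 via `six`. A minimal Python 3-only sketch of the same pattern (the name `get_full_output` and the sample node dict are illustrative, not part of salt):

def get_full_output(node, for_output=False):
    """Return a dict of all node fields, stringified when for_output is set."""
    ret = {}
    for item, value in node.items():  # six.iterkeys + lookup collapses to items()
        if value is not None and for_output:
            value = str(value)  # six.text_type is str on Python 3
        ret[item] = value
    return ret

# Example: non-None values are stringified only when for_output=True.
print(get_full_output({'id': 42, 'region': None}, for_output=True))
# {'id': '42', 'region': None}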
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L1722-L1734
def create_missing(self):
    """Customize the process of auto-generating instance attributes.

    Populate ``template_kind`` if:

    * this template is not a snippet, and
    * the ``template_kind`` instance attribute is unset.

    """
    super(ProvisioningTemplate, self).create_missing()
    if (getattr(self, 'snippet', None) is False and
            not hasattr(self, 'template_kind')):
        self.template_kind = TemplateKind(self._server_config, id=1)
[ "def", "create_missing", "(", "self", ")", ":", "super", "(", "ProvisioningTemplate", ",", "self", ")", ".", "create_missing", "(", ")", "if", "(", "getattr", "(", "self", ",", "'snippet'", ",", "None", ")", "is", "False", "and", "not", "hasattr", "(", "self", ",", "'template_kind'", ")", ")", ":", "self", ".", "template_kind", "=", "TemplateKind", "(", "self", ".", "_server_config", ",", "id", "=", "1", ")" ]
Customize the process of auto-generating instance attributes.

Populate ``template_kind`` if:

* this template is not a snippet, and
* the ``template_kind`` instance attribute is unset.
[ "Customize", "the", "process", "of", "auto", "-", "generating", "instance", "attributes", "." ]
python
train
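The `create_missing` override illustrates a defaulting pattern: only populate an attribute when the caller never set it. A self-contained sketch under assumed names (`Base`, `Template`, and `kind` are hypothetical stand-ins for nailgun's classes):

class Base:
    def create_missing(self):
        pass  # stand-in for the parent class behaviour

class Template(Base):
    """Hypothetical class showing the getattr/hasattr defaulting pattern."""
    def __init__(self, snippet=False):
        self.snippet = snippet

    def create_missing(self):
        super().create_missing()
        # Only default the attribute when it was never set by the caller.
        if getattr(self, 'snippet', None) is False and not hasattr(self, 'kind'):
            self.kind = 'default-kind'

t = Template()
t.create_missing()
print(t.kind)  # default-kind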
google/grr
grr/client/grr_response_client/comms.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/comms.py#L1220-L1277
def Run(self):
  """The main run method of the client.

  This method does not normally return. Only if there have been more than
  connection_error_limit failures, the method returns and allows the
  client to exit.
  """
  while True:
    if self.http_manager.ErrorLimitReached():
      return

    # Check if there is a message from the nanny to be sent.
    self.client_worker.SendNannyMessage()

    now = time.time()
    # Check with the foreman if we need to
    if (now > self.last_foreman_check +
        config.CONFIG["Client.foreman_check_frequency"]):
      # We must not queue messages from the comms thread with blocking=True
      # or we might deadlock. If the output queue is full, we can't accept
      # more work from the foreman anyways so it's ok to drop the message.
      try:
        self.client_worker.SendReply(
            rdf_protodict.DataBlob(),
            session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
            require_fastpoll=False,
            blocking=False)
        self.last_foreman_check = now
      except queue.Full:
        pass

    try:
      self.RunOnce()
    except Exception:  # pylint: disable=broad-except
      # Catch everything, yes, this is terrible but necessary
      logging.warning("Uncaught exception caught: %s", traceback.format_exc())
      if flags.FLAGS.pdb_post_mortem:
        pdb.post_mortem()

    # We suicide if our memory is exceeded, and there is no more work to do
    # right now. Our death should not result in loss of messages since we are
    # not holding any requests in our input queues.
    if (self.client_worker.MemoryExceeded() and
        not self.client_worker.IsActive() and
        self.client_worker.InQueueSize() == 0 and
        self.client_worker.OutQueueSize() == 0):
      logging.warning("Memory exceeded - exiting.")
      self.client_worker.SendClientAlert("Memory limit exceeded, exiting.")
      # Make sure this will return True so we don't get more work.
      # pylint: disable=g-bad-name
      self.client_worker.MemoryExceeded = lambda: True
      # pylint: enable=g-bad-name
      # Now send back the client message.
      self.RunOnce()
      # And done for now.
      sys.exit(-1)

    self.timer.Wait()
[ "def", "Run", "(", "self", ")", ":", "while", "True", ":", "if", "self", ".", "http_manager", ".", "ErrorLimitReached", "(", ")", ":", "return", "# Check if there is a message from the nanny to be sent.", "self", ".", "client_worker", ".", "SendNannyMessage", "(", ")", "now", "=", "time", ".", "time", "(", ")", "# Check with the foreman if we need to", "if", "(", "now", ">", "self", ".", "last_foreman_check", "+", "config", ".", "CONFIG", "[", "\"Client.foreman_check_frequency\"", "]", ")", ":", "# We must not queue messages from the comms thread with blocking=True", "# or we might deadlock. If the output queue is full, we can't accept", "# more work from the foreman anyways so it's ok to drop the message.", "try", ":", "self", ".", "client_worker", ".", "SendReply", "(", "rdf_protodict", ".", "DataBlob", "(", ")", ",", "session_id", "=", "rdfvalue", ".", "FlowSessionID", "(", "flow_name", "=", "\"Foreman\"", ")", ",", "require_fastpoll", "=", "False", ",", "blocking", "=", "False", ")", "self", ".", "last_foreman_check", "=", "now", "except", "queue", ".", "Full", ":", "pass", "try", ":", "self", ".", "RunOnce", "(", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "# Catch everything, yes, this is terrible but necessary", "logging", ".", "warning", "(", "\"Uncaught exception caught: %s\"", ",", "traceback", ".", "format_exc", "(", ")", ")", "if", "flags", ".", "FLAGS", ".", "pdb_post_mortem", ":", "pdb", ".", "post_mortem", "(", ")", "# We suicide if our memory is exceeded, and there is no more work to do", "# right now. Our death should not result in loss of messages since we are", "# not holding any requests in our input queues.", "if", "(", "self", ".", "client_worker", ".", "MemoryExceeded", "(", ")", "and", "not", "self", ".", "client_worker", ".", "IsActive", "(", ")", "and", "self", ".", "client_worker", ".", "InQueueSize", "(", ")", "==", "0", "and", "self", ".", "client_worker", ".", "OutQueueSize", "(", ")", "==", "0", ")", ":", "logging", ".", "warning", "(", "\"Memory exceeded - exiting.\"", ")", "self", ".", "client_worker", ".", "SendClientAlert", "(", "\"Memory limit exceeded, exiting.\"", ")", "# Make sure this will return True so we don't get more work.", "# pylint: disable=g-bad-name", "self", ".", "client_worker", ".", "MemoryExceeded", "=", "lambda", ":", "True", "# pylint: enable=g-bad-name", "# Now send back the client message.", "self", ".", "RunOnce", "(", ")", "# And done for now.", "sys", ".", "exit", "(", "-", "1", ")", "self", ".", "timer", ".", "Wait", "(", ")" ]
The main run method of the client. This method does not normally return. Only if there have been more than connection_error_limit failures, the method returns and allows the client to exit.
[ "The", "main", "run", "method", "of", "the", "client", "." ]
python
train
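Stripped of GRR's messaging classes, `Run` is a poll loop: bail out at an error limit, do periodic work on an interval, and survive any exception raised by one iteration. A hedged skeleton with hypothetical callables (`do_work`, `error_limit_reached`), not GRR's API:

import time

def run_poll_loop(do_work, error_limit_reached, check_interval=10.0):
    """Skeleton of the loop above: exit once the error limit is hit, run
    periodic work on an interval, never let one bad iteration kill the loop."""
    last_check = 0.0
    while True:
        if error_limit_reached():
            return  # too many connection failures; let the caller exit
        now = time.time()
        if now > last_check + check_interval:
            last_check = now  # periodic work goes here, e.g. the foreman check
        try:
            do_work()
        except Exception as e:  # broad catch, mirroring the original
            print("uncaught exception:", e)
        time.sleep(1.0)  # stands in for self.timer.Wait()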
signalfx/signalfx-python
signalfx/signalflow/computation.py
https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/signalfx/signalflow/computation.py#L76-L169
def stream(self):
    """Iterate over the messages from the computation's output.

    Control and metadata messages are intercepted and interpreted to
    enhance this Computation's object knowledge of the computation's
    context. Data and event messages are yielded back to the caller as a
    generator.
    """
    iterator = iter(self._stream)
    while self._state < Computation.STATE_COMPLETED:
        try:
            message = next(iterator)
        except StopIteration:
            if self._state < Computation.STATE_COMPLETED:
                self._stream = self._execute()
                iterator = iter(self._stream)
            continue

        if isinstance(message, messages.StreamStartMessage):
            self._state = Computation.STATE_STREAM_STARTED
            continue

        if isinstance(message, messages.JobStartMessage):
            self._state = Computation.STATE_COMPUTATION_STARTED
            self._id = message.handle
            yield message
            continue

        if isinstance(message, messages.JobProgressMessage):
            yield message
            continue

        if isinstance(message, messages.ChannelAbortMessage):
            self._state = Computation.STATE_ABORTED
            raise errors.ComputationAborted(message.abort_info)

        if isinstance(message, messages.EndOfChannelMessage):
            self._state = Computation.STATE_COMPLETED
            continue

        # Intercept metadata messages to accumulate received metadata...
        if isinstance(message, messages.MetadataMessage):
            self._metadata[message.tsid] = message.properties
            yield message
            continue

        # ...as well as expired-tsid messages to clean it up.
        if isinstance(message, messages.ExpiredTsIdMessage):
            if message.tsid in self._metadata:
                del self._metadata[message.tsid]
            yield message
            continue

        if isinstance(message, messages.InfoMessage):
            self._process_info_message(message.message)
            self._batch_count_detected = True
            if self._current_batch_message:
                yield self._get_batch_to_yield()
            continue

        # Accumulate data messages and release them when we have received
        # all batches for the same logical timestamp.
        if isinstance(message, messages.DataMessage):
            self._state = Computation.STATE_DATA_RECEIVED
            if not self._batch_count_detected:
                self._expected_batches += 1
            if not self._current_batch_message:
                self._current_batch_message = message
                self._current_batch_count = 1
            elif (message.logical_timestamp_ms ==
                    self._current_batch_message.logical_timestamp_ms):
                self._current_batch_message.add_data(message.data)
                self._current_batch_count += 1
            else:
                self._batch_count_detected = True
            if (self._batch_count_detected and
                    self._current_batch_count == self._expected_batches):
                yield self._get_batch_to_yield()
            continue

        if isinstance(message, messages.EventMessage):
            yield message
            continue

        if isinstance(message, messages.ErrorMessage):
            raise errors.ComputationFailed(message.errors)

    # Yield last batch, even if potentially incomplete.
    if self._current_batch_message:
        yield self._get_batch_to_yield()
[ "def", "stream", "(", "self", ")", ":", "iterator", "=", "iter", "(", "self", ".", "_stream", ")", "while", "self", ".", "_state", "<", "Computation", ".", "STATE_COMPLETED", ":", "try", ":", "message", "=", "next", "(", "iterator", ")", "except", "StopIteration", ":", "if", "self", ".", "_state", "<", "Computation", ".", "STATE_COMPLETED", ":", "self", ".", "_stream", "=", "self", ".", "_execute", "(", ")", "iterator", "=", "iter", "(", "self", ".", "_stream", ")", "continue", "if", "isinstance", "(", "message", ",", "messages", ".", "StreamStartMessage", ")", ":", "self", ".", "_state", "=", "Computation", ".", "STATE_STREAM_STARTED", "continue", "if", "isinstance", "(", "message", ",", "messages", ".", "JobStartMessage", ")", ":", "self", ".", "_state", "=", "Computation", ".", "STATE_COMPUTATION_STARTED", "self", ".", "_id", "=", "message", ".", "handle", "yield", "message", "continue", "if", "isinstance", "(", "message", ",", "messages", ".", "JobProgressMessage", ")", ":", "yield", "message", "continue", "if", "isinstance", "(", "message", ",", "messages", ".", "ChannelAbortMessage", ")", ":", "self", ".", "_state", "=", "Computation", ".", "STATE_ABORTED", "raise", "errors", ".", "ComputationAborted", "(", "message", ".", "abort_info", ")", "if", "isinstance", "(", "message", ",", "messages", ".", "EndOfChannelMessage", ")", ":", "self", ".", "_state", "=", "Computation", ".", "STATE_COMPLETED", "continue", "# Intercept metadata messages to accumulate received metadata...", "if", "isinstance", "(", "message", ",", "messages", ".", "MetadataMessage", ")", ":", "self", ".", "_metadata", "[", "message", ".", "tsid", "]", "=", "message", ".", "properties", "yield", "message", "continue", "# ...as well as expired-tsid messages to clean it up.", "if", "isinstance", "(", "message", ",", "messages", ".", "ExpiredTsIdMessage", ")", ":", "if", "message", ".", "tsid", "in", "self", ".", "_metadata", ":", "del", "self", ".", "_metadata", "[", "message", ".", "tsid", "]", "yield", "message", "continue", "if", "isinstance", "(", "message", ",", "messages", ".", "InfoMessage", ")", ":", "self", ".", "_process_info_message", "(", "message", ".", "message", ")", "self", ".", "_batch_count_detected", "=", "True", "if", "self", ".", "_current_batch_message", ":", "yield", "self", ".", "_get_batch_to_yield", "(", ")", "continue", "# Accumulate data messages and release them when we have received", "# all batches for the same logical timestamp.", "if", "isinstance", "(", "message", ",", "messages", ".", "DataMessage", ")", ":", "self", ".", "_state", "=", "Computation", ".", "STATE_DATA_RECEIVED", "if", "not", "self", ".", "_batch_count_detected", ":", "self", ".", "_expected_batches", "+=", "1", "if", "not", "self", ".", "_current_batch_message", ":", "self", ".", "_current_batch_message", "=", "message", "self", ".", "_current_batch_count", "=", "1", "elif", "(", "message", ".", "logical_timestamp_ms", "==", "self", ".", "_current_batch_message", ".", "logical_timestamp_ms", ")", ":", "self", ".", "_current_batch_message", ".", "add_data", "(", "message", ".", "data", ")", "self", ".", "_current_batch_count", "+=", "1", "else", ":", "self", ".", "_batch_count_detected", "=", "True", "if", "(", "self", ".", "_batch_count_detected", "and", "self", ".", "_current_batch_count", "==", "self", ".", "_expected_batches", ")", ":", "yield", "self", ".", "_get_batch_to_yield", "(", ")", "continue", "if", "isinstance", "(", "message", ",", "messages", ".", "EventMessage", ")", ":", "yield", "message", 
"continue", "if", "isinstance", "(", "message", ",", "messages", ".", "ErrorMessage", ")", ":", "raise", "errors", ".", "ComputationFailed", "(", "message", ".", "errors", ")", "# Yield last batch, even if potentially incomplete.", "if", "self", ".", "_current_batch_message", ":", "yield", "self", ".", "_get_batch_to_yield", "(", ")" ]
Iterate over the messages from the computation's output. Control and metadata messages are intercepted and interpreted to enhance this Computation's object knowledge of the computation's context. Data and event messages are yielded back to the caller as a generator.
[ "Iterate", "over", "the", "messages", "from", "the", "computation", "s", "output", "." ]
python
train
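The core of `stream` is grouping consecutive data messages that share a logical timestamp into one batch. A minimal sketch of that grouping, using plain `(timestamp_ms, data)` tuples instead of the real signalfx message classes:

def batch_by_timestamp(messages):
    """Accumulate data messages sharing a logical timestamp and yield each
    completed batch; the final batch is flushed even if incomplete."""
    current_ts, current_data = None, []
    for ts, data in messages:
        if current_ts is None or ts == current_ts:
            current_ts = ts
            current_data.append(data)
        else:
            yield current_ts, current_data  # timestamp changed: flush
            current_ts, current_data = ts, [data]
    if current_data:
        yield current_ts, current_data  # last batch, possibly incomplete

print(list(batch_by_timestamp([(1, 'a'), (1, 'b'), (2, 'c')])))
# [(1, ['a', 'b']), (2, ['c'])]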
mLewisLogic/foursquare
foursquare/__init__.py
https://github.com/mLewisLogic/foursquare/blob/420f3b588b9af154688ec82649f24a70f96c1665/foursquare/__init__.py#L776-L787
def _process_response(response):
    """Make the request and handle exception processing"""
    # Read the response as JSON
    try:
        data = response.json()
    except ValueError:
        _log_and_raise_exception('Invalid response', response.text)

    # Default case, Got proper response
    if response.status_code == 200:
        return { 'headers': response.headers, 'data': data }
    return _raise_error_from_response(data)
[ "def", "_process_response", "(", "response", ")", ":", "# Read the response as JSON", "try", ":", "data", "=", "response", ".", "json", "(", ")", "except", "ValueError", ":", "_log_and_raise_exception", "(", "'Invalid response'", ",", "response", ".", "text", ")", "# Default case, Got proper response", "if", "response", ".", "status_code", "==", "200", ":", "return", "{", "'headers'", ":", "response", ".", "headers", ",", "'data'", ":", "data", "}", "return", "_raise_error_from_response", "(", "data", ")" ]
Make the request and handle exception processing
[ "Make", "the", "request", "and", "handle", "exception", "processing" ]
python
train
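For contrast, here is roughly the same response handling written against the public `requests` response API, with plain exceptions standing in for the module's private `_log_and_raise_exception` and `_raise_error_from_response` helpers (a sketch, not the library's code):

import requests

def process_response(response):
    """Validate a requests.Response: parse JSON, return payload on HTTP 200,
    raise on anything else."""
    try:
        data = response.json()  # raises ValueError on non-JSON bodies
    except ValueError:
        raise RuntimeError('Invalid response: %r' % response.text[:200])
    if response.status_code == 200:
        return {'headers': response.headers, 'data': data}
    raise RuntimeError('API error: %r' % data)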
happyleavesaoc/python-limitlessled
limitlessled/__init__.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/__init__.py#L36-L42
def add_bridge(self, bridge):
    """ Add bridge groups.

    :param bridge: Add groups from this bridge.
    """
    for group in bridge.groups:
        self._groups[group.name] = group
[ "def", "add_bridge", "(", "self", ",", "bridge", ")", ":", "for", "group", "in", "bridge", ".", "groups", ":", "self", ".", "_groups", "[", "group", ".", "name", "]", "=", "group" ]
Add bridge groups.

:param bridge: Add groups from this bridge.
[ "Add", "bridge", "groups", "." ]
python
train
pantsbuild/pants
src/python/pants/util/dirutil.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/util/dirutil.py#L341-L357
def safe_rm_oldest_items_in_dir(root_dir, num_of_items_to_keep, excludes=frozenset()):
    """
    Keep `num_of_items_to_keep` newly modified items besides `excludes` in `root_dir` then remove the rest.

    :param root_dir: the folder to examine
    :param num_of_items_to_keep: number of files/folders/symlinks to keep after the cleanup
    :param excludes: absolute paths excluded from removal (must be prefixed with `root_dir`)
    :return: none
    """
    if os.path.isdir(root_dir):
        found_files = []
        for old_file in os.listdir(root_dir):
            full_path = os.path.join(root_dir, old_file)
            if full_path not in excludes:
                found_files.append((full_path, os.path.getmtime(full_path)))
        found_files = sorted(found_files, key=lambda x: x[1], reverse=True)
        for cur_file, _ in found_files[num_of_items_to_keep:]:
            rm_rf(cur_file)
[ "def", "safe_rm_oldest_items_in_dir", "(", "root_dir", ",", "num_of_items_to_keep", ",", "excludes", "=", "frozenset", "(", ")", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "root_dir", ")", ":", "found_files", "=", "[", "]", "for", "old_file", "in", "os", ".", "listdir", "(", "root_dir", ")", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "old_file", ")", "if", "full_path", "not", "in", "excludes", ":", "found_files", ".", "append", "(", "(", "full_path", ",", "os", ".", "path", ".", "getmtime", "(", "full_path", ")", ")", ")", "found_files", "=", "sorted", "(", "found_files", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "for", "cur_file", ",", "_", "in", "found_files", "[", "num_of_items_to_keep", ":", "]", ":", "rm_rf", "(", "cur_file", ")" ]
Keep `num_of_items_to_keep` newly modified items besides `excludes` in `root_dir` then remove the rest.

:param root_dir: the folder to examine
:param num_of_items_to_keep: number of files/folders/symlinks to keep after the cleanup
:param excludes: absolute paths excluded from removal (must be prefixed with `root_dir`)
:return: none
[ "Keep", "num_of_items_to_keep", "newly", "modified", "items", "besides", "excludes", "in", "root_dir", "then", "remove", "the", "rest", ".", ":", "param", "root_dir", ":", "the", "folder", "to", "examine", ":", "param", "num_of_items_to_keep", ":", "number", "of", "files", "/", "folders", "/", "symlinks", "to", "keep", "after", "the", "cleanup", ":", "param", "excludes", ":", "absolute", "paths", "excluded", "from", "removal", "(", "must", "be", "prefixed", "with", "root_dir", ")", ":", "return", ":", "none" ]
python
train
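A standalone variant of the same cleanup is easy to test: the sketch below substitutes `shutil.rmtree`/`os.remove` for pants' `rm_rf` helper (the name `rm_oldest` is hypothetical):

import os
import shutil

def rm_oldest(root_dir, keep, excludes=frozenset()):
    """Keep the `keep` most recently modified entries in root_dir (minus
    excludes) and delete everything else."""
    if not os.path.isdir(root_dir):
        return
    found = []
    for name in os.listdir(root_dir):
        full_path = os.path.join(root_dir, name)
        if full_path not in excludes:
            found.append((full_path, os.path.getmtime(full_path)))
    found.sort(key=lambda x: x[1], reverse=True)  # newest first
    for item, _ in found[keep:]:  # everything past the first `keep` entries
        if os.path.isdir(item) and not os.path.islink(item):
            shutil.rmtree(item, ignore_errors=True)
        else:
            os.remove(item)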
talkincode/toughlib
toughlib/btforms/net.py
https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/btforms/net.py#L100-L114
def urlquote(val):
    """
    Quotes a string for use in a URL.

    >>> urlquote('://?f=1&j=1')
    '%3A//%3Ff%3D1%26j%3D1'
    >>> urlquote(None)
    ''
    >>> urlquote(u'\u203d')
    '%E2%80%BD'
    """
    if val is None:
        return ''
    if not isinstance(val, unicode):
        val = str(val)
    else:
        val = val.encode('utf-8')
    return urllib.quote(val)
[ "def", "urlquote", "(", "val", ")", ":", "if", "val", "is", "None", ":", "return", "''", "if", "not", "isinstance", "(", "val", ",", "unicode", ")", ":", "val", "=", "str", "(", "val", ")", "else", ":", "val", "=", "val", ".", "encode", "(", "'utf-8'", ")", "return", "urllib", ".", "quote", "(", "val", ")" ]
Quotes a string for use in a URL.

>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
[ "Quotes", "a", "string", "for", "use", "in", "a", "URL", ".", ">>>", "urlquote", "(", ":", "//", "?f", "=", "1&j", "=", "1", ")", "%3A", "//", "%3Ff%3D1%26j%3D1", ">>>", "urlquote", "(", "None", ")", ">>>", "urlquote", "(", "u", "\\", "u203d", ")", "%E2%80%BD" ]
python
train
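The function above is Python 2 code (`unicode`, `urllib.quote`). On Python 3 the same behaviour reduces to `urllib.parse.quote`, which accepts `str` and encodes UTF-8 by default; a sketch (`urlquote_py3` is an illustrative name):

from urllib.parse import quote

def urlquote_py3(val):
    """Python 3 equivalent of the helper above."""
    if val is None:
        return ''
    return quote(str(val), safe='/')

print(urlquote_py3('://?f=1&j=1'))  # %3A//%3Ff%3D1%26j%3D1
print(urlquote_py3(None))           # ''
print(urlquote_py3('\u203d'))       # %E2%80%BD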
davgeo/clear
clear/util.py
https://github.com/davgeo/clear/blob/5ec85d27efd28afddfcd4c3f44df17f0115a77aa/clear/util.py#L162-L240
def UserAcceptance(
    matchList,
    recursiveLookup = True,
    promptComment = None,
    promptOnly = False,
    xStrOverride = "to skip this selection"
):
    """
    Prompt user to select an entry from a given match list or to enter a new
    string to look up. If the match list is empty the user must enter a new
    string or exit.

    Parameters
    ----------
    matchList : list
        A list of entries which the user can select a valid match from.

    recursiveLookup : boolean [optional: default = True]
        Allow user to enter a new string to look up.

    promptComment : string [optional: default = None]
        Add an additional comment on the end of the prompt message.

    promptOnly : boolean [optional: default = False]
        Set to true if the match list is expected to be empty, in which case
        the presence of an empty match list will not be mentioned and the user
        will be expected to enter a new response to look up.

    xStrOverride : string [optional: default = "to skip this selection"]
        Override the string for the 'x' response. This can be used if the
        behaviour of the 'x' response is changed.

    Returns
    ----------
    string or None
        Either an entry from matchList, another valid response or a new
        string to look up. If the match list is empty and recursive lookup is
        disabled, or if the user response is 'x', this will return None.
    """
    matchString = ', '.join(matchList)

    if len(matchList) == 1:
        goodlogging.Log.Info("UTIL", "Match found: {0}".format(matchString))
        prompt = "Enter 'y' to accept this match or e"
    elif len(matchList) > 1:
        goodlogging.Log.Info("UTIL", "Multiple possible matches found: {0}".format(matchString))
        prompt = "Enter correct match from list or e"
    else:
        if promptOnly is False:
            goodlogging.Log.Info("UTIL", "No match found")
        prompt = "E"
        if not recursiveLookup:
            return None

    if recursiveLookup:
        prompt = prompt + "nter a different string to look up or e"

    prompt = prompt + "nter 'x' {0} or enter 'exit' to quit this program".format(xStrOverride)

    if promptComment is None:
        prompt = prompt + ": "
    else:
        prompt = prompt + " ({0}): ".format(promptComment)

    while(1):
        response = goodlogging.Log.Input('UTIL', prompt)

        if response.lower() == 'exit':
            goodlogging.Log.Fatal("UTIL", "Program terminated by user 'exit'")
        if response.lower() == 'x':
            return None
        elif response.lower() == 'y' and len(matchList) == 1:
            return matchList[0]
        elif len(matchList) > 1:
            for match in matchList:
                if response.lower() == match.lower():
                    return match

        if recursiveLookup:
            return response
[ "def", "UserAcceptance", "(", "matchList", ",", "recursiveLookup", "=", "True", ",", "promptComment", "=", "None", ",", "promptOnly", "=", "False", ",", "xStrOverride", "=", "\"to skip this selection\"", ")", ":", "matchString", "=", "', '", ".", "join", "(", "matchList", ")", "if", "len", "(", "matchList", ")", "==", "1", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"UTIL\"", ",", "\"Match found: {0}\"", ".", "format", "(", "matchString", ")", ")", "prompt", "=", "\"Enter 'y' to accept this match or e\"", "elif", "len", "(", "matchList", ")", ">", "1", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"UTIL\"", ",", "\"Multiple possible matches found: {0}\"", ".", "format", "(", "matchString", ")", ")", "prompt", "=", "\"Enter correct match from list or e\"", "else", ":", "if", "promptOnly", "is", "False", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"UTIL\"", ",", "\"No match found\"", ")", "prompt", "=", "\"E\"", "if", "not", "recursiveLookup", ":", "return", "None", "if", "recursiveLookup", ":", "prompt", "=", "prompt", "+", "\"nter a different string to look up or e\"", "prompt", "=", "prompt", "+", "\"nter 'x' {0} or enter 'exit' to quit this program\"", ".", "format", "(", "xStrOverride", ")", "if", "promptComment", "is", "None", ":", "prompt", "=", "prompt", "+", "\": \"", "else", ":", "prompt", "=", "prompt", "+", "\" ({0}): \"", ".", "format", "(", "promptComment", ")", "while", "(", "1", ")", ":", "response", "=", "goodlogging", ".", "Log", ".", "Input", "(", "'UTIL'", ",", "prompt", ")", "if", "response", ".", "lower", "(", ")", "==", "'exit'", ":", "goodlogging", ".", "Log", ".", "Fatal", "(", "\"UTIL\"", ",", "\"Program terminated by user 'exit'\"", ")", "if", "response", ".", "lower", "(", ")", "==", "'x'", ":", "return", "None", "elif", "response", ".", "lower", "(", ")", "==", "'y'", "and", "len", "(", "matchList", ")", "==", "1", ":", "return", "matchList", "[", "0", "]", "elif", "len", "(", "matchList", ")", ">", "1", ":", "for", "match", "in", "matchList", ":", "if", "response", ".", "lower", "(", ")", "==", "match", ".", "lower", "(", ")", ":", "return", "match", "if", "recursiveLookup", ":", "return", "response" ]
Prompt user to select an entry from a given match list or to enter a new string to look up. If the match list is empty the user must enter a new string or exit.

Parameters
----------
matchList : list
    A list of entries which the user can select a valid match from.

recursiveLookup : boolean [optional: default = True]
    Allow user to enter a new string to look up.

promptComment : string [optional: default = None]
    Add an additional comment on the end of the prompt message.

promptOnly : boolean [optional: default = False]
    Set to true if the match list is expected to be empty, in which case the
    presence of an empty match list will not be mentioned and the user will
    be expected to enter a new response to look up.

xStrOverride : string [optional: default = "to skip this selection"]
    Override the string for the 'x' response. This can be used if the
    behaviour of the 'x' response is changed.

Returns
----------
string or None
    Either an entry from matchList, another valid response or a new string
    to look up. If the match list is empty and recursive lookup is disabled,
    or if the user response is 'x', this will return None.
[ "Prompt", "user", "to", "select", "a", "entry", "from", "a", "given", "match", "list", "or", "to", "enter", "a", "new", "string", "to", "look", "up", ".", "If", "the", "match", "list", "is", "empty", "user", "must", "enter", "a", "new", "string", "or", "exit", "." ]
python
train
Kortemme-Lab/klab
klab/bio/clustalo.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/clustalo.py#L664-L679
def _get_uniparc_sequences_through_uniprot_ACs(self, mapping_pdb_id, uniprot_ACs, cache_dir):
    '''Get the UniParc sequences associated with the UniProt accession number.'''

    # Map the UniProt ACs to the UniParc IDs
    m = uniprot_map('ACC', 'UPARC', uniprot_ACs, cache_dir = cache_dir)
    UniParcIDs = []
    for _, v in m.iteritems():
        UniParcIDs.extend(v)

    # Create a mapping from the mapping_pdb_id to the UniParcEntry objects. This must match the return type from pdb_to_uniparc.
    mapping = {mapping_pdb_id : []}
    for UniParcID in UniParcIDs:
        entry = UniParcEntry(UniParcID, cache_dir = cache_dir)
        mapping[mapping_pdb_id].append(entry)

    return mapping
[ "def", "_get_uniparc_sequences_through_uniprot_ACs", "(", "self", ",", "mapping_pdb_id", ",", "uniprot_ACs", ",", "cache_dir", ")", ":", "# Map the UniProt ACs to the UniParc IDs", "m", "=", "uniprot_map", "(", "'ACC'", ",", "'UPARC'", ",", "uniprot_ACs", ",", "cache_dir", "=", "cache_dir", ")", "UniParcIDs", "=", "[", "]", "for", "_", ",", "v", "in", "m", ".", "iteritems", "(", ")", ":", "UniParcIDs", ".", "extend", "(", "v", ")", "# Create a mapping from the mapping_pdb_id to the UniParcEntry objects. This must match the return type from pdb_to_uniparc.", "mapping", "=", "{", "mapping_pdb_id", ":", "[", "]", "}", "for", "UniParcID", "in", "UniParcIDs", ":", "entry", "=", "UniParcEntry", "(", "UniParcID", ",", "cache_dir", "=", "cache_dir", ")", "mapping", "[", "mapping_pdb_id", "]", ".", "append", "(", "entry", ")", "return", "mapping" ]
Get the UniParc sequences associated with the UniProt accession number.
[ "Get", "the", "UniParc", "sequences", "associated", "with", "the", "UniProt", "accession", "number", "." ]
python
train
dwavesystems/dwavebinarycsp
dwavebinarycsp/compilers/stitcher.py
https://github.com/dwavesystems/dwavebinarycsp/blob/d6b1e70ceaa8f451d7afaa87ea10c7fc948a64e2/dwavebinarycsp/compilers/stitcher.py#L34-L196
def stitch(csp, min_classical_gap=2.0, max_graph_size=8):
    """Build a binary quadratic model with minimal energy levels at solutions
    to the specified constraint satisfaction problem.

    Args:
        csp (:obj:`.ConstraintSatisfactionProblem`):
            Constraint satisfaction problem.

        min_classical_gap (float, optional, default=2.0):
            Minimum energy gap from ground. Each constraint violated by the
            solution increases the energy level of the binary quadratic model
            by at least this much relative to ground energy.

        max_graph_size (int, optional, default=8):
            Maximum number of variables in the binary quadratic model that can
            be used to represent a single constraint.

    Returns:
        :class:`~dimod.BinaryQuadraticModel`

    Notes:
        For a `min_classical_gap` > 2 or constraints with more than two
        variables, requires access to factories from the penaltymodel_
        ecosystem to construct the binary quadratic model.

    .. _penaltymodel: https://github.com/dwavesystems/penaltymodel

    Examples:
        This example creates a binary-valued constraint satisfaction problem
        with two constraints, :math:`a = b` and :math:`b \\ne c`, and builds
        a binary quadratic model with a minimum energy level of -2 such that
        each constraint violation by a solution adds the default minimum
        energy gap.

        >>> import dwavebinarycsp
        >>> import operator
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
        >>> csp.add_constraint(operator.eq, ['a', 'b'])  # a == b
        >>> csp.add_constraint(operator.ne, ['b', 'c'])  # b != c
        >>> bqm = dwavebinarycsp.stitch(csp)
        >>> bqm.energy({'a': 0, 'b': 0, 'c': 1})  # satisfies csp
        -2.0
        >>> bqm.energy({'a': 0, 'b': 0, 'c': 0})  # violates one constraint
        0.0
        >>> bqm.energy({'a': 1, 'b': 0, 'c': 0})  # violates two constraints
        2.0

        This example creates a binary-valued constraint satisfaction problem
        with two constraints, :math:`a = b` and :math:`b \\ne c`, and builds
        a binary quadratic model with a minimum energy gap of 4. Note that in
        this case the conversion to binary quadratic model adds two ancillary
        variables that must be minimized over when solving.

        >>> import dwavebinarycsp
        >>> import operator
        >>> import itertools
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
        >>> csp.add_constraint(operator.eq, ['a', 'b'])  # a == b
        >>> csp.add_constraint(operator.ne, ['b', 'c'])  # b != c
        >>> bqm = dwavebinarycsp.stitch(csp, min_classical_gap=4.0)
        >>> list(bqm)  # # doctest: +SKIP
        ['a', 'aux1', 'aux0', 'b', 'c']
        >>> min([bqm.energy({'a': 0, 'b': 0, 'c': 1, 'aux0': aux0, 'aux1': aux1}) for
        ...      aux0, aux1 in list(itertools.product([0, 1], repeat=2))])  # satisfies csp
        -6.0
        >>> min([bqm.energy({'a': 0, 'b': 0, 'c': 0, 'aux0': aux0, 'aux1': aux1}) for
        ...      aux0, aux1 in list(itertools.product([0, 1], repeat=2))])  # violates one constraint
        -2.0
        >>> min([bqm.energy({'a': 1, 'b': 0, 'c': 0, 'aux0': aux0, 'aux1': aux1}) for
        ...      aux0, aux1 in list(itertools.product([0, 1], repeat=2))])  # violates two constraints
        2.0

        This example finds for the previous example the minimum graph size.

        >>> import dwavebinarycsp
        >>> import operator
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
        >>> csp.add_constraint(operator.eq, ['a', 'b'])  # a == b
        >>> csp.add_constraint(operator.ne, ['b', 'c'])  # b != c
        >>> for n in range(8, 1, -1):
        ...     try:
        ...         bqm = dwavebinarycsp.stitch(csp, min_classical_gap=4.0, max_graph_size=n)
        ...     except dwavebinarycsp.exceptions.ImpossibleBQM:
        ...         print(n+1)
        ...
        3

    """
    # ensure we have penaltymodel factory available
    try:
        dwavebinarycsp.assert_penaltymodel_factory_available()
    except AssertionError as e:
        raise RuntimeError(e)

    def aux_factory():
        for i in count():
            yield 'aux{}'.format(i)

    aux = aux_factory()

    bqm = dimod.BinaryQuadraticModel.empty(csp.vartype)

    # developer note: we could cache them and relabel, for now though let's do the simple thing
    # penalty_models = {}

    for const in csp.constraints:
        configurations = const.configurations

        if len(const.variables) > max_graph_size:
            msg = ("The given csp contains a constraint {const} with {num_var} variables. "
                   "This cannot be mapped to a graph with {max_graph_size} nodes. "
                   "Consider checking whether your constraint is irreducible."
                   "").format(const=const, num_var=len(const.variables),
                              max_graph_size=max_graph_size)
            raise ImpossibleBQM(msg)

        pmodel = None

        if len(const) == 0:
            # empty constraint
            continue

        if min_classical_gap <= 2.0:
            if len(const) == 1 and max_graph_size >= 1:
                bqm.update(_bqm_from_1sat(const))
                continue
            elif len(const) == 2 and max_graph_size >= 2:
                bqm.update(_bqm_from_2sat(const))
                continue

        # developer note: we could cache them and relabel, for now though let's do the simple thing
        # if configurations in penalty_models:
        #     raise NotImplementedError

        for G in iter_complete_graphs(const.variables, max_graph_size + 1, aux):

            # construct a specification
            spec = pm.Specification(
                graph=G,
                decision_variables=const.variables,
                feasible_configurations=configurations,
                min_classical_gap=min_classical_gap,
                vartype=csp.vartype
            )

            # try to use the penaltymodel ecosystem
            try:
                pmodel = pm.get_penalty_model(spec)
            except pm.ImpossiblePenaltyModel:
                # hopefully adding more variables will make it possible
                continue

            if pmodel.classical_gap >= min_classical_gap:
                break

        # developer note: we could cache them and relabel, for now though let's do the simple thing
        # penalty_models[configurations] = pmodel

        else:
            msg = ("No penalty model can be built for constraint {}".format(const))
            raise ImpossibleBQM(msg)

        bqm.update(pmodel.model)

    return bqm
[ "def", "stitch", "(", "csp", ",", "min_classical_gap", "=", "2.0", ",", "max_graph_size", "=", "8", ")", ":", "# ensure we have penaltymodel factory available", "try", ":", "dwavebinarycsp", ".", "assert_penaltymodel_factory_available", "(", ")", "except", "AssertionError", "as", "e", ":", "raise", "RuntimeError", "(", "e", ")", "def", "aux_factory", "(", ")", ":", "for", "i", "in", "count", "(", ")", ":", "yield", "'aux{}'", ".", "format", "(", "i", ")", "aux", "=", "aux_factory", "(", ")", "bqm", "=", "dimod", ".", "BinaryQuadraticModel", ".", "empty", "(", "csp", ".", "vartype", ")", "# developer note: we could cache them and relabel, for now though let's do the simple thing", "# penalty_models = {}", "for", "const", "in", "csp", ".", "constraints", ":", "configurations", "=", "const", ".", "configurations", "if", "len", "(", "const", ".", "variables", ")", ">", "max_graph_size", ":", "msg", "=", "(", "\"The given csp contains a constraint {const} with {num_var} variables. \"", "\"This cannot be mapped to a graph with {max_graph_size} nodes. \"", "\"Consider checking whether your constraint is irreducible.\"", "\"\"", ")", ".", "format", "(", "const", "=", "const", ",", "num_var", "=", "len", "(", "const", ".", "variables", ")", ",", "max_graph_size", "=", "max_graph_size", ")", "raise", "ImpossibleBQM", "(", "msg", ")", "pmodel", "=", "None", "if", "len", "(", "const", ")", "==", "0", ":", "# empty constraint", "continue", "if", "min_classical_gap", "<=", "2.0", ":", "if", "len", "(", "const", ")", "==", "1", "and", "max_graph_size", ">=", "1", ":", "bqm", ".", "update", "(", "_bqm_from_1sat", "(", "const", ")", ")", "continue", "elif", "len", "(", "const", ")", "==", "2", "and", "max_graph_size", ">=", "2", ":", "bqm", ".", "update", "(", "_bqm_from_2sat", "(", "const", ")", ")", "continue", "# developer note: we could cache them and relabel, for now though let's do the simple thing", "# if configurations in penalty_models:", "# raise NotImplementedError", "for", "G", "in", "iter_complete_graphs", "(", "const", ".", "variables", ",", "max_graph_size", "+", "1", ",", "aux", ")", ":", "# construct a specification", "spec", "=", "pm", ".", "Specification", "(", "graph", "=", "G", ",", "decision_variables", "=", "const", ".", "variables", ",", "feasible_configurations", "=", "configurations", ",", "min_classical_gap", "=", "min_classical_gap", ",", "vartype", "=", "csp", ".", "vartype", ")", "# try to use the penaltymodel ecosystem", "try", ":", "pmodel", "=", "pm", ".", "get_penalty_model", "(", "spec", ")", "except", "pm", ".", "ImpossiblePenaltyModel", ":", "# hopefully adding more variables will make it possible", "continue", "if", "pmodel", ".", "classical_gap", ">=", "min_classical_gap", ":", "break", "# developer note: we could cache them and relabel, for now though let's do the simple thing", "# penalty_models[configurations] = pmodel", "else", ":", "msg", "=", "(", "\"No penalty model can be build for constraint {}\"", ".", "format", "(", "const", ")", ")", "raise", "ImpossibleBQM", "(", "msg", ")", "bqm", ".", "update", "(", "pmodel", ".", "model", ")", "return", "bqm" ]
Build a binary quadratic model with minimal energy levels at solutions to the specified constraint satisfaction problem.

Args:
    csp (:obj:`.ConstraintSatisfactionProblem`):
        Constraint satisfaction problem.

    min_classical_gap (float, optional, default=2.0):
        Minimum energy gap from ground. Each constraint violated by the
        solution increases the energy level of the binary quadratic model by
        at least this much relative to ground energy.

    max_graph_size (int, optional, default=8):
        Maximum number of variables in the binary quadratic model that can be
        used to represent a single constraint.

Returns:
    :class:`~dimod.BinaryQuadraticModel`

Notes:
    For a `min_classical_gap` > 2 or constraints with more than two
    variables, requires access to factories from the penaltymodel_ ecosystem
    to construct the binary quadratic model.

.. _penaltymodel: https://github.com/dwavesystems/penaltymodel

Examples:
    This example creates a binary-valued constraint satisfaction problem with
    two constraints, :math:`a = b` and :math:`b \\ne c`, and builds a binary
    quadratic model with a minimum energy level of -2 such that each
    constraint violation by a solution adds the default minimum energy gap.

    >>> import dwavebinarycsp
    >>> import operator
    >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
    >>> csp.add_constraint(operator.eq, ['a', 'b'])  # a == b
    >>> csp.add_constraint(operator.ne, ['b', 'c'])  # b != c
    >>> bqm = dwavebinarycsp.stitch(csp)
    >>> bqm.energy({'a': 0, 'b': 0, 'c': 1})  # satisfies csp
    -2.0
    >>> bqm.energy({'a': 0, 'b': 0, 'c': 0})  # violates one constraint
    0.0
    >>> bqm.energy({'a': 1, 'b': 0, 'c': 0})  # violates two constraints
    2.0

    This example creates a binary-valued constraint satisfaction problem with
    two constraints, :math:`a = b` and :math:`b \\ne c`, and builds a binary
    quadratic model with a minimum energy gap of 4. Note that in this case
    the conversion to binary quadratic model adds two ancillary variables
    that must be minimized over when solving.

    >>> import dwavebinarycsp
    >>> import operator
    >>> import itertools
    >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
    >>> csp.add_constraint(operator.eq, ['a', 'b'])  # a == b
    >>> csp.add_constraint(operator.ne, ['b', 'c'])  # b != c
    >>> bqm = dwavebinarycsp.stitch(csp, min_classical_gap=4.0)
    >>> list(bqm)  # # doctest: +SKIP
    ['a', 'aux1', 'aux0', 'b', 'c']
    >>> min([bqm.energy({'a': 0, 'b': 0, 'c': 1, 'aux0': aux0, 'aux1': aux1}) for
    ...      aux0, aux1 in list(itertools.product([0, 1], repeat=2))])  # satisfies csp
    -6.0
    >>> min([bqm.energy({'a': 0, 'b': 0, 'c': 0, 'aux0': aux0, 'aux1': aux1}) for
    ...      aux0, aux1 in list(itertools.product([0, 1], repeat=2))])  # violates one constraint
    -2.0
    >>> min([bqm.energy({'a': 1, 'b': 0, 'c': 0, 'aux0': aux0, 'aux1': aux1}) for
    ...      aux0, aux1 in list(itertools.product([0, 1], repeat=2))])  # violates two constraints
    2.0

    This example finds for the previous example the minimum graph size.

    >>> import dwavebinarycsp
    >>> import operator
    >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
    >>> csp.add_constraint(operator.eq, ['a', 'b'])  # a == b
    >>> csp.add_constraint(operator.ne, ['b', 'c'])  # b != c
    >>> for n in range(8, 1, -1):
    ...     try:
    ...         bqm = dwavebinarycsp.stitch(csp, min_classical_gap=4.0, max_graph_size=n)
    ...     except dwavebinarycsp.exceptions.ImpossibleBQM:
    ...         print(n+1)
    ...
    3
[ "Build", "a", "binary", "quadratic", "model", "with", "minimal", "energy", "levels", "at", "solutions", "to", "the", "specified", "constraint", "satisfaction", "problem", "." ]
python
valid
rochacbruno/dynaconf
docs/customexts/aafig.py
https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/docs/customexts/aafig.py#L153-L212
def render_aafigure(app, text, options):
    """
    Render an ASCII art figure into the requested format output file.
    """
    if aafigure is None:
        raise AafigError('aafigure module not installed')

    fname = get_basename(text, options)
    fname = '%s.%s' % (get_basename(text, options), options['format'])
    if app.builder.format == 'html':
        # HTML
        imgpath = relative_uri(app.builder.env.docname, '_images')
        relfn = posixpath.join(imgpath, fname)
        outfn = path.join(app.builder.outdir, '_images', fname)
    else:
        # Non-HTML
        if app.builder.format != 'latex':
            app.builder.warn('aafig: the builder format %s is not officially '
                             'supported, aafigure images could not work. Please report '
                             'problems and working builder to avoid this warning in '
                             'the future' % app.builder.format)
        relfn = fname
        outfn = path.join(app.builder.outdir, fname)
    metadata_fname = '%s.aafig' % outfn

    try:
        if path.isfile(outfn):
            extra = None
            if options['format'].lower() == 'svg':
                f = None
                try:
                    try:
                        f = open(metadata_fname, 'r')
                        extra = f.read()
                    except:
                        raise AafigError()
                finally:
                    if f is not None:
                        f.close()
            return relfn, outfn, id, extra
    except AafigError:
        pass

    ensuredir(path.dirname(outfn))

    try:
        (visitor, output) = aafigure.render(text, outfn, options)
        output.close()
    except aafigure.UnsupportedFormatError as e:
        raise AafigError(str(e))

    extra = None
    if options['format'].lower() == 'svg':
        extra = visitor.get_size_attrs()
        f = open(metadata_fname, 'w')
        f.write(extra)
        f.close()

    return relfn, outfn, id, extra
[ "def", "render_aafigure", "(", "app", ",", "text", ",", "options", ")", ":", "if", "aafigure", "is", "None", ":", "raise", "AafigError", "(", "'aafigure module not installed'", ")", "fname", "=", "get_basename", "(", "text", ",", "options", ")", "fname", "=", "'%s.%s'", "%", "(", "get_basename", "(", "text", ",", "options", ")", ",", "options", "[", "'format'", "]", ")", "if", "app", ".", "builder", ".", "format", "==", "'html'", ":", "# HTML", "imgpath", "=", "relative_uri", "(", "app", ".", "builder", ".", "env", ".", "docname", ",", "'_images'", ")", "relfn", "=", "posixpath", ".", "join", "(", "imgpath", ",", "fname", ")", "outfn", "=", "path", ".", "join", "(", "app", ".", "builder", ".", "outdir", ",", "'_images'", ",", "fname", ")", "else", ":", "# Non-HTML", "if", "app", ".", "builder", ".", "format", "!=", "'latex'", ":", "app", ".", "builder", ".", "warn", "(", "'aafig: the builder format %s is not officially '", "'supported, aafigure images could not work. Please report '", "'problems and working builder to avoid this warning in '", "'the future'", "%", "app", ".", "builder", ".", "format", ")", "relfn", "=", "fname", "outfn", "=", "path", ".", "join", "(", "app", ".", "builder", ".", "outdir", ",", "fname", ")", "metadata_fname", "=", "'%s.aafig'", "%", "outfn", "try", ":", "if", "path", ".", "isfile", "(", "outfn", ")", ":", "extra", "=", "None", "if", "options", "[", "'format'", "]", ".", "lower", "(", ")", "==", "'svg'", ":", "f", "=", "None", "try", ":", "try", ":", "f", "=", "open", "(", "metadata_fname", ",", "'r'", ")", "extra", "=", "f", ".", "read", "(", ")", "except", ":", "raise", "AafigError", "(", ")", "finally", ":", "if", "f", "is", "not", "None", ":", "f", ".", "close", "(", ")", "return", "relfn", ",", "outfn", ",", "id", ",", "extra", "except", "AafigError", ":", "pass", "ensuredir", "(", "path", ".", "dirname", "(", "outfn", ")", ")", "try", ":", "(", "visitor", ",", "output", ")", "=", "aafigure", ".", "render", "(", "text", ",", "outfn", ",", "options", ")", "output", ".", "close", "(", ")", "except", "aafigure", ".", "UnsupportedFormatError", "as", "e", ":", "raise", "AafigError", "(", "str", "(", "e", ")", ")", "extra", "=", "None", "if", "options", "[", "'format'", "]", ".", "lower", "(", ")", "==", "'svg'", ":", "extra", "=", "visitor", ".", "get_size_attrs", "(", ")", "f", "=", "open", "(", "metadata_fname", ",", "'w'", ")", "f", ".", "write", "(", "extra", ")", "f", ".", "close", "(", ")", "return", "relfn", ",", "outfn", ",", "id", ",", "extra" ]
Render an ASCII art figure into the requested format output file.
[ "Render", "an", "ASCII", "art", "figure", "into", "the", "requested", "format", "output", "file", "." ]
python
train
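The interesting part of `render_aafigure` is the sidecar cache: SVG size attributes are written to a `.aafig` metadata file next to the output so later builds can skip re-rendering. A generic sketch of that pattern (`render_cached` and the `.meta` suffix are hypothetical, and `render` is any expensive callable):

import os

def render_cached(text, outfn, render):
    """Reuse the rendered output and its metadata sidecar when both already
    exist; otherwise render and persist the metadata for next time."""
    meta = outfn + '.meta'
    if os.path.isfile(outfn) and os.path.isfile(meta):
        with open(meta) as f:
            return f.read()  # cached extra attributes
    extra = render(text, outfn)  # expensive render step
    with open(meta, 'w') as f:
        f.write(extra)
    return extra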
sirfoga/pyhal
hal/data/linked_list.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/data/linked_list.py#L141-L163
def remove_last(self):
    """Removes last

    :return: True iff last element has been removed
    """
    if self.length() <= 1:
        self.head = None
        return True

    node = self.head
    while node is not None:
        is_last_but_one = node.next_node is not None and \
                          node.next_node.next_node is None
        print(node.val, is_last_but_one)
        if is_last_but_one:  # this is the last
            node.next_node = None  # get to last but one element
            return True

        node = node.next_node

    return False
[ "def", "remove_last", "(", "self", ")", ":", "if", "self", ".", "length", "(", ")", "<=", "1", ":", "self", ".", "head", "=", "None", "return", "True", "node", "=", "self", ".", "head", "while", "node", "is", "not", "None", ":", "is_last_but_one", "=", "node", ".", "next_node", "is", "not", "None", "and", "node", ".", "next_node", ".", "next_node", "is", "None", "print", "(", "node", ".", "val", ",", "is_last_but_one", ")", "if", "is_last_but_one", ":", "# this is the last", "node", ".", "next_node", "=", "None", "# get to last but one element", "return", "True", "node", "=", "node", ".", "next_node", "return", "False" ]
Removes last

:return: True iff last element has been removed
[ "Removes", "last" ]
python
train
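Note the stray debug `print` in the loop, and that the `length() <= 1` branch returns True even for an already-empty list. A cleaner sketch of the same method against the same node API (`head`, `next_node`):

def remove_last(self):
    """Remove the last node; return True iff something was removed."""
    if self.head is None:
        return False  # nothing to remove on an empty list
    if self.head.next_node is None:
        self.head = None  # single-element list
        return True
    node = self.head
    while node.next_node.next_node is not None:
        node = node.next_node  # walk to the last-but-one node
    node.next_node = None
    return True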
dslackw/slpkg
slpkg/sbo/network.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/sbo/network.py#L388-L394
def install(self):
    """Install SBo package found in /tmp directory.
    """
    binary = slack_package(self.prgnam)
    print("[ {0}Installing{1} ] --> {2}".format(self.green, self.endc,
                                                self.name))
    PackageManager(binary).upgrade(flag="--install-new")
[ "def", "install", "(", "self", ")", ":", "binary", "=", "slack_package", "(", "self", ".", "prgnam", ")", "print", "(", "\"[ {0}Installing{1} ] --> {2}\"", ".", "format", "(", "self", ".", "green", ",", "self", ".", "endc", ",", "self", ".", "name", ")", ")", "PackageManager", "(", "binary", ")", ".", "upgrade", "(", "flag", "=", "\"--install-new\"", ")" ]
Install SBo package found in /tmp directory.
[ "Install", "SBo", "package", "found", "in", "/", "tmp", "directory", "." ]
python
train
pyviz/holoviews
holoviews/operation/datashader.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/operation/datashader.py#L270-L340
def get_agg_data(cls, obj, category=None):
    """
    Reduces any Overlay or NdOverlay of Elements into a single
    xarray Dataset that can be aggregated.
    """
    paths = []
    if isinstance(obj, Graph):
        obj = obj.edgepaths
    kdims = list(obj.kdims)
    vdims = list(obj.vdims)
    dims = obj.dimensions()[:2]
    if isinstance(obj, Path):
        glyph = 'line'
        for p in obj.split(datatype='dataframe'):
            paths.append(p)
    elif isinstance(obj, CompositeOverlay):
        element = None
        for key, el in obj.data.items():
            x, y, element, glyph = cls.get_agg_data(el)
            dims = (x, y)
            df = PandasInterface.as_dframe(element)
            if isinstance(obj, NdOverlay):
                df = df.assign(**dict(zip(obj.dimensions('key', True), key)))
            paths.append(df)
        if element is None:
            dims = None
        else:
            kdims += element.kdims
            vdims = element.vdims
    elif isinstance(obj, Element):
        glyph = 'line' if isinstance(obj, Curve) else 'points'
        paths.append(PandasInterface.as_dframe(obj))

    if dims is None or len(dims) != 2:
        return None, None, None, None
    else:
        x, y = dims
    if len(paths) > 1:
        if glyph == 'line':
            path = paths[0][:1]
            if isinstance(path, dd.DataFrame):
                path = path.compute()
            empty = path.copy()
            empty.iloc[0, :] = (np.NaN,) * empty.shape[1]
            paths = [elem for p in paths for elem in (p, empty)][:-1]
        if all(isinstance(path, dd.DataFrame) for path in paths):
            df = dd.concat(paths)
        else:
            paths = [p.compute() if isinstance(p, dd.DataFrame) else p for p in paths]
            df = pd.concat(paths)
    else:
        df = paths[0] if paths else pd.DataFrame([], columns=[x.name, y.name])
    if category and df[category].dtype.name != 'category':
        df[category] = df[category].astype('category')

    is_dask = isinstance(df, dd.DataFrame)
    if any((not is_dask and len(df[d.name]) and isinstance(df[d.name].values[0], cftime_types)) or
           df[d.name].dtype.kind == 'M' for d in (x, y)):
        df = df.copy()
    for d in (x, y):
        vals = df[d.name]
        if not is_dask and len(vals) and isinstance(vals.values[0], cftime_types):
            vals = cftime_to_timestamp(vals, 'ns')
        elif df[d.name].dtype.kind == 'M':
            vals = vals.astype('datetime64[ns]')
        else:
            continue
        df[d.name] = vals.astype('int64')
    return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph
[ "def", "get_agg_data", "(", "cls", ",", "obj", ",", "category", "=", "None", ")", ":", "paths", "=", "[", "]", "if", "isinstance", "(", "obj", ",", "Graph", ")", ":", "obj", "=", "obj", ".", "edgepaths", "kdims", "=", "list", "(", "obj", ".", "kdims", ")", "vdims", "=", "list", "(", "obj", ".", "vdims", ")", "dims", "=", "obj", ".", "dimensions", "(", ")", "[", ":", "2", "]", "if", "isinstance", "(", "obj", ",", "Path", ")", ":", "glyph", "=", "'line'", "for", "p", "in", "obj", ".", "split", "(", "datatype", "=", "'dataframe'", ")", ":", "paths", ".", "append", "(", "p", ")", "elif", "isinstance", "(", "obj", ",", "CompositeOverlay", ")", ":", "element", "=", "None", "for", "key", ",", "el", "in", "obj", ".", "data", ".", "items", "(", ")", ":", "x", ",", "y", ",", "element", ",", "glyph", "=", "cls", ".", "get_agg_data", "(", "el", ")", "dims", "=", "(", "x", ",", "y", ")", "df", "=", "PandasInterface", ".", "as_dframe", "(", "element", ")", "if", "isinstance", "(", "obj", ",", "NdOverlay", ")", ":", "df", "=", "df", ".", "assign", "(", "*", "*", "dict", "(", "zip", "(", "obj", ".", "dimensions", "(", "'key'", ",", "True", ")", ",", "key", ")", ")", ")", "paths", ".", "append", "(", "df", ")", "if", "element", "is", "None", ":", "dims", "=", "None", "else", ":", "kdims", "+=", "element", ".", "kdims", "vdims", "=", "element", ".", "vdims", "elif", "isinstance", "(", "obj", ",", "Element", ")", ":", "glyph", "=", "'line'", "if", "isinstance", "(", "obj", ",", "Curve", ")", "else", "'points'", "paths", ".", "append", "(", "PandasInterface", ".", "as_dframe", "(", "obj", ")", ")", "if", "dims", "is", "None", "or", "len", "(", "dims", ")", "!=", "2", ":", "return", "None", ",", "None", ",", "None", ",", "None", "else", ":", "x", ",", "y", "=", "dims", "if", "len", "(", "paths", ")", ">", "1", ":", "if", "glyph", "==", "'line'", ":", "path", "=", "paths", "[", "0", "]", "[", ":", "1", "]", "if", "isinstance", "(", "path", ",", "dd", ".", "DataFrame", ")", ":", "path", "=", "path", ".", "compute", "(", ")", "empty", "=", "path", ".", "copy", "(", ")", "empty", ".", "iloc", "[", "0", ",", ":", "]", "=", "(", "np", ".", "NaN", ",", ")", "*", "empty", ".", "shape", "[", "1", "]", "paths", "=", "[", "elem", "for", "p", "in", "paths", "for", "elem", "in", "(", "p", ",", "empty", ")", "]", "[", ":", "-", "1", "]", "if", "all", "(", "isinstance", "(", "path", ",", "dd", ".", "DataFrame", ")", "for", "path", "in", "paths", ")", ":", "df", "=", "dd", ".", "concat", "(", "paths", ")", "else", ":", "paths", "=", "[", "p", ".", "compute", "(", ")", "if", "isinstance", "(", "p", ",", "dd", ".", "DataFrame", ")", "else", "p", "for", "p", "in", "paths", "]", "df", "=", "pd", ".", "concat", "(", "paths", ")", "else", ":", "df", "=", "paths", "[", "0", "]", "if", "paths", "else", "pd", ".", "DataFrame", "(", "[", "]", ",", "columns", "=", "[", "x", ".", "name", ",", "y", ".", "name", "]", ")", "if", "category", "and", "df", "[", "category", "]", ".", "dtype", ".", "name", "!=", "'category'", ":", "df", "[", "category", "]", "=", "df", "[", "category", "]", ".", "astype", "(", "'category'", ")", "is_dask", "=", "isinstance", "(", "df", ",", "dd", ".", "DataFrame", ")", "if", "any", "(", "(", "not", "is_dask", "and", "len", "(", "df", "[", "d", ".", "name", "]", ")", "and", "isinstance", "(", "df", "[", "d", ".", "name", "]", ".", "values", "[", "0", "]", ",", "cftime_types", ")", ")", "or", "df", "[", "d", ".", "name", "]", ".", "dtype", ".", "kind", "==", "'M'", "for", "d", "in", "(", "x", ",", "y", ")", ")", 
":", "df", "=", "df", ".", "copy", "(", ")", "for", "d", "in", "(", "x", ",", "y", ")", ":", "vals", "=", "df", "[", "d", ".", "name", "]", "if", "not", "is_dask", "and", "len", "(", "vals", ")", "and", "isinstance", "(", "vals", ".", "values", "[", "0", "]", ",", "cftime_types", ")", ":", "vals", "=", "cftime_to_timestamp", "(", "vals", ",", "'ns'", ")", "elif", "df", "[", "d", ".", "name", "]", ".", "dtype", ".", "kind", "==", "'M'", ":", "vals", "=", "vals", ".", "astype", "(", "'datetime64[ns]'", ")", "else", ":", "continue", "df", "[", "d", ".", "name", "]", "=", "vals", ".", "astype", "(", "'int64'", ")", "return", "x", ",", "y", ",", "Dataset", "(", "df", ",", "kdims", "=", "kdims", ",", "vdims", "=", "vdims", ")", ",", "glyph" ]
Reduces any Overlay or NdOverlay of Elements into a single xarray Dataset that can be aggregated.
[ "Reduces", "any", "Overlay", "or", "NdOverlay", "of", "Elements", "into", "a", "single", "xarray", "Dataset", "that", "can", "be", "aggregated", "." ]
python
train
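The tail of `get_agg_data` normalizes datetime axes to integers, since datashader aggregates over numeric coordinates. A two-line illustration of that conversion with pandas (the sample data is made up):

import pandas as pd

# datetime64 columns are normalized to nanosecond precision, then viewed as
# int64 nanoseconds since the epoch, which datashader can aggregate over.
df = pd.DataFrame({'t': pd.to_datetime(['2019-01-01', '2019-01-02'])})
df['t'] = df['t'].astype('datetime64[ns]').astype('int64')
print(df['t'].tolist())  # nanoseconds since the epoch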
data-8/datascience
datascience/tables.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L246-L285
def column(self, index_or_label):
    """Return the values of a column as an array.

    table.column(label) is equivalent to table[label].

    >>> tiles = Table().with_columns(
    ...     'letter', make_array('c', 'd'),
    ...     'count', make_array(2, 4),
    ... )
    >>> list(tiles.column('letter'))
    ['c', 'd']
    >>> tiles.column(1)
    array([2, 4])

    Args:
        label (int or str): The index or label of a column

    Returns:
        An instance of ``numpy.array``.

    Raises:
        ``ValueError``: When the ``index_or_label`` is not in the table.
    """
    if (isinstance(index_or_label, str)
            and index_or_label not in self.labels):
        raise ValueError(
            'The column "{}" is not in the table. The table contains '
            'these columns: {}'
            .format(index_or_label, ', '.join(self.labels))
        )
    if (isinstance(index_or_label, int)
            and not 0 <= index_or_label < len(self.labels)):
        raise ValueError(
            'The index {} is not in the table. Only indices between '
            '0 and {} are valid'
            .format(index_or_label, len(self.labels) - 1)
        )
    return self._columns[self._as_label(index_or_label)]
[ "def", "column", "(", "self", ",", "index_or_label", ")", ":", "if", "(", "isinstance", "(", "index_or_label", ",", "str", ")", "and", "index_or_label", "not", "in", "self", ".", "labels", ")", ":", "raise", "ValueError", "(", "'The column \"{}\" is not in the table. The table contains '", "'these columns: {}'", ".", "format", "(", "index_or_label", ",", "', '", ".", "join", "(", "self", ".", "labels", ")", ")", ")", "if", "(", "isinstance", "(", "index_or_label", ",", "int", ")", "and", "not", "0", "<=", "index_or_label", "<", "len", "(", "self", ".", "labels", ")", ")", ":", "raise", "ValueError", "(", "'The index {} is not in the table. Only indices between '", "'0 and {} are valid'", ".", "format", "(", "index_or_label", ",", "len", "(", "self", ".", "labels", ")", "-", "1", ")", ")", "return", "self", ".", "_columns", "[", "self", ".", "_as_label", "(", "index_or_label", ")", "]" ]
Return the values of a column as an array.

table.column(label) is equivalent to table[label].

>>> tiles = Table().with_columns(
...     'letter', make_array('c', 'd'),
...     'count', make_array(2, 4),
... )
>>> list(tiles.column('letter'))
['c', 'd']
>>> tiles.column(1)
array([2, 4])

Args:
    label (int or str): The index or label of a column

Returns:
    An instance of ``numpy.array``.

Raises:
    ``ValueError``: When the ``index_or_label`` is not in the table.
[ "Return", "the", "values", "of", "a", "column", "as", "an", "array", "." ]
python
train
stephen-bunn/file-config
src/file_config/contrib/ini_parser.py
https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/contrib/ini_parser.py#L81-L117
def _build_dict( cls, parser_dict, delimiter=DEFAULT_DELIMITER, dict_type=collections.OrderedDict ): """ Builds a dictionary of ``dict_type`` given the ``parser._sections`` dict. :param dict parser_dict: The ``parser._sections`` mapping :param str delimiter: The delimiter for nested dictionaries, defaults to ":", optional :param class dict_type: The dictionary type to use for building the dict, defaults to :class:`collections.OrderedDict`, optional :return: The resulting dictionary :rtype: dict """ result = dict_type() for (key, value) in parser_dict.items(): if isinstance(value, dict): nestings = key.split(delimiter) # build nested dictionaries if they don't exist (up to 2nd to last key) base_dict = result for nested_key in nestings[:-1]: if nested_key not in base_dict: base_dict[nested_key] = dict_type() base_dict = base_dict[nested_key] base_dict[nestings[-1]] = cls._build_dict( parser_dict.get(key), delimiter=delimiter, dict_type=dict_type ) else: if "\n" in value: result[key] = [ cls._decode_var(_) for _ in value.lstrip("\n").split("\n") ] else: result[key] = cls._decode_var(value) return result
[ "def", "_build_dict", "(", "cls", ",", "parser_dict", ",", "delimiter", "=", "DEFAULT_DELIMITER", ",", "dict_type", "=", "collections", ".", "OrderedDict", ")", ":", "result", "=", "dict_type", "(", ")", "for", "(", "key", ",", "value", ")", "in", "parser_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "nestings", "=", "key", ".", "split", "(", "delimiter", ")", "# build nested dictionaries if they don't exist (up to 2nd to last key)", "base_dict", "=", "result", "for", "nested_key", "in", "nestings", "[", ":", "-", "1", "]", ":", "if", "nested_key", "not", "in", "base_dict", ":", "base_dict", "[", "nested_key", "]", "=", "dict_type", "(", ")", "base_dict", "=", "base_dict", "[", "nested_key", "]", "base_dict", "[", "nestings", "[", "-", "1", "]", "]", "=", "cls", ".", "_build_dict", "(", "parser_dict", ".", "get", "(", "key", ")", ",", "delimiter", "=", "delimiter", ",", "dict_type", "=", "dict_type", ")", "else", ":", "if", "\"\\n\"", "in", "value", ":", "result", "[", "key", "]", "=", "[", "cls", ".", "_decode_var", "(", "_", ")", "for", "_", "in", "value", ".", "lstrip", "(", "\"\\n\"", ")", ".", "split", "(", "\"\\n\"", ")", "]", "else", ":", "result", "[", "key", "]", "=", "cls", ".", "_decode_var", "(", "value", ")", "return", "result" ]
Builds a dictionary of ``dict_type`` given the ``parser._sections`` dict. :param dict parser_dict: The ``parser._sections`` mapping :param str delimiter: The delimiter for nested dictionaries, defaults to ":", optional :param class dict_type: The dictionary type to use for building the dict, defaults to :class:`collections.OrderedDict`, optional :return: The resulting dictionary :rtype: dict
[ "Builds", "a", "dictionary", "of", "dict_type", "given", "the", "parser", ".", "_sections", "dict", "." ]
python
train
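A rough usage sketch; the enclosing class name (`INIParser`) is a guess here, and values pass through `cls._decode_var`, which may coerce types rather than return plain strings:
import collections
from file_config.contrib.ini_parser import INIParser  # class name assumed

sections = collections.OrderedDict([
    ('app', {'name': 'demo'}),
    ('app:db', {'host': 'localhost', 'ports': '\n5432\n5433'}),
])
result = INIParser._build_dict(sections, delimiter=':')
# roughly: {'app': {'name': 'demo', 'db': {'host': ..., 'ports': [...]}}}
# the 'app:db' section key becomes a nested dict; the newline-separated
# 'ports' value becomes a list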
vaexio/vaex
packages/vaex-core/vaex/export.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/export.py#L270-L330
def export_fits(dataset, path, column_names=None, shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True): """ :param DatasetLocal dataset: dataset to export :param str path: path for file :param list[str] column_names: list of column names to export or None for all columns :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param bool virtual: When True, export virtual columns :return: """ if shuffle: random_index_name = "random_index" while random_index_name in dataset.get_column_names(): random_index_name += "_new" column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True) logger.debug("exporting columns(fits): %r" % column_names) N = len(dataset) if not selection else dataset.selected_length(selection) data_types = [] data_shapes = [] ucds = [] units = [] for column_name in column_names: if column_name in dataset.get_column_names(strings=True): column = dataset.columns[column_name] shape = (N,) + column.shape[1:] dtype = column.dtype if dataset.dtype(column_name) == str_type: max_length = dataset[column_name].apply(lambda x: len(x)).max(selection=selection) dtype = np.dtype('S'+str(int(max_length))) else: dtype = np.float64().dtype shape = (N,) ucds.append(dataset.ucds.get(column_name)) units.append(dataset.units.get(column_name)) data_types.append(dtype) data_shapes.append(shape) if shuffle: column_names.append(random_index_name) data_types.append(np.int64().dtype) data_shapes.append((N,)) ucds.append(None) units.append(None) else: random_index_name = None # TODO: all expressions can have missing values.. how to support that? null_values = {key: dataset.columns[key].fill_value for key in dataset.get_column_names() if dataset.is_masked(key) and dataset.dtype(key).kind != "f"} vaex.file.colfits.empty(path, N, column_names, data_types, data_shapes, ucds, units, null_values=null_values) if shuffle: del column_names[-1] del data_types[-1] del data_shapes[-1] dataset_output = vaex.file.other.FitsBinTable(path, write=True) _export(dataset_input=dataset, dataset_output=dataset_output, path=path, random_index_column=random_index_name, column_names=column_names, selection=selection, shuffle=shuffle, progress=progress, sort=sort, ascending=ascending) dataset_output.close_files()
[ "def", "export_fits", "(", "dataset", ",", "path", ",", "column_names", "=", "None", ",", "shuffle", "=", "False", ",", "selection", "=", "False", ",", "progress", "=", "None", ",", "virtual", "=", "True", ",", "sort", "=", "None", ",", "ascending", "=", "True", ")", ":", "if", "shuffle", ":", "random_index_name", "=", "\"random_index\"", "while", "random_index_name", "in", "dataset", ".", "get_column_names", "(", ")", ":", "random_index_name", "+=", "\"_new\"", "column_names", "=", "column_names", "or", "dataset", ".", "get_column_names", "(", "virtual", "=", "virtual", ",", "strings", "=", "True", ")", "logger", ".", "debug", "(", "\"exporting columns(fits): %r\"", "%", "column_names", ")", "N", "=", "len", "(", "dataset", ")", "if", "not", "selection", "else", "dataset", ".", "selected_length", "(", "selection", ")", "data_types", "=", "[", "]", "data_shapes", "=", "[", "]", "ucds", "=", "[", "]", "units", "=", "[", "]", "for", "column_name", "in", "column_names", ":", "if", "column_name", "in", "dataset", ".", "get_column_names", "(", "strings", "=", "True", ")", ":", "column", "=", "dataset", ".", "columns", "[", "column_name", "]", "shape", "=", "(", "N", ",", ")", "+", "column", ".", "shape", "[", "1", ":", "]", "dtype", "=", "column", ".", "dtype", "if", "dataset", ".", "dtype", "(", "column_name", ")", "==", "str_type", ":", "max_length", "=", "dataset", "[", "column_name", "]", ".", "apply", "(", "lambda", "x", ":", "len", "(", "x", ")", ")", ".", "max", "(", "selection", "=", "selection", ")", "dtype", "=", "np", ".", "dtype", "(", "'S'", "+", "str", "(", "int", "(", "max_length", ")", ")", ")", "else", ":", "dtype", "=", "np", ".", "float64", "(", ")", ".", "dtype", "shape", "=", "(", "N", ",", ")", "ucds", ".", "append", "(", "dataset", ".", "ucds", ".", "get", "(", "column_name", ")", ")", "units", ".", "append", "(", "dataset", ".", "units", ".", "get", "(", "column_name", ")", ")", "data_types", ".", "append", "(", "dtype", ")", "data_shapes", ".", "append", "(", "shape", ")", "if", "shuffle", ":", "column_names", ".", "append", "(", "random_index_name", ")", "data_types", ".", "append", "(", "np", ".", "int64", "(", ")", ".", "dtype", ")", "data_shapes", ".", "append", "(", "(", "N", ",", ")", ")", "ucds", ".", "append", "(", "None", ")", "units", ".", "append", "(", "None", ")", "else", ":", "random_index_name", "=", "None", "# TODO: all expressions can have missing values.. 
how to support that?", "null_values", "=", "{", "key", ":", "dataset", ".", "columns", "[", "key", "]", ".", "fill_value", "for", "key", "in", "dataset", ".", "get_column_names", "(", ")", "if", "dataset", ".", "is_masked", "(", "key", ")", "and", "dataset", ".", "dtype", "(", "key", ")", ".", "kind", "!=", "\"f\"", "}", "vaex", ".", "file", ".", "colfits", ".", "empty", "(", "path", ",", "N", ",", "column_names", ",", "data_types", ",", "data_shapes", ",", "ucds", ",", "units", ",", "null_values", "=", "null_values", ")", "if", "shuffle", ":", "del", "column_names", "[", "-", "1", "]", "del", "data_types", "[", "-", "1", "]", "del", "data_shapes", "[", "-", "1", "]", "dataset_output", "=", "vaex", ".", "file", ".", "other", ".", "FitsBinTable", "(", "path", ",", "write", "=", "True", ")", "_export", "(", "dataset_input", "=", "dataset", ",", "dataset_output", "=", "dataset_output", ",", "path", "=", "path", ",", "random_index_column", "=", "random_index_name", ",", "column_names", "=", "column_names", ",", "selection", "=", "selection", ",", "shuffle", "=", "shuffle", ",", "progress", "=", "progress", ",", "sort", "=", "sort", ",", "ascending", "=", "ascending", ")", "dataset_output", ".", "close_files", "(", ")" ]
:param DatasetLocal dataset: dataset to export :param str path: path for file :param list[str] column_names: list of column names to export or None for all columns :param bool shuffle: export rows in random order :param bool selection: export selection or not :param progress: progress callback that gets a progress fraction as argument and should return True to continue, or a default progress bar when progress=True :param bool virtual: When True, export virtual columns :return:
[ ":", "param", "DatasetLocal", "dataset", ":", "dataset", "to", "export", ":", "param", "str", "path", ":", "path", "for", "file", ":", "param", "list", "[", "str", "]", "column_names", ":", "list", "of", "column", "names", "to", "export", "or", "None", "for", "all", "columns", ":", "param", "bool", "shuffle", ":", "export", "rows", "in", "random", "order", ":", "param", "bool", "selection", ":", "export", "selection", "or", "not", ":", "param", "progress", ":", "progress", "callback", "that", "gets", "a", "progress", "fraction", "as", "argument", "and", "should", "return", "True", "to", "continue", "or", "a", "default", "progress", "bar", "when", "progress", "=", "True", ":", "param", "bool", "virtual", ":", "When", "True", "export", "virtual", "columns", ":", "return", ":" ]
python
test
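A hedged usage sketch; the exact behavior depends on the vaex-core build of that era, so treat this as illustrative only:
import numpy as np
import vaex
from vaex.export import export_fits

ds = vaex.from_arrays(x=np.arange(10.0), y=np.arange(10.0) ** 2)
export_fits(ds, 'points.fits')                  # writes all columns
export_fits(ds, 'shuffled.fits', shuffle=True)  # rows in random order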
helium/helium-python
helium/resource.py
https://github.com/helium/helium-python/blob/db73480b143da4fc48e95c4414bd69c576a3a390/helium/resource.py#L201-L225
def find(cls, session, resource_id, include=None): """Retrieve a single resource. This should only be called from sub-classes. Args: session(Session): The session to find the resource in resource_id: The ``id`` for the resource to look up Keyword Args: include: Resource classes to include Returns: Resource: An instance of a resource, or throws a :class:`NotFoundError` if the resource can not be found. """ url = session._build_url(cls._resource_path(), resource_id) params = build_request_include(include, None) process = cls._mk_one(session, include=include) return session.get(url, CB.json(200, process), params=params)
[ "def", "find", "(", "cls", ",", "session", ",", "resource_id", ",", "include", "=", "None", ")", ":", "url", "=", "session", ".", "_build_url", "(", "cls", ".", "_resource_path", "(", ")", ",", "resource_id", ")", "params", "=", "build_request_include", "(", "include", ",", "None", ")", "process", "=", "cls", ".", "_mk_one", "(", "session", ",", "include", "=", "include", ")", "return", "session", ".", "get", "(", "url", ",", "CB", ".", "json", "(", "200", ",", "process", ")", ",", "params", "=", "params", ")" ]
Retrieve a single resource. This should only be called from sub-classes. Args: session(Session): The session to find the resource in resource_id: The ``id`` for the resource to look up Keyword Args: include: Resource classes to include Returns: Resource: An instance of a resource, or throws a :class:`NotFoundError` if the resource can not be found.
[ "Retrieve", "a", "single", "resource", "." ]
python
train
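A brief sketch of the subclass usage the docstring describes, assuming `Sensor` is one of the helium-python resource subclasses; the constructor argument name and the UUID are illustrative:
from helium import Session, Sensor

session = Session(api_token='<your-api-key>')   # constructor arg name assumed
sensor = Sensor.find(session, '08bab58b-d095-4702-b3e5-8e3d5e476b56')
print(sensor.id)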
Anaconda-Platform/anaconda-client
binstar_client/inspect_package/pypi.py
https://github.com/Anaconda-Platform/anaconda-client/blob/b276f0572744c73c184a8b43a897cfa7fc1dc523/binstar_client/inspect_package/pypi.py#L111-L126
def get_header_description(filedata): """Get description from metadata file and remove any empty lines at end.""" python_version = sys.version_info.major if python_version == 3: filedata = Parser().parsestr(filedata) else: filedata = Parser().parsestr(filedata.encode("UTF-8", "replace")) payload = filedata.get_payload() lines = payload.split('\n') while True: if lines and lines[-1] == '': lines.pop() else: break return '\n'.join(lines)
[ "def", "get_header_description", "(", "filedata", ")", ":", "python_version", "=", "sys", ".", "version_info", ".", "major", "if", "python_version", "==", "3", ":", "filedata", "=", "Parser", "(", ")", ".", "parsestr", "(", "filedata", ")", "else", ":", "filedata", "=", "Parser", "(", ")", ".", "parsestr", "(", "filedata", ".", "encode", "(", "\"UTF-8\"", ",", "\"replace\"", ")", ")", "payload", "=", "filedata", ".", "get_payload", "(", ")", "lines", "=", "payload", ".", "split", "(", "'\\n'", ")", "while", "True", ":", "if", "lines", "and", "lines", "[", "-", "1", "]", "==", "''", ":", "lines", ".", "pop", "(", ")", "else", ":", "break", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
Get description from metadata file and remove any empty lines at end.
[ "Get", "description", "from", "metadata", "file", "and", "remove", "any", "empty", "lines", "at", "end", "." ]
python
train
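A small self-contained example of the trailing-blank-line stripping, using the import path from this record:
from binstar_client.inspect_package.pypi import get_header_description

metadata = 'Name: demo\nVersion: 1.0\n\nA short description.\n\n\n'
print(repr(get_header_description(metadata)))   # 'A short description.'
# the header block is parsed away and the empty lines at the end are removed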
Fantomas42/django-blog-zinnia
zinnia/templatetags/zinnia.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templatetags/zinnia.py#L411-L420
def week_number(date): """ Return the Python week number of a date. The django \|date:"W" returns a value incompatible with the view implementation. """ week_number = date.strftime('%W') if int(week_number) < 10: week_number = week_number[-1] return week_number
[ "def", "week_number", "(", "date", ")", ":", "week_number", "=", "date", ".", "strftime", "(", "'%W'", ")", "if", "int", "(", "week_number", ")", "<", "10", ":", "week_number", "=", "week_number", "[", "-", "1", "]", "return", "week_number" ]
Return the Python week number of a date. The django \|date:"W" returns a value incompatible with the view implementation.
[ "Return", "the", "Python", "week", "number", "of", "a", "date", ".", "The", "django", "\\", "|date", ":", "W", "returns", "a", "value", "incompatible", "with", "the", "view", "implementation", "." ]
python
train
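For illustration, the function drops the leading zero that strftime('%W') produces for early weeks (assuming a configured Django project with django-blog-zinnia installed):
from datetime import date
from zinnia.templatetags.zinnia import week_number

print(week_number(date(2013, 1, 20)))    # '2'  (strftime gives '02'; the zero is dropped)
print(week_number(date(2013, 12, 20)))   # '50'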
tornadoweb/tornado
tornado/gen.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/gen.py#L619-L637
def sleep(duration: float) -> "Future[None]": """Return a `.Future` that resolves after the given number of seconds. When used with ``yield`` in a coroutine, this is a non-blocking analogue to `time.sleep` (which should not be used in coroutines because it is blocking):: yield gen.sleep(0.5) Note that calling this function on its own does nothing; you must wait on the `.Future` it returns (usually by yielding it). .. versionadded:: 4.1 """ f = _create_future() IOLoop.current().call_later( duration, lambda: future_set_result_unless_cancelled(f, None) ) return f
[ "def", "sleep", "(", "duration", ":", "float", ")", "->", "\"Future[None]\"", ":", "f", "=", "_create_future", "(", ")", "IOLoop", ".", "current", "(", ")", ".", "call_later", "(", "duration", ",", "lambda", ":", "future_set_result_unless_cancelled", "(", "f", ",", "None", ")", ")", "return", "f" ]
Return a `.Future` that resolves after the given number of seconds. When used with ``yield`` in a coroutine, this is a non-blocking analogue to `time.sleep` (which should not be used in coroutines because it is blocking):: yield gen.sleep(0.5) Note that calling this function on its own does nothing; you must wait on the `.Future` it returns (usually by yielding it). .. versionadded:: 4.1
[ "Return", "a", ".", "Future", "that", "resolves", "after", "the", "given", "number", "of", "seconds", "." ]
python
train
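A minimal coroutine using it, in Tornado 5+ style:
import time
from tornado import gen
from tornado.ioloop import IOLoop

async def main():
    start = time.time()
    await gen.sleep(0.5)          # non-blocking; the loop stays responsive
    print('slept for %.2f s' % (time.time() - start))

IOLoop.current().run_sync(main)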
apache/incubator-mxnet
python/mxnet/image/image.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/image/image.py#L86-L140
def imresize(src, w, h, *args, **kwargs): r"""Resize image with OpenCV. .. note:: `imresize` uses OpenCV (not the CV2 Python library). MXNet must have been built with USE_OPENCV=1 for `imresize` to work. Parameters ---------- src : NDArray source image w : int, required Width of resized image. h : int, required Height of resized image. interp : int, optional, default=1 Interpolation method (default=cv2.INTER_LINEAR). Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Randomly select from the interpolation methods mentioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). More details can be found in the documentation of OpenCV, please refer to http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. out : NDArray, optional The output NDArray to hold the result. Returns ------- out : NDArray or list of NDArrays The output of this function. Example ------- >>> with open("flower.jpeg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> new_image = mx.img.resize(image, 240, 360) >>> new_image <NDArray 240x360x3 @cpu(0)> """ return _internal._cvimresize(src, w, h, *args, **kwargs)
[ "def", "imresize", "(", "src", ",", "w", ",", "h", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_internal", ".", "_cvimresize", "(", "src", ",", "w", ",", "h", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
r"""Resize image with OpenCV. .. note:: `imresize` uses OpenCV (not the CV2 Python library). MXNet must have been built with USE_OPENCV=1 for `imresize` to work. Parameters ---------- src : NDArray source image w : int, required Width of resized image. h : int, required Height of resized image. interp : int, optional, default=1 Interpolation method (default=cv2.INTER_LINEAR). Possible values: 0: Nearest Neighbors Interpolation. 1: Bilinear interpolation. 2: Area-based (resampling using pixel area relation). It may be a preferred method for image decimation, as it gives moire-free results. But when the image is zoomed, it is similar to the Nearest Neighbors method. (used by default). 3: Bicubic interpolation over 4x4 pixel neighborhood. 4: Lanczos interpolation over 8x8 pixel neighborhood. 9: Cubic for enlarge, area for shrink, bilinear for others 10: Random select from interpolation method metioned above. Note: When shrinking an image, it will generally look best with AREA-based interpolation, whereas, when enlarging an image, it will generally look best with Bicubic (slow) or Bilinear (faster but still looks OK). More details can be found in the documentation of OpenCV, please refer to http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. out : NDArray, optional The output NDArray to hold the result. Returns ------- out : NDArray or list of NDArrays The output of this function. Example ------- >>> with open("flower.jpeg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> new_image = mx.img.resize(image, 240, 360) >>> new_image <NDArray 240x360x3 @cpu(0)>
[ "r", "Resize", "image", "with", "OpenCV", "." ]
python
train
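A short sketch (requires an MXNet build with USE_OPENCV=1, as the note says; the file name follows the doctest above):
import mxnet as mx

img = mx.image.imread('flower.jpeg')         # HxWx3 uint8 NDArray
small = mx.image.imresize(img, 240, 360)     # w=240, h=360
print(small.shape)                           # (360, 240, 3)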
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/client/asyncresult.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/asyncresult.py#L443-L543
def display_outputs(self, groupby="type"): """republish the outputs of the computation Parameters ---------- groupby : str [default: type] if 'type': Group outputs by type (show all stdout, then all stderr, etc.): [stdout:1] foo [stdout:2] foo [stderr:1] bar [stderr:2] bar if 'engine': Display outputs for each engine before moving on to the next: [stdout:1] foo [stderr:1] bar [stdout:2] foo [stderr:2] bar if 'order': Like 'type', but further collate individual displaypub outputs. This is meant for cases of each command producing several plots, and you would like to see all of the first plots together, then all of the second plots, and so on. """ if self._single_result: self._display_single_result() return stdouts = self.stdout stderrs = self.stderr pyouts = self.pyout output_lists = self.outputs results = self.get() targets = self.engine_id if groupby == "engine": for eid,stdout,stderr,outputs,r,pyout in zip( targets, stdouts, stderrs, output_lists, results, pyouts ): self._display_stream(stdout, '[stdout:%i] ' % eid) self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr) try: get_ipython() except NameError: # displaypub is meaningless outside IPython return if outputs or pyout is not None: _raw_text('[output:%i]' % eid) for output in outputs: self._republish_displaypub(output, eid) if pyout is not None: display(r) elif groupby in ('type', 'order'): # republish stdout: for eid,stdout in zip(targets, stdouts): self._display_stream(stdout, '[stdout:%i] ' % eid) # republish stderr: for eid,stderr in zip(targets, stderrs): self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr) try: get_ipython() except NameError: # displaypub is meaningless outside IPython return if groupby == 'order': output_dict = dict((eid, outputs) for eid,outputs in zip(targets, output_lists)) N = max(len(outputs) for outputs in output_lists) for i in range(N): for eid in targets: outputs = output_dict[eid] if len(outputs) >= N: _raw_text('[output:%i]' % eid) self._republish_displaypub(outputs[i], eid) else: # republish displaypub output for eid,outputs in zip(targets, output_lists): if outputs: _raw_text('[output:%i]' % eid) for output in outputs: self._republish_displaypub(output, eid) # finally, add pyout: for eid,r,pyout in zip(targets, results, pyouts): if pyout is not None: display(r) else: raise ValueError("groupby must be one of 'type', 'engine', 'collate', not %r" % groupby)
[ "def", "display_outputs", "(", "self", ",", "groupby", "=", "\"type\"", ")", ":", "if", "self", ".", "_single_result", ":", "self", ".", "_display_single_result", "(", ")", "return", "stdouts", "=", "self", ".", "stdout", "stderrs", "=", "self", ".", "stderr", "pyouts", "=", "self", ".", "pyout", "output_lists", "=", "self", ".", "outputs", "results", "=", "self", ".", "get", "(", ")", "targets", "=", "self", ".", "engine_id", "if", "groupby", "==", "\"engine\"", ":", "for", "eid", ",", "stdout", ",", "stderr", ",", "outputs", ",", "r", ",", "pyout", "in", "zip", "(", "targets", ",", "stdouts", ",", "stderrs", ",", "output_lists", ",", "results", ",", "pyouts", ")", ":", "self", ".", "_display_stream", "(", "stdout", ",", "'[stdout:%i] '", "%", "eid", ")", "self", ".", "_display_stream", "(", "stderr", ",", "'[stderr:%i] '", "%", "eid", ",", "file", "=", "sys", ".", "stderr", ")", "try", ":", "get_ipython", "(", ")", "except", "NameError", ":", "# displaypub is meaningless outside IPython", "return", "if", "outputs", "or", "pyout", "is", "not", "None", ":", "_raw_text", "(", "'[output:%i]'", "%", "eid", ")", "for", "output", "in", "outputs", ":", "self", ".", "_republish_displaypub", "(", "output", ",", "eid", ")", "if", "pyout", "is", "not", "None", ":", "display", "(", "r", ")", "elif", "groupby", "in", "(", "'type'", ",", "'order'", ")", ":", "# republish stdout:", "for", "eid", ",", "stdout", "in", "zip", "(", "targets", ",", "stdouts", ")", ":", "self", ".", "_display_stream", "(", "stdout", ",", "'[stdout:%i] '", "%", "eid", ")", "# republish stderr:", "for", "eid", ",", "stderr", "in", "zip", "(", "targets", ",", "stderrs", ")", ":", "self", ".", "_display_stream", "(", "stderr", ",", "'[stderr:%i] '", "%", "eid", ",", "file", "=", "sys", ".", "stderr", ")", "try", ":", "get_ipython", "(", ")", "except", "NameError", ":", "# displaypub is meaningless outside IPython", "return", "if", "groupby", "==", "'order'", ":", "output_dict", "=", "dict", "(", "(", "eid", ",", "outputs", ")", "for", "eid", ",", "outputs", "in", "zip", "(", "targets", ",", "output_lists", ")", ")", "N", "=", "max", "(", "len", "(", "outputs", ")", "for", "outputs", "in", "output_lists", ")", "for", "i", "in", "range", "(", "N", ")", ":", "for", "eid", "in", "targets", ":", "outputs", "=", "output_dict", "[", "eid", "]", "if", "len", "(", "outputs", ")", ">=", "N", ":", "_raw_text", "(", "'[output:%i]'", "%", "eid", ")", "self", ".", "_republish_displaypub", "(", "outputs", "[", "i", "]", ",", "eid", ")", "else", ":", "# republish displaypub output", "for", "eid", ",", "outputs", "in", "zip", "(", "targets", ",", "output_lists", ")", ":", "if", "outputs", ":", "_raw_text", "(", "'[output:%i]'", "%", "eid", ")", "for", "output", "in", "outputs", ":", "self", ".", "_republish_displaypub", "(", "output", ",", "eid", ")", "# finally, add pyout:", "for", "eid", ",", "r", ",", "pyout", "in", "zip", "(", "targets", ",", "results", ",", "pyouts", ")", ":", "if", "pyout", "is", "not", "None", ":", "display", "(", "r", ")", "else", ":", "raise", "ValueError", "(", "\"groupby must be one of 'type', 'engine', 'collate', not %r\"", "%", "groupby", ")" ]
republish the outputs of the computation Parameters ---------- groupby : str [default: type] if 'type': Group outputs by type (show all stdout, then all stderr, etc.): [stdout:1] foo [stdout:2] foo [stderr:1] bar [stderr:2] bar if 'engine': Display outputs for each engine before moving on to the next: [stdout:1] foo [stderr:1] bar [stdout:2] foo [stderr:2] bar if 'order': Like 'type', but further collate individual displaypub outputs. This is meant for cases of each command producing several plots, and you would like to see all of the first plots together, then all of the second plots, and so on.
[ "republish", "the", "outputs", "of", "the", "computation", "Parameters", "----------", "groupby", ":", "str", "[", "default", ":", "type", "]", "if", "type", ":", "Group", "outputs", "by", "type", "(", "show", "all", "stdout", "then", "all", "stderr", "etc", ".", ")", ":", "[", "stdout", ":", "1", "]", "foo", "[", "stdout", ":", "2", "]", "foo", "[", "stderr", ":", "1", "]", "bar", "[", "stderr", ":", "2", "]", "bar", "if", "engine", ":", "Display", "outputs", "for", "each", "engine", "before", "moving", "on", "to", "the", "next", ":", "[", "stdout", ":", "1", "]", "foo", "[", "stderr", ":", "1", "]", "bar", "[", "stdout", ":", "2", "]", "foo", "[", "stderr", ":", "2", "]", "bar", "if", "order", ":", "Like", "type", "but", "further", "collate", "individual", "displaypub", "outputs", ".", "This", "is", "meant", "for", "cases", "of", "each", "command", "producing", "several", "plots", "and", "you", "would", "like", "to", "see", "all", "of", "the", "first", "plots", "together", "then", "all", "of", "the", "second", "plots", "and", "so", "on", "." ]
python
test
eonpatapon/contrail-api-cli
contrail_api_cli/resource.py
https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/resource.py#L669-L686
def remove_ref(self, ref): """Remove reference from self to ref >>> iip = Resource('instance-ip', uuid='30213cf9-4b03-4afc-b8f9-c9971a216978', fetch=True) >>> for vmi in iip['virtual_machine_interface_refs']: iip.remove_ref(vmi) >>> iip['virtual_machine_interface_refs'] KeyError: u'virtual_machine_interface_refs' :param ref: reference to remove :type ref: Resource :rtype: Resource """ self.session.remove_ref(self, ref) return self.fetch()
[ "def", "remove_ref", "(", "self", ",", "ref", ")", ":", "self", ".", "session", ".", "remove_ref", "(", "self", ",", "ref", ")", "return", "self", ".", "fetch", "(", ")" ]
Remove reference from self to ref >>> iip = Resource('instance-ip', uuid='30213cf9-4b03-4afc-b8f9-c9971a216978', fetch=True) >>> for vmi in iip['virtual_machine_interface_refs']: iip.remove_ref(vmi) >>> iip['virtual_machine_interface_refs'] KeyError: u'virtual_machine_interface_refs' :param ref: reference to remove :type ref: Resource :rtype: Resource
[ "Remove", "reference", "from", "self", "to", "ref" ]
python
train
brutasse/graphite-api
graphite_api/render/glyph.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L2219-L2246
def format_units(v, step=None, system="si", units=None): """Format the given value in standardized units. ``system`` is either 'binary' or 'si' For more info, see: http://en.wikipedia.org/wiki/SI_prefix http://en.wikipedia.org/wiki/Binary_prefix """ if v is None: return 0, '' for prefix, size in UnitSystems[system]: if condition(v, size, step): v2 = v / size if v2 - math.floor(v2) < 0.00000000001 and v > 1: v2 = float(math.floor(v2)) if units: prefix = "%s%s" % (prefix, units) return v2, prefix if v - math.floor(v) < 0.00000000001 and v > 1: v = float(math.floor(v)) if units: prefix = units else: prefix = '' return v, prefix
[ "def", "format_units", "(", "v", ",", "step", "=", "None", ",", "system", "=", "\"si\"", ",", "units", "=", "None", ")", ":", "if", "v", "is", "None", ":", "return", "0", ",", "''", "for", "prefix", ",", "size", "in", "UnitSystems", "[", "system", "]", ":", "if", "condition", "(", "v", ",", "size", ",", "step", ")", ":", "v2", "=", "v", "/", "size", "if", "v2", "-", "math", ".", "floor", "(", "v2", ")", "<", "0.00000000001", "and", "v", ">", "1", ":", "v2", "=", "float", "(", "math", ".", "floor", "(", "v2", ")", ")", "if", "units", ":", "prefix", "=", "\"%s%s\"", "%", "(", "prefix", ",", "units", ")", "return", "v2", ",", "prefix", "if", "v", "-", "math", ".", "floor", "(", "v", ")", "<", "0.00000000001", "and", "v", ">", "1", ":", "v", "=", "float", "(", "math", ".", "floor", "(", "v", ")", ")", "if", "units", ":", "prefix", "=", "units", "else", ":", "prefix", "=", "''", "return", "v", ",", "prefix" ]
Format the given value in standardized units. ``system`` is either 'binary' or 'si' For more info, see: http://en.wikipedia.org/wiki/SI_prefix http://en.wikipedia.org/wiki/Binary_prefix
[ "Format", "the", "given", "value", "in", "standardized", "units", "." ]
python
train
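Illustrative calls; the exact prefix strings come from the module-level UnitSystems table, so the outputs shown are approximate:
from graphite_api.render.glyph import format_units

value, prefix = format_units(1500000)   # e.g. (1.5, 'M') with the 'si' system
print(value, prefix)
print(format_units(None))               # (0, '')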
numenta/nupic
src/nupic/frameworks/opf/opf_task_driver.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/opf_task_driver.py#L120-L129
def _getImpl(self, model): """ Creates and returns the _IterationPhase-based instance corresponding to this phase specification model: Model instance """ impl = _IterationPhaseInferOnly(model=model, nIters=self.__nIters, inferenceArgs=self.__inferenceArgs) return impl
[ "def", "_getImpl", "(", "self", ",", "model", ")", ":", "impl", "=", "_IterationPhaseInferOnly", "(", "model", "=", "model", ",", "nIters", "=", "self", ".", "__nIters", ",", "inferenceArgs", "=", "self", ".", "__inferenceArgs", ")", "return", "impl" ]
Creates and returns the _IterationPhase-based instance corresponding to this phase specification model: Model instance
[ "Creates", "and", "returns", "the", "_IterationPhase", "-", "based", "instance", "corresponding", "to", "this", "phase", "specification" ]
python
valid
limodou/uliweb
uliweb/contrib/rbac/rbac.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/rbac/rbac.py#L42-L78
def has_role(user, *roles, **kwargs): """ Judge whether the user belongs to the role, and if so, return the role object; if not, return False. kwargs will be passed to role_func. """ Role = get_model('role') if isinstance(user, (unicode, str)): User = get_model('user') user = User.get(User.c.username==user) for role in roles: if isinstance(role, (str, unicode)): role = Role.get(Role.c.name==role) if not role: continue name = role.name func = __role_funcs__.get(name, None) if func: if isinstance(func, (unicode, str)): func = import_attr(func) assert callable(func) para = kwargs.copy() para['user'] = user flag = call_func(func, para) if flag: return role flag = role.users.has(user) if flag: return role flag = role.usergroups_has_user(user) if flag: return role return False
[ "def", "has_role", "(", "user", ",", "*", "roles", ",", "*", "*", "kwargs", ")", ":", "Role", "=", "get_model", "(", "'role'", ")", "if", "isinstance", "(", "user", ",", "(", "unicode", ",", "str", ")", ")", ":", "User", "=", "get_model", "(", "'user'", ")", "user", "=", "User", ".", "get", "(", "User", ".", "c", ".", "username", "==", "user", ")", "for", "role", "in", "roles", ":", "if", "isinstance", "(", "role", ",", "(", "str", ",", "unicode", ")", ")", ":", "role", "=", "Role", ".", "get", "(", "Role", ".", "c", ".", "name", "==", "role", ")", "if", "not", "role", ":", "continue", "name", "=", "role", ".", "name", "func", "=", "__role_funcs__", ".", "get", "(", "name", ",", "None", ")", "if", "func", ":", "if", "isinstance", "(", "func", ",", "(", "unicode", ",", "str", ")", ")", ":", "func", "=", "import_attr", "(", "func", ")", "assert", "callable", "(", "func", ")", "para", "=", "kwargs", ".", "copy", "(", ")", "para", "[", "'user'", "]", "=", "user", "flag", "=", "call_func", "(", "func", ",", "para", ")", "if", "flag", ":", "return", "role", "flag", "=", "role", ".", "users", ".", "has", "(", "user", ")", "if", "flag", ":", "return", "role", "flag", "=", "role", ".", "usergroups_has_user", "(", "user", ")", "if", "flag", ":", "return", "role", "return", "False" ]
Judge whether the user belongs to the role, and if so, return the role object; if not, return False. kwargs will be passed to role_func.
[ "Judge", "whether", "the", "user", "belongs", "to", "the", "role", "and", "if", "so", "return", "the", "role", "object", ";", "if", "not", "return", "False", ".", "kwargs", "will", "be", "passed", "to", "role_func", "." ]
python
train
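A rough sketch of a permission check; this assumes a running uliweb application with the rbac models initialized:
from uliweb.contrib.rbac.rbac import has_role

role = has_role('alice', 'admin', 'manager')   # username string or user object
if role:
    print('granted via role:', role.name)
else:
    print('access denied')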
arcus-io/puppetdb-python
puppetdb/v2/facts.py
https://github.com/arcus-io/puppetdb-python/blob/d772eb80a1dfb1154a1f421c7ecdc1ac951b5ea2/puppetdb/v2/facts.py#L34-L42
def get_facts_by_name(api_url=None, fact_name=None, verify=False, cert=list()): """ Returns facts by name :param api_url: Base PuppetDB API url :param fact_name: Name of fact """ return utils._make_api_request(api_url, '/facts/{0}'.format(fact_name), verify, cert)
[ "def", "get_facts_by_name", "(", "api_url", "=", "None", ",", "fact_name", "=", "None", ",", "verify", "=", "False", ",", "cert", "=", "list", "(", ")", ")", ":", "return", "utils", ".", "_make_api_request", "(", "api_url", ",", "'/facts/{0}'", ".", "format", "(", "fact_name", ")", ",", "verify", ",", "cert", ")" ]
Returns facts by name :param api_url: Base PuppetDB API url :param fact_name: Name of fact
[ "Returns", "facts", "by", "name" ]
python
train
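For example, against a PuppetDB v2 endpoint (the URL is illustrative):
from puppetdb.v2.facts import get_facts_by_name

facts = get_facts_by_name(api_url='http://puppetdb.example.com:8080/v2',
                          fact_name='osfamily')
print(facts)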
fermiPy/fermipy
fermipy/jobs/job_archive.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/job_archive.py#L362-L368
def make_dict(cls, table): """Build a dictionary mapping int to `JobDetails` from an `astropy.table.Table`""" ret_dict = {} for row in table: job_details = cls.create_from_row(row) ret_dict[job_details.dbkey] = job_details return ret_dict
[ "def", "make_dict", "(", "cls", ",", "table", ")", ":", "ret_dict", "=", "{", "}", "for", "row", "in", "table", ":", "job_details", "=", "cls", ".", "create_from_row", "(", "row", ")", "ret_dict", "[", "job_details", ".", "dbkey", "]", "=", "job_details", "return", "ret_dict" ]
Build a dictionary mapping int to `JobDetails` from an `astropy.table.Table`
[ "Build", "a", "dictionary", "mapping", "int", "to", "JobDetails", "from", "an", "astropy", ".", "table", ".", "Table" ]
python
train
peri-source/peri
peri/comp/psfcalc.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/psfcalc.py#L421-L462
def get_polydisp_pts_wts(kfki, sigkf, dist_type='gaussian', nkpts=3): """ Calculates a set of Gauss quadrature points & weights for polydisperse light. Returns a list of points and weights of the final wavevector's distri- bution, in units of the initial wavevector. Parameters ---------- kfki : Float The mean of the polydisperse outgoing wavevectors. sigkf : Float The standard dev. of the polydisperse outgoing wavevectors. dist_type : {`gaussian`, `gamma`}, optional The distribution, gaussian or gamma, of the wavevectors. Default is `gaussian` nkpts : Int, optional The number of quadrature points to use. Default is 3 Returns ------- kfkipts : numpy.ndarray The Gauss quadrature points at which to calculate kfki. wts : numpy.ndarray The associated Gauss quadrature weights. """ if dist_type.lower() == 'gaussian': pts, wts = np.polynomial.hermite.hermgauss(nkpts) kfkipts = np.abs(kfki + sigkf*np.sqrt(2)*pts) elif dist_type.lower() == 'laguerre' or dist_type.lower() == 'gamma': k_scale = sigkf**2/kfki associated_order = kfki**2/sigkf**2 - 1 #Associated Laguerre with alpha >~170 becomes numerically unstable, so: max_order=150 if associated_order > max_order or associated_order < (-1+1e-3): warnings.warn('Numerically unstable sigk, clipping', RuntimeWarning) associated_order = np.clip(associated_order, -1+1e-3, max_order) kfkipts, wts = la_roots(nkpts, associated_order) kfkipts *= k_scale else: raise ValueError('dist_type must be either gaussian or laguerre') return kfkipts, wts/wts.sum()
[ "def", "get_polydisp_pts_wts", "(", "kfki", ",", "sigkf", ",", "dist_type", "=", "'gaussian'", ",", "nkpts", "=", "3", ")", ":", "if", "dist_type", ".", "lower", "(", ")", "==", "'gaussian'", ":", "pts", ",", "wts", "=", "np", ".", "polynomial", ".", "hermite", ".", "hermgauss", "(", "nkpts", ")", "kfkipts", "=", "np", ".", "abs", "(", "kfki", "+", "sigkf", "*", "np", ".", "sqrt", "(", "2", ")", "*", "pts", ")", "elif", "dist_type", ".", "lower", "(", ")", "==", "'laguerre'", "or", "dist_type", ".", "lower", "(", ")", "==", "'gamma'", ":", "k_scale", "=", "sigkf", "**", "2", "/", "kfki", "associated_order", "=", "kfki", "**", "2", "/", "sigkf", "**", "2", "-", "1", "#Associated Laguerre with alpha >~170 becomes numerically unstable, so:", "max_order", "=", "150", "if", "associated_order", ">", "max_order", "or", "associated_order", "<", "(", "-", "1", "+", "1e-3", ")", ":", "warnings", ".", "warn", "(", "'Numerically unstable sigk, clipping'", ",", "RuntimeWarning", ")", "associated_order", "=", "np", ".", "clip", "(", "associated_order", ",", "-", "1", "+", "1e-3", ",", "max_order", ")", "kfkipts", ",", "wts", "=", "la_roots", "(", "nkpts", ",", "associated_order", ")", "kfkipts", "*=", "k_scale", "else", ":", "raise", "ValueError", "(", "'dist_type must be either gaussian or laguerre'", ")", "return", "kfkipts", ",", "wts", "/", "wts", ".", "sum", "(", ")" ]
Calculates a set of Gauss quadrature points & weights for polydisperse light. Returns a list of points and weights of the final wavevector's distri- bution, in units of the initial wavevector. Parameters ---------- kfki : Float The mean of the polydisperse outgoing wavevectors. sigkf : Float The standard dev. of the polydisperse outgoing wavevectors. dist_type : {`gaussian`, `gamma`}, optional The distribution, gaussian or gamma, of the wavevectors. Default is `gaussian` nkpts : Int, optional The number of quadrature points to use. Default is 3 Returns ------- kfkipts : numpy.ndarray The Gauss quadrature points at which to calculate kfki. wts : numpy.ndarray The associated Gauss quadrature weights.
[ "Calculates", "a", "set", "of", "Gauss", "quadrature", "points", "&", "weights", "for", "polydisperse", "light", "." ]
python
valid
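The gaussian branch only needs NumPy, which makes a compact check that the returned weights are normalized:
from peri.comp.psfcalc import get_polydisp_pts_wts

pts, wts = get_polydisp_pts_wts(kfki=10.0, sigkf=0.5, nkpts=3)
print(pts)         # three wavevector quadrature points around kfki = 10
print(wts.sum())   # 1.0 -- the weights come back normalized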
cons3rt/pycons3rt
pycons3rt/awsapi/s3util.py
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/awsapi/s3util.py#L305-L341
def upload_file(self, filepath, key): """Uploads a file using the passed S3 key This method uploads a file specified by the filepath to S3 using the provided S3 key. :param filepath: (str) Full path to the file to be uploaded :param key: (str) S3 key to be set for the upload :return: True if upload is successful, False otherwise. """ log = logging.getLogger(self.cls_logger + '.upload_file') log.info('Attempting to upload file %s to S3 bucket %s as key %s...', filepath, self.bucket_name, key) if not isinstance(filepath, basestring): log.error('filepath argument is not a string') return False if not isinstance(key, basestring): log.error('key argument is not a string') return False if not os.path.isfile(filepath): log.error('File not found on file system: %s', filepath) return False try: self.s3client.upload_file( Filename=filepath, Bucket=self.bucket_name, Key=key) except ClientError as e: log.error('Unable to upload file %s to bucket %s as key %s:\n%s', filepath, self.bucket_name, key, e) return False else: log.info('Successfully uploaded file to S3 bucket %s as key %s', self.bucket_name, key) return True
[ "def", "upload_file", "(", "self", ",", "filepath", ",", "key", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "self", ".", "cls_logger", "+", "'.upload_file'", ")", "log", ".", "info", "(", "'Attempting to upload file %s to S3 bucket %s as key %s...'", ",", "filepath", ",", "self", ".", "bucket_name", ",", "key", ")", "if", "not", "isinstance", "(", "filepath", ",", "basestring", ")", ":", "log", ".", "error", "(", "'filepath argument is not a string'", ")", "return", "False", "if", "not", "isinstance", "(", "key", ",", "basestring", ")", ":", "log", ".", "error", "(", "'key argument is not a string'", ")", "return", "False", "if", "not", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ":", "log", ".", "error", "(", "'File not found on file system: %s'", ",", "filepath", ")", "return", "False", "try", ":", "self", ".", "s3client", ".", "upload_file", "(", "Filename", "=", "filepath", ",", "Bucket", "=", "self", ".", "bucket_name", ",", "Key", "=", "key", ")", "except", "ClientError", "as", "e", ":", "log", ".", "error", "(", "'Unable to upload file %s to bucket %s as key %s:\\n%s'", ",", "filepath", ",", "self", ".", "bucket_name", ",", "key", ",", "e", ")", "return", "False", "else", ":", "log", ".", "info", "(", "'Successfully uploaded file to S3 bucket %s as key %s'", ",", "self", ".", "bucket_name", ",", "key", ")", "return", "True" ]
Uploads a file using the passed S3 key This method uploads a file specified by the filepath to S3 using the provided S3 key. :param filepath: (str) Full path to the file to be uploaded :param key: (str) S3 key to be set for the upload :return: True if upload is successful, False otherwise.
[ "Uploads", "a", "file", "using", "the", "passed", "S3", "key" ]
python
train
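A usage sketch; the wrapper class name (`S3Util`) and its constructor signature are assumptions here, not confirmed by the record:
from pycons3rt.awsapi.s3util import S3Util   # class name assumed

s3 = S3Util(_bucket_name='my-bucket')        # constructor args assumed
ok = s3.upload_file('/tmp/report.txt', 'reports/report.txt')
print('uploaded' if ok else 'failed')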
airbus-cert/mispy
mispy/misp.py
https://github.com/airbus-cert/mispy/blob/6d523d6f134d2bd38ec8264be74e73b68403da65/mispy/misp.py#L800-L811
def GET(self, path): """ Raw GET to the MISP server :param path: URL fragment (ie /events/) :returns: HTTP raw content (as seen by :class:`requests.Response`) """ url = self._absolute_url(path) resp = requests.get(url, headers=self.headers, verify=self.verify_ssl) if resp.status_code != 200: raise MispTransportError('GET %s: returned status=%d', path, resp.status_code) return resp.content
[ "def", "GET", "(", "self", ",", "path", ")", ":", "url", "=", "self", ".", "_absolute_url", "(", "path", ")", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "headers", ",", "verify", "=", "self", ".", "verify_ssl", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "raise", "MispTransportError", "(", "'GET %s: returned status=%d'", ",", "path", ",", "resp", ".", "status_code", ")", "return", "resp", ".", "content" ]
Raw GET to the MISP server :param path: URL fragment (ie /events/) :returns: HTTP raw content (as seen by :class:`requests.Response`)
[ "Raw", "GET", "to", "the", "MISP", "server" ]
python
train
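Illustrative only; the client class name and constructor arguments are assumptions:
from mispy.misp import MispServer            # class name assumed

misp = MispServer(url='https://misp.example.com',
                  apikey='<api-key>')        # constructor args assumed
raw = misp.GET('/events/index')              # raw response body, per the docstring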
intake/intake
intake/gui/base.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/gui/base.py#L202-L214
def add(self, items): """Add items to options""" options = self._create_options(items) for k, v in options.items(): if k in self.labels and v not in self.items: options.pop(k) count = 0 while f'{k}_{count}' in self.labels: count += 1 options[f'{k}_{count}'] = v self.widget.options.update(options) self.widget.param.trigger('options') self.widget.value = list(options.values())[:1]
[ "def", "add", "(", "self", ",", "items", ")", ":", "options", "=", "self", ".", "_create_options", "(", "items", ")", "for", "k", ",", "v", "in", "options", ".", "items", "(", ")", ":", "if", "k", "in", "self", ".", "labels", "and", "v", "not", "in", "self", ".", "items", ":", "options", ".", "pop", "(", "k", ")", "count", "=", "0", "while", "f'{k}_{count}'", "in", "self", ".", "labels", ":", "count", "+=", "1", "options", "[", "f'{k}_{count}'", "]", "=", "v", "self", ".", "widget", ".", "options", ".", "update", "(", "options", ")", "self", ".", "widget", ".", "param", ".", "trigger", "(", "'options'", ")", "self", ".", "widget", ".", "value", "=", "list", "(", "options", ".", "values", "(", ")", ")", "[", ":", "1", "]" ]
Add items to options
[ "Add", "items", "to", "options" ]
python
train
mjirik/imtools
imtools/tools.py
https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/tools.py#L1421-L1451
def split_to_tiles(img, columns, rows): """ Split an image into a grid of tiles. Args: img (ndarray): The image to split. columns (int): The number of tile columns. rows (int): The number of tile rows. Returns: Tuple of tiles """ # validate_image(img, number_tiles) im_w, im_h = img.shape # columns, rows = calc_columns_rows(number_tiles) # extras = (columns * rows) - number_tiles tile_w, tile_h = int(np.floor(im_w / columns)), int(np.floor(im_h / rows)) tiles = [] # number = 1 for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error. for pos_x in range(0, im_w - columns, tile_w): # as above. roi = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h) # image = img.crop(area) tile = img[roi[1]:roi[3], roi[0]:roi[2]] # position = (int(floor(pos_x / tile_w)) + 1, # int(floor(pos_y / tile_h)) + 1) # coords = (pos_x, pos_y) # tile = Tile(image, number, position, coords) tiles.append(tile) # number += 1 return tuple(tiles)
[ "def", "split_to_tiles", "(", "img", ",", "columns", ",", "rows", ")", ":", "# validate_image(img, number_tiles)", "im_w", ",", "im_h", "=", "img", ".", "shape", "# columns, rows = calc_columns_rows(number_tiles)", "# extras = (columns * rows) - number_tiles", "tile_w", ",", "tile_h", "=", "int", "(", "np", ".", "floor", "(", "im_w", "/", "columns", ")", ")", ",", "int", "(", "np", ".", "floor", "(", "im_h", "/", "rows", ")", ")", "tiles", "=", "[", "]", "# number = 1", "for", "pos_y", "in", "range", "(", "0", ",", "im_h", "-", "rows", ",", "tile_h", ")", ":", "# -rows for rounding error.", "for", "pos_x", "in", "range", "(", "0", ",", "im_w", "-", "columns", ",", "tile_w", ")", ":", "# as above.", "roi", "=", "(", "pos_x", ",", "pos_y", ",", "pos_x", "+", "tile_w", ",", "pos_y", "+", "tile_h", ")", "# image = img.crop(area)", "tile", "=", "img", "[", "roi", "[", "1", "]", ":", "roi", "[", "3", "]", ",", "roi", "[", "0", "]", ":", "roi", "[", "2", "]", "]", "# position = (int(floor(pos_x / tile_w)) + 1,", "# int(floor(pos_y / tile_h)) + 1)", "# coords = (pos_x, pos_y)", "# tile = Tile(image, number, position, coords)", "tiles", ".", "append", "(", "tile", ")", "# number += 1", "return", "tuple", "(", "tiles", ")" ]
Split an image into a grid of tiles. Args: img (ndarray): The image to split. columns (int): The number of tile columns. rows (int): The number of tile rows. Returns: Tuple of tiles
[ "Split", "an", "image", "into", "a", "grid", "of", "tiles", ".", "Args", ":", "img", "(", "ndarray", ")", ":", "The", "image", "to", "split", ".", "columns", "(", "int", ")", ":", "The", "number", "of", "tile", "columns", ".", "rows", "(", "int", ")", ":", "The", "number", "of", "tile", "rows", ".", "Returns", ":", "Tuple", "of", "tiles" ]
python
train
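A quick self-contained check with a synthetic image (assuming the imtools dependencies are installed):
import numpy as np
from imtools.tools import split_to_tiles

img = np.arange(64).reshape(8, 8)
tiles = split_to_tiles(img, columns=2, rows=2)
print(len(tiles), tiles[0].shape)   # 4 tiles, each (4, 4)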
scivision/pymap3d
pymap3d/lox.py
https://github.com/scivision/pymap3d/blob/c9cf676594611cdb52ff7e0eca6388c80ed4f63f/pymap3d/lox.py#L130-L202
def loxodrome_inverse(lat1: float, lon1: float, lat2: float, lon2: float, ell: Ellipsoid = None, deg: bool = True): """ computes the arc length and azimuth of the loxodrome between two points on the surface of the reference ellipsoid Parameters ---------- lat1 : float or numpy.ndarray of float geodetic latitude of first point lon1 : float or numpy.ndarray of float geodetic longitude of first point lat2 : float or numpy.ndarray of float geodetic latitude of second point lon2 : float or numpy.ndarray of float geodetic longitude of second point ell : Ellipsoid, optional reference ellipsoid (default WGS84) deg : bool, optional degrees input/output (False: radians in/out) Results ------- lox_s : float or numpy.ndarray of float distance along loxodrome az12 : float or numpy.ndarray of float azimuth of loxodrome (degrees/radians) Based on Deakin, R.E., 2010, 'The Loxodrome on an Ellipsoid', Lecture Notes, School of Mathematical and Geospatial Sciences, RMIT University, January 2010 [1] Bowring, B.R., 1985, 'The geometry of the loxodrome on the ellipsoid', The Canadian Surveyor, Vol. 39, No. 3, Autumn 1985, pp.223-230. [2] Snyder, J.P., 1987, Map Projections-A Working Manual. U.S. Geological Survey Professional Paper 1395. Washington, DC: U.S. Government Printing Office, pp.15-16 and pp. 44-45. [3] Thomas, P.D., 1952, Conformal Projections in Geodesy and Cartography, Special Publication No. 251, Coast and Geodetic Survey, U.S. Department of Commerce, Washington, DC: U.S. Government Printing Office, p. 66. """ # set ellipsoid parameters if ell is None: ell = Ellipsoid() if deg is True: lat1, lon1, lat2, lon2 = np.radians([lat1, lon1, lat2, lon2]) # compute isometric latitude of P1 and P2 isolat1 = isometric(lat1, deg=False, ell=ell) isolat2 = isometric(lat2, deg=False, ell=ell) # compute changes in isometric latitude and longitude between points disolat = isolat2 - isolat1 dlon = lon2 - lon1 # compute azimuth az12 = np.arctan2(dlon, disolat) # compute distance along loxodromic curve m1 = meridian_dist(lat1, deg=False, ell=ell) m2 = meridian_dist(lat2, deg=False, ell=ell) dm = m2 - m1 lox_s = dm / np.cos(az12) if deg is True: az12 = np.degrees(az12) % 360. return lox_s, az12
[ "def", "loxodrome_inverse", "(", "lat1", ":", "float", ",", "lon1", ":", "float", ",", "lat2", ":", "float", ",", "lon2", ":", "float", ",", "ell", ":", "Ellipsoid", "=", "None", ",", "deg", ":", "bool", "=", "True", ")", ":", "# set ellipsoid parameters", "if", "ell", "is", "None", ":", "ell", "=", "Ellipsoid", "(", ")", "if", "deg", "is", "True", ":", "lat1", ",", "lon1", ",", "lat2", ",", "lon2", "=", "np", ".", "radians", "(", "[", "lat1", ",", "lon1", ",", "lat2", ",", "lon2", "]", ")", "# compute isometric latitude of P1 and P2", "isolat1", "=", "isometric", "(", "lat1", ",", "deg", "=", "False", ",", "ell", "=", "ell", ")", "isolat2", "=", "isometric", "(", "lat2", ",", "deg", "=", "False", ",", "ell", "=", "ell", ")", "# compute changes in isometric latitude and longitude between points", "disolat", "=", "isolat2", "-", "isolat1", "dlon", "=", "lon2", "-", "lon1", "# compute azimuth", "az12", "=", "np", ".", "arctan2", "(", "dlon", ",", "disolat", ")", "# compute distance along loxodromic curve", "m1", "=", "meridian_dist", "(", "lat1", ",", "deg", "=", "False", ",", "ell", "=", "ell", ")", "m2", "=", "meridian_dist", "(", "lat2", ",", "deg", "=", "False", ",", "ell", "=", "ell", ")", "dm", "=", "m2", "-", "m1", "lox_s", "=", "dm", "/", "np", ".", "cos", "(", "az12", ")", "if", "deg", "is", "True", ":", "az12", "=", "np", ".", "degrees", "(", "az12", ")", "%", "360.", "return", "lox_s", ",", "az12" ]
computes the arc length and azimuth of the loxodrome between two points on the surface of the reference ellipsoid Parameters ---------- lat1 : float or numpy.ndarray of float geodetic latitude of first point lon1 : float or numpy.ndarray of float geodetic longitude of first point lat2 : float or numpy.ndarray of float geodetic latitude of second point lon2 : float or numpy.ndarray of float geodetic longitude of second point ell : Ellipsoid, optional reference ellipsoid (default WGS84) deg : bool, optional degrees input/output (False: radians in/out) Results ------- lox_s : float or numpy.ndarray of float distance along loxodrome az12 : float or numpy.ndarray of float azimuth of loxodrome (degrees/radians) Based on Deakin, R.E., 2010, 'The Loxodrome on an Ellipsoid', Lecture Notes, School of Mathematical and Geospatial Sciences, RMIT University, January 2010 [1] Bowring, B.R., 1985, 'The geometry of the loxodrome on the ellipsoid', The Canadian Surveyor, Vol. 39, No. 3, Autumn 1985, pp.223-230. [2] Snyder, J.P., 1987, Map Projections-A Working Manual. U.S. Geological Survey Professional Paper 1395. Washington, DC: U.S. Government Printing Office, pp.15-16 and pp. 44-45. [3] Thomas, P.D., 1952, Conformal Projections in Geodesy and Cartography, Special Publication No. 251, Coast and Geodetic Survey, U.S. Department of Commerce, Washington, DC: U.S. Government Printing Office, p. 66.
[ "computes", "the", "arc", "length", "and", "azimuth", "of", "the", "loxodrome", "between", "two", "points", "on", "the", "surface", "of", "the", "reference", "ellipsoid" ]
python
train
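For instance, between two points, matching the pymap3d API shown above (coordinates are illustrative):
from pymap3d.lox import loxodrome_inverse

lox_s, az12 = loxodrome_inverse(40.0, -80.0, 65.0, -148.0)
print(lox_s)   # rhumb-line length in meters on the default WGS84 ellipsoid
print(az12)    # constant azimuth in degrees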
robotools/extractor
Lib/extractor/formats/opentype.py
https://github.com/robotools/extractor/blob/da3c2c92bfd3da863dd5de29bd8bc94cbbf433df/Lib/extractor/formats/opentype.py#L702-L711
def _replaceRenamedPairMembers(kerning, leftRename, rightRename): """ Populate the renamed pair members into the kerning. """ renamedKerning = {} for (left, right), value in kerning.items(): left = leftRename.get(left, left) right = rightRename.get(right, right) renamedKerning[left, right] = value return renamedKerning
[ "def", "_replaceRenamedPairMembers", "(", "kerning", ",", "leftRename", ",", "rightRename", ")", ":", "renamedKerning", "=", "{", "}", "for", "(", "left", ",", "right", ")", ",", "value", "in", "kerning", ".", "items", "(", ")", ":", "left", "=", "leftRename", ".", "get", "(", "left", ",", "left", ")", "right", "=", "rightRename", ".", "get", "(", "right", ",", "right", ")", "renamedKerning", "[", "left", ",", "right", "]", "=", "value", "return", "renamedKerning" ]
Populate the renamed pair members into the kerning.
[ "Populate", "the", "renamed", "pair", "members", "into", "the", "kerning", "." ]
python
train
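A tiny illustration of the renaming with made-up kerning data:
from extractor.formats.opentype import _replaceRenamedPairMembers

kerning = {('A', 'V'): -40, ('public.kern1.A', 'T'): -30}
left_rename = {'public.kern1.A': '@kern1.A'}
print(_replaceRenamedPairMembers(kerning, left_rename, {}))
# {('A', 'V'): -40, ('@kern1.A', 'T'): -30}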
ylogx/universal
universal/builder.py
https://github.com/ylogx/universal/blob/1be04c2e828d9f97a94d48bff64031b14c2b8463/universal/builder.py#L41-L53
def compile_files(args, mem_test=False): ''' Compiles the files and runs memory tests if needed. PARAM args: list of files passed as CMD args to be compiled. PARAM mem_test: Whether to perform memory test? ''' for filename in args: if not os.path.isfile(filename): print('The file doesn\'t exist') return build_and_run_file(filename) print("")
[ "def", "compile_files", "(", "args", ",", "mem_test", "=", "False", ")", ":", "for", "filename", "in", "args", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "print", "(", "'The file doesn\\'t exist'", ")", "return", "build_and_run_file", "(", "filename", ")", "print", "(", "\"\"", ")" ]
Compiles the files and runs memory tests if needed. PARAM args: list of files passed as CMD args to be compiled. PARAM mem_test: Whether to perform memory test?
[ "Compiles", "the", "files", "and", "runs", "memory", "tests", "if", "needed", ".", "PARAM", "args", ":", "list", "of", "files", "passed", "as", "CMD", "args", "to", "be", "compiled", ".", "PARAM", "mem_test", ":", "Whether", "to", "perform", "memory", "test", "?" ]
python
train
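A minimal call, assuming a source file such as 'hello.cpp' (name is illustrative) exists in the working directory:
from universal.builder import compile_files

compile_files(['hello.cpp'])   # builds and runs each listed file that exists on disk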
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/git/git_client_base.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/git/git_client_base.py#L405-L429
def get_commit(self, commit_id, repository_id, project=None, change_count=None): """GetCommit. [Preview API] Retrieve a particular commit. :param str commit_id: The id of the commit. :param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified. :param str project: Project ID or project name :param int change_count: The number of changes to include in the result. :rtype: :class:`<GitCommit> <azure.devops.v5_1.git.models.GitCommit>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if commit_id is not None: route_values['commitId'] = self._serialize.url('commit_id', commit_id, 'str') if repository_id is not None: route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') query_parameters = {} if change_count is not None: query_parameters['changeCount'] = self._serialize.query('change_count', change_count, 'int') response = self._send(http_method='GET', location_id='c2570c3b-5b3f-41b8-98bf-5407bfde8d58', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('GitCommit', response)
[ "def", "get_commit", "(", "self", ",", "commit_id", ",", "repository_id", ",", "project", "=", "None", ",", "change_count", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "commit_id", "is", "not", "None", ":", "route_values", "[", "'commitId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'commit_id'", ",", "commit_id", ",", "'str'", ")", "if", "repository_id", "is", "not", "None", ":", "route_values", "[", "'repositoryId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'repository_id'", ",", "repository_id", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "change_count", "is", "not", "None", ":", "query_parameters", "[", "'changeCount'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'change_count'", ",", "change_count", ",", "'int'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'c2570c3b-5b3f-41b8-98bf-5407bfde8d58'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'GitCommit'", ",", "response", ")" ]
GetCommit. [Preview API] Retrieve a particular commit. :param str commit_id: The id of the commit. :param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified. :param str project: Project ID or project name :param int change_count: The number of changes to include in the result. :rtype: :class:`<GitCommit> <azure.devops.v5_1.git.models.GitCommit>`
[ "GetCommit", ".", "[", "Preview", "API", "]", "Retrieve", "a", "particular", "commit", ".", ":", "param", "str", "commit_id", ":", "The", "id", "of", "the", "commit", ".", ":", "param", "str", "repository_id", ":", "The", "id", "or", "friendly", "name", "of", "the", "repository", ".", "To", "use", "the", "friendly", "name", "projectId", "must", "also", "be", "specified", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "change_count", ":", "The", "number", "of", "changes", "to", "include", "in", "the", "result", ".", ":", "rtype", ":", ":", "class", ":", "<GitCommit", ">", "<azure", ".", "devops", ".", "v5_1", ".", "git", ".", "models", ".", "GitCommit", ">" ]
python
train
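A hedged usage sketch for the get_commit record above, following the azure-devops package's documented Connection bootstrap; the organization URL, token, and ids are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholder credentials and ids -- substitute real values.
connection = Connection(base_url='https://dev.azure.com/your-org',
                        creds=BasicAuthentication('', 'personal-access-token'))
git_client = connection.clients.get_git_client()

# Fetch one commit, including its first 10 changes.
commit = git_client.get_commit('abc123def', 'my-repo', project='MyProject', change_count=10)
print(commit.commit_id, commit.comment)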
kylef/refract.py
refract/elements/base.py
https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/elements/base.py#L104-L132
def defract(self): """ Returns the underlying (unrefracted) value of element >>> Element(content='Hello').defract 'Hello' >>> Element(content=Element(content='Hello')).defract 'Hello' >>> Element(content=[Element(content='Hello')]).defract ['Hello'] """ from refract.elements.object import Object def get_value(item): if isinstance(item, KeyValuePair): return (get_value(item.key), get_value(item.value)) elif isinstance(item, list): return [get_value(element) for element in item] elif isinstance(item, Element): if isinstance(item, Object) or item.element == 'object': return dict(get_value(item.content)) return get_value(item.content) return item return get_value(self)
[ "def", "defract", "(", "self", ")", ":", "from", "refract", ".", "elements", ".", "object", "import", "Object", "def", "get_value", "(", "item", ")", ":", "if", "isinstance", "(", "item", ",", "KeyValuePair", ")", ":", "return", "(", "get_value", "(", "item", ".", "key", ")", ",", "get_value", "(", "item", ".", "value", ")", ")", "elif", "isinstance", "(", "item", ",", "list", ")", ":", "return", "[", "get_value", "(", "element", ")", "for", "element", "in", "item", "]", "elif", "isinstance", "(", "item", ",", "Element", ")", ":", "if", "isinstance", "(", "item", ",", "Object", ")", "or", "item", ".", "element", "==", "'object'", ":", "return", "dict", "(", "get_value", "(", "item", ".", "content", ")", ")", "return", "get_value", "(", "item", ".", "content", ")", "return", "item", "return", "get_value", "(", "self", ")" ]
Returns the underlying (unrefracted) value of element >>> Element(content='Hello').defract 'Hello' >>> Element(content=Element(content='Hello')).defract 'Hello' >>> Element(content=[Element(content='Hello')]).defract ['Hello']
[ "Returns", "the", "underlying", "(", "unrefracted", ")", "value", "of", "element" ]
python
train
obriencj/python-javatools
javatools/report.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/report.py#L81-L93
def add_formats_by_name(self, rfmt_list): """ adds formats by short label descriptors, such as 'txt', 'json', or 'html' """ for fmt in rfmt_list: if fmt == "json": self.add_report_format(JSONReportFormat) elif fmt in ("txt", "text"): self.add_report_format(TextReportFormat) elif fmt in ("htm", "html"): self.add_report_format(CheetahReportFormat)
[ "def", "add_formats_by_name", "(", "self", ",", "rfmt_list", ")", ":", "for", "fmt", "in", "rfmt_list", ":", "if", "fmt", "==", "\"json\"", ":", "self", ".", "add_report_format", "(", "JSONReportFormat", ")", "elif", "fmt", "in", "(", "\"txt\"", ",", "\"text\"", ")", ":", "self", ".", "add_report_format", "(", "TextReportFormat", ")", "elif", "fmt", "in", "(", "\"htm\"", ",", "\"html\"", ")", ":", "self", ".", "add_report_format", "(", "CheetahReportFormat", ")" ]
adds formats by short label descriptors, such as 'txt', 'json', or 'html'
[ "adds", "formats", "by", "short", "label", "descriptors", "such", "as", "txt", "json", "or", "html" ]
python
train
openthread/openthread
tools/harness-automation/autothreadharness/harness_case.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-automation/autothreadharness/harness_case.py#L262-L285
def _init_browser(self): """Open harness web page. Open a quiet chrome which: 1. disables extensions, 2. ignores certificate errors and 3. always allows notifications. """ chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--disable-extensions') chrome_options.add_argument('--disable-infobars') chrome_options.add_argument('--ignore-certificate-errors') chrome_options.add_experimental_option('prefs', { 'profile.managed_default_content_settings.notifications': 1 }) browser = webdriver.Chrome(chrome_options=chrome_options) browser.set_page_load_timeout(10) browser.implicitly_wait(1) browser.maximize_window() browser.get(settings.HARNESS_URL) self._browser = browser if not wait_until(lambda: 'Thread' in browser.title, 30): self.assertIn('Thread', browser.title)
[ "def", "_init_browser", "(", "self", ")", ":", "chrome_options", "=", "webdriver", ".", "ChromeOptions", "(", ")", "chrome_options", ".", "add_argument", "(", "'--disable-extensions'", ")", "chrome_options", ".", "add_argument", "(", "'--disable-infobars'", ")", "chrome_options", ".", "add_argument", "(", "'--ignore-certificate-errors'", ")", "chrome_options", ".", "add_experimental_option", "(", "'prefs'", ",", "{", "'profile.managed_default_content_settings.notifications'", ":", "1", "}", ")", "browser", "=", "webdriver", ".", "Chrome", "(", "chrome_options", "=", "chrome_options", ")", "browser", ".", "set_page_load_timeout", "(", "10", ")", "browser", ".", "implicitly_wait", "(", "1", ")", "browser", ".", "maximize_window", "(", ")", "browser", ".", "get", "(", "settings", ".", "HARNESS_URL", ")", "self", ".", "_browser", "=", "browser", "if", "not", "wait_until", "(", "lambda", ":", "'Thread'", "in", "browser", ".", "title", ",", "30", ")", ":", "self", ".", "assertIn", "(", "'Thread'", ",", "browser", ".", "title", ")" ]
Open harness web page. Open a quiet chrome which: 1. disables extensions, 2. ignores certificate errors and 3. always allows notifications.
[ "Open", "harness", "web", "page", "." ]
python
train
twisted/axiom
axiom/sequence.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/sequence.py#L40-L60
def _fixIndex(self, index, truncate=False): """ @param truncate: If true, negative indices which go past the beginning of the list will be evaluated as zero. For example:: >>> L = List([1,2,3,4,5]) >>> len(L) 5 >>> L._fixIndex(-9, truncate=True) 0 """ assert not isinstance(index, slice), 'slices are not supported (yet)' if index < 0: index += self.length if index < 0: if not truncate: raise IndexError('stored List index out of range') else: index = 0 return index
[ "def", "_fixIndex", "(", "self", ",", "index", ",", "truncate", "=", "False", ")", ":", "assert", "not", "isinstance", "(", "index", ",", "slice", ")", ",", "'slices are not supported (yet)'", "if", "index", "<", "0", ":", "index", "+=", "self", ".", "length", "if", "index", "<", "0", ":", "if", "not", "truncate", ":", "raise", "IndexError", "(", "'stored List index out of range'", ")", "else", ":", "index", "=", "0", "return", "index" ]
@param truncate: If true, negative indices which go past the beginning of the list will be evaluated as zero. For example:: >>> L = List([1,2,3,4,5]) >>> len(L) 5 >>> L._fixIndex(-9, truncate=True) 0
[ "@param", "truncate", ":", "If", "true", "negative", "indices", "which", "go", "past", "the", "beginning", "of", "the", "list", "will", "be", "evaluated", "as", "zero", ".", "For", "example", "::" ]
python
train
inasafe/inasafe
safe/utilities/settings.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/settings.py#L42-L58
def set_general_setting(key, value, qsettings=None): """Set value to QSettings based on key. :param key: Unique key for setting. :type key: basestring :param value: Value to be saved. :type value: QVariant :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings """ if not qsettings: qsettings = QSettings() qsettings.setValue(key, deep_convert_dict(value))
[ "def", "set_general_setting", "(", "key", ",", "value", ",", "qsettings", "=", "None", ")", ":", "if", "not", "qsettings", ":", "qsettings", "=", "QSettings", "(", ")", "qsettings", ".", "setValue", "(", "key", ",", "deep_convert_dict", "(", "value", ")", ")" ]
Set value to QSettings based on key. :param key: Unique key for setting. :type key: basestring :param value: Value to be saved. :type value: QVariant :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings
[ "Set", "value", "to", "QSettings", "based", "on", "key", "." ]
python
train
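A short usage sketch for the set_general_setting record above; the key name is hypothetical and the call assumes a QGIS Python environment where qgis.PyQt.QtCore.QSettings is importable:

from qgis.PyQt.QtCore import QSettings
from safe.utilities.settings import set_general_setting

# Persist a value under a hypothetical key, passing an explicit QSettings instance.
set_general_setting('inasafe/developer_mode', True, qsettings=QSettings())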
helixyte/everest
everest/resources/utils.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/resources/utils.py#L176-L183
def get_registered_collection_resources(): """ Returns a list of all registered collection resource classes. """ reg = get_current_registry() return [util.component for util in reg.registeredUtilities() if util.name == 'collection-class']
[ "def", "get_registered_collection_resources", "(", ")", ":", "reg", "=", "get_current_registry", "(", ")", "return", "[", "util", ".", "component", "for", "util", "in", "reg", ".", "registeredUtilities", "(", ")", "if", "util", ".", "name", "==", "'collection-class'", "]" ]
Returns a list of all registered collection resource classes.
[ "Returns", "a", "list", "of", "all", "registered", "collection", "resource", "classes", "." ]
python
train
jaywink/federation
federation/protocols/activitypub/protocol.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/protocols/activitypub/protocol.py#L26-L37
def identify_request(request: RequestType) -> bool: """ Try to identify whether this is an ActivityPub request. """ # noinspection PyBroadException try: data = json.loads(decode_if_bytes(request.body)) if "@context" in data: return True except Exception: pass return False
[ "def", "identify_request", "(", "request", ":", "RequestType", ")", "->", "bool", ":", "# noinspection PyBroadException", "try", ":", "data", "=", "json", ".", "loads", "(", "decode_if_bytes", "(", "request", ".", "body", ")", ")", "if", "\"@context\"", "in", "data", ":", "return", "True", "except", "Exception", ":", "pass", "return", "False" ]
Try to identify whether this is an ActivityPub request.
[ "Try", "to", "identify", "whether", "this", "is", "an", "ActivityPub", "request", "." ]
python
train
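The detection rule in identify_request above, distilled into a self-contained sketch (the helper name is ours, not the library's): parse the JSON body and look for an "@context" key.

import json

def looks_like_activitypub(body):
    # Same rule as the record: a JSON body carrying "@context" is ActivityPub.
    try:
        return '@context' in json.loads(body)
    except Exception:
        return False

print(looks_like_activitypub('{"@context": "https://www.w3.org/ns/activitystreams"}'))  # True
print(looks_like_activitypub('not json'))  # False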
dylanaraps/pywal
pywal/colors.py
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/colors.py#L86-L94
def cache_fname(img, backend, light, cache_dir, sat=""): """Create the cache file name.""" color_type = "light" if light else "dark" file_name = re.sub("[/|\\|.]", "_", img) file_size = os.path.getsize(img) file_parts = [file_name, color_type, backend, sat, file_size, __cache_version__] return [cache_dir, "schemes", "%s_%s_%s_%s_%s_%s.json" % (*file_parts,)]
[ "def", "cache_fname", "(", "img", ",", "backend", ",", "light", ",", "cache_dir", ",", "sat", "=", "\"\"", ")", ":", "color_type", "=", "\"light\"", "if", "light", "else", "\"dark\"", "file_name", "=", "re", ".", "sub", "(", "\"[/|\\\\|.]\"", ",", "\"_\"", ",", "img", ")", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "img", ")", "file_parts", "=", "[", "file_name", ",", "color_type", ",", "backend", ",", "sat", ",", "file_size", ",", "__cache_version__", "]", "return", "[", "cache_dir", ",", "\"schemes\"", ",", "\"%s_%s_%s_%s_%s_%s.json\"", "%", "(", "*", "file_parts", ",", ")", "]" ]
Create the cache file name.
[ "Create", "the", "cache", "file", "name", "." ]
python
train
aegirhall/console-menu
consolemenu/prompt_utils.py
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/prompt_utils.py#L116-L130
def input_password(self, message=None): """ Prompt the user for a password. This is equivalent to the input() method, but does not echo inputted characters to the screen. :param message: the prompt message. """ message = self.__prompt_formatter.format_prompt(message) try: if message: return getpass.getpass(message) else: return getpass.getpass() except BaseException: self.__screen.println('Warning: Unable to mask input; characters will be echoed to console') return self.input(message)
[ "def", "input_password", "(", "self", ",", "message", "=", "None", ")", ":", "message", "=", "self", ".", "__prompt_formatter", ".", "format_prompt", "(", "message", ")", "try", ":", "if", "message", ":", "return", "getpass", ".", "getpass", "(", "message", ")", "else", ":", "return", "getpass", ".", "getpass", "(", ")", "except", "BaseException", ":", "self", ".", "__screen", ".", "println", "(", "'Warning: Unable to mask input; characters will be echoed to console'", ")", "return", "self", ".", "input", "(", "message", ")" ]
Prompt the user for a password. This is equivalent to the input() method, but does not echo inputted characters to the screen. :param message: the prompt message.
[ "Prompt", "the", "user", "for", "a", "password", ".", "This", "is", "equivalent", "to", "the", "input", "()", "method", "but", "does", "not", "echo", "inputted", "characters", "to", "the", "screen", ".", ":", "param", "message", ":", "the", "prompt", "message", "." ]
python
train
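The mask-with-fallback pattern from input_password above, distilled into a self-contained sketch (function name and messages are ours):

import getpass

def read_password(prompt='Password: '):
    # Prefer masked input; fall back to plain input() when the terminal
    # cannot suppress echo (e.g. some IDE consoles raise here).
    try:
        return getpass.getpass(prompt)
    except BaseException:
        print('Warning: unable to mask input; characters will be echoed')
        return input(prompt)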
open-homeautomation/pknx
knxip/core.py
https://github.com/open-homeautomation/pknx/blob/a8aed8271563923c447aa330ba7c1c2927286f7a/knxip/core.py#L141-L162
def to_frame(self): """Convert the object to its frame format.""" self.sanitize() res = [] res.append((1 << 7) + (1 << 4) + (self.repeat << 5) + (self.priority << 2)) res.append(self.src_addr >> 8) res.append(self.src_addr % 0x100) res.append(self.dst_addr >> 8) res.append(self.dst_addr % 0x100) res.append((self.multicast << 7) + (self.routing << 4) + self.length) for i in range(0, self.length - 1): res.append(self.data[i]) checksum = 0 for i in range(0, 5 + self.length): checksum += res[i] res.append(checksum % 0x100) return bytearray(res)
[ "def", "to_frame", "(", "self", ")", ":", "self", ".", "sanitize", "(", ")", "res", "=", "[", "]", "res", ".", "append", "(", "(", "1", "<<", "7", ")", "+", "(", "1", "<<", "4", ")", "+", "(", "self", ".", "repeat", "<<", "5", ")", "+", "(", "self", ".", "priority", "<<", "2", ")", ")", "res", ".", "append", "(", "self", ".", "src_addr", ">>", "8", ")", "res", ".", "append", "(", "self", ".", "src_addr", "%", "0x100", ")", "res", ".", "append", "(", "self", ".", "dst_addr", ">>", "8", ")", "res", ".", "append", "(", "self", ".", "dst_addr", "%", "0x100", ")", "res", ".", "append", "(", "(", "self", ".", "multicast", "<<", "7", ")", "+", "(", "self", ".", "routing", "<<", "4", ")", "+", "self", ".", "length", ")", "for", "i", "in", "range", "(", "0", ",", "self", ".", "length", "-", "1", ")", ":", "res", ".", "append", "(", "self", ".", "data", "[", "i", "]", ")", "checksum", "=", "0", "for", "i", "in", "range", "(", "0", ",", "5", "+", "self", ".", "length", ")", ":", "checksum", "+=", "res", "[", "i", "]", "res", ".", "append", "(", "checksum", "%", "0x100", ")", "return", "bytearray", "(", "res", ")" ]
Convert the object to its frame format.
[ "Convert", "the", "object", "to", "its", "frame", "format", "." ]
python
train
duguyue100/minesweeper
minesweeper/msboard.py
https://github.com/duguyue100/minesweeper/blob/38b1910f4c34d0275ac10a300285aba6f1d91d61/minesweeper/msboard.py#L144-L155
def check_board(self): """Check the board status and give feedback.""" num_mines = np.sum(self.info_map == 12) num_undiscovered = np.sum(self.info_map == 11) num_questioned = np.sum(self.info_map == 10) if num_mines > 0: return 0 elif np.array_equal(self.info_map == 9, self.mine_map): return 1 elif num_undiscovered > 0 or num_questioned > 0: return 2
[ "def", "check_board", "(", "self", ")", ":", "num_mines", "=", "np", ".", "sum", "(", "self", ".", "info_map", "==", "12", ")", "num_undiscovered", "=", "np", ".", "sum", "(", "self", ".", "info_map", "==", "11", ")", "num_questioned", "=", "np", ".", "sum", "(", "self", ".", "info_map", "==", "10", ")", "if", "num_mines", ">", "0", ":", "return", "0", "elif", "np", ".", "array_equal", "(", "self", ".", "info_map", "==", "9", ",", "self", ".", "mine_map", ")", ":", "return", "1", "elif", "num_undiscovered", ">", "0", "or", "num_questioned", ">", "0", ":", "return", "2" ]
Check the board status and give feedback.
[ "Check", "the", "board", "status", "and", "give", "feedback", "." ]
python
train
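The win test from check_board above, demonstrated on a tiny NumPy board; per the record's codes, 9 appears to mark a flagged cell, and the board is won when flags coincide exactly with mines:

import numpy as np

# 2x2 board: one mine at (0, 0), correctly flagged (code 9); rest revealed.
info_map = np.array([[9, 1], [1, 1]])
mine_map = np.array([[True, False], [False, False]])

print(np.array_equal(info_map == 9, mine_map))  # True -> check_board() returns 1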
Capitains/flask-capitains-nemo
flask_nemo/common.py
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/common.py#L12-L21
def resource_qualifier(resource): """ Split a resource into a (filename, directory) tuple, taking care of external resources :param resource: A file path or a URI :return: (Filename, Directory) for files, (URI, None) for URI """ if resource.startswith("//") or resource.startswith("http"): return resource, None else: return reversed(op.split(resource))
[ "def", "resource_qualifier", "(", "resource", ")", ":", "if", "resource", ".", "startswith", "(", "\"//\"", ")", "or", "resource", ".", "startswith", "(", "\"http\"", ")", ":", "return", "resource", ",", "None", "else", ":", "return", "reversed", "(", "op", ".", "split", "(", "resource", ")", ")" ]
Split a resource into a (filename, directory) tuple, taking care of external resources :param resource: A file path or a URI :return: (Filename, Directory) for files, (URI, None) for URI
[ "Split", "a", "resource", "into", "a", "(", "filename", "directory", ")", "tuple", "taking", "care", "of", "external", "resources" ]
python
valid
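A usage sketch for the resource_qualifier record above; note the file branch returns a reversed iterator, so it is wrapped in tuple() here (the paths are examples):

from flask_nemo.common import resource_qualifier

print(tuple(resource_qualifier('assets/css/theme.css')))  # ('theme.css', 'assets/css')
print(resource_qualifier('https://example.org/lib.js'))   # ('https://example.org/lib.js', None)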
slackapi/python-slackclient
tutorial/PythOnBoardingBot/app.py
https://github.com/slackapi/python-slackclient/blob/901341c0284fd81e6d2719d6a0502308760d83e4/tutorial/PythOnBoardingBot/app.py#L38-L53
def onboarding_message(**payload): """Create and send an onboarding welcome message to new users. Save the time stamp of this message so we can update this message in the future. """ # Get WebClient so you can communicate back to Slack. web_client = payload["web_client"] # Get the id of the Slack user associated with the incoming event user_id = payload["data"]["user"]["id"] # Open a DM with the new user. response = web_client.im_open(user_id) channel = response["channel"]["id"] # Post the onboarding message. start_onboarding(web_client, user_id, channel)
[ "def", "onboarding_message", "(", "*", "*", "payload", ")", ":", "# Get WebClient so you can communicate back to Slack.", "web_client", "=", "payload", "[", "\"web_client\"", "]", "# Get the id of the Slack user associated with the incoming event", "user_id", "=", "payload", "[", "\"data\"", "]", "[", "\"user\"", "]", "[", "\"id\"", "]", "# Open a DM with the new user.", "response", "=", "web_client", ".", "im_open", "(", "user_id", ")", "channel", "=", "response", "[", "\"channel\"", "]", "[", "\"id\"", "]", "# Post the onboarding message.", "start_onboarding", "(", "web_client", ",", "user_id", ",", "channel", ")" ]
Create and send an onboarding welcome message to new users. Save the time stamp of this message so we can update this message in the future.
[ "Create", "and", "send", "an", "onboarding", "welcome", "message", "to", "new", "users", ".", "Save", "the", "time", "stamp", "of", "this", "message", "so", "we", "can", "update", "this", "message", "in", "the", "future", "." ]
python
train
priendeau/UnderscoreX
setup.py
https://github.com/priendeau/UnderscoreX/blob/ac83e13627cfa009dc5731a1fb31f9c6145d983a/setup.py#L68-L82
def Kargs2Attr( ): """ This Decorator Will: Read **kwargs key and add it to current Object-class ClassTinyDecl under current name read from **kwargs key name. """ def decorator( func ): def inner( **kwargs ): for ItemName in kwargs.keys(): setattr( __builtins__, ItemName , kwargs[ItemName] ) func( **kwargs ) return inner return decorator
[ "def", "Kargs2Attr", "(", ")", ":", "def", "decorator", "(", "func", ")", ":", "def", "inner", "(", "*", "*", "kwargs", ")", ":", "for", "ItemName", "in", "kwargs", ".", "keys", "(", ")", ":", "setattr", "(", "__builtins__", ",", "ItemName", ",", "kwargs", "[", "ItemName", "]", ")", "func", "(", "*", "*", "kwargs", ")", "return", "inner", "return", "decorator" ]
This Decorator Will: Read **kwargs key and add it to current Object-class ClassTinyDecl under current name read from **kwargs key name.
[ "This", "Decorator", "Will", ":", "Read", "**", "kwargs", "key", "and", "add", "it", "to", "current", "Object", "-", "class", "ClassTinyDecl", "under", "current", "name", "read", "from", "**", "kwargs", "key", "name", "." ]
python
train
glomex/gcdt
gcdt/iam.py
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/iam.py#L190-L221
def build_bucket(self, name, lifecycle_configuration=False, use_plain_name=False): """ Generate S3 bucket statement :param name: Name of the bucket :param lifecycle_configuration: Additional lifecycle configuration (default=False) :param use_plain_name: Just use the given name and do not add prefix :return: Ref to new bucket """ if use_plain_name: name_aws = name_bucket = name name_aws = name_aws.title() name_aws = name_aws.replace('-', '') else: name_aws = self.name_strip(name, False, False) name_bucket = self.name_build(name) if lifecycle_configuration: return self.__template.add_resource( Bucket( name_aws, BucketName=name_bucket, LifecycleConfiguration=lifecycle_configuration ) ) else: return self.__template.add_resource( Bucket( name_aws, BucketName=name_bucket, ) )
[ "def", "build_bucket", "(", "self", ",", "name", ",", "lifecycle_configuration", "=", "False", ",", "use_plain_name", "=", "False", ")", ":", "if", "use_plain_name", ":", "name_aws", "=", "name_bucket", "=", "name", "name_aws", "=", "name_aws", ".", "title", "(", ")", "name_aws", "=", "name_aws", ".", "replace", "(", "'-'", ",", "''", ")", "else", ":", "name_aws", "=", "self", ".", "name_strip", "(", "name", ",", "False", ",", "False", ")", "name_bucket", "=", "self", ".", "name_build", "(", "name", ")", "if", "lifecycle_configuration", ":", "return", "self", ".", "__template", ".", "add_resource", "(", "Bucket", "(", "name_aws", ",", "BucketName", "=", "name_bucket", ",", "LifecycleConfiguration", "=", "lifecycle_configuration", ")", ")", "else", ":", "return", "self", ".", "__template", ".", "add_resource", "(", "Bucket", "(", "name_aws", ",", "BucketName", "=", "name_bucket", ",", ")", ")" ]
Generate S3 bucket statement :param name: Name of the bucket :param lifecycle_configuration: Additional lifecycle configuration (default=False) :param use_plain_name: Just use the given name and do not add prefix :return: Ref to new bucket
[ "Generate", "S3", "bucket", "statement", ":", "param", "name", ":", "Name", "of", "the", "bucket", ":", "param", "lifecycle_configuration", ":", "Additional", "lifecycle", "configuration", "(", "default", "=", "False", ")", ":", "param", "use_plain_name", ":", "Just", "use", "the", "given", "name", "and", "do", "not", "add", "prefix", ":", "return", ":", "Ref", "to", "new", "bucket" ]
python
train
rbarrois/mpdlcd
mpdlcd/mpdwrapper.py
https://github.com/rbarrois/mpdlcd/blob/85f16c8cc0883f8abb4c2cc7f69729c3e2f857da/mpdlcd/mpdwrapper.py#L135-L146
def get(self, tags): """Find an adequate value for this field from a dict of tags.""" # Try to find our name value = tags.get(self.name, '') for name in self.alternate_tags: # Iterate over alternates until a non-empty value is found value = value or tags.get(name, '') # If we still have nothing, return our default value = value or self.default return value
[ "def", "get", "(", "self", ",", "tags", ")", ":", "# Try to find our name", "value", "=", "tags", ".", "get", "(", "self", ".", "name", ",", "''", ")", "for", "name", "in", "self", ".", "alternate_tags", ":", "# Iterate over alternates until a non-empty value is found", "value", "=", "value", "or", "tags", ".", "get", "(", "name", ",", "''", ")", "# If we still have nothing, return our default", "value", "=", "value", "or", "self", ".", "default", "return", "value" ]
Find an adequate value for this field from a dict of tags.
[ "Find", "an", "adequate", "value", "for", "this", "field", "from", "a", "dict", "of", "tags", "." ]
python
train
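The fallback chain in get() above, distilled into a self-contained sketch (the helper name is ours):

def first_nonempty_tag(tags, name, alternates, default=''):
    # Try the primary tag, then each alternate, then the default.
    value = tags.get(name, '')
    for alt in alternates:
        value = value or tags.get(alt, '')
    return value or default

tags = {'artist': '', 'albumartist': 'Various Artists'}
print(first_nonempty_tag(tags, 'artist', ['albumartist']))  # 'Various Artists'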
priestc/moneywagon
moneywagon/tx.py
https://github.com/priestc/moneywagon/blob/00518f1f557dcca8b3031f46d3564c2baa0227a3/moneywagon/tx.py#L31-L43
def from_unit_to_satoshi(self, value, unit='satoshi'): """ Convert a value to satoshis. units can be any fiat currency. By default the unit is satoshi. """ if not unit or unit == 'satoshi': return value if unit == 'bitcoin' or unit == 'btc': return value * 1e8 # assume fiat currency that we can convert convert = get_current_price(self.crypto, unit) return int(value / convert * 1e8)
[ "def", "from_unit_to_satoshi", "(", "self", ",", "value", ",", "unit", "=", "'satoshi'", ")", ":", "if", "not", "unit", "or", "unit", "==", "'satoshi'", ":", "return", "value", "if", "unit", "==", "'bitcoin'", "or", "unit", "==", "'btc'", ":", "return", "value", "*", "1e8", "# assume fiat currency that we can convert", "convert", "=", "get_current_price", "(", "self", ".", "crypto", ",", "unit", ")", "return", "int", "(", "value", "/", "convert", "*", "1e8", ")" ]
Convert a value to satoshis. units can be any fiat currency. By default the unit is satoshi.
[ "Convert", "a", "value", "to", "satoshis", ".", "units", "can", "be", "any", "fiat", "currency", ".", "By", "default", "the", "unit", "is", "satoshi", "." ]
python
train
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L221-L244
def license(self, key, value): """Populate the ``license`` key.""" def _get_license(value): a_values = force_list(value.get('a')) oa_licenses = [el for el in a_values if el == 'OA' or el == 'Open Access'] other_licenses = [el for el in a_values if el != 'OA' and el != 'Open Access'] if not other_licenses: return force_single_element(oa_licenses) return force_single_element(other_licenses) def _get_material(value): material = value.get('3', '').lower() if material == 'article': return 'publication' return material return { 'imposing': value.get('b'), 'license': _get_license(value), 'material': _get_material(value), 'url': value.get('u'), }
[ "def", "license", "(", "self", ",", "key", ",", "value", ")", ":", "def", "_get_license", "(", "value", ")", ":", "a_values", "=", "force_list", "(", "value", ".", "get", "(", "'a'", ")", ")", "oa_licenses", "=", "[", "el", "for", "el", "in", "a_values", "if", "el", "==", "'OA'", "or", "el", "==", "'Open Access'", "]", "other_licenses", "=", "[", "el", "for", "el", "in", "a_values", "if", "el", "!=", "'OA'", "and", "el", "!=", "'Open Access'", "]", "if", "not", "other_licenses", ":", "return", "force_single_element", "(", "oa_licenses", ")", "return", "force_single_element", "(", "other_licenses", ")", "def", "_get_material", "(", "value", ")", ":", "material", "=", "value", ".", "get", "(", "'3'", ",", "''", ")", ".", "lower", "(", ")", "if", "material", "==", "'article'", ":", "return", "'publication'", "return", "material", "return", "{", "'imposing'", ":", "value", ".", "get", "(", "'b'", ")", ",", "'license'", ":", "_get_license", "(", "value", ")", ",", "'material'", ":", "_get_material", "(", "value", ")", ",", "'url'", ":", "value", ".", "get", "(", "'u'", ")", ",", "}" ]
Populate the ``license`` key.
[ "Populate", "the", "license", "key", "." ]
python
train
synw/dataswim
dataswim/data/count.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/count.py#L10-L22
def count_nulls(self, field): """ Count the number of null values in a column """ try: n = self.df[field].isnull().sum() except KeyError: self.warning("Can not find column", field) return except Exception as e: self.err(e, "Can not count nulls") return self.ok("Found", n, "nulls in column", field)
[ "def", "count_nulls", "(", "self", ",", "field", ")", ":", "try", ":", "n", "=", "self", ".", "df", "[", "field", "]", ".", "isnull", "(", ")", ".", "sum", "(", ")", "except", "KeyError", ":", "self", ".", "warning", "(", "\"Can not find column\"", ",", "field", ")", "return", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Can not count nulls\"", ")", "return", "self", ".", "ok", "(", "\"Found\"", ",", "n", ",", "\"nulls in column\"", ",", "field", ")" ]
Count the number of null values in a column
[ "Count", "the", "number", "of", "null", "values", "in", "a", "column" ]
python
train
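The pandas expression underlying count_nulls above, shown standalone:

import pandas as pd

df = pd.DataFrame({'score': [1.0, None, 3.0, None]})
# Same counting expression as the record: df[field].isnull().sum()
print(int(df['score'].isnull().sum()))  # 2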
limpyd/redis-limpyd
limpyd/collection.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/collection.py#L206-L222
def _prepare_sort_options(self, has_pk): """ Prepare "sort" options to use when calling the collection, depending on "_sort" and "_sort_limits" attributes """ sort_options = {} if self._sort is not None and not has_pk: sort_options.update(self._sort) if self._sort_limits is not None: if 'start' in self._sort_limits and 'num' not in self._sort_limits: self._sort_limits['num'] = -1 elif 'num' in self._sort_limits and 'start' not in self._sort_limits: self._sort_limits['start'] = 0 sort_options.update(self._sort_limits) if not sort_options and self._sort is None: sort_options = None return sort_options
[ "def", "_prepare_sort_options", "(", "self", ",", "has_pk", ")", ":", "sort_options", "=", "{", "}", "if", "self", ".", "_sort", "is", "not", "None", "and", "not", "has_pk", ":", "sort_options", ".", "update", "(", "self", ".", "_sort", ")", "if", "self", ".", "_sort_limits", "is", "not", "None", ":", "if", "'start'", "in", "self", ".", "_sort_limits", "and", "'num'", "not", "in", "self", ".", "_sort_limits", ":", "self", ".", "_sort_limits", "[", "'num'", "]", "=", "-", "1", "elif", "'num'", "in", "self", ".", "_sort_limits", "and", "'start'", "not", "in", "self", ".", "_sort_limits", ":", "self", ".", "_sort_limits", "[", "'start'", "]", "=", "0", "sort_options", ".", "update", "(", "self", ".", "_sort_limits", ")", "if", "not", "sort_options", "and", "self", ".", "_sort", "is", "None", ":", "sort_options", "=", "None", "return", "sort_options" ]
Prepare "sort" options to use when calling the collection, depending on "_sort" and "_sort_limits" attributes
[ "Prepare", "sort", "options", "to", "use", "when", "calling", "the", "collection", "depending", "on", "_sort", "and", "_sort_limits", "attributes" ]
python
train
spulec/moto
moto/ec2/utils.py
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ec2/utils.py#L204-L240
def dhcp_configuration_from_querystring(querystring, option=u'DhcpConfiguration'): """ turn: {u'AWSAccessKeyId': [u'the_key'], u'Action': [u'CreateDhcpOptions'], u'DhcpConfiguration.1.Key': [u'domain-name'], u'DhcpConfiguration.1.Value.1': [u'example.com'], u'DhcpConfiguration.2.Key': [u'domain-name-servers'], u'DhcpConfiguration.2.Value.1': [u'10.0.0.6'], u'DhcpConfiguration.2.Value.2': [u'10.0.0.7'], u'Signature': [u'uUMHYOoLM6r+sT4fhYjdNT6MHw22Wj1mafUpe0P0bY4='], u'SignatureMethod': [u'HmacSHA256'], u'SignatureVersion': [u'2'], u'Timestamp': [u'2014-03-18T21:54:01Z'], u'Version': [u'2013-10-15']} into: {u'domain-name': [u'example.com'], u'domain-name-servers': [u'10.0.0.6', u'10.0.0.7']} """ key_needle = re.compile(u'{0}.[0-9]+.Key'.format(option), re.UNICODE) response_values = {} for key, value in querystring.items(): if key_needle.match(key): values = [] key_index = key.split(".")[1] value_index = 1 while True: value_key = u'{0}.{1}.Value.{2}'.format( option, key_index, value_index) if value_key in querystring: values.extend(querystring[value_key]) else: break value_index += 1 response_values[value[0]] = values return response_values
[ "def", "dhcp_configuration_from_querystring", "(", "querystring", ",", "option", "=", "u'DhcpConfiguration'", ")", ":", "key_needle", "=", "re", ".", "compile", "(", "u'{0}.[0-9]+.Key'", ".", "format", "(", "option", ")", ",", "re", ".", "UNICODE", ")", "response_values", "=", "{", "}", "for", "key", ",", "value", "in", "querystring", ".", "items", "(", ")", ":", "if", "key_needle", ".", "match", "(", "key", ")", ":", "values", "=", "[", "]", "key_index", "=", "key", ".", "split", "(", "\".\"", ")", "[", "1", "]", "value_index", "=", "1", "while", "True", ":", "value_key", "=", "u'{0}.{1}.Value.{2}'", ".", "format", "(", "option", ",", "key_index", ",", "value_index", ")", "if", "value_key", "in", "querystring", ":", "values", ".", "extend", "(", "querystring", "[", "value_key", "]", ")", "else", ":", "break", "value_index", "+=", "1", "response_values", "[", "value", "[", "0", "]", "]", "=", "values", "return", "response_values" ]
turn: {u'AWSAccessKeyId': [u'the_key'], u'Action': [u'CreateDhcpOptions'], u'DhcpConfiguration.1.Key': [u'domain-name'], u'DhcpConfiguration.1.Value.1': [u'example.com'], u'DhcpConfiguration.2.Key': [u'domain-name-servers'], u'DhcpConfiguration.2.Value.1': [u'10.0.0.6'], u'DhcpConfiguration.2.Value.2': [u'10.0.0.7'], u'Signature': [u'uUMHYOoLM6r+sT4fhYjdNT6MHw22Wj1mafUpe0P0bY4='], u'SignatureMethod': [u'HmacSHA256'], u'SignatureVersion': [u'2'], u'Timestamp': [u'2014-03-18T21:54:01Z'], u'Version': [u'2013-10-15']} into: {u'domain-name': [u'example.com'], u'domain-name-servers': [u'10.0.0.6', u'10.0.0.7']}
[ "turn", ":", "{", "u", "AWSAccessKeyId", ":", "[", "u", "the_key", "]", "u", "Action", ":", "[", "u", "CreateDhcpOptions", "]", "u", "DhcpConfiguration", ".", "1", ".", "Key", ":", "[", "u", "domain", "-", "name", "]", "u", "DhcpConfiguration", ".", "1", ".", "Value", ".", "1", ":", "[", "u", "example", ".", "com", "]", "u", "DhcpConfiguration", ".", "2", ".", "Key", ":", "[", "u", "domain", "-", "name", "-", "servers", "]", "u", "DhcpConfiguration", ".", "2", ".", "Value", ".", "1", ":", "[", "u", "10", ".", "0", ".", "0", ".", "6", "]", "u", "DhcpConfiguration", ".", "2", ".", "Value", ".", "2", ":", "[", "u", "10", ".", "0", ".", "0", ".", "7", "]", "u", "Signature", ":", "[", "u", "uUMHYOoLM6r", "+", "sT4fhYjdNT6MHw22Wj1mafUpe0P0bY4", "=", "]", "u", "SignatureMethod", ":", "[", "u", "HmacSHA256", "]", "u", "SignatureVersion", ":", "[", "u", "2", "]", "u", "Timestamp", ":", "[", "u", "2014", "-", "03", "-", "18T21", ":", "54", ":", "01Z", "]", "u", "Version", ":", "[", "u", "2013", "-", "10", "-", "15", "]", "}", "into", ":", "{", "u", "domain", "-", "name", ":", "[", "u", "example", ".", "com", "]", "u", "domain", "-", "name", "-", "servers", ":", "[", "u", "10", ".", "0", ".", "0", ".", "6", "u", "10", ".", "0", ".", "0", ".", "7", "]", "}" ]
python
train
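A runnable check of the docstring's own example for the record above, assuming the import path given in the record:

from moto.ec2.utils import dhcp_configuration_from_querystring

querystring = {
    u'DhcpConfiguration.1.Key': [u'domain-name'],
    u'DhcpConfiguration.1.Value.1': [u'example.com'],
    u'DhcpConfiguration.2.Key': [u'domain-name-servers'],
    u'DhcpConfiguration.2.Value.1': [u'10.0.0.6'],
    u'DhcpConfiguration.2.Value.2': [u'10.0.0.7'],
}
print(dhcp_configuration_from_querystring(querystring))
# {u'domain-name': [u'example.com'], u'domain-name-servers': [u'10.0.0.6', u'10.0.0.7']}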
numenta/htmresearch
htmresearch/frameworks/pytorch/sparse_speech_experiment.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/pytorch/sparse_speech_experiment.py#L266-L282
def createOptimizer(self, params, model): """ Create a new instance of the optimizer """ lr = params["learning_rate"] print("Creating optimizer with learning rate=", lr) if params["optimizer"] == "SGD": optimizer = optim.SGD(model.parameters(), lr=lr, momentum=params["momentum"], weight_decay=params["weight_decay"], ) elif params["optimizer"] == "Adam": optimizer = optim.Adam(model.parameters(), lr=lr) else: raise LookupError("Incorrect optimizer value") return optimizer
[ "def", "createOptimizer", "(", "self", ",", "params", ",", "model", ")", ":", "lr", "=", "params", "[", "\"learning_rate\"", "]", "print", "(", "\"Creating optimizer with learning rate=\"", ",", "lr", ")", "if", "params", "[", "\"optimizer\"", "]", "==", "\"SGD\"", ":", "optimizer", "=", "optim", ".", "SGD", "(", "model", ".", "parameters", "(", ")", ",", "lr", "=", "lr", ",", "momentum", "=", "params", "[", "\"momentum\"", "]", ",", "weight_decay", "=", "params", "[", "\"weight_decay\"", "]", ",", ")", "elif", "params", "[", "\"optimizer\"", "]", "==", "\"Adam\"", ":", "optimizer", "=", "optim", ".", "Adam", "(", "model", ".", "parameters", "(", ")", ",", "lr", "=", "lr", ")", "else", ":", "raise", "LookupError", "(", "\"Incorrect optimizer value\"", ")", "return", "optimizer" ]
Create a new instance of the optimizer
[ "Create", "a", "new", "instance", "of", "the", "optimizer" ]
python
train
materialsproject/pymatgen
pymatgen/io/phonopy.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/phonopy.py#L174-L190
def get_ph_bs_symm_line(bands_path, has_nac=False, labels_dict=None): """ Creates a pymatgen PhononBandStructure from a band.yaml file. The labels will be extracted from the dictionary, if present. If the 'eigenvector' key is found the eigendisplacements will be calculated according to the formula: \\exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v and added to the object. Args: bands_path: path to the band.yaml file has_nac: True if the data have been obtained with the --nac option. Default False. labels_dict: dict that links a qpoint in frac coords to a label. """ return get_ph_bs_symm_line_from_dict(loadfn(bands_path), has_nac, labels_dict)
[ "def", "get_ph_bs_symm_line", "(", "bands_path", ",", "has_nac", "=", "False", ",", "labels_dict", "=", "None", ")", ":", "return", "get_ph_bs_symm_line_from_dict", "(", "loadfn", "(", "bands_path", ")", ",", "has_nac", ",", "labels_dict", ")" ]
Creates a pymatgen PhononBandStructure from a band.yaml file. The labels will be extracted from the dictionary, if present. If the 'eigenvector' key is found the eigendisplacements will be calculated according to the formula: \\exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v and added to the object. Args: bands_path: path to the band.yaml file has_nac: True if the data have been obtained with the --nac option. Default False. labels_dict: dict that links a qpoint in frac coords to a label.
[ "Creates", "a", "pymatgen", "PhononBandStructure", "from", "a", "band", ".", "yaml", "file", ".", "The", "labels", "will", "be", "extracted", "from", "the", "dictionary", "if", "present", ".", "If", "the", "eigenvector", "key", "is", "found", "the", "eigendisplacements", "will", "be", "calculated", "according", "to", "the", "formula", ":", "\\\\", "exp", "(", "2", "*", "pi", "*", "i", "*", "(", "frac_coords", "\\\\", "dot", "q", ")", ")", "/", "sqrt", "(", "mass", ")", "*", "v", "and", "added", "to", "the", "object", "." ]
python
train
yyuu/botornado
boto/route53/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/route53/connection.py#L78-L108
def get_all_hosted_zones(self, start_marker=None, zone_list=None): """ Returns a Python data structure with information about all Hosted Zones defined for the AWS account. :param int start_marker: start marker to pass when fetching additional results after a truncated list :param list zone_list: a HostedZones list to prepend to results """ params = {} if start_marker: params = {'marker': start_marker} response = self.make_request('GET', '/%s/hostedzone' % self.Version, params=params) body = response.read() boto.log.debug(body) if response.status >= 300: raise exception.DNSServerError(response.status, response.reason, body) e = boto.jsonresponse.Element(list_marker='HostedZones', item_marker=('HostedZone',)) h = boto.jsonresponse.XmlHandler(e, None) h.parse(body) if zone_list: e['ListHostedZonesResponse']['HostedZones'].extend(zone_list) while e['ListHostedZonesResponse'].has_key('NextMarker'): next_marker = e['ListHostedZonesResponse']['NextMarker'] zone_list = e['ListHostedZonesResponse']['HostedZones'] e = self.get_all_hosted_zones(next_marker, zone_list) return e
[ "def", "get_all_hosted_zones", "(", "self", ",", "start_marker", "=", "None", ",", "zone_list", "=", "None", ")", ":", "params", "=", "{", "}", "if", "start_marker", ":", "params", "=", "{", "'marker'", ":", "start_marker", "}", "response", "=", "self", ".", "make_request", "(", "'GET'", ",", "'/%s/hostedzone'", "%", "self", ".", "Version", ",", "params", "=", "params", ")", "body", "=", "response", ".", "read", "(", ")", "boto", ".", "log", ".", "debug", "(", "body", ")", "if", "response", ".", "status", ">=", "300", ":", "raise", "exception", ".", "DNSServerError", "(", "response", ".", "status", ",", "response", ".", "reason", ",", "body", ")", "e", "=", "boto", ".", "jsonresponse", ".", "Element", "(", "list_marker", "=", "'HostedZones'", ",", "item_marker", "=", "(", "'HostedZone'", ",", ")", ")", "h", "=", "boto", ".", "jsonresponse", ".", "XmlHandler", "(", "e", ",", "None", ")", "h", ".", "parse", "(", "body", ")", "if", "zone_list", ":", "e", "[", "'ListHostedZonesResponse'", "]", "[", "'HostedZones'", "]", ".", "extend", "(", "zone_list", ")", "while", "e", "[", "'ListHostedZonesResponse'", "]", ".", "has_key", "(", "'NextMarker'", ")", ":", "next_marker", "=", "e", "[", "'ListHostedZonesResponse'", "]", "[", "'NextMarker'", "]", "zone_list", "=", "e", "[", "'ListHostedZonesResponse'", "]", "[", "'HostedZones'", "]", "e", "=", "self", ".", "get_all_hosted_zones", "(", "next_marker", ",", "zone_list", ")", "return", "e" ]
Returns a Python data structure with information about all Hosted Zones defined for the AWS account. :param int start_marker: start marker to pass when fetching additional results after a truncated list :param list zone_list: a HostedZones list to prepend to results
[ "Returns", "a", "Python", "data", "structure", "with", "information", "about", "all", "Hosted", "Zones", "defined", "for", "the", "AWS", "account", "." ]
python
train
apache/spark
python/pyspark/context.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L568-L579
def pickleFile(self, name, minPartitions=None): """ Load an RDD previously saved using L{RDD.saveAsPickleFile} method. >>> tmpFile = NamedTemporaryFile(delete=True) >>> tmpFile.close() >>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5) >>> sorted(sc.pickleFile(tmpFile.name, 3).collect()) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ minPartitions = minPartitions or self.defaultMinPartitions return RDD(self._jsc.objectFile(name, minPartitions), self)
[ "def", "pickleFile", "(", "self", ",", "name", ",", "minPartitions", "=", "None", ")", ":", "minPartitions", "=", "minPartitions", "or", "self", ".", "defaultMinPartitions", "return", "RDD", "(", "self", ".", "_jsc", ".", "objectFile", "(", "name", ",", "minPartitions", ")", ",", "self", ")" ]
Load an RDD previously saved using L{RDD.saveAsPickleFile} method. >>> tmpFile = NamedTemporaryFile(delete=True) >>> tmpFile.close() >>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5) >>> sorted(sc.pickleFile(tmpFile.name, 3).collect()) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[ "Load", "an", "RDD", "previously", "saved", "using", "L", "{", "RDD", ".", "saveAsPickleFile", "}", "method", "." ]
python
train
Guake/guake
guake/terminal.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/terminal.py#L299-L327
def button_press(self, terminal, event): """Handles the button press event in the terminal widget. If any match string is caught, another application is opened to handle the matched resource uri. """ self.matched_value = '' if (Vte.MAJOR_VERSION, Vte.MINOR_VERSION) >= (0, 46): matched_string = self.match_check_event(event) else: matched_string = self.match_check( int(event.x / self.get_char_width()), int(event.y / self.get_char_height()) ) self.found_link = None if event.button == 1 and (event.get_state() & Gdk.ModifierType.CONTROL_MASK): if (Vte.MAJOR_VERSION, Vte.MINOR_VERSION) > (0, 50): s = self.hyperlink_check_event(event) else: s = None if s is not None: self._on_ctrl_click_matcher((s, None)) elif self.get_has_selection(): self.quick_open() elif matched_string and matched_string[0]: self._on_ctrl_click_matcher(matched_string) elif event.button == 3 and matched_string: self.found_link = self.handleTerminalMatch(matched_string) self.matched_value = matched_string[0]
[ "def", "button_press", "(", "self", ",", "terminal", ",", "event", ")", ":", "self", ".", "matched_value", "=", "''", "if", "(", "Vte", ".", "MAJOR_VERSION", ",", "Vte", ".", "MINOR_VERSION", ")", ">=", "(", "0", ",", "46", ")", ":", "matched_string", "=", "self", ".", "match_check_event", "(", "event", ")", "else", ":", "matched_string", "=", "self", ".", "match_check", "(", "int", "(", "event", ".", "x", "/", "self", ".", "get_char_width", "(", ")", ")", ",", "int", "(", "event", ".", "y", "/", "self", ".", "get_char_height", "(", ")", ")", ")", "self", ".", "found_link", "=", "None", "if", "event", ".", "button", "==", "1", "and", "(", "event", ".", "get_state", "(", ")", "&", "Gdk", ".", "ModifierType", ".", "CONTROL_MASK", ")", ":", "if", "(", "Vte", ".", "MAJOR_VERSION", ",", "Vte", ".", "MINOR_VERSION", ")", ">", "(", "0", ",", "50", ")", ":", "s", "=", "self", ".", "hyperlink_check_event", "(", "event", ")", "else", ":", "s", "=", "None", "if", "s", "is", "not", "None", ":", "self", ".", "_on_ctrl_click_matcher", "(", "(", "s", ",", "None", ")", ")", "elif", "self", ".", "get_has_selection", "(", ")", ":", "self", ".", "quick_open", "(", ")", "elif", "matched_string", "and", "matched_string", "[", "0", "]", ":", "self", ".", "_on_ctrl_click_matcher", "(", "matched_string", ")", "elif", "event", ".", "button", "==", "3", "and", "matched_string", ":", "self", ".", "found_link", "=", "self", ".", "handleTerminalMatch", "(", "matched_string", ")", "self", ".", "matched_value", "=", "matched_string", "[", "0", "]" ]
Handles the button press event in the terminal widget. If any match string is caught, another application is opened to handle the matched resource uri.
[ "Handles", "the", "button", "press", "event", "in", "the", "terminal", "widget", ".", "If", "any", "match", "string", "is", "caught", "another", "application", "is", "opened", "to", "handle", "the", "matched", "resource", "uri", "." ]
python
train
caseyjlaw/sdmreader
sdmreader/sdmreader.py
https://github.com/caseyjlaw/sdmreader/blob/b6c3498f1915138727819715ee00d2c46353382d/sdmreader/sdmreader.py#L351-L440
def _parse (self): """Parse the BDF mime structure and record the locations of the binary blobs. Sets up various data fields in the BDFData object.""" feedparser = FeedParser (Message) binarychunks = {} sizeinfo = None headxml = None self.fp.seek (0, 0) while True: data = self.fp.readline () if not data: break feedparser.feed (data) skip = (data == '\n' and len (feedparser._msgstack) == 3 and feedparser._msgstack[-1].get_content_type () in ('application/octet-stream', 'binary/octet-stream')) if skip: # We just finished reading the headers for a huge binary blob. # Time to remember where the data chunk is and pretend it doesn't # exist. msg = feedparser._msgstack[-1] ident = msg['Content-Location'] assert ident.endswith ('.bin'), 'confusion #1 in hacky MIME parsing!' binarychunks[ident] = self.fp.tell () if sizeinfo is None: headxml, sizeinfo, tagpfx = _extract_size_info (feedparser) kind = ident.split ('/')[-1] assert kind in sizeinfo, 'no size info for binary chunk kind %s in MIME!' % kind self.fp.seek (sizeinfo[kind] + 1, 1) # skip ahead by data chunk size sample = self.fp.read (16) assert sample.startswith ('--MIME'), 'crap, unexpected chunk size in MIME parsing: %r' % sample self.fp.seek (-16, 1) # go back # check that two major kinds of data are read at least once if any([k.split('/')[3] == '3' for k in binarychunks.iterkeys()]): break if headxml is None: raise RuntimeError ('never found any binary data') self.mimemsg = feedparser.close () self.headxml = headxml self.sizeinfo = sizeinfo self.binarychunks = binarychunks headsize, intsize = self.calc_intsize() # Compute some miscellaneous parameters that we'll need. # self.n_integrations = len (self.mimemsg.get_payload ()) - 1 self.n_integrations = os.stat(self.fp.name).st_size/intsize self.n_antennas = int (headxml.find (tagpfx + nanttag).text) self.n_baselines = (self.n_antennas * (self.n_antennas - 1)) // 2 ds = headxml.find (tagpfx + dstag) nbb = 0 nspw = 0 nchan = 0 crosspolstr = None for bb in ds.findall (tagpfx + basebandtag): nbb += 1 for spw in bb.getchildren (): nspw += 1 nchan += int (spw.get ('numSpectralPoint')) if crosspolstr is None: crosspolstr = spw.get ('crossPolProducts') elif spw.get ('crossPolProducts') != crosspolstr: raise Exception ('can only handle spectral windows with identical cross pol products') self.n_basebands = nbb self.n_spws = nspw self.n_channels = nchan self.crosspols = crosspolstr.split () self.n_pols = len(self.crosspols) # if bdf info pkl not present, write it if os.path.exists(os.path.dirname(self.pklname)) and self.pklname and (not os.path.exists(self.pklname)): logger.info('Writing bdf pkl info to %s...' % (self.pklname)) with open(self.pklname,'wb') as pkl: # Compute some miscellaneous parameters that we'll need. pickle.dump( (self.mimemsg, self.headxml, self.sizeinfo, self.binarychunks, self.n_integrations, self.n_antennas, self.n_baselines, self.n_basebands, self.n_spws, self.n_channels, self.crosspols), pkl) return self
[ "def", "_parse", "(", "self", ")", ":", "feedparser", "=", "FeedParser", "(", "Message", ")", "binarychunks", "=", "{", "}", "sizeinfo", "=", "None", "headxml", "=", "None", "self", ".", "fp", ".", "seek", "(", "0", ",", "0", ")", "while", "True", ":", "data", "=", "self", ".", "fp", ".", "readline", "(", ")", "if", "not", "data", ":", "break", "feedparser", ".", "feed", "(", "data", ")", "skip", "=", "(", "data", "==", "'\\n'", "and", "len", "(", "feedparser", ".", "_msgstack", ")", "==", "3", "and", "feedparser", ".", "_msgstack", "[", "-", "1", "]", ".", "get_content_type", "(", ")", "in", "(", "'application/octet-stream'", ",", "'binary/octet-stream'", ")", ")", "if", "skip", ":", "# We just finished reading the headers for a huge binary blob.\r", "# Time to remember where the data chunk is and pretend it doesn't\r", "# exist.\r", "msg", "=", "feedparser", ".", "_msgstack", "[", "-", "1", "]", "ident", "=", "msg", "[", "'Content-Location'", "]", "assert", "ident", ".", "endswith", "(", "'.bin'", ")", ",", "'confusion #1 in hacky MIME parsing!'", "binarychunks", "[", "ident", "]", "=", "self", ".", "fp", ".", "tell", "(", ")", "if", "sizeinfo", "is", "None", ":", "headxml", ",", "sizeinfo", ",", "tagpfx", "=", "_extract_size_info", "(", "feedparser", ")", "kind", "=", "ident", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "assert", "kind", "in", "sizeinfo", ",", "'no size info for binary chunk kind %s in MIME!'", "%", "kind", "self", ".", "fp", ".", "seek", "(", "sizeinfo", "[", "kind", "]", "+", "1", ",", "1", ")", "# skip ahead by data chunk size\r", "sample", "=", "self", ".", "fp", ".", "read", "(", "16", ")", "assert", "sample", ".", "startswith", "(", "'--MIME'", ")", ",", "'crap, unexpected chunk size in MIME parsing: %r'", "%", "sample", "self", ".", "fp", ".", "seek", "(", "-", "16", ",", "1", ")", "# go back\r", "# check that two major kinds of data are read at least once\r", "if", "any", "(", "[", "k", ".", "split", "(", "'/'", ")", "[", "3", "]", "==", "'3'", "for", "k", "in", "binarychunks", ".", "iterkeys", "(", ")", "]", ")", ":", "break", "if", "headxml", "is", "None", ":", "raise", "RuntimeError", "(", "'never found any binary data'", ")", "self", ".", "mimemsg", "=", "feedparser", ".", "close", "(", ")", "self", ".", "headxml", "=", "headxml", "self", ".", "sizeinfo", "=", "sizeinfo", "self", ".", "binarychunks", "=", "binarychunks", "headsize", ",", "intsize", "=", "self", ".", "calc_intsize", "(", ")", "# Compute some miscellaneous parameters that we'll need.\r", "# self.n_integrations = len (self.mimemsg.get_payload ()) - 1\r", "self", ".", "n_integrations", "=", "os", ".", "stat", "(", "self", ".", "fp", ".", "name", ")", ".", "st_size", "/", "intsize", "self", ".", "n_antennas", "=", "int", "(", "headxml", ".", "find", "(", "tagpfx", "+", "nanttag", ")", ".", "text", ")", "self", ".", "n_baselines", "=", "(", "self", ".", "n_antennas", "*", "(", "self", ".", "n_antennas", "-", "1", ")", ")", "//", "2", "ds", "=", "headxml", ".", "find", "(", "tagpfx", "+", "dstag", ")", "nbb", "=", "0", "nspw", "=", "0", "nchan", "=", "0", "crosspolstr", "=", "None", "for", "bb", "in", "ds", ".", "findall", "(", "tagpfx", "+", "basebandtag", ")", ":", "nbb", "+=", "1", "for", "spw", "in", "bb", ".", "getchildren", "(", ")", ":", "nspw", "+=", "1", "nchan", "+=", "int", "(", "spw", ".", "get", "(", "'numSpectralPoint'", ")", ")", "if", "crosspolstr", "is", "None", ":", "crosspolstr", "=", "spw", ".", "get", "(", "'crossPolProducts'", ")", "elif", "spw", ".", "get", "(", "'crossPolProducts'", ")", "!=", "crosspolstr", ":", "raise", "Exception", "(", "'can only handle spectral windows with identical cross pol products'", ")", "self", ".", "n_basebands", "=", "nbb", "self", ".", "n_spws", "=", "nspw", "self", ".", "n_channels", "=", "nchan", "self", ".", "crosspols", "=", "crosspolstr", ".", "split", "(", ")", "self", ".", "n_pols", "=", "len", "(", "self", ".", "crosspols", ")", "# if bdf info pkl not present, write it\r", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "pklname", ")", ")", "and", "self", ".", "pklname", "and", "(", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "pklname", ")", ")", ":", "logger", ".", "info", "(", "'Writing bdf pkl info to %s...'", "%", "(", "self", ".", "pklname", ")", ")", "with", "open", "(", "self", ".", "pklname", ",", "'wb'", ")", "as", "pkl", ":", "# Compute some miscellaneous parameters that we'll need.\r", "pickle", ".", "dump", "(", "(", "self", ".", "mimemsg", ",", "self", ".", "headxml", ",", "self", ".", "sizeinfo", ",", "self", ".", "binarychunks", ",", "self", ".", "n_integrations", ",", "self", ".", "n_antennas", ",", "self", ".", "n_baselines", ",", "self", ".", "n_basebands", ",", "self", ".", "n_spws", ",", "self", ".", "n_channels", ",", "self", ".", "crosspols", ")", ",", "pkl", ")", "return", "self" ]
Parse the BDF mime structure and record the locations of the binary blobs. Sets up various data fields in the BDFData object.
[ "Parse", "the", "BDF", "mime", "structure", "and", "record", "the", "locations", "of", "the", "binary", "blobs", ".", "Sets", "up", "various", "data", "fields", "in", "the", "BDFData", "object", "." ]
python
train
ucsb-cs/submit
submit/models.py
https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/models.py#L919-L926
def get_value(cls, value): '''Takes the class of the item that we want to query, along with a potential instance of that class. If the value is an instance of int or basestring, then we will treat it like an id for that instance.''' if isinstance(value, (basestring, int)): value = cls.fetch_by(id=value) return value if isinstance(value, cls) else None
[ "def", "get_value", "(", "cls", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "basestring", ",", "int", ")", ")", ":", "value", "=", "cls", ".", "fetch_by", "(", "id", "=", "value", ")", "return", "value", "if", "isinstance", "(", "value", ",", "cls", ")", "else", "None" ]
Takes the class of the item that we want to query, along with a potential instance of that class. If the value is an instance of int or basestring, then we will treat it like an id for that instance.
[ "Takes", "the", "class", "of", "the", "item", "that", "we", "want", "to", "query", "along", "with", "a", "potential", "instance", "of", "that", "class", ".", "If", "the", "value", "is", "an", "instance", "of", "int", "or", "basestring", "then", "we", "will", "treat", "it", "like", "an", "id", "for", "that", "instance", "." ]
python
train
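A hedged usage sketch of the id-or-instance coercion above. The `Item` class and its in-memory `fetch_by` lookup are hypothetical stand-ins for the real SQLAlchemy models in submit/models.py (the original also accepts Python 2's `basestring`):

class Item(object):
    _registry = {}

    def __init__(self, id):
        self.id = id
        Item._registry[id] = self

    @classmethod
    def fetch_by(cls, id):
        # Stand-in for the database lookup used by the real models.
        return cls._registry.get(id)

    @classmethod
    def get_value(cls, value):
        if isinstance(value, (str, int)):
            value = cls.fetch_by(id=value)
        return value if isinstance(value, cls) else None

item = Item(1)
assert Item.get_value(1) is item          # an int is treated as an id
assert Item.get_value(item) is item       # an instance passes straight through
assert Item.get_value(object()) is None   # anything else is rejected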
openstack/horizon
openstack_dashboard/api/neutron.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L1795-L1808
def list_extensions(request): """List neutron extensions. :param request: django request object """ neutron_api = neutronclient(request) try: extensions_list = neutron_api.list_extensions() except exceptions.ServiceCatalogException: return {} if 'extensions' in extensions_list: return tuple(extensions_list['extensions']) else: return ()
[ "def", "list_extensions", "(", "request", ")", ":", "neutron_api", "=", "neutronclient", "(", "request", ")", "try", ":", "extensions_list", "=", "neutron_api", ".", "list_extensions", "(", ")", "except", "exceptions", ".", "ServiceCatalogException", ":", "return", "{", "}", "if", "'extensions'", "in", "extensions_list", ":", "return", "tuple", "(", "extensions_list", "[", "'extensions'", "]", ")", "else", ":", "return", "(", ")" ]
List neutron extensions. :param request: django request object
[ "List", "neutron", "extensions", "." ]
python
train
coordt/django-alphabetfilter
alphafilter/templatetags/alphafilter.py
https://github.com/coordt/django-alphabetfilter/blob/a7bc21c0ea985c2021a4668241bf643c615c6c1f/alphafilter/templatetags/alphafilter.py#L13-L35
def _get_default_letters(model_admin=None):
    """
    Returns the set of letters defined in the configuration variable
    DEFAULT_ALPHABET. DEFAULT_ALPHABET can be a callable, string, tuple, or
    list and returns a set.

    If a ModelAdmin class is passed, it will look for a DEFAULT_ALPHABET
    attribute and use it instead.
    """
    from django.conf import settings
    import string
    default_ltrs = string.digits + string.ascii_uppercase
    default_letters = getattr(settings, 'DEFAULT_ALPHABET', default_ltrs)
    if model_admin and hasattr(model_admin, 'DEFAULT_ALPHABET'):
        default_letters = model_admin.DEFAULT_ALPHABET
    if callable(default_letters):
        return set(default_letters())
    elif isinstance(default_letters, str):
        return set([x for x in default_letters])
    elif isinstance(default_letters, bytes):
        # Decode raw bytes (the old Python 2 ``unicode`` branch, which was an
        # unreachable duplicate ``str`` check) before splitting into letters.
        return set([x for x in default_letters.decode('utf8')])
    elif isinstance(default_letters, (tuple, list)):
        return set(default_letters)
[ "def", "_get_default_letters", "(", "model_admin", "=", "None", ")", ":", "from", "django", ".", "conf", "import", "settings", "import", "string", "default_ltrs", "=", "string", ".", "digits", "+", "string", ".", "ascii_uppercase", "default_letters", "=", "getattr", "(", "settings", ",", "'DEFAULT_ALPHABET'", ",", "default_ltrs", ")", "if", "model_admin", "and", "hasattr", "(", "model_admin", ",", "'DEFAULT_ALPHABET'", ")", ":", "default_letters", "=", "model_admin", ".", "DEFAULT_ALPHABET", "if", "callable", "(", "default_letters", ")", ":", "return", "set", "(", "default_letters", "(", ")", ")", "elif", "isinstance", "(", "default_letters", ",", "str", ")", ":", "return", "set", "(", "[", "x", "for", "x", "in", "default_letters", "]", ")", "elif", "isinstance", "(", "default_letters", ",", "str", ")", ":", "return", "set", "(", "[", "x", "for", "x", "in", "default_letters", ".", "decode", "(", "'utf8'", ")", "]", ")", "elif", "isinstance", "(", "default_letters", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "set", "(", "default_letters", ")" ]
Returns the set of letters defined in the configuration variable DEFAULT_ALPHABET. DEFAULT_ALPHABET can be a callable, string, tuple, or list and returns a set. If a ModelAdmin class is passed, it will look for a DEFAULT_ALPHABET attribute and use it instead.
[ "Returns", "the", "set", "of", "letters", "defined", "in", "the", "configuration", "variable", "DEFAULT_ALPHABET", ".", "DEFAULT_ALPHABET", "can", "be", "a", "callable", "string", "tuple", "or", "list", "and", "returns", "a", "set", "." ]
python
train
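A hedged sketch of the three DEFAULT_ALPHABET forms this helper accepts; the values and the `FooAdmin` class below are illustrative, not from the package:

# settings.py -- any one of these forms works; each is normalized to a set:
DEFAULT_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'             # a plain string
DEFAULT_ALPHABET = ('A', 'E', 'I', 'O', 'U')                # a tuple or list
DEFAULT_ALPHABET = lambda: (chr(c) for c in range(65, 91))  # a callable

# admin.py -- a per-admin attribute takes precedence over the setting:
from django.contrib import admin

class FooAdmin(admin.ModelAdmin):
    DEFAULT_ALPHABET = '0123456789'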
hotdoc/hotdoc
hotdoc/core/project.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/project.py#L151-L160
def persist(self): """ Banana banana """ if self.app.dry: return for proj in self.subprojects.values(): proj.persist()
[ "def", "persist", "(", "self", ")", ":", "if", "self", ".", "app", ".", "dry", ":", "return", "for", "proj", "in", "self", ".", "subprojects", ".", "values", "(", ")", ":", "proj", ".", "persist", "(", ")" ]
Banana banana
[ "Banana", "banana" ]
python
train
LuminosoInsight/ordered-set
ordered_set.py
https://github.com/LuminosoInsight/ordered-set/blob/a29eaedcedfe5072bcee11bdef61dea321d5e9f9/ordered_set.py#L310-L327
def union(self, *sets):
    """
    Combines all unique items.
    Each item's order is defined by its first appearance.

    Example:
        >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
        >>> print(oset)
        OrderedSet([3, 1, 4, 5, 2, 0])
        >>> oset.union([8, 9])
        OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
        >>> oset | {10}
        OrderedSet([3, 1, 4, 5, 2, 0, 10])
    """
    cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
    containers = map(list, it.chain([self], sets))
    items = it.chain.from_iterable(containers)
    return cls(items)
[ "def", "union", "(", "self", ",", "*", "sets", ")", ":", "cls", "=", "self", ".", "__class__", "if", "isinstance", "(", "self", ",", "OrderedSet", ")", "else", "OrderedSet", "containers", "=", "map", "(", "list", ",", "it", ".", "chain", "(", "[", "self", "]", ",", "sets", ")", ")", "items", "=", "it", ".", "chain", ".", "from_iterable", "(", "containers", ")", "return", "cls", "(", "items", ")" ]
Combines all unique items.
Each item's order is defined by its first appearance.

Example:
    >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
    >>> print(oset)
    OrderedSet([3, 1, 4, 5, 2, 0])
    >>> oset.union([8, 9])
    OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
    >>> oset | {10}
    OrderedSet([3, 1, 4, 5, 2, 0, 10])
[ "Combines", "all", "unique", "items", ".", "Each", "item's", "order", "is", "defined", "by", "its", "first", "appearance", "." ]
python
train
MIT-LCP/wfdb-python
wfdb/io/annotation.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L781-L859
def get_contained_labels(self, inplace=True): """ Get the set of unique labels contained in this annotation. Returns a pandas dataframe or sets the contained_labels attribute of the object. Requires the label_store field to be set. Function will try to use attributes contained in the order: 1. label_store 2. symbol 3. description This function should also be called to summarize information about an annotation after it has been read. Should not be a helper function to others except rdann. """ if self.custom_labels is not None: self.check_field('custom_labels') # Create the label map label_map = ann_label_table.copy() # Convert the tuple triplets into a pandas dataframe if needed if isinstance(self.custom_labels, (list, tuple)): custom_labels = label_triplets_to_df(self.custom_labels) elif isinstance(self.custom_labels, pd.DataFrame): # Set the index just in case it doesn't already match the label_store self.custom_labels.set_index( self.custom_labels['label_store'].values, inplace=True) custom_labels = self.custom_labels else: custom_labels = None # Merge the standard wfdb labels with the custom labels. # custom labels values overwrite standard wfdb if overlap. if custom_labels is not None: for i in custom_labels.index: label_map.loc[i] = custom_labels.loc[i] # This doesn't work... # label_map.loc[custom_labels.index] = custom_labels.loc[custom_labels.index] # Get the labels using one of the features if self.label_store is not None: index_vals = set(self.label_store) reset_index = False counts = np.unique(self.label_store, return_counts=True) elif self.symbol is not None: index_vals = set(self.symbol) label_map.set_index(label_map['symbol'].values, inplace=True) reset_index = True counts = np.unique(self.symbol, return_counts=True) elif self.description is not None: index_vals = set(self.description) label_map.set_index(label_map['description'].values, inplace=True) reset_index = True counts = np.unique(self.description, return_counts=True) else: raise Exception('No annotation labels contained in object') contained_labels = label_map.loc[index_vals, :] # Add the counts for i in range(len(counts[0])): contained_labels.loc[counts[0][i], 'n_occurrences'] = counts[1][i] contained_labels['n_occurrences'] = pd.to_numeric(contained_labels['n_occurrences'], downcast='integer') if reset_index: contained_labels.set_index(contained_labels['label_store'].values, inplace=True) if inplace: self.contained_labels = contained_labels return else: return contained_labels
[ "def", "get_contained_labels", "(", "self", ",", "inplace", "=", "True", ")", ":", "if", "self", ".", "custom_labels", "is", "not", "None", ":", "self", ".", "check_field", "(", "'custom_labels'", ")", "# Create the label map", "label_map", "=", "ann_label_table", ".", "copy", "(", ")", "# Convert the tuple triplets into a pandas dataframe if needed", "if", "isinstance", "(", "self", ".", "custom_labels", ",", "(", "list", ",", "tuple", ")", ")", ":", "custom_labels", "=", "label_triplets_to_df", "(", "self", ".", "custom_labels", ")", "elif", "isinstance", "(", "self", ".", "custom_labels", ",", "pd", ".", "DataFrame", ")", ":", "# Set the index just in case it doesn't already match the label_store", "self", ".", "custom_labels", ".", "set_index", "(", "self", ".", "custom_labels", "[", "'label_store'", "]", ".", "values", ",", "inplace", "=", "True", ")", "custom_labels", "=", "self", ".", "custom_labels", "else", ":", "custom_labels", "=", "None", "# Merge the standard wfdb labels with the custom labels.", "# custom labels values overwrite standard wfdb if overlap.", "if", "custom_labels", "is", "not", "None", ":", "for", "i", "in", "custom_labels", ".", "index", ":", "label_map", ".", "loc", "[", "i", "]", "=", "custom_labels", ".", "loc", "[", "i", "]", "# This doesn't work...", "# label_map.loc[custom_labels.index] = custom_labels.loc[custom_labels.index]", "# Get the labels using one of the features", "if", "self", ".", "label_store", "is", "not", "None", ":", "index_vals", "=", "set", "(", "self", ".", "label_store", ")", "reset_index", "=", "False", "counts", "=", "np", ".", "unique", "(", "self", ".", "label_store", ",", "return_counts", "=", "True", ")", "elif", "self", ".", "symbol", "is", "not", "None", ":", "index_vals", "=", "set", "(", "self", ".", "symbol", ")", "label_map", ".", "set_index", "(", "label_map", "[", "'symbol'", "]", ".", "values", ",", "inplace", "=", "True", ")", "reset_index", "=", "True", "counts", "=", "np", ".", "unique", "(", "self", ".", "symbol", ",", "return_counts", "=", "True", ")", "elif", "self", ".", "description", "is", "not", "None", ":", "index_vals", "=", "set", "(", "self", ".", "description", ")", "label_map", ".", "set_index", "(", "label_map", "[", "'description'", "]", ".", "values", ",", "inplace", "=", "True", ")", "reset_index", "=", "True", "counts", "=", "np", ".", "unique", "(", "self", ".", "description", ",", "return_counts", "=", "True", ")", "else", ":", "raise", "Exception", "(", "'No annotation labels contained in object'", ")", "contained_labels", "=", "label_map", ".", "loc", "[", "index_vals", ",", ":", "]", "# Add the counts", "for", "i", "in", "range", "(", "len", "(", "counts", "[", "0", "]", ")", ")", ":", "contained_labels", ".", "loc", "[", "counts", "[", "0", "]", "[", "i", "]", ",", "'n_occurrences'", "]", "=", "counts", "[", "1", "]", "[", "i", "]", "contained_labels", "[", "'n_occurrences'", "]", "=", "pd", ".", "to_numeric", "(", "contained_labels", "[", "'n_occurrences'", "]", ",", "downcast", "=", "'integer'", ")", "if", "reset_index", ":", "contained_labels", ".", "set_index", "(", "contained_labels", "[", "'label_store'", "]", ".", "values", ",", "inplace", "=", "True", ")", "if", "inplace", ":", "self", ".", "contained_labels", "=", "contained_labels", "return", "else", ":", "return", "contained_labels" ]
Get the set of unique labels contained in this annotation. Returns a pandas dataframe or sets the contained_labels attribute of the object. Requires the label_store field to be set. Function will try to use attributes contained in the order: 1. label_store 2. symbol 3. description This function should also be called to summarize information about an annotation after it has been read. Should not be a helper function to others except rdann.
[ "Get", "the", "set", "of", "unique", "labels", "contained", "in", "this", "annotation", ".", "Returns", "a", "pandas", "dataframe", "or", "sets", "the", "contained_labels", "attribute", "of", "the", "object", "." ]
python
train
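A hedged usage sketch for the summary method above, using the same package's `rdann` reader; it assumes the MIT-BIH files 100.atr and 100.hea are present in the working directory:

import wfdb

# Read the annotation file '100.atr'.
ann = wfdb.rdann('100', 'atr')

# Return the label summary instead of storing it on the object.
labels = ann.get_contained_labels(inplace=False)
print(labels[['label_store', 'symbol', 'description', 'n_occurrences']])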
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L424-L461
def remove(self, entity_id, property_uri, value):
    """Method removes a triple for the given subject.

    Args:
        entity_id(string): Fedora Object ID, ideally URI of the subject
        property_uri(string):
        value(string):

    Return:
        boolean: True if triple was removed from the object

    """
    if not entity_id.startswith("http"):
        entity_uri = urllib.parse.urljoin(self.base_url, entity_id)
    else:
        entity_uri = entity_id
    sparql_template = Template("""$prefix
    DELETE {
        <$entity> $prop_name $value_str
    } WHERE {
        <$entity> $prop_name $value_str
    }""")
    sparql = sparql_template.substitute(
        prefix=build_prefixes(self.namespaces),
        entity=entity_uri,
        prop_name=property_uri,
        value_str=self.__value_format__(value))
    delete_property_request = urllib.request.Request(
        entity_uri,
        data=sparql.encode(),
        method='PATCH',
        headers={'Content-Type': 'application/sparql-update'})
    response = urllib.request.urlopen(delete_property_request)
    if response.code < 400:
        return True
    return False
[ "def", "remove", "(", "self", ",", "entity_id", ",", "property_uri", ",", "value", ")", ":", "if", "not", "entity_id", ".", "startswith", "(", "\"http\"", ")", ":", "entity_uri", "=", "urllib", ".", "parse", ".", "urljoin", "(", "self", ".", "base_url", ",", "entity_id", ")", "else", ":", "entity_uri", "=", "entity_id", "sparql_template", "=", "Template", "(", "\"\"\"$prefix\n DELETE {\n <$entity> $prop_name $value_str\n } WHERE {\n <$entity> $prop_name $value_str\n }\"\"\"", ")", "sparql", "=", "sparql_template", ".", "substitute", "(", "prefix", "=", "build_prefixes", "(", "self", ".", "namespaces", ")", ",", "entity", "=", "entity_uri", ",", "prop_name", "=", "property_uri", ",", "value_str", "=", "self", ".", "__value_format__", "(", "value", ")", ")", "delete_property_request", "=", "urllib", ".", "request", ".", "Request", "(", "entity_uri", ",", "data", "=", "sparql", ".", "encode", "(", ")", ",", "method", "=", "'PATCH'", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/sparql-update'", "}", ")", "response", "=", "urllib", ".", "request", ".", "urlopen", "(", "delete_property_request", ")", "if", "response", ".", "code", "<", "400", ":", "return", "True", "return", "False" ]
Method removes a triple for the given subject.

Args:
    entity_id(string): Fedora Object ID, ideally URI of the subject
    property_uri(string):
    value(string):

Return:
    boolean: True if triple was removed from the object
[ "Method", "removes", "a", "triple", "for", "the", "given", "subject", "." ]
python
train
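For illustration, the SPARQL update that `remove` builds from its `Template` for a hypothetical call; the repository URL, prefix, and triple below are made up:

# repo.remove('objects/demo:1', 'dcterms:title', '"An obsolete title"')
# issues a PATCH request with Content-Type 'application/sparql-update'
# whose body looks roughly like:
#
#   PREFIX dcterms: <http://purl.org/dc/terms/>
#   DELETE {
#       <http://localhost:8080/rest/objects/demo:1> dcterms:title "An obsolete title"
#   } WHERE {
#       <http://localhost:8080/rest/objects/demo:1> dcterms:title "An obsolete title"
#   }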
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/message.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/message.py#L369-L379
def __pack_message(operation, data): """Takes message data and adds a message header based on the operation. Returns the resultant message string. """ request_id = _randint() message = struct.pack("<i", 16 + len(data)) message += struct.pack("<i", request_id) message += _ZERO_32 # responseTo message += struct.pack("<i", operation) return (request_id, message + data)
[ "def", "__pack_message", "(", "operation", ",", "data", ")", ":", "request_id", "=", "_randint", "(", ")", "message", "=", "struct", ".", "pack", "(", "\"<i\"", ",", "16", "+", "len", "(", "data", ")", ")", "message", "+=", "struct", ".", "pack", "(", "\"<i\"", ",", "request_id", ")", "message", "+=", "_ZERO_32", "# responseTo", "message", "+=", "struct", ".", "pack", "(", "\"<i\"", ",", "operation", ")", "return", "(", "request_id", ",", "message", "+", "data", ")" ]
Takes message data and adds a message header based on the operation. Returns the resultant message string.
[ "Takes", "message", "data", "and", "adds", "a", "message", "header", "based", "on", "the", "operation", "." ]
python
train
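A self-contained sketch of the 16-byte wire header this helper prepends (messageLength, requestID, responseTo, opCode, all little-endian int32); `random.randint` stands in for the private `_randint` helper:

import random
import struct

def pack_message(operation, data):
    request_id = random.randint(-2**31, 2**31 - 1)  # stand-in for _randint()
    header = struct.pack('<iiii',
                         16 + len(data),  # total length, header included
                         request_id,
                         0,               # responseTo is zero for requests
                         operation)
    return request_id, header + data

rid, msg = pack_message(2004, b'\x00' * 8)  # 2004 is the OP_QUERY opcode
assert len(msg) == 16 + 8
assert struct.unpack_from('<i', msg)[0] == len(msg)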
dougalsutherland/skl-groups
skl_groups/preprocessing.py
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/preprocessing.py#L78-L97
def fit_transform(self, X, y=None, **params): ''' Fit and transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features. ''' X = as_features(X, stack=True) X_new = self.transformer.fit_transform(X.stacked_features, y, **params) return self._gather_outputs(X, X_new)
[ "def", "fit_transform", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "params", ")", ":", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ")", "X_new", "=", "self", ".", "transformer", ".", "fit_transform", "(", "X", ".", "stacked_features", ",", "y", ",", "*", "*", "params", ")", "return", "self", ".", "_gather_outputs", "(", "X", ",", "X_new", ")" ]
Fit and transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features.
[ "Fit", "and", "transform", "the", "stacked", "points", "." ]
python
valid
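A hedged usage sketch: wrapping a scikit-learn transformer so it runs on the stacked points of several bags. `BagPreprocesser` and its constructor signature are assumed from skl_groups.preprocessing:

import numpy as np
from sklearn.preprocessing import StandardScaler
from skl_groups.preprocessing import BagPreprocesser  # assumed class name

# Four bags of 3-D points with different sizes.
bags = [np.random.rand(n, 3) for n in (5, 8, 6, 7)]

scaler = BagPreprocesser(StandardScaler())
feats = scaler.fit_transform(bags)    # scales the stacked points, keeps bag sizes
print([len(bag) for bag in feats])    # [5, 8, 6, 7]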
ev3dev/ev3dev-lang-python
ev3dev2/sensor/lego.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sensor/lego.py#L935-L941
def sound_pressure(self):
    """
    A measurement of the sound pressure level, as a percent.
    Uses a flat weighting.
    """
    self._ensure_mode(self.MODE_DB)
    return self.value(0) * self._scale('DB')
[ "def", "sound_pressure", "(", "self", ")", ":", "self", ".", "_ensure_mode", "(", "self", ".", "MODE_DB", ")", "return", "self", ".", "value", "(", "0", ")", "*", "self", ".", "_scale", "(", "'DB'", ")" ]
A measurement of the sound pressure level, as a percent.
Uses a flat weighting.
[ "A", "measurement", "of", "the", "sound", "pressure", "level", "as", "a", "percent", ".", "Uses", "a", "flat", "weighting", "." ]
python
train
PSPC-SPAC-buyandsell/von_anchor
von_anchor/anchor/holderprover.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/holderprover.py#L149-L199
async def _sync_revoc_for_proof(self, rr_id: str) -> None: """ Pick up tails file reader handle for input revocation registry identifier. If no symbolic link is present, get the revocation registry definition to retrieve its tails file hash, then find the tails file and link it. Raise AbsentTails for missing corresponding tails file. :param rr_id: revocation registry identifier """ LOGGER.debug('HolderProver._sync_revoc_for_proof >>> rr_id: %s', rr_id) if not ok_rev_reg_id(rr_id): LOGGER.debug('HolderProver._sync_revoc_for_proof <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) (cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id) try: json.loads(await self.get_cred_def(cd_id)) except AbsentCredDef: LOGGER.debug( 'HolderProver._sync_revoc_for_proof <!< corrupt tails tree %s may be for another ledger', self._dir_tails) raise AbsentCredDef('Corrupt tails tree {} may be for another ledger'.format(self._dir_tails)) except ClosedPool: pass # carry on, may be OK from cache only with REVO_CACHE.lock: revo_cache_entry = REVO_CACHE.get(rr_id, None) tails = revo_cache_entry.tails if revo_cache_entry else None if tails is None: # it's not yet set in cache try: tails = await Tails(self._dir_tails, cd_id, tag).open() except AbsentTails: # get hash from ledger and check for tails file rr_def = json.loads(await self.get_rev_reg_def(rr_id)) tails_hash = rr_def['value']['tailsHash'] path_tails = join(Tails.dir(self._dir_tails, rr_id), tails_hash) if not isfile(path_tails): LOGGER.debug('HolderProver._sync_revoc_for_proof <!< No tails file present at %s', path_tails) raise AbsentTails('No tails file present at {}'.format(path_tails)) Tails.associate(self._dir_tails, rr_id, tails_hash) tails = await Tails(self._dir_tails, cd_id, tag).open() # OK now since tails file present if revo_cache_entry is None: REVO_CACHE[rr_id] = RevoCacheEntry(None, tails) else: REVO_CACHE[rr_id].tails = tails LOGGER.debug('HolderProver._sync_revoc_for_proof <<<')
[ "async", "def", "_sync_revoc_for_proof", "(", "self", ",", "rr_id", ":", "str", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'HolderProver._sync_revoc_for_proof >>> rr_id: %s'", ",", "rr_id", ")", "if", "not", "ok_rev_reg_id", "(", "rr_id", ")", ":", "LOGGER", ".", "debug", "(", "'HolderProver._sync_revoc_for_proof <!< Bad rev reg id %s'", ",", "rr_id", ")", "raise", "BadIdentifier", "(", "'Bad rev reg id {}'", ".", "format", "(", "rr_id", ")", ")", "(", "cd_id", ",", "tag", ")", "=", "rev_reg_id2cred_def_id_tag", "(", "rr_id", ")", "try", ":", "json", ".", "loads", "(", "await", "self", ".", "get_cred_def", "(", "cd_id", ")", ")", "except", "AbsentCredDef", ":", "LOGGER", ".", "debug", "(", "'HolderProver._sync_revoc_for_proof <!< corrupt tails tree %s may be for another ledger'", ",", "self", ".", "_dir_tails", ")", "raise", "AbsentCredDef", "(", "'Corrupt tails tree {} may be for another ledger'", ".", "format", "(", "self", ".", "_dir_tails", ")", ")", "except", "ClosedPool", ":", "pass", "# carry on, may be OK from cache only", "with", "REVO_CACHE", ".", "lock", ":", "revo_cache_entry", "=", "REVO_CACHE", ".", "get", "(", "rr_id", ",", "None", ")", "tails", "=", "revo_cache_entry", ".", "tails", "if", "revo_cache_entry", "else", "None", "if", "tails", "is", "None", ":", "# it's not yet set in cache", "try", ":", "tails", "=", "await", "Tails", "(", "self", ".", "_dir_tails", ",", "cd_id", ",", "tag", ")", ".", "open", "(", ")", "except", "AbsentTails", ":", "# get hash from ledger and check for tails file", "rr_def", "=", "json", ".", "loads", "(", "await", "self", ".", "get_rev_reg_def", "(", "rr_id", ")", ")", "tails_hash", "=", "rr_def", "[", "'value'", "]", "[", "'tailsHash'", "]", "path_tails", "=", "join", "(", "Tails", ".", "dir", "(", "self", ".", "_dir_tails", ",", "rr_id", ")", ",", "tails_hash", ")", "if", "not", "isfile", "(", "path_tails", ")", ":", "LOGGER", ".", "debug", "(", "'HolderProver._sync_revoc_for_proof <!< No tails file present at %s'", ",", "path_tails", ")", "raise", "AbsentTails", "(", "'No tails file present at {}'", ".", "format", "(", "path_tails", ")", ")", "Tails", ".", "associate", "(", "self", ".", "_dir_tails", ",", "rr_id", ",", "tails_hash", ")", "tails", "=", "await", "Tails", "(", "self", ".", "_dir_tails", ",", "cd_id", ",", "tag", ")", ".", "open", "(", ")", "# OK now since tails file present", "if", "revo_cache_entry", "is", "None", ":", "REVO_CACHE", "[", "rr_id", "]", "=", "RevoCacheEntry", "(", "None", ",", "tails", ")", "else", ":", "REVO_CACHE", "[", "rr_id", "]", ".", "tails", "=", "tails", "LOGGER", ".", "debug", "(", "'HolderProver._sync_revoc_for_proof <<<'", ")" ]
Pick up tails file reader handle for input revocation registry identifier. If no symbolic link is present, get the revocation registry definition to retrieve its tails file hash, then find the tails file and link it. Raise AbsentTails for missing corresponding tails file. :param rr_id: revocation registry identifier
[ "Pick", "up", "tails", "file", "reader", "handle", "for", "input", "revocation", "registry", "identifier", ".", "If", "no", "symbolic", "link", "is", "present", "get", "the", "revocation", "registry", "definition", "to", "retrieve", "its", "tails", "file", "hash", "then", "find", "the", "tails", "file", "and", "link", "it", "." ]
python
train
ethereum/pyrlp
rlp/sedes/serializable.py
https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/sedes/serializable.py#L82-L90
def _eq(left, right): """ Equality comparison that allows for equality between tuple and list types with equivalent elements. """ if isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)): return len(left) == len(right) and all(_eq(*pair) for pair in zip(left, right)) else: return left == right
[ "def", "_eq", "(", "left", ",", "right", ")", ":", "if", "isinstance", "(", "left", ",", "(", "tuple", ",", "list", ")", ")", "and", "isinstance", "(", "right", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "len", "(", "left", ")", "==", "len", "(", "right", ")", "and", "all", "(", "_eq", "(", "*", "pair", ")", "for", "pair", "in", "zip", "(", "left", ",", "right", ")", ")", "else", ":", "return", "left", "==", "right" ]
Equality comparison that allows for equality between tuple and list types with equivalent elements.
[ "Equality", "comparison", "that", "allows", "for", "equality", "between", "tuple", "and", "list", "types", "with", "equivalent", "elements", "." ]
python
train
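A quick self-contained check of the mixed-sequence equality helper; the function is copied verbatim from the entry above:

def _eq(left, right):
    if isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)):
        return len(left) == len(right) and all(_eq(*pair) for pair in zip(left, right))
    else:
        return left == right

assert _eq((1, 2, 3), [1, 2, 3])      # tuple vs list compares element-wise
assert _eq([1, (2, 3)], (1, [2, 3]))  # nesting is handled recursively
assert not _eq((1, 2), (1, 2, 3))     # length mismatch
assert not _eq((1, 2), 'ab')          # a string is not treated as a sequence here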
CodeReclaimers/neat-python
neat/statistics.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/neat/statistics.py#L124-L129
def save_species_count(self, delimiter=' ', filename='speciation.csv'): """ Log speciation throughout evolution. """ with open(filename, 'w') as f: w = csv.writer(f, delimiter=delimiter) for s in self.get_species_sizes(): w.writerow(s)
[ "def", "save_species_count", "(", "self", ",", "delimiter", "=", "' '", ",", "filename", "=", "'speciation.csv'", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "w", "=", "csv", ".", "writer", "(", "f", ",", "delimiter", "=", "delimiter", ")", "for", "s", "in", "self", ".", "get_species_sizes", "(", ")", ":", "w", ".", "writerow", "(", "s", ")" ]
Log speciation throughout evolution.
[ "Log", "speciation", "throughout", "evolution", "." ]
python
train
chrisjrn/registrasion
registrasion/templatetags/registrasion_tags.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/templatetags/registrasion_tags.py#L124-L149
def sold_out_and_unregistered(context):
    ''' If the current user is unregistered, returns True if there are no
    products in the TICKET_PRODUCT_CATEGORY that are available to that user.
    If there *are* products available, then return False.

    If the current user *is* registered, then return None (it's not a
    pertinent question for people who already have a ticket).

    '''

    user = user_for_context(context)
    if hasattr(user, "attendee") and user.attendee.completed_registration:
        # This user has completed registration, and so we don't need to answer
        # whether they have sold out yet.
        # TODO: what if a user has got to the review phase?
        # currently that user will hit the review page, click "Check out and
        # pay", and that will fail. Probably good enough for now.
        return None

    ticket_category = settings.TICKET_PRODUCT_CATEGORY
    categories = available_categories(context)

    return ticket_category not in [cat.id for cat in categories]
[ "def", "sold_out_and_unregistered", "(", "context", ")", ":", "user", "=", "user_for_context", "(", "context", ")", "if", "hasattr", "(", "user", ",", "\"attendee\"", ")", "and", "user", ".", "attendee", ".", "completed_registration", ":", "# This user has completed registration, and so we don't need to answer", "# whether they have sold out yet.", "# TODO: what if a user has got to the review phase?", "# currently that user will hit the review page, click \"Check out and", "# pay\", and that will fail. Probably good enough for now.", "return", "None", "ticket_category", "=", "settings", ".", "TICKET_PRODUCT_CATEGORY", "categories", "=", "available_categories", "(", "context", ")", "return", "ticket_category", "not", "in", "[", "cat", ".", "id", "for", "cat", "in", "categories", "]" ]
If the current user is unregistered, returns True if there are no products in the TICKET_PRODUCT_CATEGORY that are available to that user.

If there *are* products available, then return False.

If the current user *is* registered, then return None (it's not a pertinent question for people who already have a ticket).
[ "If", "the", "current", "user", "is", "unregistered", "returns", "True", "if", "there", "are", "no", "products", "in", "the", "TICKET_PRODUCT_CATEGORY", "that", "are", "available", "to", "that", "user", "." ]
python
test
sagemath/sage-package
sage_package/sphinx.py
https://github.com/sagemath/sage-package/blob/6e511753fb0667b202f497fc00b763647456a066/sage_package/sphinx.py#L72-L79
def themes_path(): """ Retrieve the location of the themes directory from the location of this package This is taken from Sphinx's theme documentation """ package_dir = os.path.abspath(os.path.dirname(__file__)) return os.path.join(package_dir, 'themes')
[ "def", "themes_path", "(", ")", ":", "package_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "return", "os", ".", "path", ".", "join", "(", "package_dir", ",", "'themes'", ")" ]
Retrieve the location of the themes directory from the location of this package This is taken from Sphinx's theme documentation
[ "Retrieve", "the", "location", "of", "the", "themes", "directory", "from", "the", "location", "of", "this", "package" ]
python
test
qweeze/wex-api-client
wex/client.py
https://github.com/qweeze/wex-api-client/blob/e84d139be229aab2c7c5eda5976b812be651807b/wex/client.py#L80-L88
def ticker(self, pair, ignore_invalid=0): """ This method provides all the information about currently active pairs, such as: the maximum price, the minimum price, average price, trade volume, trade volume in currency, the last trade, Buy and Sell price. All information is provided over the past 24 hours. :param str or iterable pair: pair (ex. 'btc_usd' or ['btc_usd', 'eth_usd']) :param int ignore_invalid: ignore non-existing pairs """ return self._public_api_call('ticker', pair=pair, ignore_invalid=ignore_invalid)
[ "def", "ticker", "(", "self", ",", "pair", ",", "ignore_invalid", "=", "0", ")", ":", "return", "self", ".", "_public_api_call", "(", "'ticker'", ",", "pair", "=", "pair", ",", "ignore_invalid", "=", "ignore_invalid", ")" ]
This method provides all the information about currently active pairs, such as: the maximum price, the minimum price, average price, trade volume, trade volume in currency, the last trade, Buy and Sell price. All information is provided over the past 24 hours. :param str or iterable pair: pair (ex. 'btc_usd' or ['btc_usd', 'eth_usd']) :param int ignore_invalid: ignore non-existing pairs
[ "This", "method", "provides", "all", "the", "information", "about", "currently", "active", "pairs", "such", "as", ":", "the", "maximum", "price", "the", "minimum", "price", "average", "price", "trade", "volume", "trade", "volume", "in", "currency", "the", "last", "trade", "Buy", "and", "Sell", "price", ".", "All", "information", "is", "provided", "over", "the", "past", "24", "hours", ".", ":", "param", "str", "or", "iterable", "pair", ":", "pair", "(", "ex", ".", "btc_usd", "or", "[", "btc_usd", "eth_usd", "]", ")", ":", "param", "int", "ignore_invalid", ":", "ignore", "non", "-", "existing", "pairs" ]
python
train
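A hedged usage sketch; `Client` is assumed to be the public class exposed by wex/client.py:

from wex import Client  # assumed import

client = Client()
print(client.ticker('btc_usd'))                                 # a single pair
print(client.ticker(['btc_usd', 'eth_usd'], ignore_invalid=1))  # several pairs, skipping unknown ones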
NASA-AMMOS/AIT-Core
ait/core/log.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/log.py#L66-L76
def formatTime (self, record, datefmt=None): """Return the creation time of the specified LogRecord as formatted text.""" if datefmt is None: datefmt = '%Y-%m-%d %H:%M:%S' ct = self.converter(record.created) t = time.strftime(datefmt, ct) s = '%s.%03d' % (t, record.msecs) return s
[ "def", "formatTime", "(", "self", ",", "record", ",", "datefmt", "=", "None", ")", ":", "if", "datefmt", "is", "None", ":", "datefmt", "=", "'%Y-%m-%d %H:%M:%S'", "ct", "=", "self", ".", "converter", "(", "record", ".", "created", ")", "t", "=", "time", ".", "strftime", "(", "datefmt", ",", "ct", ")", "s", "=", "'%s.%03d'", "%", "(", "t", ",", "record", ".", "msecs", ")", "return", "s" ]
Return the creation time of the specified LogRecord as formatted text.
[ "Return", "the", "creation", "time", "of", "the", "specified", "LogRecord", "as", "formatted", "text", "." ]
python
train
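A self-contained demonstration of the millisecond timestamps this override produces; `DemoFormatter` is a stand-in subclass, not the AIT class itself:

import logging
import time

class DemoFormatter(logging.Formatter):
    def formatTime(self, record, datefmt=None):
        if datefmt is None:
            datefmt = '%Y-%m-%d %H:%M:%S'
        ct = self.converter(record.created)
        t = time.strftime(datefmt, ct)
        return '%s.%03d' % (t, record.msecs)

handler = logging.StreamHandler()
handler.setFormatter(DemoFormatter('%(asctime)s | %(message)s'))
log = logging.getLogger('demo')
log.addHandler(handler)
log.warning('hello')  # e.g. "2019-04-01 12:34:56.789 | hello"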
rabitt/pysox
sox/transform.py
https://github.com/rabitt/pysox/blob/eae89bde74567136ec3f723c3e6b369916d9b837/sox/transform.py#L3026-L3116
def vad(self, location=1, normalize=True, activity_threshold=7.0,
        min_activity_duration=0.25, initial_search_buffer=1.0,
        max_gap=0.25, initial_pad=0.0):
    '''Voice Activity Detector. Attempts to trim silence and quiet
    background sounds from the ends of recordings of speech. The algorithm
    currently uses a simple cepstral power measurement to detect voice, so
    may be fooled by other things, especially music.

    The effect can trim only from the front of the audio, so in order to
    trim from the back, the reverse effect must also be used.

    Parameters
    ----------
    location : 1 or -1, default=1
        If 1, trims silence from the beginning
        If -1, trims silence from the end
    normalize : bool, default=True
        If true, normalizes audio before processing.
    activity_threshold : float, default=7.0
        The measurement level used to trigger activity detection. This may
        need to be changed depending on the noise level, signal level, and
        other characteristics of the input audio.
    min_activity_duration : float, default=0.25
        The time constant (in seconds) used to help ignore short bursts of
        sound.
    initial_search_buffer : float, default=1.0
        The amount of audio (in seconds) to search for quieter/shorter
        bursts of audio to include prior to the detected trigger point.
    max_gap : float, default=0.25
        The allowed gap (in seconds) between quieter/shorter bursts of
        audio to include prior to the detected trigger point.
    initial_pad : float, default=0.0
        The amount of audio (in seconds) to preserve before the trigger
        point and any found quieter/shorter bursts.

    See Also
    --------
    silence

    Examples
    --------
    >>> tfm = sox.Transformer()

    Remove silence from the beginning of speech

    >>> tfm.vad(initial_pad=0.3)

    Remove silence from the end of speech

    >>> tfm.vad(location=-1, initial_pad=0.2)

    '''
    if location not in [-1, 1]:
        raise ValueError("location must be -1 or 1.")
    if not isinstance(normalize, bool):
        raise ValueError("normalize must be a boolean.")
    if not is_number(activity_threshold):
        raise ValueError("activity_threshold must be a number.")
    if not is_number(min_activity_duration) or min_activity_duration < 0:
        raise ValueError("min_activity_duration must be a positive number")
    if not is_number(initial_search_buffer) or initial_search_buffer < 0:
        raise ValueError("initial_search_buffer must be a positive number")
    if not is_number(max_gap) or max_gap < 0:
        raise ValueError("max_gap must be a positive number.")
    if not is_number(initial_pad) or initial_pad < 0:
        raise ValueError("initial_pad must be a positive number.")

    effect_args = []

    if normalize:
        effect_args.append('norm')

    if location == -1:
        effect_args.append('reverse')

    effect_args.extend([
        'vad',
        '-t', '{:f}'.format(activity_threshold),
        '-T', '{:f}'.format(min_activity_duration),
        '-s', '{:f}'.format(initial_search_buffer),
        '-g', '{:f}'.format(max_gap),
        '-p', '{:f}'.format(initial_pad)
    ])

    if location == -1:
        effect_args.append('reverse')

    self.effects.extend(effect_args)
    self.effects_log.append('vad')

    return self
[ "def", "vad", "(", "self", ",", "location", "=", "1", ",", "normalize", "=", "True", ",", "activity_threshold", "=", "7.0", ",", "min_activity_duration", "=", "0.25", ",", "initial_search_buffer", "=", "1.0", ",", "max_gap", "=", "0.25", ",", "initial_pad", "=", "0.0", ")", ":", "if", "location", "not", "in", "[", "-", "1", ",", "1", "]", ":", "raise", "ValueError", "(", "\"location must be -1 or 1.\"", ")", "if", "not", "isinstance", "(", "normalize", ",", "bool", ")", ":", "raise", "ValueError", "(", "\"normalize muse be a boolean.\"", ")", "if", "not", "is_number", "(", "activity_threshold", ")", ":", "raise", "ValueError", "(", "\"activity_threshold must be a number.\"", ")", "if", "not", "is_number", "(", "min_activity_duration", ")", "or", "min_activity_duration", "<", "0", ":", "raise", "ValueError", "(", "\"min_activity_duration must be a positive number\"", ")", "if", "not", "is_number", "(", "initial_search_buffer", ")", "or", "initial_search_buffer", "<", "0", ":", "raise", "ValueError", "(", "\"initial_search_buffer must be a positive number\"", ")", "if", "not", "is_number", "(", "max_gap", ")", "or", "max_gap", "<", "0", ":", "raise", "ValueError", "(", "\"max_gap must be a positive number.\"", ")", "if", "not", "is_number", "(", "initial_pad", ")", "or", "initial_pad", "<", "0", ":", "raise", "ValueError", "(", "\"initial_pad must be a positive number.\"", ")", "effect_args", "=", "[", "]", "if", "normalize", ":", "effect_args", ".", "append", "(", "'norm'", ")", "if", "location", "==", "-", "1", ":", "effect_args", ".", "append", "(", "'reverse'", ")", "effect_args", ".", "extend", "(", "[", "'vad'", ",", "'-t'", ",", "'{:f}'", ".", "format", "(", "activity_threshold", ")", ",", "'-T'", ",", "'{:f}'", ".", "format", "(", "min_activity_duration", ")", ",", "'-s'", ",", "'{:f}'", ".", "format", "(", "initial_search_buffer", ")", ",", "'-g'", ",", "'{:f}'", ".", "format", "(", "max_gap", ")", ",", "'-p'", ",", "'{:f}'", ".", "format", "(", "initial_pad", ")", "]", ")", "if", "location", "==", "-", "1", ":", "effect_args", ".", "append", "(", "'reverse'", ")", "self", ".", "effects", ".", "extend", "(", "effect_args", ")", "self", ".", "effects_log", ".", "append", "(", "'vad'", ")", "return", "self" ]
Voice Activity Detector. Attempts to trim silence and quiet background sounds from the ends of recordings of speech. The algorithm currently uses a simple cepstral power measurement to detect voice, so may be fooled by other things, especially music.

The effect can trim only from the front of the audio, so in order to trim from the back, the reverse effect must also be used.

Parameters
----------
location : 1 or -1, default=1
    If 1, trims silence from the beginning
    If -1, trims silence from the end
normalize : bool, default=True
    If true, normalizes audio before processing.
activity_threshold : float, default=7.0
    The measurement level used to trigger activity detection. This may
    need to be changed depending on the noise level, signal level, and
    other characteristics of the input audio.
min_activity_duration : float, default=0.25
    The time constant (in seconds) used to help ignore short bursts of
    sound.
initial_search_buffer : float, default=1.0
    The amount of audio (in seconds) to search for quieter/shorter bursts
    of audio to include prior to the detected trigger point.
max_gap : float, default=0.25
    The allowed gap (in seconds) between quieter/shorter bursts of audio
    to include prior to the detected trigger point.
initial_pad : float, default=0.0
    The amount of audio (in seconds) to preserve before the trigger point
    and any found quieter/shorter bursts.

See Also
--------
silence

Examples
--------
>>> tfm = sox.Transformer()

Remove silence from the beginning of speech

>>> tfm.vad(initial_pad=0.3)

Remove silence from the end of speech

>>> tfm.vad(location=-1, initial_pad=0.2)
[ "Voice", "Activity", "Detector", ".", "Attempts", "to", "trim", "silence", "and", "quiet", "background", "sounds", "from", "the", "ends", "of", "recordings", "of", "speech", ".", "The", "algorithm", "currently", "uses", "a", "simple", "cepstral", "power", "measurement", "to", "detect", "voice", "so", "may", "be", "fooled", "by", "other", "things", "especially", "music", "." ]
python
valid
intelsdi-x/snap-plugin-lib-py
snap_plugin/v1/metric.py
https://github.com/intelsdi-x/snap-plugin-lib-py/blob/8da5d00ac5f9d2b48a7239563ac7788209891ca4/snap_plugin/v1/metric.py#L243-L277
def data(self): """Metric data Args: value (:obj:`bool` or :obj:`int` or :obj:`long` or :obj:`float` or :obj:`basestring` or :obj:`bytes`) Returns: value Raises: :obj:`TypeError` """ if self._data_type == int: if self._pb.HasField("int64_data"): return self._pb.int64_data if self._pb.HasField("int32_data"): return self._pb.int32_data if self._pb.HasField("uint64_data"): return self._pb.uint64_data if self._pb.HasField("uint32_data"): return self._pb.uint32_data elif self._data_type == float: if self._pb.HasField("float32_data"): return self._pb.float32_data if self._pb.HasField("float64_data"): return self._pb.float64_data elif self._data_type == str: return self._pb.string_data elif self._data_type == bool: return self._pb.bool_data elif self._data_type == bytes: return self._pb.bytes_data return None
[ "def", "data", "(", "self", ")", ":", "if", "self", ".", "_data_type", "==", "int", ":", "if", "self", ".", "_pb", ".", "HasField", "(", "\"int64_data\"", ")", ":", "return", "self", ".", "_pb", ".", "int64_data", "if", "self", ".", "_pb", ".", "HasField", "(", "\"int32_data\"", ")", ":", "return", "self", ".", "_pb", ".", "int32_data", "if", "self", ".", "_pb", ".", "HasField", "(", "\"uint64_data\"", ")", ":", "return", "self", ".", "_pb", ".", "uint64_data", "if", "self", ".", "_pb", ".", "HasField", "(", "\"uint32_data\"", ")", ":", "return", "self", ".", "_pb", ".", "uint32_data", "elif", "self", ".", "_data_type", "==", "float", ":", "if", "self", ".", "_pb", ".", "HasField", "(", "\"float32_data\"", ")", ":", "return", "self", ".", "_pb", ".", "float32_data", "if", "self", ".", "_pb", ".", "HasField", "(", "\"float64_data\"", ")", ":", "return", "self", ".", "_pb", ".", "float64_data", "elif", "self", ".", "_data_type", "==", "str", ":", "return", "self", ".", "_pb", ".", "string_data", "elif", "self", ".", "_data_type", "==", "bool", ":", "return", "self", ".", "_pb", ".", "bool_data", "elif", "self", ".", "_data_type", "==", "bytes", ":", "return", "self", ".", "_pb", ".", "bytes_data", "return", "None" ]
Metric data Args: value (:obj:`bool` or :obj:`int` or :obj:`long` or :obj:`float` or :obj:`basestring` or :obj:`bytes`) Returns: value Raises: :obj:`TypeError`
[ "Metric", "data" ]
python
train
nickmckay/LiPD-utilities
Python/lipd/directory.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/directory.py#L155-L189
def collect_metadata_files(cwd, new_files, existing_files):
    """
    Collect all files from a given path. Separate by file type, and return one list for each type.
    If 'new_files' contains specific files, only those files are collected.
    :param str cwd: Directory w/ target files
    :param list new_files: Specific new files to load
    :param dict existing_files: Files currently loaded, separated by type
    :return list: All files separated by type
    """
    obj = {}
    try:
        os.chdir(cwd)
        # Special case: User uses gui to multi-select 2+ files. You'll be given a list of file paths.
        if new_files:
            for full_path in new_files:
                # Create the file metadata for one file, and append it to the existing files.
                obj = collect_metadata_file(full_path)
        # directory: get all files in the directory and sort by type
        else:
            for file_type in [".lpd", ".xls", ".txt"]:
                # get all files in cwd of this file extension
                files_found = list_files(file_type)
                # if looking for excel files, also look for the alternate extension.
                if file_type == ".xls":
                    files_found += list_files(".xlsx")
                # for each file found, build its metadata and append it to files_by_type
                for file in files_found:
                    fn = os.path.splitext(file)[0]
                    existing_files[file_type].append({"full_path": os.path.join(cwd, file),
                                                      "filename_ext": file,
                                                      "filename_no_ext": fn, "dir": cwd})
    except Exception:
        logger_directory.info("directory: collect_files: there's a problem")
    return obj
[ "def", "collect_metadata_files", "(", "cwd", ",", "new_files", ",", "existing_files", ")", ":", "obj", "=", "{", "}", "try", ":", "os", ".", "chdir", "(", "cwd", ")", "# Special case: User uses gui to mult-select 2+ files. You'll be given a list of file paths.", "if", "new_files", ":", "for", "full_path", "in", "new_files", ":", "# Create the file metadata for one file, and append it to the existing files.", "obj", "=", "collect_metadata_file", "(", "full_path", ")", "# directory: get all files in the directory and sort by type", "else", ":", "for", "file_type", "in", "[", "\".lpd\"", ",", "\".xls\"", ",", "\".txt\"", "]", ":", "# get all files in cwd of this file extension", "files_found", "=", "list_files", "(", "file_type", ")", "# if looking for excel files, also look for the alternate extension.", "if", "file_type", "==", "\".xls\"", ":", "files_found", "+=", "list_files", "(", "\".xlsx\"", ")", "# for each file found, build it's metadata and append it to files_by_type", "for", "file", "in", "files_found", ":", "fn", "=", "os", ".", "path", ".", "splitext", "(", "file", ")", "[", "0", "]", "existing_files", "[", "file_type", "]", ".", "append", "(", "{", "\"full_path\"", ":", "os", ".", "path", ".", "join", "(", "cwd", ",", "file", ")", ",", "\"filename_ext\"", ":", "file", ",", "\"filename_no_ext\"", ":", "fn", ",", "\"dir\"", ":", "cwd", "}", ")", "except", "Exception", ":", "logger_directory", ".", "info", "(", "\"directory: collect_files: there's a problem\"", ")", "return", "obj" ]
Collect all files from a given path. Separate by file type, and return one list for each type.
If 'new_files' contains specific files, only those files are collected.
:param str cwd: Directory w/ target files
:param list new_files: Specific new files to load
:param dict existing_files: Files currently loaded, separated by type
:return list: All files separated by type
[ "Collect", "all", "files", "from", "a", "given", "path", ".", "Separate", "by", "file", "type", "and", "return", "one", "list", "for", "each", "type", "If", "files", "contains", "specific", ":", "param", "str", "cwd", ":", "Directory", "w", "/", "target", "files", ":", "param", "list", "new_files", ":", "Specific", "new", "files", "to", "load", ":", "param", "dict", "existing_files", ":", "Files", "currently", "loaded", "separated", "by", "type", ":", "return", "list", ":", "All", "files", "separated", "by", "type" ]
python
train
getfleety/coralillo
coralillo/hashing.py
https://github.com/getfleety/coralillo/blob/9cac101738a0fa7c1106f129604c00ef703370e1/coralillo/hashing.py#L146-L153
def mask_hash(hash, show=6, char="*"): """ Return the given hash, with only the first ``show`` number shown. The rest are masked with ``char`` for security reasons. """ masked = hash[:show] masked += char * len(hash[show:]) return masked
[ "def", "mask_hash", "(", "hash", ",", "show", "=", "6", ",", "char", "=", "\"*\"", ")", ":", "masked", "=", "hash", "[", ":", "show", "]", "masked", "+=", "char", "*", "len", "(", "hash", "[", "show", ":", "]", ")", "return", "masked" ]
Return the given hash, with only the first ``show`` characters shown. The rest are masked with ``char`` for security reasons.
[ "Return", "the", "given", "hash", "with", "only", "the", "first", "show", "characters", "shown", ".", "The", "rest", "are", "masked", "with", "char", "for", "security", "reasons", "." ]
python
train
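A quick self-contained check of the masking helper, copied verbatim from the entry above:

def mask_hash(hash, show=6, char="*"):
    masked = hash[:show]
    masked += char * len(hash[show:])
    return masked

assert mask_hash('abcdef123456') == 'abcdef******'
assert mask_hash('abcdef123456', show=2, char='#') == 'ab##########'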
adrianliaw/PyCuber
pycuber/solver/cfop/cross.py
https://github.com/adrianliaw/PyCuber/blob/e44b5ba48c831b964ce73d046fb813222771853f/pycuber/solver/cfop/cross.py#L82-L144
def cross_state_value(state): """ Compute the state value of the cross solving search. """ centres, edges = state value = 0 for edge in edges: if "U" in edge: if edge["U"] == centres["D"]["D"]: value += 1 else: value += 2 elif "D" in edge: if edge["D"] != centres["D"]["D"]: value += 3 else: value += 1 edgeposes = {} counts = {f: 0 for f in "LFRB"} ngedges = [] for edge in edges: if "U" in edge and edge["U"] == centres["D"]["D"]: k = "".join(edge.facings.keys()).replace("U", "") edgeposes[k] = edge[k] counts[k] += 1 elif "D" in edge and edge["D"] == centres["D"]["D"]: k = "".join(edge.facings.keys()).replace("D", "") edgeposes[k] = edge[k] counts[k] += 1 elif "U" in edge or "D" in edge: ngedges.append(edge) else: for k, s in edge: if s != centres["D"]["D"]: edgeposes[k] = s counts[k] += 1 break for edge in ngedges: idx = "LFRB".index(edge[centres["D"].colour]) for i in [-1, 1]: if "LFRB"[(idx+1)%4] not in edgeposes: k = "".join(edge.facings.keys()).replace("LFRB"[idx], "") edgeposes["LFRB"[(idx+1)%4]] = edge[k] counts["LFRB"[(idx+1)%4]] += 1 break else: k = "".join(edge.facings.keys()).replace("LFRB"[idx], "") if counts["LFRB"[(idx-1)%4]] > counts["LFRB"[(idx+1)%4]]: edgeposes["LFRB"[(idx-1)%4]] = edge[k] else: edgeposes["LFRB"[(idx+1)%4]] = edge[k] relative_pos = {f: centres[f][f] for f in "LFRB"} if len(edgeposes) == 4: for i in range(4): edgeposes["L"], edgeposes["F"], edgeposes["R"], edgeposes["B"] = \ edgeposes["F"], edgeposes["R"], edgeposes["B"], edgeposes["L"] if edgeposes == relative_pos: break else: value += 5 else: value += 3 return value
[ "def", "cross_state_value", "(", "state", ")", ":", "centres", ",", "edges", "=", "state", "value", "=", "0", "for", "edge", "in", "edges", ":", "if", "\"U\"", "in", "edge", ":", "if", "edge", "[", "\"U\"", "]", "==", "centres", "[", "\"D\"", "]", "[", "\"D\"", "]", ":", "value", "+=", "1", "else", ":", "value", "+=", "2", "elif", "\"D\"", "in", "edge", ":", "if", "edge", "[", "\"D\"", "]", "!=", "centres", "[", "\"D\"", "]", "[", "\"D\"", "]", ":", "value", "+=", "3", "else", ":", "value", "+=", "1", "edgeposes", "=", "{", "}", "counts", "=", "{", "f", ":", "0", "for", "f", "in", "\"LFRB\"", "}", "ngedges", "=", "[", "]", "for", "edge", "in", "edges", ":", "if", "\"U\"", "in", "edge", "and", "edge", "[", "\"U\"", "]", "==", "centres", "[", "\"D\"", "]", "[", "\"D\"", "]", ":", "k", "=", "\"\"", ".", "join", "(", "edge", ".", "facings", ".", "keys", "(", ")", ")", ".", "replace", "(", "\"U\"", ",", "\"\"", ")", "edgeposes", "[", "k", "]", "=", "edge", "[", "k", "]", "counts", "[", "k", "]", "+=", "1", "elif", "\"D\"", "in", "edge", "and", "edge", "[", "\"D\"", "]", "==", "centres", "[", "\"D\"", "]", "[", "\"D\"", "]", ":", "k", "=", "\"\"", ".", "join", "(", "edge", ".", "facings", ".", "keys", "(", ")", ")", ".", "replace", "(", "\"D\"", ",", "\"\"", ")", "edgeposes", "[", "k", "]", "=", "edge", "[", "k", "]", "counts", "[", "k", "]", "+=", "1", "elif", "\"U\"", "in", "edge", "or", "\"D\"", "in", "edge", ":", "ngedges", ".", "append", "(", "edge", ")", "else", ":", "for", "k", ",", "s", "in", "edge", ":", "if", "s", "!=", "centres", "[", "\"D\"", "]", "[", "\"D\"", "]", ":", "edgeposes", "[", "k", "]", "=", "s", "counts", "[", "k", "]", "+=", "1", "break", "for", "edge", "in", "ngedges", ":", "idx", "=", "\"LFRB\"", ".", "index", "(", "edge", "[", "centres", "[", "\"D\"", "]", ".", "colour", "]", ")", "for", "i", "in", "[", "-", "1", ",", "1", "]", ":", "if", "\"LFRB\"", "[", "(", "idx", "+", "1", ")", "%", "4", "]", "not", "in", "edgeposes", ":", "k", "=", "\"\"", ".", "join", "(", "edge", ".", "facings", ".", "keys", "(", ")", ")", ".", "replace", "(", "\"LFRB\"", "[", "idx", "]", ",", "\"\"", ")", "edgeposes", "[", "\"LFRB\"", "[", "(", "idx", "+", "1", ")", "%", "4", "]", "]", "=", "edge", "[", "k", "]", "counts", "[", "\"LFRB\"", "[", "(", "idx", "+", "1", ")", "%", "4", "]", "]", "+=", "1", "break", "else", ":", "k", "=", "\"\"", ".", "join", "(", "edge", ".", "facings", ".", "keys", "(", ")", ")", ".", "replace", "(", "\"LFRB\"", "[", "idx", "]", ",", "\"\"", ")", "if", "counts", "[", "\"LFRB\"", "[", "(", "idx", "-", "1", ")", "%", "4", "]", "]", ">", "counts", "[", "\"LFRB\"", "[", "(", "idx", "+", "1", ")", "%", "4", "]", "]", ":", "edgeposes", "[", "\"LFRB\"", "[", "(", "idx", "-", "1", ")", "%", "4", "]", "]", "=", "edge", "[", "k", "]", "else", ":", "edgeposes", "[", "\"LFRB\"", "[", "(", "idx", "+", "1", ")", "%", "4", "]", "]", "=", "edge", "[", "k", "]", "relative_pos", "=", "{", "f", ":", "centres", "[", "f", "]", "[", "f", "]", "for", "f", "in", "\"LFRB\"", "}", "if", "len", "(", "edgeposes", ")", "==", "4", ":", "for", "i", "in", "range", "(", "4", ")", ":", "edgeposes", "[", "\"L\"", "]", ",", "edgeposes", "[", "\"F\"", "]", ",", "edgeposes", "[", "\"R\"", "]", ",", "edgeposes", "[", "\"B\"", "]", "=", "edgeposes", "[", "\"F\"", "]", ",", "edgeposes", "[", "\"R\"", "]", ",", "edgeposes", "[", "\"B\"", "]", ",", "edgeposes", "[", "\"L\"", "]", "if", "edgeposes", "==", "relative_pos", ":", "break", "else", ":", "value", "+=", "5", "else", ":", "value", "+=", "3", 
"return", "value" ]
Compute the state value of the cross solving search.
[ "Compute", "the", "state", "value", "of", "the", "cross", "solving", "search", "." ]
python
train
saltstack/salt
salt/modules/nilrt_ip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nilrt_ip.py#L85-L94
def _get_state(): ''' Returns the state of connman ''' try: return pyconnman.ConnManager().get_property('State') except KeyError: return 'offline' except dbus.DBusException as exc: raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc))
[ "def", "_get_state", "(", ")", ":", "try", ":", "return", "pyconnman", ".", "ConnManager", "(", ")", ".", "get_property", "(", "'State'", ")", "except", "KeyError", ":", "return", "'offline'", "except", "dbus", ".", "DBusException", "as", "exc", ":", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "'Connman daemon error: {0}'", ".", "format", "(", "exc", ")", ")" ]
Returns the state of connman
[ "Returns", "the", "state", "of", "connman" ]
python
train
spyder-ide/spyder-notebook
spyder_notebook/widgets/client.py
https://github.com/spyder-ide/spyder-notebook/blob/54e626b9d2a3fccd3e4625b0f97fe06e5bb1a6db/spyder_notebook/widgets/client.py#L174-L194
def register(self, server_info): """Register attributes that can be computed with the server info.""" # Path relative to the server directory self.path = os.path.relpath(self.filename, start=server_info['notebook_dir']) # Replace backslashes on Windows if os.name == 'nt': self.path = self.path.replace('\\', '/') # Server url to send requests to self.server_url = server_info['url'] # Server token self.token = server_info['token'] url = url_path_join(self.server_url, 'notebooks', url_escape(self.path)) # Set file url to load this notebook self.file_url = self.add_token(url)
[ "def", "register", "(", "self", ",", "server_info", ")", ":", "# Path relative to the server directory", "self", ".", "path", "=", "os", ".", "path", ".", "relpath", "(", "self", ".", "filename", ",", "start", "=", "server_info", "[", "'notebook_dir'", "]", ")", "# Replace backslashes on Windows", "if", "os", ".", "name", "==", "'nt'", ":", "self", ".", "path", "=", "self", ".", "path", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "# Server url to send requests to", "self", ".", "server_url", "=", "server_info", "[", "'url'", "]", "# Server token", "self", ".", "token", "=", "server_info", "[", "'token'", "]", "url", "=", "url_path_join", "(", "self", ".", "server_url", ",", "'notebooks'", ",", "url_escape", "(", "self", ".", "path", ")", ")", "# Set file url to load this notebook", "self", ".", "file_url", "=", "self", ".", "add_token", "(", "url", ")" ]
Register attributes that can be computed with the server info.
[ "Register", "attributes", "that", "can", "be", "computed", "with", "the", "server", "info", "." ]
python
train
zblz/naima
naima/plot.py
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/plot.py#L1282-L1376
def plot_data(
    input_data,
    xlabel=None,
    ylabel=None,
    sed=True,
    figure=None,
    e_unit=None,
    ulim_opts={},
    errorbar_opts={},
):
    """
    Plot spectral data.

    Parameters
    ----------
    input_data : `emcee.EnsembleSampler`, `astropy.table.Table`, or `dict`
        Spectral data to plot. Can be given as a data table, a dict generated
        with `validate_data_table` or a `emcee.EnsembleSampler` with a data
        property.
    xlabel : str, optional
        Label for the ``x`` axis of the plot.
    ylabel : str, optional
        Label for the ``y`` axis of the plot.
    sed : bool, optional
        Whether to plot SED or differential spectrum.
    figure : `matplotlib.figure.Figure`, optional
        `matplotlib` figure to plot on. If omitted a new one will be
        generated.
    e_unit : `astropy.unit.Unit`, optional
        Units for energy axis. Defaults to those of the data.
    ulim_opts : dict
        Options for upper-limit plotting. Available options are capsize
        (arrow width) and height_fraction (arrow length in fraction of flux
        value).
    errorbar_opts : dict
        Additional options to pass to `matplotlib.plt.errorbar` for plotting
        the spectral flux points.
    """
    import matplotlib.pyplot as plt

    try:
        data = validate_data_table(input_data)
    except TypeError as exc:
        if hasattr(input_data, "data"):
            data = input_data.data
        elif isinstance(input_data, dict) and "energy" in input_data.keys():
            data = input_data
        else:
            log.warning(
                "input_data format unknown, no plotting data! "
                "Data loading exception: {}".format(exc)
            )
            raise

    if figure is None:
        f = plt.figure()
    else:
        f = figure

    if len(f.axes) > 0:
        ax1 = f.axes[0]
    else:
        ax1 = f.add_subplot(111)

    # try to get units from previous plot in figure
    try:
        old_e_unit = u.Unit(ax1.get_xlabel().split("[")[-1].split("]")[0])
    except ValueError:
        old_e_unit = u.Unit("")

    if e_unit is None and old_e_unit.physical_type == "energy":
        e_unit = old_e_unit
    elif e_unit is None:
        e_unit = data["energy"].unit

    _plot_data_to_ax(
        data,
        ax1,
        e_unit=e_unit,
        sed=sed,
        ylabel=ylabel,
        ulim_opts=ulim_opts,
        errorbar_opts=errorbar_opts,
    )

    if xlabel is not None:
        ax1.set_xlabel(xlabel)
    elif xlabel is None and ax1.get_xlabel() == "":
        ax1.set_xlabel(
            r"$\mathrm{Energy}$"
            + " [{0}]".format(e_unit.to_string("latex_inline"))
        )

    ax1.autoscale()

    return f
[ "def", "plot_data", "(", "input_data", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "sed", "=", "True", ",", "figure", "=", "None", ",", "e_unit", "=", "None", ",", "ulim_opts", "=", "{", "}", ",", "errorbar_opts", "=", "{", "}", ",", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "try", ":", "data", "=", "validate_data_table", "(", "input_data", ")", "except", "TypeError", "as", "exc", ":", "if", "hasattr", "(", "input_data", ",", "\"data\"", ")", ":", "data", "=", "input_data", ".", "data", "elif", "isinstance", "(", "input_data", ",", "dict", ")", "and", "\"energy\"", "in", "input_data", ".", "keys", "(", ")", ":", "data", "=", "input_data", "else", ":", "log", ".", "warning", "(", "\"input_data format unknown, no plotting data! \"", "\"Data loading exception: {}\"", ".", "format", "(", "exc", ")", ")", "raise", "if", "figure", "is", "None", ":", "f", "=", "plt", ".", "figure", "(", ")", "else", ":", "f", "=", "figure", "if", "len", "(", "f", ".", "axes", ")", ">", "0", ":", "ax1", "=", "f", ".", "axes", "[", "0", "]", "else", ":", "ax1", "=", "f", ".", "add_subplot", "(", "111", ")", "# try to get units from previous plot in figure", "try", ":", "old_e_unit", "=", "u", ".", "Unit", "(", "ax1", ".", "get_xlabel", "(", ")", ".", "split", "(", "\"[\"", ")", "[", "-", "1", "]", ".", "split", "(", "\"]\"", ")", "[", "0", "]", ")", "except", "ValueError", ":", "old_e_unit", "=", "u", ".", "Unit", "(", "\"\"", ")", "if", "e_unit", "is", "None", "and", "old_e_unit", ".", "physical_type", "==", "\"energy\"", ":", "e_unit", "=", "old_e_unit", "elif", "e_unit", "is", "None", ":", "e_unit", "=", "data", "[", "\"energy\"", "]", ".", "unit", "_plot_data_to_ax", "(", "data", ",", "ax1", ",", "e_unit", "=", "e_unit", ",", "sed", "=", "sed", ",", "ylabel", "=", "ylabel", ",", "ulim_opts", "=", "ulim_opts", ",", "errorbar_opts", "=", "errorbar_opts", ",", ")", "if", "xlabel", "is", "not", "None", ":", "ax1", ".", "set_xlabel", "(", "xlabel", ")", "elif", "xlabel", "is", "None", "and", "ax1", ".", "get_xlabel", "(", ")", "==", "\"\"", ":", "ax1", ".", "set_xlabel", "(", "r\"$\\mathrm{Energy}$\"", "+", "\" [{0}]\"", ".", "format", "(", "e_unit", ".", "to_string", "(", "\"latex_inline\"", ")", ")", ")", "ax1", ".", "autoscale", "(", ")", "return", "f" ]
Plot spectral data. Parameters ---------- input_data : `emcee.EnsembleSampler`, `astropy.table.Table`, or `dict` Spectral data to plot. Can be given as a data table, a dict generated with `validate_data_table` or a `emcee.EnsembleSampler` with a data property. xlabel : str, optional Label for the ``x`` axis of the plot. ylabel : str, optional Label for the ``y`` axis of the plot. sed : bool, optional Whether to plot SED or differential spectrum. figure : `matplotlib.figure.Figure`, optional `matplotlib` figure to plot on. If omitted a new one will be generated. e_unit : `astropy.unit.Unit`, optional Units for energy axis. Defaults to those of the data. ulim_opts : dict Options for upper-limit plotting. Available options are capsize (arrow width) and height_fraction (arrow length in fraction of flux value). errorbar_opts : dict Additional options to pass to `matplotlib.pyplot.errorbar` for plotting the spectral flux points.
[ "Plot", "spectral", "data", "." ]
python
train
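A hedged usage sketch for `plot_data`; the column names follow naima's documented data-table convention, but treat the exact schema `validate_data_table` accepts as an assumption.

import astropy.units as u
from astropy.table import Table
from naima.plot import plot_data

# Hypothetical three-point spectrum in naima's table format
data = Table({'energy': [1, 10, 100] * u.TeV,
              'flux': [1e-11, 3e-12, 5e-13] * u.Unit('1/(cm2 s TeV)'),
              'flux_error': [1e-12, 3e-13, 5e-14] * u.Unit('1/(cm2 s TeV)')})
fig = plot_data(data, sed=True, e_unit=u.GeV)
fig.savefig('spectrum.png')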
rlisagor/freshen
freshen/stepregistry.py
https://github.com/rlisagor/freshen/blob/5578f7368e8d53b4cf51c589fb192090d3524968/freshen/stepregistry.py#L250-L263
def hook_decorator(cb_type): """ Decorator to wrap hook definitions in. Registers hook. """ def decorator_wrapper(*tags_or_func): if len(tags_or_func) == 1 and callable(tags_or_func[0]): # No tags were passed to this decorator func = tags_or_func[0] return HookImpl(cb_type, func) else: # We got some tags, so we need to produce the real decorator tags = tags_or_func def d(func): return HookImpl(cb_type, func, tags) return d return decorator_wrapper
[ "def", "hook_decorator", "(", "cb_type", ")", ":", "def", "decorator_wrapper", "(", "*", "tags_or_func", ")", ":", "if", "len", "(", "tags_or_func", ")", "==", "1", "and", "callable", "(", "tags_or_func", "[", "0", "]", ")", ":", "# No tags were passed to this decorator", "func", "=", "tags_or_func", "[", "0", "]", "return", "HookImpl", "(", "cb_type", ",", "func", ")", "else", ":", "# We got some tags, so we need to produce the real decorator", "tags", "=", "tags_or_func", "def", "d", "(", "func", ")", ":", "return", "HookImpl", "(", "cb_type", ",", "func", ",", "tags", ")", "return", "d", "return", "decorator_wrapper" ]
Decorator to wrap hook definitions in. Registers hook.
[ "Decorator", "to", "wrap", "hook", "definitions", "in", ".", "Registers", "hook", "." ]
python
train
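A self-contained sketch of the dual calling convention this factory enables; `HookImpl` is stubbed here, since only the decorator mechanics are being illustrated.

class HookImpl:  # minimal stand-in for freshen's HookImpl
    def __init__(self, cb_type, func, tags=()):
        self.cb_type, self.func, self.tags = cb_type, func, tuple(tags)

def hook_decorator(cb_type):
    def decorator_wrapper(*tags_or_func):
        if len(tags_or_func) == 1 and callable(tags_or_func[0]):
            return HookImpl(cb_type, tags_or_func[0])      # bare use: @Before
        def d(func):
            return HookImpl(cb_type, func, tags_or_func)   # tagged use: @Before('db')
        return d
    return decorator_wrapper

Before = hook_decorator('before')

@Before
def plain(): pass

@Before('db', 'slow')
def tagged(): pass

assert plain.tags == () and tagged.tags == ('db', 'slow')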
pennersr/django-allauth
allauth/account/adapter.py
https://github.com/pennersr/django-allauth/blob/f70cb3d622f992f15fe9b57098e0b328445b664e/allauth/account/adapter.py#L139-L153
def get_login_redirect_url(self, request): """ Returns the default URL to redirect to after logging in. Note that URLs passed explicitly (e.g. by passing along a `next` GET parameter) take precedence over the value returned here. """ assert request.user.is_authenticated url = getattr(settings, "LOGIN_REDIRECT_URLNAME", None) if url: warnings.warn("LOGIN_REDIRECT_URLNAME is deprecated, simply" " use LOGIN_REDIRECT_URL with a URL name", DeprecationWarning) else: url = settings.LOGIN_REDIRECT_URL return resolve_url(url)
[ "def", "get_login_redirect_url", "(", "self", ",", "request", ")", ":", "assert", "request", ".", "user", ".", "is_authenticated", "url", "=", "getattr", "(", "settings", ",", "\"LOGIN_REDIRECT_URLNAME\"", ",", "None", ")", "if", "url", ":", "warnings", ".", "warn", "(", "\"LOGIN_REDIRECT_URLNAME is deprecated, simply\"", "\" use LOGIN_REDIRECT_URL with a URL name\"", ",", "DeprecationWarning", ")", "else", ":", "url", "=", "settings", ".", "LOGIN_REDIRECT_URL", "return", "resolve_url", "(", "url", ")" ]
Returns the default URL to redirect to after logging in. Note that URLs passed explicitly (e.g. by passing along a `next` GET parameter) take precedence over the value returned here.
[ "Returns", "the", "default", "URL", "to", "redirect", "to", "after", "logging", "in", ".", "Note", "that", "URLs", "passed", "explicitly", "(", "e", ".", "g", ".", "by", "passing", "along", "a", "next", "GET", "parameter", ")", "take", "precedence", "over", "the", "value", "returned", "here", "." ]
python
train
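A self-contained sketch of the deprecated-setting fallback used above, with `django.conf.settings` replaced by a plain stand-in object; the final `resolve_url` step is omitted since it needs a configured Django project.

import warnings

def login_redirect_url(settings):
    # Prefer the deprecated name if set (and warn), otherwise the current setting
    url = getattr(settings, 'LOGIN_REDIRECT_URLNAME', None)
    if url:
        warnings.warn('LOGIN_REDIRECT_URLNAME is deprecated, simply use '
                      'LOGIN_REDIRECT_URL with a URL name', DeprecationWarning)
    else:
        url = settings.LOGIN_REDIRECT_URL
    return url

class FakeSettings:  # hypothetical stand-in for django.conf.settings
    LOGIN_REDIRECT_URL = '/dashboard/'

assert login_redirect_url(FakeSettings()) == '/dashboard/'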
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L316-L321
def parameters_to_datetime(self, p): """ Given a dictionary of parameters, will extract the ranged task parameter value """ dt = p[self._param_name] return datetime(dt.year, dt.month, dt.day)
[ "def", "parameters_to_datetime", "(", "self", ",", "p", ")", ":", "dt", "=", "p", "[", "self", ".", "_param_name", "]", "return", "datetime", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")" ]
Given a dictionary of parameters, will extract the ranged task parameter value
[ "Given", "a", "dictionary", "of", "parameters", "will", "extract", "the", "ranged", "task", "parameter", "value" ]
python
train
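A minimal sketch of the day-level truncation this helper performs; the parameter name and dict are hypothetical stand-ins for luigi's internals.

from datetime import date, datetime

def parameters_to_datetime(p, param_name='date'):
    # Pull the ranged parameter and drop any sub-day precision
    dt = p[param_name]
    return datetime(dt.year, dt.month, dt.day)

assert parameters_to_datetime({'date': date(2024, 5, 17)}) == datetime(2024, 5, 17)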
fossasia/knittingpattern
knittingpattern/convert/KnittingPatternToSVG.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/convert/KnittingPatternToSVG.py#L74-L97
def _register_instruction_in_defs(self, instruction): """Create a definition for the instruction. :return: the id of a symbol in the defs for the specified :paramref:`instruction` :rtype: str If no symbol yet exists in the defs for the :paramref:`instruction` a symbol is created and saved using :meth:`_make_definition`. """ type_ = instruction.type color_ = instruction.color instruction_to_svg_dict = \ self._instruction_to_svg.instruction_to_svg_dict instruction_id = "{}:{}".format(type_, color_) defs_id = instruction_id + ":defs" if instruction_id not in self._instruction_type_color_to_symbol: svg_dict = instruction_to_svg_dict(instruction) self._compute_scale(instruction_id, svg_dict) symbol = self._make_definition(svg_dict, instruction_id) self._instruction_type_color_to_symbol[defs_id] = \ symbol[DEFINITION_HOLDER].pop("defs", {}) self._instruction_type_color_to_symbol[instruction_id] = symbol return instruction_id
[ "def", "_register_instruction_in_defs", "(", "self", ",", "instruction", ")", ":", "type_", "=", "instruction", ".", "type", "color_", "=", "instruction", ".", "color", "instruction_to_svg_dict", "=", "self", ".", "_instruction_to_svg", ".", "instruction_to_svg_dict", "instruction_id", "=", "\"{}:{}\"", ".", "format", "(", "type_", ",", "color_", ")", "defs_id", "=", "instruction_id", "+", "\":defs\"", "if", "instruction_id", "not", "in", "self", ".", "_instruction_type_color_to_symbol", ":", "svg_dict", "=", "instruction_to_svg_dict", "(", "instruction", ")", "self", ".", "_compute_scale", "(", "instruction_id", ",", "svg_dict", ")", "symbol", "=", "self", ".", "_make_definition", "(", "svg_dict", ",", "instruction_id", ")", "self", ".", "_instruction_type_color_to_symbol", "[", "defs_id", "]", "=", "symbol", "[", "DEFINITION_HOLDER", "]", ".", "pop", "(", "\"defs\"", ",", "{", "}", ")", "self", ".", "_instruction_type_color_to_symbol", "[", "instruction_id", "]", "=", "symbol", "return", "instruction_id" ]
Create a definition for the instruction. :return: the id of a symbol in the defs for the specified :paramref:`instruction` :rtype: str If no symbol yet exists in the defs for the :paramref:`instruction` a symbol is created and saved using :meth:`_make_definition`.
[ "Create", "a", "definition", "for", "the", "instruction", "." ]
python
valid
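A hedged sketch of the memoization pattern above, with the SVG-specific helpers stubbed out; only the `type:color` cache-key scheme is taken from the method, the rest is a stand-in.

class DefsRegistry:
    def __init__(self, make_definition):
        # make_definition is an assumed factory, e.g. one that builds an SVG symbol
        self._make_definition = make_definition
        self._cache = {}

    def register(self, type_, color):
        key = '{}:{}'.format(type_, color)
        if key not in self._cache:
            # Build the definition only once per (type, color) pair
            self._cache[key] = self._make_definition(key)
        return key

reg = DefsRegistry(lambda key: {'id': key})
assert reg.register('knit', 'red') == reg.register('knit', 'red') == 'knit:red'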
Seeed-Studio/wio-cli
wio/commands/cmd_state.py
https://github.com/Seeed-Studio/wio-cli/blob/ce83f4c2d30be7f72d1a128acd123dfc5effa563/wio/commands/cmd_state.py#L6-L34
def cli(wio): ''' Login state. \b DOES: Display login email, token, server url. \b USE: wio state ''' user_token = wio.config.get("token", None) mserver_url = wio.config.get("mserver", None) if not mserver_url or not user_token: click.echo(click.style('>> ', fg='red') + "Please login, use " + click.style("wio login", fg='green')) return email = wio.config.get("email",None) server = wio.config.get("server",None) token = wio.config.get("token",None) click.secho('> ', fg='green', nl=False) click.echo("server: " + click.style(server, fg='green', bold=True) + ', ' + click.style(mserver_url, fg='green', bold=True)) click.secho('> ', fg='green', nl=False) click.echo("email: " + click.style(email, fg='green', bold=True)) click.secho('> ', fg='green', nl=False) click.echo("token: " + click.style(token, fg='green', bold=True))
[ "def", "cli", "(", "wio", ")", ":", "user_token", "=", "wio", ".", "config", ".", "get", "(", "\"token\"", ",", "None", ")", "mserver_url", "=", "wio", ".", "config", ".", "get", "(", "\"mserver\"", ",", "None", ")", "if", "not", "mserver_url", "or", "not", "user_token", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "'>> '", ",", "fg", "=", "'red'", ")", "+", "\"Please login, use \"", "+", "click", ".", "style", "(", "\"wio login\"", ",", "fg", "=", "'green'", ")", ")", "return", "email", "=", "wio", ".", "config", ".", "get", "(", "\"email\"", ",", "None", ")", "server", "=", "wio", ".", "config", ".", "get", "(", "\"server\"", ",", "None", ")", "token", "=", "wio", ".", "config", ".", "get", "(", "\"token\"", ",", "None", ")", "click", ".", "secho", "(", "'> '", ",", "fg", "=", "'green'", ",", "nl", "=", "False", ")", "click", ".", "echo", "(", "\"server: \"", "+", "click", ".", "style", "(", "server", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", "+", "', '", "+", "click", ".", "style", "(", "mserver_url", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", ")", "click", ".", "secho", "(", "'> '", ",", "fg", "=", "'green'", ",", "nl", "=", "False", ")", "click", ".", "echo", "(", "\"email: \"", "+", "click", ".", "style", "(", "email", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", ")", "click", ".", "secho", "(", "'> '", ",", "fg", "=", "'green'", ",", "nl", "=", "False", ")", "click", ".", "echo", "(", "\"token: \"", "+", "click", ".", "style", "(", "token", ",", "fg", "=", "'green'", ",", "bold", "=", "True", ")", ")" ]
Login state. \b DOES: Display login email, token, server url. \b USE: wio state
[ "Login", "state", "." ]
python
train
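A self-contained sketch of the guard-then-report flow of this command; the config is a plain dict here rather than wio's config object, and the values are hypothetical.

import click

def state(config):
    # Refuse to report anything until both server url and token exist
    if not config.get('mserver') or not config.get('token'):
        click.echo(click.style('>> ', fg='red') + 'Please login, use ' +
                   click.style('wio login', fg='green'))
        return
    for key in ('server', 'email', 'token'):
        click.secho('> ', fg='green', nl=False)
        click.echo('{}: '.format(key) +
                   click.style(str(config.get(key)), fg='green', bold=True))

state({'email': 'user@example.com', 'server': 'Global',
       'mserver': 'https://us.wio.seeed.io', 'token': 'abc123'})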
PmagPy/PmagPy
pmagpy/pmagplotlib.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmagplotlib.py#L1637-L1666
def plot_evec(fignum, Vs, symsize, title): """ plots eigenvector directions of S vectors Parameters ---------- fignum : matplotlib figure number Vs : nested list of eigenvectors symsize : size in pts for symbol title : title for plot """ # plt.figure(num=fignum) plt.text(-1.1, 1.15, title) # plot V1s as squares, V2s as triangles and V3s as circles symb, symkey = ['s', 'v', 'o'], 0 col = ['r', 'b', 'k'] # plot V1s rec, V2s blue, V3s black for VEC in range(3): X, Y = [], [] for Vdirs in Vs: # # # plot the V1 data first # XY = pmag.dimap(Vdirs[VEC][0], Vdirs[VEC][1]) X.append(XY[0]) Y.append(XY[1]) plt.scatter(X, Y, s=symsize, marker=symb[VEC], c=col[VEC], edgecolors='none') plt.axis("equal")
[ "def", "plot_evec", "(", "fignum", ",", "Vs", ",", "symsize", ",", "title", ")", ":", "#", "plt", ".", "figure", "(", "num", "=", "fignum", ")", "plt", ".", "text", "(", "-", "1.1", ",", "1.15", ",", "title", ")", "# plot V1s as squares, V2s as triangles and V3s as circles", "symb", ",", "symkey", "=", "[", "'s'", ",", "'v'", ",", "'o'", "]", ",", "0", "col", "=", "[", "'r'", ",", "'b'", ",", "'k'", "]", "# plot V1s rec, V2s blue, V3s black", "for", "VEC", "in", "range", "(", "3", ")", ":", "X", ",", "Y", "=", "[", "]", ",", "[", "]", "for", "Vdirs", "in", "Vs", ":", "#", "#", "# plot the V1 data first", "#", "XY", "=", "pmag", ".", "dimap", "(", "Vdirs", "[", "VEC", "]", "[", "0", "]", ",", "Vdirs", "[", "VEC", "]", "[", "1", "]", ")", "X", ".", "append", "(", "XY", "[", "0", "]", ")", "Y", ".", "append", "(", "XY", "[", "1", "]", ")", "plt", ".", "scatter", "(", "X", ",", "Y", ",", "s", "=", "symsize", ",", "marker", "=", "symb", "[", "VEC", "]", ",", "c", "=", "col", "[", "VEC", "]", ",", "edgecolors", "=", "'none'", ")", "plt", ".", "axis", "(", "\"equal\"", ")" ]
plots eigenvector directions of S vectors Parameters ---------- fignum : matplotlib figure number Vs : nested list of eigenvectors symsize : size in pts for symbol title : title for plot
[ "plots", "eigenvector", "directions", "of", "S", "vectors" ]
python
train
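A hedged sketch of the marker/color grouping used above, with `pmag.dimap` replaced by a trivial identity projection since its exact equal-area mapping is not shown here.

import matplotlib.pyplot as plt

def plot_eigvec_dirs(Vs, symsize=40, title='eigenvectors'):
    plt.text(-1.1, 1.15, title)
    symb = ['s', 'v', 'o']   # squares, triangles, circles for V1, V2, V3
    col = ['r', 'b', 'k']    # red, blue, black
    for vec in range(3):
        xs = [v[vec][0] for v in Vs]  # stand-in for the pmag.dimap(dec, inc) projection
        ys = [v[vec][1] for v in Vs]
        plt.scatter(xs, ys, s=symsize, marker=symb[vec], c=col[vec], edgecolors='none')
    plt.axis('equal')

plot_eigvec_dirs([[(0.1, 0.2), (0.3, -0.1), (-0.2, 0.4)]])
plt.savefig('evec.png')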
slundberg/shap
shap/benchmark/models.py
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/models.py#L133-L141
def cric__lasso(): """ Lasso Regression """ model = sklearn.linear_model.LogisticRegression(penalty="l1", C=0.002) # we want to explain the raw probability outputs of the model model.predict = lambda X: model.predict_proba(X)[:,1] return model
[ "def", "cric__lasso", "(", ")", ":", "model", "=", "sklearn", ".", "linear_model", ".", "LogisticRegression", "(", "penalty", "=", "\"l1\"", ",", "C", "=", "0.002", ")", "# we want to explain the raw probability outputs of the trees", "model", ".", "predict", "=", "lambda", "X", ":", "model", ".", "predict_proba", "(", "X", ")", "[", ":", ",", "1", "]", "return", "model" ]
Lasso Regression
[ "Lasso", "Regression" ]
python
train
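A hedged sketch of the `predict` monkey-patch above on synthetic data; `solver='liblinear'` is added because recent scikit-learn defaults reject an L1 penalty, an assumption about the intended solver.

import numpy as np
import sklearn.linear_model

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
y = (X[:, 0] > 0).astype(int)

model = sklearn.linear_model.LogisticRegression(penalty='l1', C=0.002, solver='liblinear')
model.fit(X, y)
# Rebind predict so downstream explainers see class-1 probabilities, not hard labels
model.predict = lambda X: model.predict_proba(X)[:, 1]
print(model.predict(X[:3]))  # three floats in [0, 1]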
materialsproject/pymatgen
pymatgen/io/feff/inputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/feff/inputs.py#L925-L941
def get_absorbing_atom_symbol_index(absorbing_atom, structure): """ Return the absorbing atom symbol and site index in the given structure. Args: absorbing_atom (str/int): symbol or site index structure (Structure) Returns: str, int: symbol and site index """ if isinstance(absorbing_atom, str): return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0] elif isinstance(absorbing_atom, int): return str(structure[absorbing_atom].specie), absorbing_atom else: raise ValueError("absorbing_atom must be either specie symbol or site index")
[ "def", "get_absorbing_atom_symbol_index", "(", "absorbing_atom", ",", "structure", ")", ":", "if", "isinstance", "(", "absorbing_atom", ",", "str", ")", ":", "return", "absorbing_atom", ",", "structure", ".", "indices_from_symbol", "(", "absorbing_atom", ")", "[", "0", "]", "elif", "isinstance", "(", "absorbing_atom", ",", "int", ")", ":", "return", "str", "(", "structure", "[", "absorbing_atom", "]", ".", "specie", ")", ",", "absorbing_atom", "else", ":", "raise", "ValueError", "(", "\"absorbing_atom must be either specie symbol or site index\"", ")" ]
Return the absorbing atom symbol and site index in the given structure. Args: absorbing_atom (str/int): symbol or site index structure (Structure) Returns: str, int: symbol and site index
[ "Return", "the", "absorbing", "atom", "symboll", "and", "site", "index", "in", "the", "given", "structure", "." ]
python
train
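A minimal, structure-free sketch of the symbol-or-index dispatch; the pymatgen `Structure` is replaced by a plain list of species strings, an assumption for illustration only.

def absorber_symbol_index(absorbing_atom, species):
    # species: hypothetical list of element symbols standing in for a Structure
    if isinstance(absorbing_atom, str):
        return absorbing_atom, species.index(absorbing_atom)
    elif isinstance(absorbing_atom, int):
        return species[absorbing_atom], absorbing_atom
    raise ValueError('absorbing_atom must be either specie symbol or site index')

assert absorber_symbol_index('O', ['Fe', 'O', 'O']) == ('O', 1)
assert absorber_symbol_index(0, ['Fe', 'O', 'O']) == ('Fe', 0)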
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiV4As.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiV4As.py#L36-L49
def search(self, **kwargs): """ Method to search asns based on extended search. :param search: Dict containing QuerySets to find asns. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing asns """ return super(ApiV4As, self).get(self.prepare_url( 'api/v4/as/', kwargs))
[ "def", "search", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "ApiV4As", ",", "self", ")", ".", "get", "(", "self", ".", "prepare_url", "(", "'api/v4/as/'", ",", "kwargs", ")", ")" ]
Method to search asns based on extended search. :param search: Dict containing QuerySets to find asns. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing asns
[ "Method", "to", "search", "asns", "based", "on", "extends", "search", "." ]
python
train
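A hedged sketch of how the documented kwargs might be serialized into the request URL; the actual `prepare_url` is inherited from the client's base class and not shown here, so the JSON-plus-querystring encoding is an assumption.

import json
from urllib.parse import urlencode

def prepare_url(base, kwargs):
    # Encode dict/list parameters (search, include, exclude, fields) as JSON strings
    params = {k: json.dumps(v) if isinstance(v, (dict, list)) else v
              for k, v in kwargs.items()}
    return base + '?' + urlencode(params) if params else base

url = prepare_url('api/v4/as/', {
    'search': {'asn': [{'name': 'AS65000'}]},  # hypothetical QuerySet-style filter
    'kind': 'basic',
})
print(url)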