Dataset schema (column, dtype, observed range):

    repo              string    length 7-55
    path              string    length 4-223
    url               string    length 87-315
    code              string    length 75-104k
    code_tokens       list
    docstring         string    length 1-46.9k
    docstring_tokens  list
    language          string    1 class (python)
    partition         string    3 classes (train/valid/test)
    avg_line_len      float64   7.91-980
repo: Capitains/MyCapytain
path: MyCapytain/common/utils/_json_ld.py
url: https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/utils/_json_ld.py#L4-L19
code:

def literal_to_dict(value):
    """ Transform an object value into a dict readable value

    :param value: Object of a triple which is not a BNode
    :type value: Literal or URIRef
    :return: dict or str or list
    """
    if isinstance(value, Literal):
        if value.language is not None:
            return {"@value": str(value), "@language": value.language}
        return value.toPython()
    elif isinstance(value, URIRef):
        return {"@id": str(value)}
    elif value is None:
        return None
    return str(value)
language: python
partition: train
avg_line_len: 32.625
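A minimal usage sketch for literal_to_dict above, assuming rdflib (which provides Literal and URIRef) is installed; the values are illustrative:

from rdflib import Literal, URIRef

# A language-tagged literal becomes a JSON-LD value object.
literal_to_dict(Literal("chat", lang="fr"))    # {'@value': 'chat', '@language': 'fr'}
# A plain typed literal is converted to its native Python value.
literal_to_dict(Literal(42))                   # 42
# A URI reference becomes a JSON-LD id object.
literal_to_dict(URIRef("http://example.org"))  # {'@id': 'http://example.org'}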
repo: mongodb/mongo-python-driver
path: pymongo/client_session.py
url: https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/client_session.py#L529-L573
code:

def commit_transaction(self):
    """Commit a multi-statement transaction.

    .. versionadded:: 3.7
    """
    self._check_ended()

    retry = False
    state = self._transaction.state
    if state is _TxnState.NONE:
        raise InvalidOperation("No transaction started")
    elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
        # Server transaction was never started, no need to send a command.
        self._transaction.state = _TxnState.COMMITTED_EMPTY
        return
    elif state is _TxnState.ABORTED:
        raise InvalidOperation(
            "Cannot call commitTransaction after calling abortTransaction")
    elif state is _TxnState.COMMITTED:
        # We're explicitly retrying the commit, move the state back to
        # "in progress" so that _in_transaction returns true.
        self._transaction.state = _TxnState.IN_PROGRESS
        retry = True

    try:
        self._finish_transaction_with_retry("commitTransaction", retry)
    except ConnectionFailure as exc:
        # We do not know if the commit was successfully applied on the
        # server or if it satisfied the provided write concern, set the
        # unknown commit error label.
        exc._remove_error_label("TransientTransactionError")
        _reraise_with_unknown_commit(exc)
    except WTimeoutError as exc:
        # We do not know if the commit has satisfied the provided write
        # concern, add the unknown commit error label.
        _reraise_with_unknown_commit(exc)
    except OperationFailure as exc:
        if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
            # The server reports errorLabels in the case.
            raise
        # We do not know if the commit was successfully applied on the
        # server or if it satisfied the provided write concern, set the
        # unknown commit error label.
        _reraise_with_unknown_commit(exc)
    finally:
        self._transaction.state = _TxnState.COMMITTED
language: python
partition: train
avg_line_len: 46.088889
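A hedged usage sketch of the public session API that reaches this method; it assumes a transaction-capable deployment (MongoDB 4.0+ replica set) and a hypothetical shop.orders collection:

from pymongo import MongoClient

client = MongoClient()
with client.start_session() as session:
    session.start_transaction()
    client.shop.orders.insert_one({"sku": "abc", "qty": 1}, session=session)
    # commit_transaction() runs the state checks above, then sends
    # commitTransaction with the retry and error-labelling logic shown.
    session.commit_transaction()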
repo: zooniverse/panoptes-python-client
path: panoptes_client/subject_set.py
url: https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/subject_set.py#L69-L82
code:

def subjects(self):
    """
    A generator which yields :py:class:`.Subject` objects which are in this
    subject set.

    Examples::

        for subject in subject_set.subjects:
            print(subject.id)
    """
    for sms in SetMemberSubject.where(subject_set_id=self.id):
        yield sms.links.subject
language: python
partition: train
avg_line_len: 24.285714
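A short usage sketch, assuming an authenticated panoptes_client session and an existing subject set; the ID 1234 is hypothetical:

from panoptes_client import SubjectSet

subject_set = SubjectSet.find(1234)
# subjects is the generator property above; it pages through
# SetMemberSubject links lazily.
for subject in subject_set.subjects:
    print(subject.id)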
repo: JdeRobot/base
path: src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py
url: https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L635-L640
code:

def gps_velocity_body(GPS_RAW_INT, ATTITUDE):
    '''return GPS velocity vector in body frame'''
    r = rotation(ATTITUDE)
    return r.transposed() * Vector3(
        GPS_RAW_INT.vel * 0.01 * cos(radians(GPS_RAW_INT.cog * 0.01)),
        GPS_RAW_INT.vel * 0.01 * sin(radians(GPS_RAW_INT.cog * 0.01)),
        -tan(ATTITUDE.pitch) * GPS_RAW_INT.vel * 0.01)
language: python
partition: train
avg_line_len: 63.833333
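The 0.01 scale factors come from MAVLink units: GPS_RAW_INT.vel is ground speed in cm/s and GPS_RAW_INT.cog is course over ground in centidegrees. A self-contained sketch of that unit handling, with made-up field values:

from math import radians, cos, sin

vel_raw = 1250            # cm/s, as reported in GPS_RAW_INT.vel
cog_raw = 9000            # centidegrees, as reported in GPS_RAW_INT.cog
vel_ms = vel_raw * 0.01   # 12.5 m/s
cog_deg = cog_raw * 0.01  # 90.0 degrees (due east)
vn = vel_ms * cos(radians(cog_deg))  # north component, ~0.0 m/s
ve = vel_ms * sin(radians(cog_deg))  # east component, 12.5 m/s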
repo: UCL-INGI/INGInious
path: inginious/agent/docker_agent/__init__.py
url: https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/agent/docker_agent/__init__.py#L501-L627
code:

async def handle_job_closing(self, container_id, retval):
    """
    Handle a closing student container. Do some cleaning, verify memory limits,
    timeouts, ... and returns data to the backend
    """
    try:
        self._logger.debug("Closing %s", container_id)
        try:
            message, container_path, future_results = self._containers_running[container_id]
            del self._containers_running[container_id]
        except asyncio.CancelledError:
            raise
        except:
            self._logger.warning("Container %s that has finished(p1) was not launched by this agent",
                                 str(container_id), exc_info=True)
            return

        # Close sub containers
        for student_container_id_loop in self._student_containers_for_job[message.job_id]:
            # little hack to ensure the value of student_container_id_loop is copied into the closure
            async def close_and_delete(student_container_id=student_container_id_loop):
                try:
                    await self._docker.kill_container(student_container_id)
                    await self._docker.remove_container(student_container_id)
                except asyncio.CancelledError:
                    raise
                except:
                    pass  # ignore
            self._create_safe_task(close_and_delete(student_container_id_loop))
        del self._student_containers_for_job[message.job_id]

        # Allow other container to reuse the external ports this container has finished to use
        if container_id in self._assigned_external_ports:
            for p in self._assigned_external_ports[container_id]:
                self._external_ports.add(p)
            del self._assigned_external_ports[container_id]

        # Verify if the container was killed, either by the client, by an OOM or by a timeout
        killed = await self._timeout_watcher.was_killed(container_id)
        if container_id in self._containers_killed:
            killed = self._containers_killed[container_id]
            del self._containers_killed[container_id]

        stdout = ""
        stderr = ""
        result = "crash" if retval == -1 else None
        error_msg = None
        grade = None
        problems = {}
        custom = {}
        tests = {}
        archive = None
        state = ""

        if killed is not None:
            result = killed

        # If everything did well, continue to retrieve the status from the container
        if result is None:
            # Get logs back
            try:
                return_value = await future_results

                # Accepted types for return dict
                accepted_types = {"stdout": str, "stderr": str, "result": str, "text": str,
                                  "grade": float, "problems": dict, "custom": dict,
                                  "tests": dict, "state": str, "archive": str}
                keys_fct = {"problems": id_checker, "custom": id_checker, "tests": id_checker_tests}

                # Check dict content
                for key, item in return_value.items():
                    if not isinstance(item, accepted_types[key]):
                        raise Exception("Feedback file is badly formatted.")
                    elif accepted_types[key] == dict and key != "custom":  # custom can contain anything:
                        for sub_key, sub_item in item.items():
                            if not keys_fct[key](sub_key) or isinstance(sub_item, dict):
                                raise Exception("Feedback file is badly formatted.")

                # Set output fields
                stdout = return_value.get("stdout", "")
                stderr = return_value.get("stderr", "")
                result = return_value.get("result", "error")
                error_msg = return_value.get("text", "")
                grade = return_value.get("grade", None)
                problems = return_value.get("problems", {})
                custom = return_value.get("custom", {})
                tests = return_value.get("tests", {})
                state = return_value.get("state", "")
                archive = return_value.get("archive", None)
                if archive is not None:
                    archive = base64.b64decode(archive)
            except Exception as e:
                self._logger.exception("Cannot get back output of container %s! (%s)", container_id, str(e))
                result = "crash"
                error_msg = 'The grader did not return a readable output : {}'.format(str(e))

        # Default values
        if error_msg is None:
            error_msg = ""
        if grade is None:
            if result == "success":
                grade = 100.0
            else:
                grade = 0.0

        # Remove container
        try:
            await self._docker.remove_container(container_id)
        except asyncio.CancelledError:
            raise
        except:
            pass

        # Delete folders
        try:
            await self._ashutil.rmtree(container_path)
        except PermissionError:
            self._logger.debug("Cannot remove old container path!")
            pass  # todo: run a docker container to force removal

        # Return!
        await self.send_job_result(message.job_id, result, error_msg, grade, problems,
                                   tests, custom, state, archive, stdout, stderr)

        # Do not forget to remove data from internal state
        del self._container_for_job[message.job_id]
    except asyncio.CancelledError:
        raise
    except:
        self._logger.exception("Exception in handle_job_closing")
language: python
partition: train
avg_line_len: 46.346457
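The "little hack" comment above refers to Python's late-binding closures: without the default argument, every close_and_delete coroutine would see the final value of the loop variable. A minimal standalone demonstration of that pattern:

import asyncio

async def main():
    tasks = []
    for container_id in ["c1", "c2", "c3"]:
        # The default argument is evaluated at definition time, so each
        # coroutine captures the current container_id, not the last one.
        async def close_and_delete(captured_id=container_id):
            print("cleaning up", captured_id)
        tasks.append(asyncio.ensure_future(close_and_delete()))
    await asyncio.gather(*tasks)

asyncio.run(main())  # prints c1, c2, c3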
repo: zakdoek/django-simple-resizer
path: simple_resizer/__init__.py
url: https://github.com/zakdoek/django-simple-resizer/blob/5614eb1717948c65d179c3d1567439a8c90a4d44/simple_resizer/__init__.py#L147-L169
code:

def resize(image, width=None, height=None, crop=False):
    """
    Resize an image and return the resized file.
    """
    # First normalize params to determine which file to get
    width, height, crop = _normalize_params(image, width, height, crop)

    try:
        # Check the image file state for clean close
        is_closed = image.closed
        if is_closed:
            image.open()
        # Create the resized file
        # Do resize and crop
        resized_image = _resize(image, width, height, crop)
    finally:
        # Re-close if received a closed file
        if is_closed:
            image.close()

    return ImageFile(resized_image)
language: python
partition: train
avg_line_len: 27.869565
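A hedged usage sketch; profile.avatar stands in for any Django ImageFieldFile and the dimensions are illustrative:

# `profile` is a hypothetical model instance with an ImageField `avatar`.
# resize() reopens a closed file if needed, resizes (and optionally crops),
# then returns a django ImageFile wrapping the result.
thumbnail = resize(profile.avatar, width=200, height=200, crop=True)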
repo: lepture/flask-oauthlib
path: flask_oauthlib/contrib/oauth2.py
url: https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/contrib/oauth2.py#L304-L322
code:

def set(self, client_id, code, request, *args, **kwargs):
    """Creates Grant object with the given params

    :param client_id: ID of the client
    :param code:
    :param request: OAuthlib request object
    """
    expires = datetime.utcnow() + timedelta(seconds=100)
    grant = self.model(
        client_id=request.client.client_id,
        code=code['code'],
        redirect_uri=request.redirect_uri,
        scope=' '.join(request.scopes),
        user=self.current_user(),
        expires=expires
    )
    self.session.add(grant)
    self.session.commit()
language: python
partition: test
avg_line_len: 32.421053
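Outside the contrib bindings, flask-oauthlib documents the same hook as a hand-written grantsetter; a sketch, where oauth is an OAuth2Provider instance and Grant, db, and current_user come from a typical Flask-SQLAlchemy app (assumptions, not part of this module):

from datetime import datetime, timedelta

@oauth.grantsetter
def save_grant(client_id, code, request, *args, **kwargs):
    # Grants are short-lived; 100 seconds is enough to exchange the code.
    expires = datetime.utcnow() + timedelta(seconds=100)
    grant = Grant(  # hypothetical application model
        client_id=client_id,
        code=code['code'],
        redirect_uri=request.redirect_uri,
        _scopes=' '.join(request.scopes),
        user=current_user(),
        expires=expires,
    )
    db.session.add(grant)
    db.session.commit()
    return grant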
repo: RRZE-HPC/kerncraft
path: kerncraft/iaca.py
url: https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/iaca.py#L59-L217
code:

def find_asm_blocks(asm_lines):
    """Find blocks probably corresponding to loops in assembly."""
    blocks = []

    last_labels = OrderedDict()
    packed_ctr = 0
    avx_ctr = 0
    xmm_references = []
    ymm_references = []
    zmm_references = []
    gp_references = []
    mem_references = []
    increments = {}
    for i, line in enumerate(asm_lines):
        # Register access counts
        zmm_references += re.findall('%zmm[0-9]+', line)
        ymm_references += re.findall('%ymm[0-9]+', line)
        xmm_references += re.findall('%xmm[0-9]+', line)
        gp_references += re.findall('%r[a-z0-9]+', line)

        if re.search(r'\d*\(%\w+(,%\w+)?(,\d)?\)', line):
            m = re.search(
                r'(?P<off>[-]?\d*)\(%(?P<basep>\w+)(,%(?P<idx>\w+))?(?:,(?P<scale>\d))?\)'
                r'(?P<eol>$)?', line)
            mem_references.append((
                int(m.group('off')) if m.group('off') else 0,
                m.group('basep'),
                m.group('idx'),
                int(m.group('scale')) if m.group('scale') else 1,
                'load' if m.group('eol') is None else 'store'))

        if re.match(r"^[v]?(mul|add|sub|div|fmadd(132|213|231)?)[h]?p[ds]", line):
            if line.startswith('v'):
                avx_ctr += 1
            packed_ctr += 1
        elif re.match(r'^\S+:', line):
            # last_labels[label_name] = line_number
            last_labels[line[0:line.find(':')]] = i
            # Reset counters
            packed_ctr = 0
            avx_ctr = 0
            xmm_references = []
            ymm_references = []
            zmm_references = []
            gp_references = []
            mem_references = []
            increments = {}
        elif re.match(r'^inc[bwlq]?\s+%[a-z0-9]+', line):
            reg_start = line.find('%') + 1
            increments[line[reg_start:]] = 1
        elif re.match(r'^add[bwlq]?\s+\$[0-9]+,\s*%[a-z0-9]+', line):
            const_start = line.find('$') + 1
            const_end = line[const_start + 1:].find(',') + const_start + 1
            reg_start = line.find('%') + 1
            increments[line[reg_start:]] = int(line[const_start:const_end])
        elif re.match(r'^dec[bwlq]?', line):
            reg_start = line.find('%') + 1
            increments[line[reg_start:]] = -1
        elif re.match(r'^sub[bwlq]?\s+\$[0-9]+,', line):
            const_start = line.find('$') + 1
            const_end = line[const_start + 1:].find(',') + const_start + 1
            reg_start = line.find('%') + 1
            increments[line[reg_start:]] = -int(line[const_start:const_end])
        elif last_labels and re.match(r'^j[a-z]+\s+\S+\s*', line):
            # End of block(s) due to jump
            # Check if jump target matches any previously recoded label
            last_label = None
            last_label_line = -1
            for label_name, label_line in last_labels.items():
                if re.match(r'^j[a-z]+\s+' + re.escape(label_name) + r'\s*', line):
                    # matched
                    last_label = label_name
                    last_label_line = label_line
            labels = list(last_labels.keys())

            if last_label:
                # deduce loop increment from memory index register
                pointer_increment = None  # default -> can not decide, let user choose
                possible_idx_regs = None
                if mem_references:
                    # we found memory references to work with
                    # If store accesses exist, consider only those
                    store_references = [mref for mref in mem_references
                                        if mref[4] == 'store']
                    refs = store_references or mem_references
                    possible_idx_regs = list(set(increments.keys()).intersection(
                        set([r[1] for r in refs if r[1] is not None] +
                            [r[2] for r in refs if r[2] is not None])))
                    for mref in refs:
                        for reg in list(possible_idx_regs):
                            # Only consider references with two registers,
                            # where one could be an index
                            if None not in mref[1:3]:
                                # One needs to mach, other registers will be excluded
                                if not (reg == mref[1] or reg == mref[2]):
                                    # reg can not be it
                                    possible_idx_regs.remove(reg)

                    idx_reg = None
                    if len(possible_idx_regs) == 1:
                        # good, exactly one register was found
                        idx_reg = possible_idx_regs[0]
                    elif possible_idx_regs and itemsEqual(
                            [increments[pidxreg] for pidxreg in possible_idx_regs]):
                        # multiple were option found, but all have the same increment
                        # use first match:
                        idx_reg = possible_idx_regs[0]

                    if idx_reg:
                        mem_scales = [mref[3] for mref in refs
                                      if idx_reg == mref[2] or idx_reg == mref[1]]

                        if itemsEqual(mem_scales):
                            # good, all scales are equal
                            try:
                                pointer_increment = mem_scales[0] * increments[idx_reg]
                            except:
                                print("labels", pformat(labels[labels.index(last_label):]))
                                print("lines", pformat(asm_lines[last_label_line:i + 1]))
                                print("increments", increments)
                                print("mem_references", pformat(mem_references))
                                print("idx_reg", idx_reg)
                                print("mem_scales", mem_scales)
                                raise

                blocks.append({'first_line': last_label_line,
                               'last_line': i,
                               'ops': i - last_label_line,
                               'labels': labels[labels.index(last_label):],
                               'packed_instr': packed_ctr,
                               'avx_instr': avx_ctr,
                               'XMM': (len(xmm_references), len(set(xmm_references))),
                               'YMM': (len(ymm_references), len(set(ymm_references))),
                               'ZMM': (len(zmm_references), len(set(zmm_references))),
                               'GP': (len(gp_references), len(set(gp_references))),
                               'regs': (len(xmm_references) + len(ymm_references) +
                                        len(zmm_references) + len(gp_references),
                                        len(set(xmm_references)) + len(set(ymm_references)) +
                                        len(set(zmm_references)) + len(set(gp_references))),
                               'pointer_increment': pointer_increment,
                               'lines': asm_lines[last_label_line:i + 1],
                               'possible_idx_regs': possible_idx_regs,
                               'mem_references': mem_references,
                               'increments': increments,
                               })

            # Reset counters
            packed_ctr = 0
            avx_ctr = 0
            xmm_references = []
            ymm_references = []
            zmm_references = []
            gp_references = []
            mem_references = []
            increments = {}
            last_labels = OrderedDict()

    return list(enumerate(blocks))
language: python
partition: test
avg_line_len: 48.119497
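The memory-operand regex above is worth seeing in isolation. In AT&T syntax the operand that ends the line is the destination, which is why a match of the eol group classifies the reference as a store. A self-contained check on a made-up instruction:

import re

pattern = (r'(?P<off>[-]?\d*)\(%(?P<basep>\w+)(,%(?P<idx>\w+))?'
           r'(?:,(?P<scale>\d))?\)(?P<eol>$)?')
m = re.search(pattern, "movq %rax, -8(%rbp,%rcx,8)")
print(m.group('off'), m.group('basep'), m.group('idx'), m.group('scale'))
# -8 rbp rcx 8
print('store' if m.group('eol') is not None else 'load')  # store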
repo: openp2pdesign/makerlabs
path: makerlabs/timeline.py
url: https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/timeline.py#L24-L39
code:

def get_multiple_data():
    """Get data from all the platforms listed in makerlabs."""
    # Get data from all the mapped platforms
    all_labs = {}
    all_labs["diybio_org"] = diybio_org.get_labs(format="dict")
    all_labs["fablabs_io"] = fablabs_io.get_labs(format="dict")
    all_labs["makeinitaly_foundation"] = makeinitaly_foundation.get_labs(
        format="dict")
    all_labs["hackaday_io"] = hackaday_io.get_labs(format="dict")
    all_labs["hackerspaces_org"] = hackerspaces_org.get_labs(format="dict")
    all_labs["makery_info"] = makery_info.get_labs(format="dict")
    all_labs["nesta"] = nesta.get_labs(format="dict")
    # all_labs["techshop_ws"] = techshop_ws.get_labs(format="dict")

    return all_labs
language: python
partition: train
avg_line_len: 44.5
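A short usage sketch; assuming each get_labs(format="dict") call returns a dict keyed by lab, this prints a per-platform count:

all_labs = get_multiple_data()
for platform, labs in sorted(all_labs.items()):
    print(platform, len(labs))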
repo: fabioz/PyDev.Debugger
path: third_party/pep8/pycodestyle.py
url: https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L1597-L1607
code:

def check_physical(self, line):
    """Run all physical checks on a raw input line."""
    self.physical_line = line
    for name, check, argument_names in self._physical_checks:
        self.init_checker_state(name, argument_names)
        result = self.run_check(check, argument_names)
        if result is not None:
            (offset, text) = result
            self.report_error(self.line_number, offset, text, check)
            if text[:4] == 'E101':
                self.indent_char = line[0]
language: python
partition: train
avg_line_len: 48.272727
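check_physical is driven by pycodestyle's Checker; a hedged usage sketch against a hypothetical example.py:

import pycodestyle

checker = pycodestyle.Checker('example.py', show_source=True)
# check_all() runs the physical checks (among others) on every line
# and returns the number of errors found.
error_count = checker.check_all()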
repo: isislovecruft/python-gnupg
path: pretty_bad_protocol/gnupg.py
url: https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/pretty_bad_protocol/gnupg.py#L504-L538
code:

def sign_key(self, keyid, default_key=None, passphrase=None):
    """
    sign (an imported) public key - keyid, with default secret key

    >>> import gnupg
    >>> gpg = gnupg.GPG(homedir="doctests")
    >>> key_input = gpg.gen_key_input()
    >>> key = gpg.gen_key(key_input)
    >>> gpg.sign_key(key['fingerprint'])
    >>> gpg.list_sigs(key['fingerprint'])

    :param str keyid: key shortID, longID, fingerprint or email_address
    :param str passphrase: passphrase used when creating the key, leave None otherwise
    :returns: The result giving status of the key signing...
              success can be verified by gpg.list_sigs(keyid)
    """
    args = []
    input_command = ""
    if passphrase:
        passphrase_arg = "--passphrase-fd 0"
        input_command = "%s\n" % passphrase
        args.append(passphrase_arg)
    if default_key:
        args.append(str("--default-key %s" % default_key))
    args.extend(["--command-fd 0", "--sign-key %s" % keyid])

    p = self._open_subprocess(args)
    result = self._result_map['signing'](self)
    confirm_command = "%sy\n" % input_command
    p.stdin.write(b(confirm_command))
    self._collect_output(p, result, stdin=p.stdin)
    return result
language: python
partition: train
avg_line_len: 36.771429
repo: ska-sa/katcp-python
path: katcp/core.py
url: https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/core.py#L135-L159
code:

def steal_docstring_from(obj):
    """Decorator that lets you steal a docstring from another object

    Example
    -------

    ::

        @steal_docstring_from(superclass.meth)
        def meth(self, arg):
            "Extra subclass documentation"
            pass

    In this case the docstring of the new 'meth' will be copied from
    superclass.meth, and if an additional docstring was defined for meth
    it will be appended to the superclass docstring with two newlines
    in between.
    """
    def deco(fn):
        docs = [obj.__doc__]
        if fn.__doc__:
            docs.append(fn.__doc__)
        fn.__doc__ = '\n\n'.join(docs)
        return fn
    return deco
language: python
partition: train
avg_line_len: 25.72
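A runnable demonstration of the decorator's append behaviour:

class Base(object):
    def meth(self, arg):
        "Parent documentation."

@steal_docstring_from(Base.meth)
def meth(self, arg):
    "Extra subclass documentation"

print(meth.__doc__)
# Parent documentation.
#
# Extra subclass documentation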
repo: deschler/django-modeltranslation
path: modeltranslation/translator.py
url: https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/translator.py#L196-L220
code:

def add_manager(model):
    """
    Monkey patches the original model to use MultilingualManager instead of
    default managers (not only ``objects``, but also every manager defined
    and inherited).

    Custom managers are merged with MultilingualManager.
    """
    if model._meta.abstract:
        return

    # Make all managers local for this model to fix patching parent model managers
    model._meta.local_managers = model._meta.managers

    for current_manager in model._meta.local_managers:
        prev_class = current_manager.__class__
        patch_manager_class(current_manager)
        if model._default_manager.__class__ is prev_class:
            # Normally model._default_manager is a reference to one of model's managers
            # (and would be patched by the way).
            # However, in some rare situations (mostly proxy models)
            # model._default_manager is not the same instance as one of managers, but it
            # share the same class.
            model._default_manager.__class__ = current_manager.__class__

    patch_manager_class(model._base_manager)
    model._meta.base_manager_name = 'objects'
    model._meta._expire_cache()
language: python
partition: train
avg_line_len: 46.2
repo: pybel/pybel
path: src/pybel/struct/summary/edge_summary.py
url: https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/summary/edge_summary.py#L83-L92
code:

def count_relations(graph) -> Counter:
    """Return a histogram over all relationships in a graph.

    :param pybel.BELGraph graph: A BEL graph
    :return: A Counter from {relation type: frequency}
    """
    return Counter(
        data[RELATION]
        for _, _, data in graph.edges(data=True)
    )
language: python
partition: train
avg_line_len: 29.7
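BELGraph subclasses networkx.MultiDiGraph, so the traversal can be sketched with plain networkx; here RELATION is assumed to be the 'relation' edge-data key used by pybel:

from collections import Counter
import networkx as nx

g = nx.MultiDiGraph()
g.add_edge('a', 'b', relation='increases')
g.add_edge('a', 'c', relation='decreases')
g.add_edge('b', 'c', relation='increases')
print(Counter(d['relation'] for _, _, d in g.edges(data=True)))
# Counter({'increases': 2, 'decreases': 1})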
repo: googledatalab/pydatalab
path: google/datalab/bigquery/commands/_bigquery.py
url: https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L325-L352
code:

def _get_query_argument(args, cell, env):
    """ Get a query argument to a cell magic.

    The query is specified with args['query']. We look that up and if it is a BQ
    query object, just return it. If it is a string, build a query object out of
    it and return that.

    Args:
      args: the dictionary of magic arguments.
      cell: the cell contents which can be variable value overrides (if args has
          a 'query' value) or inline SQL otherwise.
      env: a dictionary that is used for looking up variable values.

    Returns:
      A Query object.
    """
    sql_arg = args.get('query', None)
    if sql_arg is None:
        # Assume we have inline SQL in the cell
        if not isinstance(cell, basestring):
            raise Exception('Expected a --query argument or inline SQL')
        return bigquery.Query(cell, env=env)

    item = google.datalab.utils.commands.get_notebook_item(sql_arg)
    if isinstance(item, bigquery.Query):
        return item
    else:
        raise Exception('Expected a query object, got %s.' % type(item))
language: python
partition: train
avg_line_len: 34.857143
repo: mwickert/scikit-dsp-comm
path: sk_dsp_comm/iir_design_helper.py
url: https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/iir_design_helper.py#L301-L312
code:

def freqz_cas(sos, w):
    """
    Cascade frequency response

    Mark Wickert October 2016
    """
    Ns, Mcol = sos.shape
    w, Hcas = signal.freqz(sos[0, :3], sos[0, 3:], w)
    for k in range(1, Ns):
        w, Htemp = signal.freqz(sos[k, :3], sos[k, 3:], w)
        Hcas *= Htemp
    return w, Hcas
language: python
partition: valid
avg_line_len: 24.75
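A usage sketch: build a cascaded second-order-section filter with scipy and evaluate the overall response by multiplying the per-biquad responses, as the loop above does:

import numpy as np
from scipy import signal

sos = signal.butter(4, 0.2, output='sos')  # 4th-order lowpass as two biquads
w = np.linspace(0, np.pi, 512)
w, Hcas = freqz_cas(sos, w)
print(abs(Hcas[0]))  # ~1.0 at DC for a lowpass design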
repo: mjirik/imtools
path: imtools/show_segmentation.py
url: https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/show_segmentation.py#L126-L135
code:

def select_labels(self, labels=None):
    """
    Prepare binary segmentation based on input segmentation and labels.

    :param labels:
    :return:
    """
    self._resize_if_required()
    segmentation = self._select_labels(self.resized_segmentation, labels)
    # logger.debug("select labels in show_segmentation {} sum {}".format(labels, np.sum(segmentation)))
    self.resized_binar_segmentation = segmentation
language: python
partition: train
avg_line_len: 43.6
repo: scanny/python-pptx
path: pptx/text/fonts.py
url: https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/text/fonts.py#L295-L306
code:

def family_name(self):
    """
    The name of the typeface family for this font, e.g. 'Arial'.
    """
    def find_first(dict_, keys, default=None):
        for key in keys:
            value = dict_.get(key)
            if value is not None:
                return value
        return default
    # keys for Unicode, Mac, and Windows family name, respectively
    return find_first(self._names, ((0, 1), (1, 1), (3, 1)))
language: python
partition: train
avg_line_len: 38.083333
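The nested find_first helper is a small first-match dictionary search; restated standalone with a made-up name table, where the keys (0, 1), (1, 1), and (3, 1) are the Unicode, Mac, and Windows family-name entries:

def find_first(dict_, keys, default=None):
    for key in keys:
        value = dict_.get(key)
        if value is not None:
            return value
    return default

names = {(1, 1): 'Arial', (3, 1): 'Arial'}  # no Unicode entry present
print(find_first(names, ((0, 1), (1, 1), (3, 1))))  # 'Arial' (from the Mac entry)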
repo: Rapptz/discord.py
path: examples/basic_voice.py
url: https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/examples/basic_voice.py#L69-L75
code:

async def play(self, ctx, *, query):
    """Plays a file from the local filesystem"""

    source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))
    ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)

    await ctx.send('Now playing: {}'.format(query))
language: python
partition: train
avg_line_len: 45.428571
repo: pytroll/satpy
path: satpy/node.py
url: https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/node.py#L279-L313
code:

def _find_reader_dataset(self, dataset_key, **dfilter):
    """Attempt to find a `DatasetID` in the available readers.

    Args:
        dataset_key (str, float, DatasetID):
            Dataset name, wavelength, or a combination of `DatasetID`
            parameters to use in searching for the dataset from the
            available readers.
        **dfilter (list or str): `DatasetID` parameters besides `name`
            and `wavelength` to use to filter the available datasets.
            Passed directly to `get_dataset_key` of the readers, see
            that method for more information.

    """
    too_many = False
    for reader_name, reader_instance in self.readers.items():
        try:
            ds_id = reader_instance.get_dataset_key(dataset_key, **dfilter)
        except TooManyResults:
            LOG.trace("Too many datasets matching key {} in reader {}".format(dataset_key, reader_name))
            too_many = True
            continue
        except KeyError:
            LOG.trace("Can't find dataset %s in reader %s", str(dataset_key), reader_name)
            continue
        LOG.trace("Found {} in reader {} when asking for {}".format(str(ds_id), reader_name, repr(dataset_key)))
        try:
            # now that we know we have the exact DatasetID see if we have already created a Node for it
            return self.getitem(ds_id)
        except KeyError:
            # we haven't created a node yet, create it now
            return Node(ds_id, {'reader_name': reader_name})
    if too_many:
        raise TooManyResults("Too many keys matching: {}".format(dataset_key))
[ "def", "_find_reader_dataset", "(", "self", ",", "dataset_key", ",", "*", "*", "dfilter", ")", ":", "too_many", "=", "False", "for", "reader_name", ",", "reader_instance", "in", "self", ".", "readers", ".", "items", "(", ")", ":", "try", ":", "ds_id", "=", "reader_instance", ".", "get_dataset_key", "(", "dataset_key", ",", "*", "*", "dfilter", ")", "except", "TooManyResults", ":", "LOG", ".", "trace", "(", "\"Too many datasets matching key {} in reader {}\"", ".", "format", "(", "dataset_key", ",", "reader_name", ")", ")", "too_many", "=", "True", "continue", "except", "KeyError", ":", "LOG", ".", "trace", "(", "\"Can't find dataset %s in reader %s\"", ",", "str", "(", "dataset_key", ")", ",", "reader_name", ")", "continue", "LOG", ".", "trace", "(", "\"Found {} in reader {} when asking for {}\"", ".", "format", "(", "str", "(", "ds_id", ")", ",", "reader_name", ",", "repr", "(", "dataset_key", ")", ")", ")", "try", ":", "# now that we know we have the exact DatasetID see if we have already created a Node for it", "return", "self", ".", "getitem", "(", "ds_id", ")", "except", "KeyError", ":", "# we haven't created a node yet, create it now", "return", "Node", "(", "ds_id", ",", "{", "'reader_name'", ":", "reader_name", "}", ")", "if", "too_many", ":", "raise", "TooManyResults", "(", "\"Too many keys matching: {}\"", ".", "format", "(", "dataset_key", ")", ")" ]
Attempt to find a `DatasetID` in the available readers. Args: dataset_key (str, float, DatasetID): Dataset name, wavelength, or a combination of `DatasetID` parameters to use in searching for the dataset from the available readers. **dfilter (list or str): `DatasetID` parameters besides `name` and `wavelength` to use to filter the available datasets. Passed directly to `get_dataset_key` of the readers, see that method for more information.
[ "Attempt", "to", "find", "a", "DatasetID", "in", "the", "available", "readers", "." ]
python
train
51.485714
WhyNotHugo/django-afip
django_afip/models.py
https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L385-L414
def fetch_points_of_sales(self, ticket=None): """ Fetch all point of sales objects. Fetch all point of sales from the WS and store (or update) them locally. Returns a list of tuples with the format (pos, created,). """ ticket = ticket or self.get_or_create_ticket('wsfe') client = clients.get_client('wsfe', self.is_sandboxed) response = client.service.FEParamGetPtosVenta( serializers.serialize_ticket(ticket), ) check_response(response) results = [] for pos_data in response.ResultGet.PtoVenta: results.append(PointOfSales.objects.update_or_create( number=pos_data.Nro, issuance_type=pos_data.EmisionTipo, owner=self, defaults={ 'blocked': pos_data.Bloqueado == 'N', 'drop_date': parsers.parse_date(pos_data.FchBaja), } )) return results
[ "def", "fetch_points_of_sales", "(", "self", ",", "ticket", "=", "None", ")", ":", "ticket", "=", "ticket", "or", "self", ".", "get_or_create_ticket", "(", "'wsfe'", ")", "client", "=", "clients", ".", "get_client", "(", "'wsfe'", ",", "self", ".", "is_sandboxed", ")", "response", "=", "client", ".", "service", ".", "FEParamGetPtosVenta", "(", "serializers", ".", "serialize_ticket", "(", "ticket", ")", ",", ")", "check_response", "(", "response", ")", "results", "=", "[", "]", "for", "pos_data", "in", "response", ".", "ResultGet", ".", "PtoVenta", ":", "results", ".", "append", "(", "PointOfSales", ".", "objects", ".", "update_or_create", "(", "number", "=", "pos_data", ".", "Nro", ",", "issuance_type", "=", "pos_data", ".", "EmisionTipo", ",", "owner", "=", "self", ",", "defaults", "=", "{", "'blocked'", ":", "pos_data", ".", "Bloqueado", "==", "'N'", ",", "'drop_date'", ":", "parsers", ".", "parse_date", "(", "pos_data", ".", "FchBaja", ")", ",", "}", ")", ")", "return", "results" ]
Fetch all point of sales objects. Fetch all point of sales from the WS and store (or update) them locally. Returns a list of tuples with the format (pos, created,).
[ "Fetch", "all", "point", "of", "sales", "objects", "." ]
python
train
32.766667
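A hedged usage sketch; `taxpayer` stands in for a saved model instance that exposes this method, with a valid AFIP key already attached:

for pos, created in taxpayer.fetch_points_of_sales():
    # update_or_create() yields (object, created) pairs, matching the docstring
    print(pos.number, 'created' if created else 'updated')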
carsongee/flask-htpasswd
flask_htpasswd.py
https://github.com/carsongee/flask-htpasswd/blob/db6fe596dd167f33aeb3d77e975c861d0534cecf/flask_htpasswd.py#L76-L90
def check_basic_auth(self, username, password): """ This function is called to check if a username / password combination is valid via the htpasswd file. """ valid = self.users.check_password( username, password ) if not valid: log.warning('Invalid login from %s', username) valid = False return ( valid, username )
[ "def", "check_basic_auth", "(", "self", ",", "username", ",", "password", ")", ":", "valid", "=", "self", ".", "users", ".", "check_password", "(", "username", ",", "password", ")", "if", "not", "valid", ":", "log", ".", "warning", "(", "'Invalid login from %s'", ",", "username", ")", "valid", "=", "False", "return", "(", "valid", ",", "username", ")" ]
This function is called to check if a username / password combination is valid via the htpasswd file.
[ "This", "function", "is", "called", "to", "check", "if", "a", "username", "/", "password", "combination", "is", "valid", "via", "the", "htpasswd", "file", "." ]
python
train
28.866667
reingart/gui2py
gui/controls/listview.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/listview.py#L183-L187
def clear_all(self): "Remove all items and column headings" self.clear() for ch in reversed(self.columns): del self[ch.name]
[ "def", "clear_all", "(", "self", ")", ":", "self", ".", "clear", "(", ")", "for", "ch", "in", "reversed", "(", "self", ".", "columns", ")", ":", "del", "self", "[", "ch", ".", "name", "]" ]
Remove all items and column headings
[ "Remove", "all", "items", "and", "column", "headings" ]
python
test
32
pandas-dev/pandas
pandas/core/dtypes/common.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L90-L114
def ensure_int64_or_float64(arr, copy=False): """ Ensure that an dtype array of some integer dtype has an int64 dtype if possible If it's not possible, potentially because of overflow, convert the array to float64 instead. Parameters ---------- arr : array-like The array whose data type we want to enforce. copy: boolean Whether to copy the original array or reuse it in place, if possible. Returns ------- out_arr : The input array cast as int64 if possible without overflow. Otherwise the input array cast to float64. """ try: return arr.astype('int64', copy=copy, casting='safe') except TypeError: return arr.astype('float64', copy=copy)
[ "def", "ensure_int64_or_float64", "(", "arr", ",", "copy", "=", "False", ")", ":", "try", ":", "return", "arr", ".", "astype", "(", "'int64'", ",", "copy", "=", "copy", ",", "casting", "=", "'safe'", ")", "except", "TypeError", ":", "return", "arr", ".", "astype", "(", "'float64'", ",", "copy", "=", "copy", ")" ]
Ensure that an array of some integer dtype has an int64 dtype if possible.
If it's not possible, potentially because of overflow, convert the array to
float64 instead.

Parameters
----------
arr : array-like
      The array whose data type we want to enforce.
copy: boolean
      Whether to copy the original array or reuse it in place, if possible.

Returns
-------
out_arr : The input array cast as int64 if
          possible without overflow.
          Otherwise the input array cast to float64.
[ "Ensure", "that", "an", "dtype", "array", "of", "some", "integer", "dtype", "has", "an", "int64", "dtype", "if", "possible", "If", "it", "s", "not", "possible", "potentially", "because", "of", "overflow", "convert", "the", "array", "to", "float64", "instead", "." ]
python
train
30.12
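A quick behavioral sketch of the function above (not from the pandas test suite): a dtype-safe cast yields int64, while one that could overflow falls back to float64:

import numpy as np

arr = np.array([1, 2, 3], dtype=np.int32)
ensure_int64_or_float64(arr).dtype   # dtype('int64'): int32 -> int64 is a 'safe' cast

big = np.array([2**64 - 1], dtype=np.uint64)
ensure_int64_or_float64(big).dtype   # dtype('float64'): uint64 -> int64 could overflow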
funkybob/knights-templater
knights/tags.py
https://github.com/funkybob/knights-templater/blob/b15cdbaae7d824d02f7f03ca04599ae94bb759dd/knights/tags.py#L74-L93
def _create_with_scope(body, kwargs): ''' Helper function to wrap a block in a scope stack: with ContextScope(context, **kwargs) as context: ... body ... ''' return ast.With( items=[ ast.withitem( context_expr=_a.Call( _a.Name('ContextScope'), [_a.Name('context')], keywords=kwargs, ), optional_vars=_a.Name('context', ctx=ast.Store()) ), ], body=body, )
[ "def", "_create_with_scope", "(", "body", ",", "kwargs", ")", ":", "return", "ast", ".", "With", "(", "items", "=", "[", "ast", ".", "withitem", "(", "context_expr", "=", "_a", ".", "Call", "(", "_a", ".", "Name", "(", "'ContextScope'", ")", ",", "[", "_a", ".", "Name", "(", "'context'", ")", "]", ",", "keywords", "=", "kwargs", ",", ")", ",", "optional_vars", "=", "_a", ".", "Name", "(", "'context'", ",", "ctx", "=", "ast", ".", "Store", "(", ")", ")", ")", ",", "]", ",", "body", "=", "body", ",", ")" ]
Helper function to wrap a block in a scope stack: with ContextScope(context, **kwargs) as context: ... body ...
[ "Helper", "function", "to", "wrap", "a", "block", "in", "a", "scope", "stack", ":" ]
python
train
26.2
python-bonobo/bonobo
bonobo/registry.py
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/registry.py#L94-L106
def create_reader(name, *args, format=None, registry=default_registry, **kwargs): """ Create a reader instance, guessing its factory using filename (and eventually format). :param name: :param args: :param format: :param registry: :param kwargs: :return: mixed """ return registry.get_reader_factory_for(name, format=format)(name, *args, **kwargs)
[ "def", "create_reader", "(", "name", ",", "*", "args", ",", "format", "=", "None", ",", "registry", "=", "default_registry", ",", "*", "*", "kwargs", ")", ":", "return", "registry", ".", "get_reader_factory_for", "(", "name", ",", "format", "=", "format", ")", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Create a reader instance, guessing its factory from the filename (and, optionally, an explicit format).

:param name:
:param args:
:param format:
:param registry:
:param kwargs:
:return: mixed
[ "Create", "a", "reader", "instance", "guessing", "its", "factory", "using", "filename", "(", "and", "eventually", "format", ")", "." ]
python
train
29
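A hedged usage sketch; with the default registry the factory is guessed from the file extension, and the 'json' format name passed below is an assumption about what the registry accepts:

reader = create_reader('data.csv')                       # factory guessed from the .csv extension
json_reader = create_reader('dump.txt', format='json')   # extension overridden by an explicit format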
oemof/oemof.db
oemof/db/config.py
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/config.py#L113-L125
def init(FILE): """ Read config file :param FILE: Absolute path to config file (incl. filename) :type FILE: str """ try: cfg.read(FILE) global _loaded _loaded = True except: file_not_found_message(FILE)
[ "def", "init", "(", "FILE", ")", ":", "try", ":", "cfg", ".", "read", "(", "FILE", ")", "global", "_loaded", "_loaded", "=", "True", "except", ":", "file_not_found_message", "(", "FILE", ")" ]
Read config file :param FILE: Absolute path to config file (incl. filename) :type FILE: str
[ "Read", "config", "file" ]
python
train
19.307692
ranaroussi/qtpylib
qtpylib/tools.py
https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/tools.py#L447-L452
def after_third_friday(day=None): """ check if day is after month's 3rd friday """ day = day if day is not None else datetime.datetime.now() now = day.replace(day=1, hour=16, minute=0, second=0, microsecond=0) now += relativedelta.relativedelta(weeks=2, weekday=relativedelta.FR) return day > now
[ "def", "after_third_friday", "(", "day", "=", "None", ")", ":", "day", "=", "day", "if", "day", "is", "not", "None", "else", "datetime", ".", "datetime", ".", "now", "(", ")", "now", "=", "day", ".", "replace", "(", "day", "=", "1", ",", "hour", "=", "16", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "now", "+=", "relativedelta", ".", "relativedelta", "(", "weeks", "=", "2", ",", "weekday", "=", "relativedelta", ".", "FR", ")", "return", "day", ">", "now" ]
check if day is after month's 3rd friday
[ "check", "if", "day", "is", "after", "month", "s", "3rd", "friday" ]
python
train
51.833333
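A worked example of the cutoff above: for March 2019 the threshold is Friday the 15th at 16:00 (that month's Fridays fall on the 1st, 8th, and 15th):

import datetime

after_third_friday(datetime.datetime(2019, 3, 14))  # False: before the third Friday
after_third_friday(datetime.datetime(2019, 3, 18))  # True: past the 15th, 16:00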
ray-project/ray
python/ray/experimental/tf_utils.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/tf_utils.py#L159-L169
def get_weights(self): """Returns a dictionary containing the weights of the network. Returns: Dictionary mapping variable names to their weights. """ self._check_sess() return { k: v.eval(session=self.sess) for k, v in self.variables.items() }
[ "def", "get_weights", "(", "self", ")", ":", "self", ".", "_check_sess", "(", ")", "return", "{", "k", ":", "v", ".", "eval", "(", "session", "=", "self", ".", "sess", ")", "for", "k", ",", "v", "in", "self", ".", "variables", ".", "items", "(", ")", "}" ]
Returns a dictionary containing the weights of the network. Returns: Dictionary mapping variable names to their weights.
[ "Returns", "a", "dictionary", "containing", "the", "weights", "of", "the", "network", "." ]
python
train
29
has2k1/plotnine
plotnine/utils.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/utils.py#L342-L374
def join_keys(x, y, by=None): """ Join keys. Given two data frames, create a unique key for each row. Parameters ----------- x : dataframe y : dataframe by : list-like Column names to join by Returns ------- out : dict Dictionary with keys x and y. The values of both keys are arrays with integer elements. Identical rows in x and y dataframes would have the same key in the output. The key elements start at 1. """ if by is None: by = slice(None, None, None) if isinstance(by, tuple): by = list(by) joint = x[by].append(y[by], ignore_index=True) keys = ninteraction(joint, drop=True) keys = np.asarray(keys) nx, ny = len(x), len(y) return {'x': keys[np.arange(nx)], 'y': keys[nx + np.arange(ny)]}
[ "def", "join_keys", "(", "x", ",", "y", ",", "by", "=", "None", ")", ":", "if", "by", "is", "None", ":", "by", "=", "slice", "(", "None", ",", "None", ",", "None", ")", "if", "isinstance", "(", "by", ",", "tuple", ")", ":", "by", "=", "list", "(", "by", ")", "joint", "=", "x", "[", "by", "]", ".", "append", "(", "y", "[", "by", "]", ",", "ignore_index", "=", "True", ")", "keys", "=", "ninteraction", "(", "joint", ",", "drop", "=", "True", ")", "keys", "=", "np", ".", "asarray", "(", "keys", ")", "nx", ",", "ny", "=", "len", "(", "x", ")", ",", "len", "(", "y", ")", "return", "{", "'x'", ":", "keys", "[", "np", ".", "arange", "(", "nx", ")", "]", ",", "'y'", ":", "keys", "[", "nx", "+", "np", ".", "arange", "(", "ny", ")", "]", "}" ]
Join keys. Given two data frames, create a unique key for each row. Parameters ----------- x : dataframe y : dataframe by : list-like Column names to join by Returns ------- out : dict Dictionary with keys x and y. The values of both keys are arrays with integer elements. Identical rows in x and y dataframes would have the same key in the output. The key elements start at 1.
[ "Join", "keys", "." ]
python
train
24.666667
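An illustrative sketch (the exact integers depend on ninteraction's ordering, so treat them as an assumption): rows that are equal across the two frames receive the same key:

import pandas as pd

x = pd.DataFrame({'a': [1, 2]})
y = pd.DataFrame({'a': [2, 3]})
keys = join_keys(x, y, by=['a'])
# something like keys['x'] -> array([1, 2]) and keys['y'] -> array([2, 3]);
# the shared row a=2 maps to the same key in both frames.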
frawau/aiolifx
aiolifx/aiolifx.py
https://github.com/frawau/aiolifx/blob/9bd8c5e6d291f4c79314989402f7e2c6476d5851/aiolifx/aiolifx.py#L1168-L1222
def datagram_received(self, data, addr): """Method run when data is received from the devices This method will unpack the data according to the LIFX protocol. If a new device is found, the Light device will be created and started aa a DatagramProtocol and will be registered with the parent. :param data: raw data :type data: bytestring :param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6 :type addr: tuple """ response = unpack_lifx_message(data) response.ip_addr = addr[0] mac_addr = response.target_addr if mac_addr == BROADCAST_MAC: return if type(response) == StateService and response.service == 1: # only look for UDP services # discovered remote_port = response.port elif type(response) == LightState: # looks like the lights are volunteering LigthState after booting remote_port = UDP_BROADCAST_PORT else: return if self.ipv6prefix: family = socket.AF_INET6 remote_ip = mac_to_ipv6_linklocal(mac_addr, self.ipv6prefix) else: family = socket.AF_INET remote_ip = response.ip_addr if mac_addr in self.lights: # rediscovered light = self.lights[mac_addr] # nothing to do if light.registered: return light.cleanup() light.ip_addr = remote_ip light.port = remote_port else: # newly discovered light = Light(self.loop, mac_addr, remote_ip, remote_port, parent=self) self.lights[mac_addr] = light coro = self.loop.create_datagram_endpoint( lambda: light, family=family, remote_addr=(remote_ip, remote_port)) light.task = self.loop.create_task(coro)
[ "def", "datagram_received", "(", "self", ",", "data", ",", "addr", ")", ":", "response", "=", "unpack_lifx_message", "(", "data", ")", "response", ".", "ip_addr", "=", "addr", "[", "0", "]", "mac_addr", "=", "response", ".", "target_addr", "if", "mac_addr", "==", "BROADCAST_MAC", ":", "return", "if", "type", "(", "response", ")", "==", "StateService", "and", "response", ".", "service", "==", "1", ":", "# only look for UDP services", "# discovered", "remote_port", "=", "response", ".", "port", "elif", "type", "(", "response", ")", "==", "LightState", ":", "# looks like the lights are volunteering LigthState after booting", "remote_port", "=", "UDP_BROADCAST_PORT", "else", ":", "return", "if", "self", ".", "ipv6prefix", ":", "family", "=", "socket", ".", "AF_INET6", "remote_ip", "=", "mac_to_ipv6_linklocal", "(", "mac_addr", ",", "self", ".", "ipv6prefix", ")", "else", ":", "family", "=", "socket", ".", "AF_INET", "remote_ip", "=", "response", ".", "ip_addr", "if", "mac_addr", "in", "self", ".", "lights", ":", "# rediscovered", "light", "=", "self", ".", "lights", "[", "mac_addr", "]", "# nothing to do", "if", "light", ".", "registered", ":", "return", "light", ".", "cleanup", "(", ")", "light", ".", "ip_addr", "=", "remote_ip", "light", ".", "port", "=", "remote_port", "else", ":", "# newly discovered", "light", "=", "Light", "(", "self", ".", "loop", ",", "mac_addr", ",", "remote_ip", ",", "remote_port", ",", "parent", "=", "self", ")", "self", ".", "lights", "[", "mac_addr", "]", "=", "light", "coro", "=", "self", ".", "loop", ".", "create_datagram_endpoint", "(", "lambda", ":", "light", ",", "family", "=", "family", ",", "remote_addr", "=", "(", "remote_ip", ",", "remote_port", ")", ")", "light", ".", "task", "=", "self", ".", "loop", ".", "create_task", "(", "coro", ")" ]
Method run when data is received from the devices

This method will unpack the data according to the LIFX protocol.
If a new device is found, the Light device will be created and started
as a DatagramProtocol and will be registered with the parent.

:param data: raw data
:type data: bytestring
:param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6
:type addr: tuple
[ "Method", "run", "when", "data", "is", "received", "from", "the", "devices" ]
python
train
34.163636
joyent/python-manta
manta/cmdln.py
https://github.com/joyent/python-manta/blob/f68ef142bdbac058c981e3b28e18d77612f5b7c6/manta/cmdln.py#L851-L879
def _get_canonical_map(self): """Return a mapping of available command names and aliases to their canonical command name. """ cacheattr = "_token2canonical" if not hasattr(self, cacheattr): # Get the list of commands and their aliases, if any. token2canonical = {} cmd2funcname = {} # use a dict to strip duplicates for attr in self.get_names(): if attr.startswith("do_"): cmdname = attr[3:] elif attr.startswith("_do_"): cmdname = attr[4:] else: continue cmd2funcname[cmdname] = attr token2canonical[cmdname] = cmdname for cmdname, funcname in cmd2funcname.items(): # add aliases func = getattr(self, funcname) aliases = getattr(func, "aliases", []) for alias in aliases: if alias in cmd2funcname: import warnings warnings.warn("'%s' alias for '%s' command conflicts " "with '%s' handler" % (alias, cmdname, cmd2funcname[alias])) continue token2canonical[alias] = cmdname setattr(self, cacheattr, token2canonical) return getattr(self, cacheattr)
[ "def", "_get_canonical_map", "(", "self", ")", ":", "cacheattr", "=", "\"_token2canonical\"", "if", "not", "hasattr", "(", "self", ",", "cacheattr", ")", ":", "# Get the list of commands and their aliases, if any.", "token2canonical", "=", "{", "}", "cmd2funcname", "=", "{", "}", "# use a dict to strip duplicates", "for", "attr", "in", "self", ".", "get_names", "(", ")", ":", "if", "attr", ".", "startswith", "(", "\"do_\"", ")", ":", "cmdname", "=", "attr", "[", "3", ":", "]", "elif", "attr", ".", "startswith", "(", "\"_do_\"", ")", ":", "cmdname", "=", "attr", "[", "4", ":", "]", "else", ":", "continue", "cmd2funcname", "[", "cmdname", "]", "=", "attr", "token2canonical", "[", "cmdname", "]", "=", "cmdname", "for", "cmdname", ",", "funcname", "in", "cmd2funcname", ".", "items", "(", ")", ":", "# add aliases", "func", "=", "getattr", "(", "self", ",", "funcname", ")", "aliases", "=", "getattr", "(", "func", ",", "\"aliases\"", ",", "[", "]", ")", "for", "alias", "in", "aliases", ":", "if", "alias", "in", "cmd2funcname", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"'%s' alias for '%s' command conflicts \"", "\"with '%s' handler\"", "%", "(", "alias", ",", "cmdname", ",", "cmd2funcname", "[", "alias", "]", ")", ")", "continue", "token2canonical", "[", "alias", "]", "=", "cmdname", "setattr", "(", "self", ",", "cacheattr", ",", "token2canonical", ")", "return", "getattr", "(", "self", ",", "cacheattr", ")" ]
Return a mapping of available command names and aliases to their canonical command name.
[ "Return", "a", "mapping", "of", "available", "command", "names", "and", "aliases", "to", "their", "canonical", "command", "name", "." ]
python
train
47.448276
pypa/setuptools
setuptools/command/bdist_egg.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/bdist_egg.py#L442-L451
def iter_symbols(code): """Yield names and strings used by `code` and its nested code objects""" for name in code.co_names: yield name for const in code.co_consts: if isinstance(const, six.string_types): yield const elif isinstance(const, CodeType): for name in iter_symbols(const): yield name
[ "def", "iter_symbols", "(", "code", ")", ":", "for", "name", "in", "code", ".", "co_names", ":", "yield", "name", "for", "const", "in", "code", ".", "co_consts", ":", "if", "isinstance", "(", "const", ",", "six", ".", "string_types", ")", ":", "yield", "const", "elif", "isinstance", "(", "const", ",", "CodeType", ")", ":", "for", "name", "in", "iter_symbols", "(", "const", ")", ":", "yield", "name" ]
Yield names and strings used by `code` and its nested code objects
[ "Yield", "names", "and", "strings", "used", "by", "code", "and", "its", "nested", "code", "objects" ]
python
train
36
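A small demonstration (not from setuptools itself) that compiles a snippet and walks its names and string constants:

code = compile("import os\npath = os.path.join('a', 'b')", '<demo>', 'exec')
sorted(set(iter_symbols(code)))
# ['a', 'b', 'join', 'os', 'path'] (exact contents can vary across Python versions)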
brocade/pynos
pynos/versions/base/interface.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L3008-L3021
def get_port_chann_detail_request(last_aggregator_id): """ Creates a new Netconf request based on the last received aggregator id when the hasMore flag is true """ port_channel_ns = 'urn:brocade.com:mgmt:brocade-lag' request_port_channel = ET.Element('get-port-channel-detail', xmlns=port_channel_ns) if last_aggregator_id != '': last_received_port_chann_el = ET.SubElement(request_port_channel, "last-aggregator-id") last_received_port_chann_el.text = last_aggregator_id return request_port_channel
[ "def", "get_port_chann_detail_request", "(", "last_aggregator_id", ")", ":", "port_channel_ns", "=", "'urn:brocade.com:mgmt:brocade-lag'", "request_port_channel", "=", "ET", ".", "Element", "(", "'get-port-channel-detail'", ",", "xmlns", "=", "port_channel_ns", ")", "if", "last_aggregator_id", "!=", "''", ":", "last_received_port_chann_el", "=", "ET", ".", "SubElement", "(", "request_port_channel", ",", "\"last-aggregator-id\"", ")", "last_received_port_chann_el", ".", "text", "=", "last_aggregator_id", "return", "request_port_channel" ]
Creates a new Netconf request based on the last received aggregator id when the hasMore flag is true
[ "Creates", "a", "new", "Netconf", "request", "based", "on", "the", "last", "received", "aggregator", "id", "when", "the", "hasMore", "flag", "is", "true" ]
python
train
47.571429
materialsproject/pymatgen
pymatgen/analysis/quasiharmonic.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/quasiharmonic.py#L291-L312
def thermal_conductivity(self, temperature, volume): """ Eq(17) in 10.1103/PhysRevB.90.174107 Args: temperature (float): temperature in K volume (float): in Ang^3 Returns: float: thermal conductivity in W/K/m """ gamma = self.gruneisen_parameter(temperature, volume) theta_d = self.debye_temperature(volume) # K theta_a = theta_d * self.natoms**(-1./3.) # K prefactor = (0.849 * 3 * 4**(1./3.)) / (20. * np.pi**3) # kg/K^3/s^3 prefactor = prefactor * (self.kb/self.hbar)**3 * self.avg_mass kappa = prefactor / (gamma**2 - 0.514 * gamma + 0.228) # kg/K/s^3 * Ang = (kg m/s^2)/(Ks)*1e-10 # = N/(Ks)*1e-10 = Nm/(Kms)*1e-10 = W/K/m*1e-10 kappa = kappa * theta_a**2 * volume**(1./3.) * 1e-10 return kappa
[ "def", "thermal_conductivity", "(", "self", ",", "temperature", ",", "volume", ")", ":", "gamma", "=", "self", ".", "gruneisen_parameter", "(", "temperature", ",", "volume", ")", "theta_d", "=", "self", ".", "debye_temperature", "(", "volume", ")", "# K", "theta_a", "=", "theta_d", "*", "self", ".", "natoms", "**", "(", "-", "1.", "/", "3.", ")", "# K", "prefactor", "=", "(", "0.849", "*", "3", "*", "4", "**", "(", "1.", "/", "3.", ")", ")", "/", "(", "20.", "*", "np", ".", "pi", "**", "3", ")", "# kg/K^3/s^3", "prefactor", "=", "prefactor", "*", "(", "self", ".", "kb", "/", "self", ".", "hbar", ")", "**", "3", "*", "self", ".", "avg_mass", "kappa", "=", "prefactor", "/", "(", "gamma", "**", "2", "-", "0.514", "*", "gamma", "+", "0.228", ")", "# kg/K/s^3 * Ang = (kg m/s^2)/(Ks)*1e-10", "# = N/(Ks)*1e-10 = Nm/(Kms)*1e-10 = W/K/m*1e-10", "kappa", "=", "kappa", "*", "theta_a", "**", "2", "*", "volume", "**", "(", "1.", "/", "3.", ")", "*", "1e-10", "return", "kappa" ]
Eq(17) in 10.1103/PhysRevB.90.174107 Args: temperature (float): temperature in K volume (float): in Ang^3 Returns: float: thermal conductivity in W/K/m
[ "Eq", "(", "17", ")", "in", "10", ".", "1103", "/", "PhysRevB", ".", "90", ".", "174107" ]
python
train
38.454545
google/pyringe
pyringe/payload/gdb_service.py
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/gdb_service.py#L313-L326
def _UnpackGdbVal(self, gdb_value): """Unpacks gdb.Value objects and returns the best-matched python object.""" val_type = gdb_value.type.code if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM: return int(gdb_value) if val_type == gdb.TYPE_CODE_VOID: return None if val_type == gdb.TYPE_CODE_PTR: return long(gdb_value) if val_type == gdb.TYPE_CODE_ARRAY: # This is probably a string return str(gdb_value) # I'm out of ideas, let's return it as a string return str(gdb_value)
[ "def", "_UnpackGdbVal", "(", "self", ",", "gdb_value", ")", ":", "val_type", "=", "gdb_value", ".", "type", ".", "code", "if", "val_type", "==", "gdb", ".", "TYPE_CODE_INT", "or", "val_type", "==", "gdb", ".", "TYPE_CODE_ENUM", ":", "return", "int", "(", "gdb_value", ")", "if", "val_type", "==", "gdb", ".", "TYPE_CODE_VOID", ":", "return", "None", "if", "val_type", "==", "gdb", ".", "TYPE_CODE_PTR", ":", "return", "long", "(", "gdb_value", ")", "if", "val_type", "==", "gdb", ".", "TYPE_CODE_ARRAY", ":", "# This is probably a string", "return", "str", "(", "gdb_value", ")", "# I'm out of ideas, let's return it as a string", "return", "str", "(", "gdb_value", ")" ]
Unpacks gdb.Value objects and returns the best-matched python object.
[ "Unpacks", "gdb", ".", "Value", "objects", "and", "returns", "the", "best", "-", "matched", "python", "object", "." ]
python
train
38.642857
getpelican/pelican-plugins
liquid_tags/pygalcharts.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/liquid_tags/pygalcharts.py#L149-L160
def pygal_parser(preprocessor, tag, markup): """ Simple pygal parser """ # Find JSON payload data = loads(markup) if tag == 'pygal' and data is not None: # Run generation of chart output = run_pygal(data) # Return embedded SVG image return '<div class="pygal" style="text-align: center;"><embed type="image/svg+xml" src=%s style="max-width:1000px"/></div>' % output else: raise ValueError('Error processing input. \nExpected syntax: {0}'.format(SYNTAX))
[ "def", "pygal_parser", "(", "preprocessor", ",", "tag", ",", "markup", ")", ":", "# Find JSON payload", "data", "=", "loads", "(", "markup", ")", "if", "tag", "==", "'pygal'", "and", "data", "is", "not", "None", ":", "# Run generation of chart", "output", "=", "run_pygal", "(", "data", ")", "# Return embedded SVG image", "return", "'<div class=\"pygal\" style=\"text-align: center;\"><embed type=\"image/svg+xml\" src=%s style=\"max-width:1000px\"/></div>'", "%", "output", "else", ":", "raise", "ValueError", "(", "'Error processing input. \\nExpected syntax: {0}'", ".", "format", "(", "SYNTAX", ")", ")" ]
Simple pygal parser
[ "Simple", "pygal", "parser" ]
python
train
41.916667
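A hedged call sketch; the JSON keys are assumptions, since the accepted schema is defined by the plugin's run_pygal() helper rather than shown here:

markup = '{"type": "bar", "title": "Demo", "values": [1, 2, 3]}'  # hypothetical payload
html = pygal_parser(None, 'pygal', markup)  # the preprocessor argument is unused here
# returns the '<div class="pygal" ...><embed type="image/svg+xml" .../></div>' wrapper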
kiwiz/gkeepapi
gkeepapi/node.py
https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/node.py#L1140-L1151
def remove(self, node, dirty=True): """Remove the given child node. Args: node (gkeepapi.Node): Node to remove. dirty (bool): Whether this node should be marked dirty. """ if node.id in self._children: self._children[node.id].parent = None del self._children[node.id] if dirty: self.touch()
[ "def", "remove", "(", "self", ",", "node", ",", "dirty", "=", "True", ")", ":", "if", "node", ".", "id", "in", "self", ".", "_children", ":", "self", ".", "_children", "[", "node", ".", "id", "]", ".", "parent", "=", "None", "del", "self", ".", "_children", "[", "node", ".", "id", "]", "if", "dirty", ":", "self", ".", "touch", "(", ")" ]
Remove the given child node. Args: node (gkeepapi.Node): Node to remove. dirty (bool): Whether this node should be marked dirty.
[ "Remove", "the", "given", "child", "node", "." ]
python
train
31.666667
saltstack/salt
salt/template.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/template.py#L142-L150
def compile_template_str(template, renderers, default, blacklist, whitelist): ''' Take template as a string and return the high data structure derived from the template. ''' fn_ = salt.utils.files.mkstemp() with salt.utils.files.fopen(fn_, 'wb') as ofile: ofile.write(SLS_ENCODER(template)[0]) return compile_template(fn_, renderers, default, blacklist, whitelist)
[ "def", "compile_template_str", "(", "template", ",", "renderers", ",", "default", ",", "blacklist", ",", "whitelist", ")", ":", "fn_", "=", "salt", ".", "utils", ".", "files", ".", "mkstemp", "(", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "fn_", ",", "'wb'", ")", "as", "ofile", ":", "ofile", ".", "write", "(", "SLS_ENCODER", "(", "template", ")", "[", "0", "]", ")", "return", "compile_template", "(", "fn_", ",", "renderers", ",", "default", ",", "blacklist", ",", "whitelist", ")" ]
Take template as a string and return the high data structure derived from the template.
[ "Take", "template", "as", "a", "string", "and", "return", "the", "high", "data", "structure", "derived", "from", "the", "template", "." ]
python
train
43.555556
dpkp/kafka-python
kafka/consumer/base.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/base.py#L172-L182
def _auto_commit(self): """ Check if we have to commit based on number of messages and commit """ # Check if we are supposed to do an auto-commit if not self.auto_commit or self.auto_commit_every_n is None: return if self.count_since_commit >= self.auto_commit_every_n: self.commit()
[ "def", "_auto_commit", "(", "self", ")", ":", "# Check if we are supposed to do an auto-commit", "if", "not", "self", ".", "auto_commit", "or", "self", ".", "auto_commit_every_n", "is", "None", ":", "return", "if", "self", ".", "count_since_commit", ">=", "self", ".", "auto_commit_every_n", ":", "self", ".", "commit", "(", ")" ]
Check if we have to commit based on the number of messages, and commit if so.
[ "Check", "if", "we", "have", "to", "commit", "based", "on", "number", "of", "messages", "and", "commit" ]
python
train
31.545455
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py#L46-L60
def delete_process_behavior(self, process_id, behavior_ref_name): """DeleteProcessBehavior. [Preview API] Removes a behavior in the process. :param str process_id: The ID of the process :param str behavior_ref_name: The reference name of the behavior """ route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if behavior_ref_name is not None: route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str') self._send(http_method='DELETE', location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e', version='5.0-preview.2', route_values=route_values)
[ "def", "delete_process_behavior", "(", "self", ",", "process_id", ",", "behavior_ref_name", ")", ":", "route_values", "=", "{", "}", "if", "process_id", "is", "not", "None", ":", "route_values", "[", "'processId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'process_id'", ",", "process_id", ",", "'str'", ")", "if", "behavior_ref_name", "is", "not", "None", ":", "route_values", "[", "'behaviorRefName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'behavior_ref_name'", ",", "behavior_ref_name", ",", "'str'", ")", "self", ".", "_send", "(", "http_method", "=", "'DELETE'", ",", "location_id", "=", "'d1800200-f184-4e75-a5f2-ad0b04b4373e'", ",", "version", "=", "'5.0-preview.2'", ",", "route_values", "=", "route_values", ")" ]
DeleteProcessBehavior. [Preview API] Removes a behavior in the process. :param str process_id: The ID of the process :param str behavior_ref_name: The reference name of the behavior
[ "DeleteProcessBehavior", ".", "[", "Preview", "API", "]", "Removes", "a", "behavior", "in", "the", "process", ".", ":", "param", "str", "process_id", ":", "The", "ID", "of", "the", "process", ":", "param", "str", "behavior_ref_name", ":", "The", "reference", "name", "of", "the", "behavior" ]
python
train
52.733333
cuzzo/iw_parse
iw_parse.py
https://github.com/cuzzo/iw_parse/blob/84c287dc6cfceb04ccbc0a8995f8a87323356ee5/iw_parse.py#L212-L225
def match(line, keyword): """ If the first part of line (modulo blanks) matches keyword, returns the end of that line. Otherwise checks if keyword is anywhere in the line and returns that section, else returns None""" line = line.lstrip() length = len(keyword) if line[:length] == keyword: return line[length:] else: if keyword in line: return line[line.index(keyword):] else: return None
[ "def", "match", "(", "line", ",", "keyword", ")", ":", "line", "=", "line", ".", "lstrip", "(", ")", "length", "=", "len", "(", "keyword", ")", "if", "line", "[", ":", "length", "]", "==", "keyword", ":", "return", "line", "[", "length", ":", "]", "else", ":", "if", "keyword", "in", "line", ":", "return", "line", "[", "line", ".", "index", "(", "keyword", ")", ":", "]", "else", ":", "return", "None" ]
If the first part of line (modulo blanks) matches keyword, returns the end of that line. Otherwise checks if keyword is anywhere in the line and returns that section, else returns None
[ "If", "the", "first", "part", "of", "line", "(", "modulo", "blanks", ")", "matches", "keyword", "returns", "the", "end", "of", "that", "line", ".", "Otherwise", "checks", "if", "keyword", "is", "anywhere", "in", "the", "line", "and", "returns", "that", "section", "else", "returns", "None" ]
python
train
32.285714
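A behavior sketch following the two branches above: a prefix match returns the rest of the line, a mid-line hit returns the line from the keyword onward, and a miss returns None:

match("    Quality=70/70", "Quality=")    # -> "70/70" (prefix match after lstrip)
match("Signal level=-40 dBm", "level=")   # -> "level=-40 dBm" (keyword found mid-line)
match('ESSID:"home"', "Channel:")         # -> None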
nooperpudd/weibopy
weibopy/auth.py
https://github.com/nooperpudd/weibopy/blob/61f3fb0502c1f07a591388aaa7526e74c63eaeb1/weibopy/auth.py#L125-L166
def auth_access(self, auth_code): """ verify the fist authorization response url code response data 返回值字段 字段类型 字段说明 access_token string 用户授权的唯一票据,用于调用微博的开放接口,同时也是第三方应用验证微博用户登录的唯一票据, 第三方应用应该用该票据和自己应用内的用户建立唯一影射关系,来识别登录状态,不能使用本返回值里的UID 字段来做登录识别。 expires_in string access_token的生命周期,单位是秒数。 remind_in string access_token的生命周期(该参数即将废弃,开发者请使用expires_in)。 uid string 授权用户的UID,本字段只是为了方便开发者,减少一次user/show接口调用而返回的,第三方应用不能用此字段作为用户 登录状态的识别,只有access_token才是用户授权的唯一票据。 :param auth_code: authorize_url response code :return: normal: { "access_token": "ACCESS_TOKEN", "expires_in": 1234, "remind_in":"798114", "uid":"12341234" } mobile: { "access_token": "SlAV32hkKG", "remind_in": 3600, "expires_in": 3600 "refresh_token": "QXBK19xm62" } """ data = { 'client_id': self.client_id, 'client_secret': self.client_secret, 'grant_type': 'authorization_code', 'code': auth_code, 'redirect_uri': self.redirect_url } return self.request("post", "access_token", data=data)
[ "def", "auth_access", "(", "self", ",", "auth_code", ")", ":", "data", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'client_secret'", ":", "self", ".", "client_secret", ",", "'grant_type'", ":", "'authorization_code'", ",", "'code'", ":", "auth_code", ",", "'redirect_uri'", ":", "self", ".", "redirect_url", "}", "return", "self", ".", "request", "(", "\"post\"", ",", "\"access_token\"", ",", "data", "=", "data", ")" ]
verify the first authorization response url code

response data

Field           Type    Description
access_token    string  The unique ticket of the user's authorization. It is used to call Weibo's
                        open APIs and is also the only credential a third-party app has for verifying
                        a Weibo user's login. Third-party apps should map this ticket to users inside
                        their own app to track login state; the UID field of this response must not
                        be used for login identification.
expires_in      string  Lifetime of the access_token, in seconds.
remind_in       string  Lifetime of the access_token (this parameter is about to be deprecated;
                        developers should use expires_in instead).
uid             string  UID of the authorized user. This field is returned only for the developer's
                        convenience, to save one call to the user/show API. Third-party apps must not
                        use it to identify a user's login state; only the access_token is the unique
                        ticket of the user's authorization.

:param auth_code: authorize_url response code
:return:
normal:
    {
        "access_token": "ACCESS_TOKEN",
        "expires_in": 1234,
        "remind_in": "798114",
        "uid": "12341234"
    }
mobile:
    {
        "access_token": "SlAV32hkKG",
        "remind_in": 3600,
        "expires_in": 3600,
        "refresh_token": "QXBK19xm62"
    }
[ "verify", "the", "fist", "authorization", "response", "url", "code" ]
python
train
33.428571
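A hedged OAuth sketch; the handler class name and constructor signature are assumptions inferred from the attributes the method reads (client_id, client_secret, redirect_url), and all credentials are placeholders:

auth = OAuthHandler(client_id='APP_KEY',  # hypothetical class and signature
                    client_secret='APP_SECRET',
                    redirect_url='https://example.com/callback')
# send the user to the authorize URL, read `code` from the redirect, then:
token = auth.auth_access(auth_code='CODE_FROM_REDIRECT')
token['access_token'], token['uid']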
SmokinCaterpillar/pypet
pypet/naturalnaming.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/naturalnaming.py#L2647-L2658
def f_get_parent(self): """Returns the parent of the node. Raises a TypeError if current node is root. """ if self.v_is_root: raise TypeError('Root does not have a parent') elif self.v_location == '': return self.v_root else: return self.v_root.f_get(self.v_location, fast_access=False, shortcuts=False)
[ "def", "f_get_parent", "(", "self", ")", ":", "if", "self", ".", "v_is_root", ":", "raise", "TypeError", "(", "'Root does not have a parent'", ")", "elif", "self", ".", "v_location", "==", "''", ":", "return", "self", ".", "v_root", "else", ":", "return", "self", ".", "v_root", ".", "f_get", "(", "self", ".", "v_location", ",", "fast_access", "=", "False", ",", "shortcuts", "=", "False", ")" ]
Returns the parent of the node. Raises a TypeError if current node is root.
[ "Returns", "the", "parent", "of", "the", "node", "." ]
python
test
31.5
CZ-NIC/python-rt
rt.py
https://github.com/CZ-NIC/python-rt/blob/e7a9f555e136708aec3317f857045145a2271e16/rt.py#L1391-L1405
def merge_ticket(self, ticket_id, into_id): """ Merge ticket into another (undocumented API feature). :param ticket_id: ID of ticket to be merged :param into: ID of destination ticket :returns: ``True`` Operation was successful ``False`` Either origin or destination ticket does not exist or user does not have ModifyTicket permission. """ msg = self.__request('ticket/{}/merge/{}'.format(str(ticket_id), str(into_id))) state = msg.split('\n')[2] return self.RE_PATTERNS['merge_successful_pattern'].match(state) is not None
[ "def", "merge_ticket", "(", "self", ",", "ticket_id", ",", "into_id", ")", ":", "msg", "=", "self", ".", "__request", "(", "'ticket/{}/merge/{}'", ".", "format", "(", "str", "(", "ticket_id", ")", ",", "str", "(", "into_id", ")", ")", ")", "state", "=", "msg", ".", "split", "(", "'\\n'", ")", "[", "2", "]", "return", "self", ".", "RE_PATTERNS", "[", "'merge_successful_pattern'", "]", ".", "match", "(", "state", ")", "is", "not", "None" ]
Merge ticket into another (undocumented API feature).

:param ticket_id: ID of ticket to be merged
:param into_id: ID of destination ticket
:returns: ``True``
              Operation was successful
          ``False``
              Either origin or destination ticket does not
              exist or user does not have ModifyTicket permission.
[ "Merge", "ticket", "into", "another", "(", "undocumented", "API", "feature", ")", "." ]
python
train
47.666667
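A usage sketch; the tracker URL and credentials are placeholders:

tracker = Rt('http://localhost/REST/1.0/', 'user', 'password')
tracker.login()
tracker.merge_ticket(1023, 1020)  # True if ticket 1023 was merged into 1020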
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1100-L1112
def clear_created_date(self): """Removes the created date. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.assessment.AssessmentOfferedForm.clear_start_time_template if (self.get_created_date_metadata().is_read_only() or self.get_created_date_metadata().is_required()): raise errors.NoAccess() self._my_map['createdDate'] = self._created_date_default
[ "def", "clear_created_date", "(", "self", ")", ":", "# Implemented from template for osid.assessment.AssessmentOfferedForm.clear_start_time_template", "if", "(", "self", ".", "get_created_date_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_created_date_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "self", ".", "_my_map", "[", "'createdDate'", "]", "=", "self", ".", "_created_date_default" ]
Removes the created date. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Removes", "the", "created", "date", "." ]
python
train
45.461538
agusmakmun/djipsum
djipsum/faker.py
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/faker.py#L90-L110
def fake_chars_or_choice(self, field_name): """ Return fake chars or choice it if the `field_name` has choices. Then, returning random value from it. This specially for `CharField`. Usage: faker.fake_chars_or_choice('field_name') Example for field: TYPE_CHOICES = ( ('project', 'I wanna to talk about project'), ('feedback', 'I want to report a bugs or give feedback'), ('hello', 'I just want to say hello') ) type = models.CharField(max_length=200, choices=TYPE_CHOICES) """ return self.djipsum_fields().randomCharField( self.model_class(), field_name=field_name )
[ "def", "fake_chars_or_choice", "(", "self", ",", "field_name", ")", ":", "return", "self", ".", "djipsum_fields", "(", ")", ".", "randomCharField", "(", "self", ".", "model_class", "(", ")", ",", "field_name", "=", "field_name", ")" ]
Return fake chars, or pick from the field's choices if the `field_name` has choices,
returning a random value from it. This is specifically for `CharField`.

Usage:
    faker.fake_chars_or_choice('field_name')

Example for field:
    TYPE_CHOICES = (
        ('project', 'I wanna to talk about project'),
        ('feedback', 'I want to report a bugs or give feedback'),
        ('hello', 'I just want to say hello')
    )
    type = models.CharField(max_length=200, choices=TYPE_CHOICES)
[ "Return", "fake", "chars", "or", "choice", "it", "if", "the", "field_name", "has", "choices", ".", "Then", "returning", "random", "value", "from", "it", ".", "This", "specially", "for", "CharField", "." ]
python
train
34.904762
opendatateam/udata
udata/models/owned.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/owned.py#L41-L68
def owned_pre_save(sender, document, **kwargs): ''' Owned mongoengine.pre_save signal handler Need to fetch original owner before the new one erase it. ''' if not isinstance(document, Owned): return changed_fields = getattr(document, '_changed_fields', []) if 'organization' in changed_fields: if document.owner: # Change from owner to organization document._previous_owner = document.owner document.owner = None else: # Change from org to another # Need to fetch previous value in base original = sender.objects.only('organization').get(pk=document.pk) document._previous_owner = original.organization elif 'owner' in changed_fields: if document.organization: # Change from organization to owner document._previous_owner = document.organization document.organization = None else: # Change from owner to another # Need to fetch previous value in base original = sender.objects.only('owner').get(pk=document.pk) document._previous_owner = original.owner
[ "def", "owned_pre_save", "(", "sender", ",", "document", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "document", ",", "Owned", ")", ":", "return", "changed_fields", "=", "getattr", "(", "document", ",", "'_changed_fields'", ",", "[", "]", ")", "if", "'organization'", "in", "changed_fields", ":", "if", "document", ".", "owner", ":", "# Change from owner to organization", "document", ".", "_previous_owner", "=", "document", ".", "owner", "document", ".", "owner", "=", "None", "else", ":", "# Change from org to another", "# Need to fetch previous value in base", "original", "=", "sender", ".", "objects", ".", "only", "(", "'organization'", ")", ".", "get", "(", "pk", "=", "document", ".", "pk", ")", "document", ".", "_previous_owner", "=", "original", ".", "organization", "elif", "'owner'", "in", "changed_fields", ":", "if", "document", ".", "organization", ":", "# Change from organization to owner", "document", ".", "_previous_owner", "=", "document", ".", "organization", "document", ".", "organization", "=", "None", "else", ":", "# Change from owner to another", "# Need to fetch previous value in base", "original", "=", "sender", ".", "objects", ".", "only", "(", "'owner'", ")", ".", "get", "(", "pk", "=", "document", ".", "pk", ")", "document", ".", "_previous_owner", "=", "original", ".", "owner" ]
Owned mongoengine.pre_save signal handler.
Needs to fetch the original owner before the new one erases it.
[ "Owned", "mongoengine", ".", "pre_save", "signal", "handler", "Need", "to", "fetch", "original", "owner", "before", "the", "new", "one", "erase", "it", "." ]
python
train
41.607143
bukun/TorCMS
torcms/handlers/user_info_list_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/user_info_list_handler.py#L36-L45
def list_app(self): ''' List the apps. ''' kwd = { 'pager': '', 'title': '' } self.render('user/info_list/list_app.html', kwd=kwd, userinfo=self.userinfo)
[ "def", "list_app", "(", "self", ")", ":", "kwd", "=", "{", "'pager'", ":", "''", ",", "'title'", ":", "''", "}", "self", ".", "render", "(", "'user/info_list/list_app.html'", ",", "kwd", "=", "kwd", ",", "userinfo", "=", "self", ".", "userinfo", ")" ]
List the apps.
[ "List", "the", "apps", "." ]
python
train
23.7
yunojuno/elasticsearch-django
elasticsearch_django/index.py
https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/index.py#L119-L140
def scan_index(index, model): """ Yield all documents of model type in an index. This function calls the elasticsearch.helpers.scan function, and yields all the documents in the index that match the doc_type produced by a specific Django model. Args: index: string, the name of the index to scan, must be a configured index as returned from settings.get_index_names. model: a Django model type, used to filter the the documents that are scanned. Yields each document of type model in index, one at a time. """ # see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html query = {"query": {"type": {"value": model._meta.model_name}}} client = get_client() for hit in helpers.scan(client, index=index, query=query): yield hit
[ "def", "scan_index", "(", "index", ",", "model", ")", ":", "# see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html", "query", "=", "{", "\"query\"", ":", "{", "\"type\"", ":", "{", "\"value\"", ":", "model", ".", "_meta", ".", "model_name", "}", "}", "}", "client", "=", "get_client", "(", ")", "for", "hit", "in", "helpers", ".", "scan", "(", "client", ",", "index", "=", "index", ",", "query", "=", "query", ")", ":", "yield", "hit" ]
Yield all documents of model type in an index.

This function calls the elasticsearch.helpers.scan function,
and yields all the documents in the index that match the doc_type
produced by a specific Django model.

Args:
    index: string, the name of the index to scan, must be a configured
        index as returned from settings.get_index_names.
    model: a Django model type, used to filter the documents that
        are scanned.

Yields each document of type model in index, one at a time.
[ "Yield", "all", "documents", "of", "model", "type", "in", "an", "index", "." ]
python
train
38.090909
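A minimal usage sketch; the 'blog' index name and the Post model are hypothetical:

from myapp.models import Post  # hypothetical Django model

for hit in scan_index('blog', Post):
    # helpers.scan yields raw hit dicts with _id and _source
    print(hit['_id'], hit['_source'].get('title'))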
bpython/curtsies
curtsies/formatstring.py
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/formatstring.py#L494-L503
def shared_atts(self): """Gets atts shared among all nonzero length component Chunk""" #TODO cache this, could get ugly for large FmtStrs atts = {} first = self.chunks[0] for att in sorted(first.atts): #TODO how to write this without the '???'? if all(fs.atts.get(att, '???') == first.atts[att] for fs in self.chunks if len(fs) > 0): atts[att] = first.atts[att] return atts
[ "def", "shared_atts", "(", "self", ")", ":", "#TODO cache this, could get ugly for large FmtStrs", "atts", "=", "{", "}", "first", "=", "self", ".", "chunks", "[", "0", "]", "for", "att", "in", "sorted", "(", "first", ".", "atts", ")", ":", "#TODO how to write this without the '???'?", "if", "all", "(", "fs", ".", "atts", ".", "get", "(", "att", ",", "'???'", ")", "==", "first", ".", "atts", "[", "att", "]", "for", "fs", "in", "self", ".", "chunks", "if", "len", "(", "fs", ")", ">", "0", ")", ":", "atts", "[", "att", "]", "=", "first", ".", "atts", "[", "att", "]", "return", "atts" ]
Gets atts shared among all nonzero length component Chunk
[ "Gets", "atts", "shared", "among", "all", "nonzero", "length", "component", "Chunk" ]
python
train
45.2
fermiPy/fermipy
fermipy/diffuse/fitting.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/fitting.py#L33-L56
def build_srcdict(gta, prop): """Build a dictionary that maps from source name to the value of a source property Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object prop : str The name of the property we are mapping Returns ------- odict : dict Dictionary that maps from source name to the value of the specified property """ o = {} for s in gta.roi.sources: o[s.name] = s[prop] return o
[ "def", "build_srcdict", "(", "gta", ",", "prop", ")", ":", "o", "=", "{", "}", "for", "s", "in", "gta", ".", "roi", ".", "sources", ":", "o", "[", "s", ".", "name", "]", "=", "s", "[", "prop", "]", "return", "o" ]
Build a dictionary that maps from source name to the value of a source property Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object prop : str The name of the property we are mapping Returns ------- odict : dict Dictionary that maps from source name to the value of the specified property
[ "Build", "a", "dictionary", "that", "maps", "from", "source", "name", "to", "the", "value", "of", "a", "source", "property" ]
python
train
19.5
niklasf/python-chess
chess/pgn.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/pgn.py#L710-L724
def parse_san(self, board: chess.Board, san: str) -> chess.Move: """ When the visitor is used by a parser, this is called to parse a move in standard algebraic notation. You can override the default implementation to work around specific quirks of your input format. """ # Replace zeros with correct castling notation. if san == "0-0": san = "O-O" elif san == "0-0-0": san = "O-O-O" return board.parse_san(san)
[ "def", "parse_san", "(", "self", ",", "board", ":", "chess", ".", "Board", ",", "san", ":", "str", ")", "->", "chess", ".", "Move", ":", "# Replace zeros with correct castling notation.", "if", "san", "==", "\"0-0\"", ":", "san", "=", "\"O-O\"", "elif", "san", "==", "\"0-0-0\"", ":", "san", "=", "\"O-O-O\"", "return", "board", ".", "parse_san", "(", "san", ")" ]
When the visitor is used by a parser, this is called to parse a move in standard algebraic notation. You can override the default implementation to work around specific quirks of your input format.
[ "When", "the", "visitor", "is", "used", "by", "a", "parser", "this", "is", "called", "to", "parse", "a", "move", "in", "standard", "algebraic", "notation", "." ]
python
train
33.466667
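The zero-to-O rewrite above matters because Board.parse_san expects letter-O castling notation; a short sketch:

import chess

board = chess.Board('r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 0 1')
board.parse_san('O-O')    # kingside castling parses
# board.parse_san('0-0') would raise ValueError, which the visitor's substitution avoids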
PyCQA/astroid
astroid/as_string.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/as_string.py#L351-L357
def visit_ifexp(self, node): """return an astroid.IfExp node as string""" return "%s if %s else %s" % ( self._precedence_parens(node, node.body, is_left=True), self._precedence_parens(node, node.test, is_left=True), self._precedence_parens(node, node.orelse, is_left=False), )
[ "def", "visit_ifexp", "(", "self", ",", "node", ")", ":", "return", "\"%s if %s else %s\"", "%", "(", "self", ".", "_precedence_parens", "(", "node", ",", "node", ".", "body", ",", "is_left", "=", "True", ")", ",", "self", ".", "_precedence_parens", "(", "node", ",", "node", ".", "test", ",", "is_left", "=", "True", ")", ",", "self", ".", "_precedence_parens", "(", "node", ",", "node", ".", "orelse", ",", "is_left", "=", "False", ")", ",", ")" ]
return an astroid.IfExp node as string
[ "return", "an", "astroid", ".", "IfExp", "node", "as", "string" ]
python
train
47.142857
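A round-trip sketch: parse a conditional expression with astroid and re-render it through as_string():

import astroid

node = astroid.extract_node('x = a if cond else b')
node.as_string()   # 'x = a if cond else b'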
boriel/zxbasic
zxbparser.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L1379-L1384
def p_if_then_part(p): """ if_then_part : IF expr then """ if is_number(p[2]): api.errmsg.warning_condition_is_always(p.lineno(1), bool(p[2].value)) p[0] = p[2]
[ "def", "p_if_then_part", "(", "p", ")", ":", "if", "is_number", "(", "p", "[", "2", "]", ")", ":", "api", ".", "errmsg", ".", "warning_condition_is_always", "(", "p", ".", "lineno", "(", "1", ")", ",", "bool", "(", "p", "[", "2", "]", ".", "value", ")", ")", "p", "[", "0", "]", "=", "p", "[", "2", "]" ]
if_then_part : IF expr then
[ "if_then_part", ":", "IF", "expr", "then" ]
python
train
29.333333
h2oai/h2o-3
py2/h2o_ray.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/py2/h2o_ray.py#L410-L443
def validate_model_parameters(self, algo, training_frame, parameters, timeoutSecs=60, **kwargs): ''' Check a dictionary of model builder parameters on the h2o cluster using the given algorithm and model parameters. ''' assert algo is not None, '"algo" parameter is null' # Allow this now: assert training_frame is not None, '"training_frame" parameter is null' assert parameters is not None, '"parameters" parameter is null' model_builders = self.model_builders(timeoutSecs=timeoutSecs) assert model_builders is not None, "/ModelBuilders REST call failed" assert algo in model_builders['model_builders'] builder = model_builders['model_builders'][algo] # TODO: test this assert, I don't think this is working. . . if training_frame is not None: frames = self.frames(key=training_frame) assert frames is not None, "/Frames/{0} REST call failed".format(training_frame) key_name = frames['frames'][0]['key']['name'] assert key_name==training_frame, \ "/Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, key_name, training_frame) parameters['training_frame'] = training_frame # TODO: add parameter existence checks # TODO: add parameter value validation # FIX! why ignoreH2oError here? result = self.do_json_request('/3/ModelBuilders.json/' + algo + "/parameters", cmd='post', timeout=timeoutSecs, postData=parameters, ignoreH2oError=True, noExtraErrorCheck=True) verboseprint("model parameters validation: " + repr(result)) return result
[ "def", "validate_model_parameters", "(", "self", ",", "algo", ",", "training_frame", ",", "parameters", ",", "timeoutSecs", "=", "60", ",", "*", "*", "kwargs", ")", ":", "assert", "algo", "is", "not", "None", ",", "'\"algo\" parameter is null'", "# Allow this now: assert training_frame is not None, '\"training_frame\" parameter is null'", "assert", "parameters", "is", "not", "None", ",", "'\"parameters\" parameter is null'", "model_builders", "=", "self", ".", "model_builders", "(", "timeoutSecs", "=", "timeoutSecs", ")", "assert", "model_builders", "is", "not", "None", ",", "\"/ModelBuilders REST call failed\"", "assert", "algo", "in", "model_builders", "[", "'model_builders'", "]", "builder", "=", "model_builders", "[", "'model_builders'", "]", "[", "algo", "]", "# TODO: test this assert, I don't think this is working. . .", "if", "training_frame", "is", "not", "None", ":", "frames", "=", "self", ".", "frames", "(", "key", "=", "training_frame", ")", "assert", "frames", "is", "not", "None", ",", "\"/Frames/{0} REST call failed\"", ".", "format", "(", "training_frame", ")", "key_name", "=", "frames", "[", "'frames'", "]", "[", "0", "]", "[", "'key'", "]", "[", "'name'", "]", "assert", "key_name", "==", "training_frame", ",", "\"/Frames/{0} returned Frame {1} rather than Frame {2}\"", ".", "format", "(", "training_frame", ",", "key_name", ",", "training_frame", ")", "parameters", "[", "'training_frame'", "]", "=", "training_frame", "# TODO: add parameter existence checks", "# TODO: add parameter value validation", "# FIX! why ignoreH2oError here?", "result", "=", "self", ".", "do_json_request", "(", "'/3/ModelBuilders.json/'", "+", "algo", "+", "\"/parameters\"", ",", "cmd", "=", "'post'", ",", "timeout", "=", "timeoutSecs", ",", "postData", "=", "parameters", ",", "ignoreH2oError", "=", "True", ",", "noExtraErrorCheck", "=", "True", ")", "verboseprint", "(", "\"model parameters validation: \"", "+", "repr", "(", "result", ")", ")", "return", "result" ]
Check a dictionary of model builder parameters on the h2o cluster using the given algorithm and model parameters.
[ "Check", "a", "dictionary", "of", "model", "builder", "parameters", "on", "the", "h2o", "cluster", "using", "the", "given", "algorithm", "and", "model", "parameters", "." ]
python
test
46.352941
saltstack/salt
salt/modules/portage_config.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/portage_config.py#L558-L599
def is_present(conf, atom): ''' Tell if a given package or DEPEND atom is present in the configuration files tree. Warning: This only works if the configuration files tree is in the correct format (the one enforced by enforce_nice_config) CLI Example: .. code-block:: bash salt '*' portage_config.is_present unmask salt ''' if conf in SUPPORTED_CONFS: if not isinstance(atom, portage.dep.Atom): atom = portage.dep.Atom(atom, allow_wildcard=True) has_wildcard = '*' in atom package_file = _get_config_file(conf, six.text_type(atom)) # wildcards are valid in confs if has_wildcard: match_list = set(atom) else: match_list = set(_porttree().dbapi.xmatch("match-all", atom)) try: with salt.utils.files.fopen(package_file) as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line).strip() line_package = line.split()[0] if has_wildcard: if line_package == six.text_type(atom): return True else: line_list = _porttree().dbapi.xmatch("match-all", line_package) if match_list.issubset(line_list): return True except IOError: pass return False
[ "def", "is_present", "(", "conf", ",", "atom", ")", ":", "if", "conf", "in", "SUPPORTED_CONFS", ":", "if", "not", "isinstance", "(", "atom", ",", "portage", ".", "dep", ".", "Atom", ")", ":", "atom", "=", "portage", ".", "dep", ".", "Atom", "(", "atom", ",", "allow_wildcard", "=", "True", ")", "has_wildcard", "=", "'*'", "in", "atom", "package_file", "=", "_get_config_file", "(", "conf", ",", "six", ".", "text_type", "(", "atom", ")", ")", "# wildcards are valid in confs", "if", "has_wildcard", ":", "match_list", "=", "set", "(", "atom", ")", "else", ":", "match_list", "=", "set", "(", "_porttree", "(", ")", ".", "dbapi", ".", "xmatch", "(", "\"match-all\"", ",", "atom", ")", ")", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "package_file", ")", "as", "fp_", ":", "for", "line", "in", "fp_", ":", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", ".", "strip", "(", ")", "line_package", "=", "line", ".", "split", "(", ")", "[", "0", "]", "if", "has_wildcard", ":", "if", "line_package", "==", "six", ".", "text_type", "(", "atom", ")", ":", "return", "True", "else", ":", "line_list", "=", "_porttree", "(", ")", ".", "dbapi", ".", "xmatch", "(", "\"match-all\"", ",", "line_package", ")", "if", "match_list", ".", "issubset", "(", "line_list", ")", ":", "return", "True", "except", "IOError", ":", "pass", "return", "False" ]
Tell if a given package or DEPEND atom is present in the configuration files tree. Warning: This only works if the configuration files tree is in the correct format (the one enforced by enforce_nice_config) CLI Example: .. code-block:: bash salt '*' portage_config.is_present unmask salt
[ "Tell", "if", "a", "given", "package", "or", "DEPEND", "atom", "is", "present", "in", "the", "configuration", "files", "tree", ".", "Warning", ":", "This", "only", "works", "if", "the", "configuration", "files", "tree", "is", "in", "the", "correct", "format", "(", "the", "one", "enforced", "by", "enforce_nice_config", ")" ]
python
train
33.666667
fboender/ansible-cmdb
lib/mako/ext/extract.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/ext/extract.py#L97-L101
def _split_comment(lineno, comment): """Return the multiline comment at lineno split into a list of comment line numbers and the accompanying comment line""" return [(lineno + index, line) for index, line in enumerate(comment.splitlines())]
[ "def", "_split_comment", "(", "lineno", ",", "comment", ")", ":", "return", "[", "(", "lineno", "+", "index", ",", "line", ")", "for", "index", ",", "line", "in", "enumerate", "(", "comment", ".", "splitlines", "(", ")", ")", "]" ]
Return the multiline comment at lineno split into a list of comment line numbers and the accompanying comment line
[ "Return", "the", "multiline", "comment", "at", "lineno", "split", "into", "a", "list", "of", "comment", "line", "numbers", "and", "the", "accompanying", "comment", "line" ]
python
train
55.2
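A quick standalone check of the pairing this helper performs (hypothetical inputs; the real function lives in lib/mako/ext/extract.py):

# Pair each physical line of a multiline comment with its own line
# number, counting upward from the line the comment starts on.
def _split_comment(lineno, comment):
    return [(lineno + index, line)
            for index, line in enumerate(comment.splitlines())]

pairs = _split_comment(10, "first line\nsecond line")
assert pairs == [(10, "first line"), (11, "second line")]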
InspectorMustache/base16-builder-python
pybase16_builder/injector.py
https://github.com/InspectorMustache/base16-builder-python/blob/586f1f87ee9f70696ab19c542af6ef55c6548a2e/pybase16_builder/injector.py#L42-L64
def get_colorscheme(self, scheme_file): """Return a string object with the colorscheme that is to be inserted.""" scheme = get_yaml_dict(scheme_file) scheme_slug = builder.slugify(scheme_file) builder.format_scheme(scheme, scheme_slug) try: temp_base, temp_sub = self.temp.split('##') except ValueError: temp_base, temp_sub = (self.temp.strip('##'), 'default') temp_path = rel_to_cwd('templates', temp_base) temp_group = builder.TemplateGroup(temp_path) try: single_temp = temp_group.templates[temp_sub] except KeyError: raise FileNotFoundError(None, None, self.path + ' (sub-template)') colorscheme = pystache.render(single_temp['parsed'], scheme) return colorscheme
[ "def", "get_colorscheme", "(", "self", ",", "scheme_file", ")", ":", "scheme", "=", "get_yaml_dict", "(", "scheme_file", ")", "scheme_slug", "=", "builder", ".", "slugify", "(", "scheme_file", ")", "builder", ".", "format_scheme", "(", "scheme", ",", "scheme_slug", ")", "try", ":", "temp_base", ",", "temp_sub", "=", "self", ".", "temp", ".", "split", "(", "'##'", ")", "except", "ValueError", ":", "temp_base", ",", "temp_sub", "=", "(", "self", ".", "temp", ".", "strip", "(", "'##'", ")", ",", "'default'", ")", "temp_path", "=", "rel_to_cwd", "(", "'templates'", ",", "temp_base", ")", "temp_group", "=", "builder", ".", "TemplateGroup", "(", "temp_path", ")", "try", ":", "single_temp", "=", "temp_group", ".", "templates", "[", "temp_sub", "]", "except", "KeyError", ":", "raise", "FileNotFoundError", "(", "None", ",", "None", ",", "self", ".", "path", "+", "' (sub-template)'", ")", "colorscheme", "=", "pystache", ".", "render", "(", "single_temp", "[", "'parsed'", "]", ",", "scheme", ")", "return", "colorscheme" ]
Return a string object with the colorscheme that is to be inserted.
[ "Return", "a", "string", "object", "with", "the", "colorscheme", "that", "is", "to", "be", "inserted", "." ]
python
train
37.913043
knipknap/exscript
Exscript/stdlib/ipv4.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/stdlib/ipv4.py#L28-L54
def in_network(scope, prefixes, destination, default_pfxlen=[24]): """ Returns True if the given destination is in the network range that is defined by the given prefix (e.g. 10.0.0.1/22). If the given prefix does not have a prefix length specified, the given default prefix length is applied. If no such prefix length is given, the default length is /24. If a list of prefixes is passed, this function returns True only if the given destination is in ANY of the given prefixes. :type prefixes: string :param prefixes: A prefix, or a list of IP prefixes. :type destination: string :param destination: An IP address. :type default_pfxlen: int :param default_pfxlen: The default prefix length. :rtype: True :return: Whether the given destination is in the given network. """ needle = ipv4.ip2int(destination[0]) for prefix in prefixes: network, pfxlen = ipv4.parse_prefix(prefix, default_pfxlen[0]) mask = ipv4.pfxlen2mask_int(pfxlen) if needle & mask == ipv4.ip2int(network) & mask: return [True] return [False]
[ "def", "in_network", "(", "scope", ",", "prefixes", ",", "destination", ",", "default_pfxlen", "=", "[", "24", "]", ")", ":", "needle", "=", "ipv4", ".", "ip2int", "(", "destination", "[", "0", "]", ")", "for", "prefix", "in", "prefixes", ":", "network", ",", "pfxlen", "=", "ipv4", ".", "parse_prefix", "(", "prefix", ",", "default_pfxlen", "[", "0", "]", ")", "mask", "=", "ipv4", ".", "pfxlen2mask_int", "(", "pfxlen", ")", "if", "needle", "&", "mask", "==", "ipv4", ".", "ip2int", "(", "network", ")", "&", "mask", ":", "return", "[", "True", "]", "return", "[", "False", "]" ]
Returns True if the given destination is in the network range that is defined by the given prefix (e.g. 10.0.0.1/22). If the given prefix does not have a prefix length specified, the given default prefix length is applied. If no such prefix length is given, the default length is /24. If a list of prefixes is passed, this function returns True only if the given destination is in ANY of the given prefixes. :type prefixes: string :param prefixes: A prefix, or a list of IP prefixes. :type destination: string :param destination: An IP address. :type default_pfxlen: int :param default_pfxlen: The default prefix length. :rtype: True :return: Whether the given destination is in the given network.
[ "Returns", "True", "if", "the", "given", "destination", "is", "in", "the", "network", "range", "that", "is", "defined", "by", "the", "given", "prefix", "(", "e", ".", "g", ".", "10", ".", "0", ".", "0", ".", "1", "/", "22", ")", ".", "If", "the", "given", "prefix", "does", "not", "have", "a", "prefix", "length", "specified", "the", "given", "default", "prefix", "length", "is", "applied", ".", "If", "no", "such", "prefix", "length", "is", "given", "the", "default", "length", "is", "/", "24", "." ]
python
train
40.962963
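The membership test above boils down to `needle & mask == ip2int(network) & mask` for each prefix. A sketch of the same check using only the standard library's ipaddress module (Exscript's parse_prefix/pfxlen2mask_int helpers are approximated by ip_network's own mask handling, and the list-wrapped arguments and return values of the template language are dropped):

import ipaddress

def in_network(prefixes, destination, default_pfxlen=24):
    needle = ipaddress.ip_address(destination)
    for prefix in prefixes:
        if '/' not in prefix:
            prefix = '%s/%d' % (prefix, default_pfxlen)
        # strict=False tolerates host bits in the prefix, e.g. 10.0.0.1/22.
        if needle in ipaddress.ip_network(prefix, strict=False):
            return True
    return False

assert in_network(['10.0.0.1/22'], '10.0.3.5') is True
assert in_network(['10.0.0.1/22'], '10.0.4.1') is False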
secdev/scapy
scapy/layers/tls/keyexchange.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/keyexchange.py#L769-L787
def post_dissection(self, m): """ First we update the client DHParams. Then, we try to update the server DHParams generated during Server*DHParams building, with the shared secret. Finally, we derive the session keys and update the context. """ s = self.tls_session # if there are kx params and keys, we assume the crypto library is ok if s.client_kx_ffdh_params: y = pkcs_os2ip(self.dh_Yc) param_numbers = s.client_kx_ffdh_params.parameter_numbers() public_numbers = dh.DHPublicNumbers(y, param_numbers) s.client_kx_pubkey = public_numbers.public_key(default_backend()) if s.server_kx_privkey and s.client_kx_pubkey: ZZ = s.server_kx_privkey.exchange(s.client_kx_pubkey) s.pre_master_secret = ZZ s.compute_ms_and_derive_keys()
[ "def", "post_dissection", "(", "self", ",", "m", ")", ":", "s", "=", "self", ".", "tls_session", "# if there are kx params and keys, we assume the crypto library is ok", "if", "s", ".", "client_kx_ffdh_params", ":", "y", "=", "pkcs_os2ip", "(", "self", ".", "dh_Yc", ")", "param_numbers", "=", "s", ".", "client_kx_ffdh_params", ".", "parameter_numbers", "(", ")", "public_numbers", "=", "dh", ".", "DHPublicNumbers", "(", "y", ",", "param_numbers", ")", "s", ".", "client_kx_pubkey", "=", "public_numbers", ".", "public_key", "(", "default_backend", "(", ")", ")", "if", "s", ".", "server_kx_privkey", "and", "s", ".", "client_kx_pubkey", ":", "ZZ", "=", "s", ".", "server_kx_privkey", ".", "exchange", "(", "s", ".", "client_kx_pubkey", ")", "s", ".", "pre_master_secret", "=", "ZZ", "s", ".", "compute_ms_and_derive_keys", "(", ")" ]
First we update the client DHParams. Then, we try to update the server DHParams generated during Server*DHParams building, with the shared secret. Finally, we derive the session keys and update the context.
[ "First", "we", "update", "the", "client", "DHParams", ".", "Then", "we", "try", "to", "update", "the", "server", "DHParams", "generated", "during", "Server", "*", "DHParams", "building", "with", "the", "shared", "secret", ".", "Finally", "we", "derive", "the", "session", "keys", "and", "update", "the", "context", "." ]
python
train
45.631579
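The interesting step above is rebuilding the client's public key from the raw dh_Yc integer and deriving the shared secret ZZ. A self-contained sketch of that dance with the same `cryptography` primitives (freshly generated 512-bit parameters stand in for the ones negotiated in the handshake; recent `cryptography` releases make the backend argument optional):

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import dh

params = dh.generate_parameters(generator=2, key_size=512,
                                backend=default_backend())
server_priv = params.generate_private_key()
client_priv = params.generate_private_key()

# What post_dissection does with dh_Yc: raw integer y -> DHPublicNumbers
# -> public key object usable for the exchange.
y = client_priv.public_key().public_numbers().y
client_pub = dh.DHPublicNumbers(y, params.parameter_numbers()).public_key(
    default_backend())

zz = server_priv.exchange(client_pub)  # becomes pre_master_secret
assert zz == client_priv.exchange(server_priv.public_key())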
markovmodel/msmtools
msmtools/flux/reactive_flux.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/flux/reactive_flux.py#L111-L115
def I(self): r"""Returns the set of intermediate states """ return list(set(range(self.nstates)) - set(self._A) - set(self._B))
[ "def", "I", "(", "self", ")", ":", "return", "list", "(", "set", "(", "range", "(", "self", ".", "nstates", ")", ")", "-", "set", "(", "self", ".", "_A", ")", "-", "set", "(", "self", ".", "_B", ")", ")" ]
r"""Returns the set of intermediate states
[ "r", "Returns", "the", "set", "of", "intermediate", "states" ]
python
train
29.6
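The property is a plain set difference over the state indices; with six states, source set A = {0} and target set B = {4, 5}:

nstates, A, B = 6, [0], [4, 5]
intermediate = list(set(range(nstates)) - set(A) - set(B))
assert sorted(intermediate) == [1, 2, 3]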
readbeyond/aeneas
aeneas/globalfunctions.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/globalfunctions.py#L649-L685
def time_to_hhmmssmmm(time_value, decimal_separator="."):
    """
    Format the given time value into a ``HH:MM:SS.mmm`` string.

    Examples: ::

        12        => 00:00:12.000
        12.345    => 00:00:12.345
        12.345432 => 00:00:12.345
        12.345678 => 00:00:12.345 (milliseconds are truncated, not rounded)
        83        => 00:01:23.000
        83.456    => 00:01:23.456
        83.456789 => 00:01:23.456
        3600      => 01:00:00.000
        3612.345  => 01:00:12.345

    :param float time_value: a time value, in seconds
    :param string decimal_separator: the decimal separator, default ``.``
    :rtype: string
    """
    if time_value is None:
        time_value = 0
    tmp = time_value
    hours = int(math.floor(tmp / 3600))
    tmp -= (hours * 3600)
    minutes = int(math.floor(tmp / 60))
    tmp -= minutes * 60
    seconds = int(math.floor(tmp))
    tmp -= seconds
    milliseconds = int(math.floor(tmp * 1000))
    return "%02d:%02d:%02d%s%03d" % (
        hours,
        minutes,
        seconds,
        decimal_separator,
        milliseconds
    )
[ "def", "time_to_hhmmssmmm", "(", "time_value", ",", "decimal_separator", "=", "\".\"", ")", ":", "if", "time_value", "is", "None", ":", "time_value", "=", "0", "tmp", "=", "time_value", "hours", "=", "int", "(", "math", ".", "floor", "(", "tmp", "/", "3600", ")", ")", "tmp", "-=", "(", "hours", "*", "3600", ")", "minutes", "=", "int", "(", "math", ".", "floor", "(", "tmp", "/", "60", ")", ")", "tmp", "-=", "minutes", "*", "60", "seconds", "=", "int", "(", "math", ".", "floor", "(", "tmp", ")", ")", "tmp", "-=", "seconds", "milliseconds", "=", "int", "(", "math", ".", "floor", "(", "tmp", "*", "1000", ")", ")", "return", "\"%02d:%02d:%02d%s%03d\"", "%", "(", "hours", ",", "minutes", ",", "seconds", ",", "decimal_separator", ",", "milliseconds", ")" ]
Format the given time value into a ``HH:MM:SS.mmm`` string.

Examples: ::

    12        => 00:00:12.000
    12.345    => 00:00:12.345
    12.345432 => 00:00:12.345
    12.345678 => 00:00:12.345 (milliseconds are truncated, not rounded)
    83        => 00:01:23.000
    83.456    => 00:01:23.456
    83.456789 => 00:01:23.456
    3600      => 01:00:00.000
    3612.345  => 01:00:12.345

:param float time_value: a time value, in seconds
:param string decimal_separator: the decimal separator, default ``.``
:rtype: string
[ "Format", "the", "given", "time", "value", "into", "a", "HH", ":", "MM", ":", "SS", ".", "mmm", "string", "." ]
python
train
27.486486
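Note that the milliseconds are obtained with math.floor, so fractional parts are truncated rather than rounded. A standalone copy of the formatting logic for quick checks (test values chosen to be exactly representable as floats):

import math

def time_to_hhmmssmmm(time_value, decimal_separator="."):
    if time_value is None:
        time_value = 0
    tmp = time_value
    hours = int(math.floor(tmp / 3600))
    tmp -= hours * 3600
    minutes = int(math.floor(tmp / 60))
    tmp -= minutes * 60
    seconds = int(math.floor(tmp))
    tmp -= seconds
    milliseconds = int(math.floor(tmp * 1000))  # truncation happens here
    return "%02d:%02d:%02d%s%03d" % (
        hours, minutes, seconds, decimal_separator, milliseconds)

assert time_to_hhmmssmmm(83.5) == "00:01:23.500"
assert time_to_hhmmssmmm(3612.0, ",") == "01:00:12,000"
assert time_to_hhmmssmmm(0.0019) == "00:00:00.001"  # truncated, not rounded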
pyblish/pyblish-qml
pyblish_qml/app.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/app.py#L273-L299
def main(demo=False, aschild=False, targets=[]): """Start the Qt-runtime and show the window Arguments: aschild (bool, optional): Run as child of parent process """ if aschild: print("Starting pyblish-qml") compat.main() app = Application(APP_PATH, targets) app.listen() print("Done, don't forget to call `show()`") return app.exec_() else: print("Starting pyblish-qml server..") service = ipc.service.MockService() if demo else ipc.service.Service() server = ipc.server.Server(service, targets=targets) proxy = ipc.server.Proxy(server) proxy.show(settings.to_dict()) server.listen() server.wait()
[ "def", "main", "(", "demo", "=", "False", ",", "aschild", "=", "False", ",", "targets", "=", "[", "]", ")", ":", "if", "aschild", ":", "print", "(", "\"Starting pyblish-qml\"", ")", "compat", ".", "main", "(", ")", "app", "=", "Application", "(", "APP_PATH", ",", "targets", ")", "app", ".", "listen", "(", ")", "print", "(", "\"Done, don't forget to call `show()`\"", ")", "return", "app", ".", "exec_", "(", ")", "else", ":", "print", "(", "\"Starting pyblish-qml server..\"", ")", "service", "=", "ipc", ".", "service", ".", "MockService", "(", ")", "if", "demo", "else", "ipc", ".", "service", ".", "Service", "(", ")", "server", "=", "ipc", ".", "server", ".", "Server", "(", "service", ",", "targets", "=", "targets", ")", "proxy", "=", "ipc", ".", "server", ".", "Proxy", "(", "server", ")", "proxy", ".", "show", "(", "settings", ".", "to_dict", "(", ")", ")", "server", ".", "listen", "(", ")", "server", ".", "wait", "(", ")" ]
Start the Qt-runtime and show the window Arguments: aschild (bool, optional): Run as child of parent process
[ "Start", "the", "Qt", "-", "runtime", "and", "show", "the", "window" ]
python
train
26.296296
profitbricks/profitbricks-sdk-python
profitbricks/client.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L1267-L1281
def list_servers(self, datacenter_id, depth=1): """ Retrieves a list of all servers bound to the specified data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param depth: The depth of the response data. :type depth: ``int`` """ response = self._perform_request( '/datacenters/%s/servers?depth=%s' % (datacenter_id, str(depth))) return response
[ "def", "list_servers", "(", "self", ",", "datacenter_id", ",", "depth", "=", "1", ")", ":", "response", "=", "self", ".", "_perform_request", "(", "'/datacenters/%s/servers?depth=%s'", "%", "(", "datacenter_id", ",", "str", "(", "depth", ")", ")", ")", "return", "response" ]
Retrieves a list of all servers bound to the specified data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param depth: The depth of the response data. :type depth: ``int``
[ "Retrieves", "a", "list", "of", "all", "servers", "bound", "to", "the", "specified", "data", "center", "." ]
python
valid
32.466667
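A hedged usage sketch; the credentials and datacenter UUID are placeholders, and the 'items' key reflects the ProfitBricks REST collection format rather than anything guaranteed by this wrapper:

from profitbricks.client import ProfitBricksService

client = ProfitBricksService(username='user@example.com', password='secret')
servers = client.list_servers(
    datacenter_id='00000000-0000-0000-0000-000000000000', depth=2)
for item in servers['items']:
    print(item['id'], item['properties']['name'])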
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L852-L871
def conv_ast_to_sym(self, math_ast):
    """
    Convert mathematical expressions to a sympy representation.

    May only contain parentheses, addition, subtraction and multiplication from the AST.
    """
    if type(math_ast) is c_ast.ID:
        return symbol_pos_int(math_ast.name)
    elif type(math_ast) is c_ast.Constant:
        return sympy.Integer(math_ast.value)
    else:  # elif type(dim) is c_ast.BinaryOp:
        op = {
            '*': operator.mul,
            '+': operator.add,
            '-': operator.sub
        }
        return op[math_ast.op](
            self.conv_ast_to_sym(math_ast.left),
            self.conv_ast_to_sym(math_ast.right))
[ "def", "conv_ast_to_sym", "(", "self", ",", "math_ast", ")", ":", "if", "type", "(", "math_ast", ")", "is", "c_ast", ".", "ID", ":", "return", "symbol_pos_int", "(", "math_ast", ".", "name", ")", "elif", "type", "(", "math_ast", ")", "is", "c_ast", ".", "Constant", ":", "return", "sympy", ".", "Integer", "(", "math_ast", ".", "value", ")", "else", ":", "# elif type(dim) is c_ast.BinaryOp:", "op", "=", "{", "'*'", ":", "operator", ".", "mul", ",", "'+'", ":", "operator", ".", "add", ",", "'-'", ":", "operator", ".", "sub", "}", "return", "op", "[", "math_ast", ".", "op", "]", "(", "self", ".", "conv_ast_to_sym", "(", "math_ast", ".", "left", ")", ",", "self", ".", "conv_ast_to_sym", "(", "math_ast", ".", "right", ")", ")" ]
Convert mathematical expressions to a sympy representation.

May only contain parentheses, addition, subtraction and multiplication from the AST.
[ "Convert", "mathematical", "expressions", "to", "a", "sympy", "representation", "." ]
python
test
35.75
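A minimal re-implementation check of the folding for an expression like n*m + 2, using pycparser and sympy directly (symbol_pos_int is assumed to build a Symbol with positive/integer assumptions):

import operator
import sympy
from pycparser import c_parser, c_ast

def conv(node):
    if type(node) is c_ast.ID:
        return sympy.Symbol(node.name, positive=True, integer=True)
    if type(node) is c_ast.Constant:
        return sympy.Integer(node.value)
    op = {'*': operator.mul, '+': operator.add, '-': operator.sub}
    return op[node.op](conv(node.left), conv(node.right))

# Parse the expression out of a tiny C snippet: the initializer of x.
decl = c_parser.CParser().parse('int x = n*m + 2;').ext[0]
n = sympy.Symbol('n', positive=True, integer=True)
m = sympy.Symbol('m', positive=True, integer=True)
assert conv(decl.init) == n * m + 2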
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L6258-L6268
def createSpatialAnchorFromDescriptor(self, pchDescriptor):
    """
    Returns a handle for a spatial anchor described by "descriptor".  On success, pHandle
    will contain a handle valid for this session.  Caller can wait for an event or occasionally
    poll GetSpatialAnchorPose() to find the virtual coordinate associated with this anchor.
    """
    fn = self.function_table.createSpatialAnchorFromDescriptor
    pHandleOut = SpatialAnchorHandle_t()
    result = fn(pchDescriptor, byref(pHandleOut))
    return result, pHandleOut
[ "def", "createSpatialAnchorFromDescriptor", "(", "self", ",", "pchDescriptor", ")", ":", "fn", "=", "self", ".", "function_table", ".", "createSpatialAnchorFromDescriptor", "pHandleOut", "=", "SpatialAnchorHandle_t", "(", ")", "result", "=", "fn", "(", "pchDescriptor", ",", "byref", "(", "pHandleOut", ")", ")", "return", "result", ",", "pHandleOut" ]
Returns a handle for a spatial anchor described by "descriptor".  On success, pHandle will contain a handle valid for this session.  Caller can wait for an event or occasionally poll GetSpatialAnchorPose() to find the virtual coordinate associated with this anchor.
[ "Returns", "a", "handle", "for", "an", "spatial", "anchor", "described", "by", "descriptor", ".", "On", "success", "pHandle", "will", "contain", "a", "handle", "valid", "for", "this", "session", ".", "Caller", "can", "wait", "for", "an", "event", "or", "occasionally", "poll", "GetSpatialAnchorPose", "()", "to", "find", "the", "virtual", "coordinate", "associated", "with", "this", "anchor", "." ]
python
train
51.363636
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Tags.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L1712-L1728
def setStyles(self, styleUpdatesDict): ''' setStyles - Sets one or more style params. This all happens in one shot, so it is much much faster than calling setStyle for every value. To remove a style, set its value to empty string. When all styles are removed, the "style" attribute will be nullified. @param styleUpdatesDict - Dictionary of attribute : value styles. @return - String of current value of "style" after change is made. ''' setStyleMethod = self.setStyle for newName, newValue in styleUpdatesDict.items(): setStyleMethod(newName, newValue) return self.style
[ "def", "setStyles", "(", "self", ",", "styleUpdatesDict", ")", ":", "setStyleMethod", "=", "self", ".", "setStyle", "for", "newName", ",", "newValue", "in", "styleUpdatesDict", ".", "items", "(", ")", ":", "setStyleMethod", "(", "newName", ",", "newValue", ")", "return", "self", ".", "style" ]
setStyles - Sets one or more style params. This all happens in one shot, so it is much much faster than calling setStyle for every value. To remove a style, set its value to empty string. When all styles are removed, the "style" attribute will be nullified. @param styleUpdatesDict - Dictionary of attribute : value styles. @return - String of current value of "style" after change is made.
[ "setStyles", "-", "Sets", "one", "or", "more", "style", "params", ".", "This", "all", "happens", "in", "one", "shot", "so", "it", "is", "much", "much", "faster", "than", "calling", "setStyle", "for", "every", "value", "." ]
python
train
40.941176
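A short usage sketch (the rendered property order below is illustrative, not guaranteed):

from AdvancedHTMLParser import AdvancedHTMLParser

parser = AdvancedHTMLParser()
parser.parseStr('<div id="box" style="color: red">hi</div>')
tag = parser.getElementById('box')

# One call updates several properties; an empty value removes one.
tag.setStyles({'color': '', 'font-weight': 'bold', 'padding': '5px'})
print(tag.style)  # e.g. font-weight: bold; padding: 5px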
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L2441-L2443
def HardwareInput(uMsg: int, param: int = 0) -> INPUT: """Create Win32 struct `HARDWAREINPUT` for `SendInput`.""" return _CreateInput(HARDWAREINPUT(uMsg, param & 0xFFFF, param >> 16 & 0xFFFF))
[ "def", "HardwareInput", "(", "uMsg", ":", "int", ",", "param", ":", "int", "=", "0", ")", "->", "INPUT", ":", "return", "_CreateInput", "(", "HARDWAREINPUT", "(", "uMsg", ",", "param", "&", "0xFFFF", ",", "param", ">>", "16", "&", "0xFFFF", ")", ")" ]
Create Win32 struct `HARDWAREINPUT` for `SendInput`.
[ "Create", "Win32", "struct", "HARDWAREINPUT", "for", "SendInput", "." ]
python
valid
66
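The single 32-bit param is split into the low and high 16-bit words that HARDWAREINPUT's wParamL/wParamH fields expect; the arithmetic on its own:

param = 0x12345678
wParamL = param & 0xFFFF          # low word  -> 0x5678
wParamH = param >> 16 & 0xFFFF    # high word -> 0x1234
assert (wParamL, wParamH) == (0x5678, 0x1234)
assert (wParamH << 16) | wParamL == param  # recombines losslessly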
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L2508-L2526
def dlafns(handle, descr): """ Find the segment following a specified segment in a DLA file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlafns_c.html :param handle: Handle of open DLA file. :type handle: c_int :param descr: Descriptor of a DLA segment. :type descr: spiceypy.utils.support_types.SpiceDLADescr :return: Descriptor of next segment in DLA file :rtype: spiceypy.utils.support_types.SpiceDLADescr """ assert isinstance(descr, stypes.SpiceDLADescr) handle = ctypes.c_int(handle) nxtdsc = stypes.SpiceDLADescr() found = ctypes.c_int() libspice.dlafns_c(handle, ctypes.byref(descr), ctypes.byref(nxtdsc), ctypes.byref(found)) return nxtdsc, bool(found.value)
[ "def", "dlafns", "(", "handle", ",", "descr", ")", ":", "assert", "isinstance", "(", "descr", ",", "stypes", ".", "SpiceDLADescr", ")", "handle", "=", "ctypes", ".", "c_int", "(", "handle", ")", "nxtdsc", "=", "stypes", ".", "SpiceDLADescr", "(", ")", "found", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "dlafns_c", "(", "handle", ",", "ctypes", ".", "byref", "(", "descr", ")", ",", "ctypes", ".", "byref", "(", "nxtdsc", ")", ",", "ctypes", ".", "byref", "(", "found", ")", ")", "return", "nxtdsc", ",", "bool", "(", "found", ".", "value", ")" ]
Find the segment following a specified segment in a DLA file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlafns_c.html :param handle: Handle of open DLA file. :type handle: c_int :param descr: Descriptor of a DLA segment. :type descr: spiceypy.utils.support_types.SpiceDLADescr :return: Descriptor of next segment in DLA file :rtype: spiceypy.utils.support_types.SpiceDLADescr
[ "Find", "the", "segment", "following", "a", "specified", "segment", "in", "a", "DLA", "file", ".", "https", ":", "//", "naif", ".", "jpl", ".", "nasa", ".", "gov", "/", "pub", "/", "naif", "/", "toolkit_docs", "/", "C", "/", "cspice", "/", "dlafns_c", ".", "html", ":", "param", "handle", ":", "Handle", "of", "open", "DLA", "file", ".", ":", "type", "handle", ":", "c_int", ":", "param", "descr", ":", "Descriptor", "of", "a", "DLA", "segment", ".", ":", "type", "descr", ":", "spiceypy", ".", "utils", ".", "support_types", ".", "SpiceDLADescr", ":", "return", ":", "Descriptor", "of", "next", "segment", "in", "DLA", "file", ":", "rtype", ":", "spiceypy", ".", "utils", ".", "support_types", ".", "SpiceDLADescr" ]
python
train
38.842105
saltstack/salt
salt/modules/ansiblegate.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ansiblegate.py#L68-L92
def _get_modules_map(self, path=None): ''' Get installed Ansible modules :return: ''' paths = {} root = ansible.modules.__path__[0] if not path: path = root for p_el in os.listdir(path): p_el_path = os.path.join(path, p_el) if os.path.islink(p_el_path): continue if os.path.isdir(p_el_path): paths.update(self._get_modules_map(p_el_path)) else: if (any(p_el.startswith(elm) for elm in ['__', '.']) or not p_el.endswith('.py') or p_el in ansible.constants.IGNORE_FILES): continue p_el_path = p_el_path.replace(root, '').split('.')[0] als_name = p_el_path.replace('.', '').replace('/', '', 1).replace('/', '.') paths[als_name] = p_el_path return paths
[ "def", "_get_modules_map", "(", "self", ",", "path", "=", "None", ")", ":", "paths", "=", "{", "}", "root", "=", "ansible", ".", "modules", ".", "__path__", "[", "0", "]", "if", "not", "path", ":", "path", "=", "root", "for", "p_el", "in", "os", ".", "listdir", "(", "path", ")", ":", "p_el_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "p_el", ")", "if", "os", ".", "path", ".", "islink", "(", "p_el_path", ")", ":", "continue", "if", "os", ".", "path", ".", "isdir", "(", "p_el_path", ")", ":", "paths", ".", "update", "(", "self", ".", "_get_modules_map", "(", "p_el_path", ")", ")", "else", ":", "if", "(", "any", "(", "p_el", ".", "startswith", "(", "elm", ")", "for", "elm", "in", "[", "'__'", ",", "'.'", "]", ")", "or", "not", "p_el", ".", "endswith", "(", "'.py'", ")", "or", "p_el", "in", "ansible", ".", "constants", ".", "IGNORE_FILES", ")", ":", "continue", "p_el_path", "=", "p_el_path", ".", "replace", "(", "root", ",", "''", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "als_name", "=", "p_el_path", ".", "replace", "(", "'.'", ",", "''", ")", ".", "replace", "(", "'/'", ",", "''", ",", "1", ")", ".", "replace", "(", "'/'", ",", "'.'", ")", "paths", "[", "als_name", "]", "=", "p_el_path", "return", "paths" ]
Get installed Ansible modules :return:
[ "Get", "installed", "Ansible", "modules", ":", "return", ":" ]
python
train
36.88
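The alias each module file ends up under follows from the string surgery near the end of the loop; traced for a hypothetical root and one file beneath it:

root = '/usr/lib/ansible/modules'   # stand-in for ansible.modules.__path__[0]
p_el_path = root + '/system/ping.py'

rel = p_el_path.replace(root, '').split('.')[0]  # -> '/system/ping'
als_name = rel.replace('.', '').replace('/', '', 1).replace('/', '.')
assert (rel, als_name) == ('/system/ping', 'system.ping')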
twilio/twilio-python
twilio/rest/api/v2010/account/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/__init__.py#L357-L369
def authorized_connect_apps(self): """ Access the authorized_connect_apps :returns: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList :rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList """ if self._authorized_connect_apps is None: self._authorized_connect_apps = AuthorizedConnectAppList( self._version, account_sid=self._solution['sid'], ) return self._authorized_connect_apps
[ "def", "authorized_connect_apps", "(", "self", ")", ":", "if", "self", ".", "_authorized_connect_apps", "is", "None", ":", "self", ".", "_authorized_connect_apps", "=", "AuthorizedConnectAppList", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_authorized_connect_apps" ]
Access the authorized_connect_apps :returns: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList :rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
[ "Access", "the", "authorized_connect_apps" ]
python
train
41.615385
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L3992-L4038
def get_time_slice(time, z, zdot=None, timeStart=None, timeEnd=None):
    """
    Get slice of time, z and (if provided) zdot
    from timeStart to timeEnd.

    Parameters
    ----------
    time : ndarray
        array of time values
    z : ndarray
        array of z values
    zdot : ndarray, optional
        array of zdot (velocity) values.
    timeStart : float, optional
        time at which to start the slice.
        Defaults to beginning of time trace
    timeEnd : float, optional
        time at which to end the slice.
        Defaults to end of time trace

    Returns
    -------
    time_sliced : ndarray
        array of time values from timeStart to timeEnd
    z_sliced : ndarray
        array of z values from timeStart to timeEnd
    zdot_sliced : ndarray
        array of zdot values from timeStart to timeEnd.
        None if zdot not provided
    """
    if timeStart is None:
        timeStart = time[0]
    if timeEnd is None:
        timeEnd = time[-1]

    StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
    EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]

    time_sliced = time[StartIndex:EndIndex]
    z_sliced = z[StartIndex:EndIndex]

    if zdot is not None:
        zdot_sliced = zdot[StartIndex:EndIndex]
    else:
        zdot_sliced = None

    return time_sliced, z_sliced, zdot_sliced
[ "def", "get_time_slice", "(", "time", ",", "z", ",", "zdot", "=", "None", ",", "timeStart", "=", "None", ",", "timeEnd", "=", "None", ")", ":", "if", "timeStart", "==", "None", ":", "timeStart", "=", "time", "[", "0", "]", "if", "timeEnd", "==", "None", ":", "timeEnd", "=", "time", "[", "-", "1", "]", "StartIndex", "=", "_np", ".", "where", "(", "time", "==", "take_closest", "(", "time", ",", "timeStart", ")", ")", "[", "0", "]", "[", "0", "]", "EndIndex", "=", "_np", ".", "where", "(", "time", "==", "take_closest", "(", "time", ",", "timeEnd", ")", ")", "[", "0", "]", "[", "0", "]", "time_sliced", "=", "time", "[", "StartIndex", ":", "EndIndex", "]", "z_sliced", "=", "z", "[", "StartIndex", ":", "EndIndex", "]", "if", "zdot", "!=", "None", ":", "zdot_sliced", "=", "zdot", "[", "StartIndex", ":", "EndIndex", "]", "else", ":", "zdot_sliced", "=", "None", "return", "time_sliced", ",", "z_sliced", ",", "zdot_sliced" ]
Get slice of time, z and (if provided) zdot from timeStart to timeEnd.

Parameters
----------
time : ndarray
    array of time values
z : ndarray
    array of z values
zdot : ndarray, optional
    array of zdot (velocity) values.
timeStart : float, optional
    time at which to start the slice.
    Defaults to beginning of time trace
timeEnd : float, optional
    time at which to end the slice.
    Defaults to end of time trace

Returns
-------
time_sliced : ndarray
    array of time values from timeStart to timeEnd
z_sliced : ndarray
    array of z values from timeStart to timeEnd
zdot_sliced : ndarray
    array of zdot values from timeStart to timeEnd.
    None if zdot not provided
[ "Get", "slice", "of", "time", "z", "and", "(", "if", "provided", ")", "zdot", "from", "timeStart", "to", "timeEnd", "." ]
python
train
28.12766
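A self-contained sketch of the slicing itself; take_closest snaps the requested times to actual sample points, a role np.searchsorted plays here:

import numpy as np

time = np.linspace(0.0, 1.0, 11)   # 0.0, 0.1, ..., 1.0
z = np.sin(time)

start = np.searchsorted(time, 0.2)
end = np.searchsorted(time, 0.7)
time_sliced, z_sliced = time[start:end], z[start:end]
assert abs(time_sliced[0] - 0.2) < 1e-12
assert time_sliced[-1] <= 0.7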
pypa/pipenv
pipenv/vendor/docopt.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/docopt.py#L392-L401
def parse_seq(tokens, options): """seq ::= ( atom [ '...' ] )* ;""" result = [] while tokens.current() not in [None, ']', ')', '|']: atom = parse_atom(tokens, options) if tokens.current() == '...': atom = [OneOrMore(*atom)] tokens.move() result += atom return result
[ "def", "parse_seq", "(", "tokens", ",", "options", ")", ":", "result", "=", "[", "]", "while", "tokens", ".", "current", "(", ")", "not", "in", "[", "None", ",", "']'", ",", "')'", ",", "'|'", "]", ":", "atom", "=", "parse_atom", "(", "tokens", ",", "options", ")", "if", "tokens", ".", "current", "(", ")", "==", "'...'", ":", "atom", "=", "[", "OneOrMore", "(", "*", "atom", ")", "]", "tokens", ".", "move", "(", ")", "result", "+=", "atom", "return", "result" ]
seq ::= ( atom [ '...' ] )* ;
[ "seq", "::", "=", "(", "atom", "[", "...", "]", ")", "*", ";" ]
python
train
32.1
quantopian/zipline
zipline/data/continuous_future_reader.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/continuous_future_reader.py#L204-L282
def load_raw_arrays(self, columns, start_date, end_date, assets):
    """
    Parameters
    ----------
    columns : list of str
        'open', 'high', 'low', 'close', or 'volume'
    start_date: Timestamp
        Beginning of the window range.
    end_date: Timestamp
        End of the window range.
    assets : list of int
        The asset identifiers in the window.

    Returns
    -------
    list of np.ndarray
        A list with an entry per field of ndarrays with shape
        (minutes in range, assets) with a dtype of float64
        (uint32 for 'volume'), containing the values for the
        respective field over start and end dt range.
    """
    rolls_by_asset = {}
    tc = self.trading_calendar
    start_session = tc.minute_to_session_label(start_date)
    end_session = tc.minute_to_session_label(end_date)
    for asset in assets:
        rf = self._roll_finders[asset.roll_style]
        rolls_by_asset[asset] = rf.get_rolls(
            asset.root_symbol, start_session, end_session, asset.offset)
    sessions = tc.sessions_in_range(start_date, end_date)
    minutes = tc.minutes_in_range(start_date, end_date)
    num_minutes = len(minutes)
    shape = num_minutes, len(assets)
    results = []
    # Get partitions
    partitions_by_asset = {}
    for asset in assets:
        partitions = []
        partitions_by_asset[asset] = partitions
        rolls = rolls_by_asset[asset]
        start = start_date
        for roll in rolls:
            sid, roll_date = roll
            start_loc = minutes.searchsorted(start)
            if roll_date is not None:
                _, end = tc.open_and_close_for_session(
                    roll_date - sessions.freq)
                end_loc = minutes.searchsorted(end)
            else:
                end = end_date
                end_loc = len(minutes) - 1
            partitions.append((sid, start, end, start_loc, end_loc))
            if roll[-1] is not None:
                start, _ = tc.open_and_close_for_session(
                    tc.minute_to_session_label(minutes[end_loc + 1]))
    for column in columns:
        if column != 'volume':
            out = np.full(shape, np.nan)
        else:
            out = np.zeros(shape, dtype=np.uint32)
        for i, asset in enumerate(assets):
            partitions = partitions_by_asset[asset]
            for sid, start, end, start_loc, end_loc in partitions:
                if column != 'sid':
                    result = self._bar_reader.load_raw_arrays(
                        [column], start, end, [sid])[0][:, 0]
                else:
                    result = int(sid)
                out[start_loc:end_loc + 1, i] = result
        results.append(out)
    return results
[ "def", "load_raw_arrays", "(", "self", ",", "columns", ",", "start_date", ",", "end_date", ",", "assets", ")", ":", "rolls_by_asset", "=", "{", "}", "tc", "=", "self", ".", "trading_calendar", "start_session", "=", "tc", ".", "minute_to_session_label", "(", "start_date", ")", "end_session", "=", "tc", ".", "minute_to_session_label", "(", "end_date", ")", "for", "asset", "in", "assets", ":", "rf", "=", "self", ".", "_roll_finders", "[", "asset", ".", "roll_style", "]", "rolls_by_asset", "[", "asset", "]", "=", "rf", ".", "get_rolls", "(", "asset", ".", "root_symbol", ",", "start_session", ",", "end_session", ",", "asset", ".", "offset", ")", "sessions", "=", "tc", ".", "sessions_in_range", "(", "start_date", ",", "end_date", ")", "minutes", "=", "tc", ".", "minutes_in_range", "(", "start_date", ",", "end_date", ")", "num_minutes", "=", "len", "(", "minutes", ")", "shape", "=", "num_minutes", ",", "len", "(", "assets", ")", "results", "=", "[", "]", "# Get partitions", "partitions_by_asset", "=", "{", "}", "for", "asset", "in", "assets", ":", "partitions", "=", "[", "]", "partitions_by_asset", "[", "asset", "]", "=", "partitions", "rolls", "=", "rolls_by_asset", "[", "asset", "]", "start", "=", "start_date", "for", "roll", "in", "rolls", ":", "sid", ",", "roll_date", "=", "roll", "start_loc", "=", "minutes", ".", "searchsorted", "(", "start", ")", "if", "roll_date", "is", "not", "None", ":", "_", ",", "end", "=", "tc", ".", "open_and_close_for_session", "(", "roll_date", "-", "sessions", ".", "freq", ")", "end_loc", "=", "minutes", ".", "searchsorted", "(", "end", ")", "else", ":", "end", "=", "end_date", "end_loc", "=", "len", "(", "minutes", ")", "-", "1", "partitions", ".", "append", "(", "(", "sid", ",", "start", ",", "end", ",", "start_loc", ",", "end_loc", ")", ")", "if", "roll", "[", "-", "1", "]", "is", "not", "None", ":", "start", ",", "_", "=", "tc", ".", "open_and_close_for_session", "(", "tc", ".", "minute_to_session_label", "(", "minutes", "[", "end_loc", "+", "1", "]", ")", ")", "for", "column", "in", "columns", ":", "if", "column", "!=", "'volume'", ":", "out", "=", "np", ".", "full", "(", "shape", ",", "np", ".", "nan", ")", "else", ":", "out", "=", "np", ".", "zeros", "(", "shape", ",", "dtype", "=", "np", ".", "uint32", ")", "for", "i", ",", "asset", "in", "enumerate", "(", "assets", ")", ":", "partitions", "=", "partitions_by_asset", "[", "asset", "]", "for", "sid", ",", "start", ",", "end", ",", "start_loc", ",", "end_loc", "in", "partitions", ":", "if", "column", "!=", "'sid'", ":", "result", "=", "self", ".", "_bar_reader", ".", "load_raw_arrays", "(", "[", "column", "]", ",", "start", ",", "end", ",", "[", "sid", "]", ")", "[", "0", "]", "[", ":", ",", "0", "]", "else", ":", "result", "=", "int", "(", "sid", ")", "out", "[", "start_loc", ":", "end_loc", "+", "1", ",", "i", "]", "=", "result", "results", ".", "append", "(", "out", ")", "return", "results" ]
Parameters
----------
columns : list of str
    'open', 'high', 'low', 'close', or 'volume'
start_date: Timestamp
    Beginning of the window range.
end_date: Timestamp
    End of the window range.
assets : list of int
    The asset identifiers in the window.

Returns
-------
list of np.ndarray
    A list with an entry per field of ndarrays with shape
    (minutes in range, assets) with a dtype of float64
    (uint32 for 'volume'), containing the values for the
    respective field over start and end dt range.
[ "Parameters", "----------", "fields", ":", "list", "of", "str", "open", "high", "low", "close", "or", "volume", "start_dt", ":", "Timestamp", "Beginning", "of", "the", "window", "range", ".", "end_dt", ":", "Timestamp", "End", "of", "the", "window", "range", ".", "sids", ":", "list", "of", "int", "The", "asset", "identifiers", "in", "the", "window", "." ]
python
train
36.835443
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/prometheus/mixins.py#L437-L492
def process_metric(self, message, **kwargs):
    """
    Handle a prometheus metric message according to the following flow:
    - search self.metrics_mapper for a prometheus.metric <--> datadog.metric mapping
    - call check method with the same name as the metric
    - log some info if none of the above worked

    `send_histograms_buckets` specifies whether to send the buckets
    as tagged values when dealing with histograms.
    """
    # If targeted metric, store labels
    self.store_labels(message)

    if message.name in self.ignore_metrics:
        return  # Ignore the metric

    # Filter metric to see if we can enrich with joined labels
    self.join_labels(message)

    send_histograms_buckets = kwargs.get('send_histograms_buckets', True)
    send_monotonic_counter = kwargs.get('send_monotonic_counter', False)
    custom_tags = kwargs.get('custom_tags')
    ignore_unmapped = kwargs.get('ignore_unmapped', False)

    try:
        if not self._dry_run:
            try:
                self._submit(
                    self.metrics_mapper[message.name],
                    message,
                    send_histograms_buckets,
                    send_monotonic_counter,
                    custom_tags,
                )
            except KeyError:
                if not ignore_unmapped:
                    # call magic method (non-generic check)
                    handler = getattr(self, message.name)  # Lookup will throw AttributeError if not found
                    try:
                        handler(message, **kwargs)
                    except Exception as err:
                        self.log.warning("Error handling metric: {} - error: {}".format(message.name, err))
                else:
                    # build the wildcard list if first pass
                    if self._metrics_wildcards is None:
                        self._metrics_wildcards = [x for x in self.metrics_mapper.keys() if '*' in x]
                    # try matching wildcard (generic check)
                    for wildcard in self._metrics_wildcards:
                        if fnmatchcase(message.name, wildcard):
                            self._submit(
                                message.name, message, send_histograms_buckets, send_monotonic_counter, custom_tags
                            )
    except AttributeError as err:
        self.log.debug("Unable to handle metric: {} - error: {}".format(message.name, err))
[ "def", "process_metric", "(", "self", ",", "message", ",", "*", "*", "kwargs", ")", ":", "# If targeted metric, store labels", "self", ".", "store_labels", "(", "message", ")", "if", "message", ".", "name", "in", "self", ".", "ignore_metrics", ":", "return", "# Ignore the metric", "# Filter metric to see if we can enrich with joined labels", "self", ".", "join_labels", "(", "message", ")", "send_histograms_buckets", "=", "kwargs", ".", "get", "(", "'send_histograms_buckets'", ",", "True", ")", "send_monotonic_counter", "=", "kwargs", ".", "get", "(", "'send_monotonic_counter'", ",", "False", ")", "custom_tags", "=", "kwargs", ".", "get", "(", "'custom_tags'", ")", "ignore_unmapped", "=", "kwargs", ".", "get", "(", "'ignore_unmapped'", ",", "False", ")", "try", ":", "if", "not", "self", ".", "_dry_run", ":", "try", ":", "self", ".", "_submit", "(", "self", ".", "metrics_mapper", "[", "message", ".", "name", "]", ",", "message", ",", "send_histograms_buckets", ",", "send_monotonic_counter", ",", "custom_tags", ",", ")", "except", "KeyError", ":", "if", "not", "ignore_unmapped", ":", "# call magic method (non-generic check)", "handler", "=", "getattr", "(", "self", ",", "message", ".", "name", ")", "# Lookup will throw AttributeError if not found", "try", ":", "handler", "(", "message", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "err", ":", "self", ".", "log", ".", "warning", "(", "\"Error handling metric: {} - error: {}\"", ".", "format", "(", "message", ".", "name", ",", "err", ")", ")", "else", ":", "# build the wildcard list if first pass", "if", "self", ".", "_metrics_wildcards", "is", "None", ":", "self", ".", "_metrics_wildcards", "=", "[", "x", "for", "x", "in", "self", ".", "metrics_mapper", ".", "keys", "(", ")", "if", "'*'", "in", "x", "]", "# try matching wildcard (generic check)", "for", "wildcard", "in", "self", ".", "_metrics_wildcards", ":", "if", "fnmatchcase", "(", "message", ".", "name", ",", "wildcard", ")", ":", "self", ".", "_submit", "(", "message", ".", "name", ",", "message", ",", "send_histograms_buckets", ",", "send_monotonic_counter", ",", "custom_tags", ")", "except", "AttributeError", "as", "err", ":", "self", ".", "log", ".", "debug", "(", "\"Unable to handle metric: {} - error: {}\"", ".", "format", "(", "message", ".", "name", ",", "err", ")", ")" ]
Handle a prometheus metric message according to the following flow:
- search self.metrics_mapper for a prometheus.metric <--> datadog.metric mapping
- call check method with the same name as the metric
- log some info if none of the above worked

`send_histograms_buckets` specifies whether to send the buckets as tagged values when dealing with histograms.
[ "Handle", "a", "prometheus", "metric", "message", "according", "to", "the", "following", "flow", ":", "-", "search", "self", ".", "metrics_mapper", "for", "a", "prometheus", ".", "metric", "<", "--", ">", "datadog", ".", "metric", "mapping", "-", "call", "check", "method", "with", "the", "same", "name", "as", "the", "metric", "-", "log", "some", "info", "if", "none", "of", "the", "above", "worked" ]
python
train
46.910714
RPi-Distro/python-gpiozero
gpiozero/boards.py
https://github.com/RPi-Distro/python-gpiozero/blob/7b67374fd0c8c4fde5586d9bad9531f076db9c0c/gpiozero/boards.py#L480-L505
def toggle(self, *args): """ If no arguments are specified, toggle the state of all LEDs. If arguments are specified, they must be the indexes of the LEDs you wish to toggle. For example:: from gpiozero import LEDBoard leds = LEDBoard(2, 3, 4, 5) leds.toggle(0) # turn on the first LED (pin 2) leds.toggle(-1) # turn on the last LED (pin 5) leds.toggle() # turn the first and last LED off, and the # middle pair on If :meth:`blink` is currently active, it will be stopped first. :param int args: The index(es) of the LED(s) to toggle. If no indexes are specified toggle the state of all LEDs. """ self._stop_blink() if args: for index in args: self[index].toggle() else: super(LEDBoard, self).toggle()
[ "def", "toggle", "(", "self", ",", "*", "args", ")", ":", "self", ".", "_stop_blink", "(", ")", "if", "args", ":", "for", "index", "in", "args", ":", "self", "[", "index", "]", ".", "toggle", "(", ")", "else", ":", "super", "(", "LEDBoard", ",", "self", ")", ".", "toggle", "(", ")" ]
If no arguments are specified, toggle the state of all LEDs. If arguments are specified, they must be the indexes of the LEDs you wish to toggle. For example:: from gpiozero import LEDBoard leds = LEDBoard(2, 3, 4, 5) leds.toggle(0) # turn on the first LED (pin 2) leds.toggle(-1) # turn on the last LED (pin 5) leds.toggle() # turn the first and last LED off, and the # middle pair on If :meth:`blink` is currently active, it will be stopped first. :param int args: The index(es) of the LED(s) to toggle. If no indexes are specified toggle the state of all LEDs.
[ "If", "no", "arguments", "are", "specified", "toggle", "the", "state", "of", "all", "LEDs", ".", "If", "arguments", "are", "specified", "they", "must", "be", "the", "indexes", "of", "the", "LEDs", "you", "wish", "to", "toggle", ".", "For", "example", "::" ]
python
train
35.346154
swevm/scaleio-py
scaleiopy/api/scaleio/provisioning/volume.py
https://github.com/swevm/scaleio-py/blob/d043a0137cb925987fd5c895a3210968ce1d9028/scaleiopy/api/scaleio/provisioning/volume.py#L195-L206
def get_volume_by_name(self, name): """ Get ScaleIO Volume object by its Name :param name: Name of volume :return: ScaleIO Volume object :raise KeyError: No Volume with specified name found :rtype: ScaleIO Volume object """ for vol in self.conn.volumes: if vol.name == name: return vol raise KeyError("Volume with NAME " + name + " not found")
[ "def", "get_volume_by_name", "(", "self", ",", "name", ")", ":", "for", "vol", "in", "self", ".", "conn", ".", "volumes", ":", "if", "vol", ".", "name", "==", "name", ":", "return", "vol", "raise", "KeyError", "(", "\"Volume with NAME \"", "+", "name", "+", "\" not found\"", ")" ]
Get ScaleIO Volume object by its Name :param name: Name of volume :return: ScaleIO Volume object :raise KeyError: No Volume with specified name found :rtype: ScaleIO Volume object
[ "Get", "ScaleIO", "Volume", "object", "by", "its", "Name", ":", "param", "name", ":", "Name", "of", "volume", ":", "return", ":", "ScaleIO", "Volume", "object", ":", "raise", "KeyError", ":", "No", "Volume", "with", "specified", "name", "found", ":", "rtype", ":", "ScaleIO", "Volume", "object" ]
python
train
36
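The lookup is a linear scan that raises KeyError on a miss; the same behaviour with stand-in volume objects:

class Vol(object):
    def __init__(self, name):
        self.name = name

volumes = [Vol('data01'), Vol('logs01')]

def get_volume_by_name(name):
    for vol in volumes:
        if vol.name == name:
            return vol
    raise KeyError("Volume with NAME " + name + " not found")

assert get_volume_by_name('logs01').name == 'logs01'
try:
    get_volume_by_name('missing')
except KeyError:
    pass  # expected for an unknown name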
jjjake/internetarchive
internetarchive/api.py
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/api.py#L78-L116
def get_item(identifier, config=None, config_file=None, archive_session=None, debug=None, http_adapter_kwargs=None, request_kwargs=None): """Get an :class:`Item` object. :type identifier: str :param identifier: The globally unique Archive.org item identifier. :type config: dict :param config: (optional) A dictionary used to configure your session. :type config_file: str :param config_file: (optional) A path to a config file used to configure your session. :type archive_session: :class:`ArchiveSession` :param archive_session: (optional) An :class:`ArchiveSession` object can be provided via the ``archive_session`` parameter. :type http_adapter_kwargs: dict :param http_adapter_kwargs: (optional) Keyword arguments that :py:class:`requests.adapters.HTTPAdapter` takes. :type request_kwargs: dict :param request_kwargs: (optional) Keyword arguments that :py:class:`requests.Request` takes. Usage: >>> from internetarchive import get_item >>> item = get_item('nasa') >>> item.item_size 121084 """ if not archive_session: archive_session = get_session(config, config_file, debug, http_adapter_kwargs) return archive_session.get_item(identifier, request_kwargs=request_kwargs)
[ "def", "get_item", "(", "identifier", ",", "config", "=", "None", ",", "config_file", "=", "None", ",", "archive_session", "=", "None", ",", "debug", "=", "None", ",", "http_adapter_kwargs", "=", "None", ",", "request_kwargs", "=", "None", ")", ":", "if", "not", "archive_session", ":", "archive_session", "=", "get_session", "(", "config", ",", "config_file", ",", "debug", ",", "http_adapter_kwargs", ")", "return", "archive_session", ".", "get_item", "(", "identifier", ",", "request_kwargs", "=", "request_kwargs", ")" ]
Get an :class:`Item` object. :type identifier: str :param identifier: The globally unique Archive.org item identifier. :type config: dict :param config: (optional) A dictionary used to configure your session. :type config_file: str :param config_file: (optional) A path to a config file used to configure your session. :type archive_session: :class:`ArchiveSession` :param archive_session: (optional) An :class:`ArchiveSession` object can be provided via the ``archive_session`` parameter. :type http_adapter_kwargs: dict :param http_adapter_kwargs: (optional) Keyword arguments that :py:class:`requests.adapters.HTTPAdapter` takes. :type request_kwargs: dict :param request_kwargs: (optional) Keyword arguments that :py:class:`requests.Request` takes. Usage: >>> from internetarchive import get_item >>> item = get_item('nasa') >>> item.item_size 121084
[ "Get", "an", ":", "class", ":", "Item", "object", "." ]
python
train
36.384615
shoebot/shoebot
lib/web/google.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/google.py#L138-L146
def _parse(self, str): """ Parses the text data from an XML element defined by tag. """ str = replace_entities(str) str = strip_tags(str) str = collapse_spaces(str) return str
[ "def", "_parse", "(", "self", ",", "str", ")", ":", "str", "=", "replace_entities", "(", "str", ")", "str", "=", "strip_tags", "(", "str", ")", "str", "=", "collapse_spaces", "(", "str", ")", "return", "str" ]
Parses the text data from an XML element defined by tag.
[ "Parses", "the", "text", "data", "from", "an", "XML", "element", "defined", "by", "tag", "." ]
python
valid
25.888889
ConsenSys/mythril-classic
mythril/laser/ethereum/plugins/plugin_factory.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/ethereum/plugins/plugin_factory.py#L26-L32
def build_instruction_coverage_plugin() -> LaserPlugin: """ Creates an instance of the instruction coverage plugin""" from mythril.laser.ethereum.plugins.implementations.coverage import ( InstructionCoveragePlugin, ) return InstructionCoveragePlugin()
[ "def", "build_instruction_coverage_plugin", "(", ")", "->", "LaserPlugin", ":", "from", "mythril", ".", "laser", ".", "ethereum", ".", "plugins", ".", "implementations", ".", "coverage", "import", "(", "InstructionCoveragePlugin", ",", ")", "return", "InstructionCoveragePlugin", "(", ")" ]
Creates an instance of the instruction coverage plugin
[ "Creates", "an", "instance", "of", "the", "instruction", "coverage", "plugin" ]
python
train
41.428571
django-blog-zinnia/wordpress2zinnia
zinnia_wordpress/management/commands/wp2zinnia.py
https://github.com/django-blog-zinnia/wordpress2zinnia/blob/656df6d431418a660f0e590d2226af5e6dd7a3e6/zinnia_wordpress/management/commands/wp2zinnia.py#L357-L387
def import_entries(self, items):
    """
    Loops over items and finds entries to import; an entry needs
    to have 'post_type' set to 'post' and have content.
    """
    self.write_out(self.style.STEP('- Importing entries\n'))
    for item_node in items:
        title = (item_node.find('title').text or '')[:255]
        post_type = item_node.find('{%s}post_type' % WP_NS).text
        content = item_node.find(
            '{http://purl.org/rss/1.0/modules/content/}encoded').text
        if post_type == 'post' and content and title:
            self.write_out('> %s... ' % title)
            entry, created = self.import_entry(title, content, item_node)
            if created:
                self.write_out(self.style.ITEM('OK\n'))
                image_id = self.find_image_id(
                    item_node.findall('{%s}postmeta' % WP_NS))
                if image_id:
                    self.import_image(entry, items, image_id)
                self.import_comments(entry, item_node.findall(
                    '{%s}comment' % WP_NS))
            else:
                self.write_out(self.style.NOTICE(
                    'SKIPPED (already imported)\n'))
        else:
            self.write_out('> %s... ' % title, 2)
            self.write_out(self.style.NOTICE('SKIPPED (not a post)\n'), 2)
[ "def", "import_entries", "(", "self", ",", "items", ")", ":", "self", ".", "write_out", "(", "self", ".", "style", ".", "STEP", "(", "'- Importing entries\\n'", ")", ")", "for", "item_node", "in", "items", ":", "title", "=", "(", "item_node", ".", "find", "(", "'title'", ")", ".", "text", "or", "''", ")", "[", ":", "255", "]", "post_type", "=", "item_node", ".", "find", "(", "'{%s}post_type'", "%", "WP_NS", ")", ".", "text", "content", "=", "item_node", ".", "find", "(", "'{http://purl.org/rss/1.0/modules/content/}encoded'", ")", ".", "text", "if", "post_type", "==", "'post'", "and", "content", "and", "title", ":", "self", ".", "write_out", "(", "'> %s... '", "%", "title", ")", "entry", ",", "created", "=", "self", ".", "import_entry", "(", "title", ",", "content", ",", "item_node", ")", "if", "created", ":", "self", ".", "write_out", "(", "self", ".", "style", ".", "ITEM", "(", "'OK\\n'", ")", ")", "image_id", "=", "self", ".", "find_image_id", "(", "item_node", ".", "findall", "(", "'{%s}postmeta'", "%", "WP_NS", ")", ")", "if", "image_id", ":", "self", ".", "import_image", "(", "entry", ",", "items", ",", "image_id", ")", "self", ".", "import_comments", "(", "entry", ",", "item_node", ".", "findall", "(", "'{%s}comment'", "%", "WP_NS", ")", ")", "else", ":", "self", ".", "write_out", "(", "self", ".", "style", ".", "NOTICE", "(", "'SKIPPED (already imported)\\n'", ")", ")", "else", ":", "self", ".", "write_out", "(", "'> %s... '", "%", "title", ",", "2", ")", "self", ".", "write_out", "(", "self", ".", "style", ".", "NOTICE", "(", "'SKIPPED (not a post)\\n'", ")", ",", "2", ")" ]
Loops over items and finds entries to import; an entry needs to have 'post_type' set to 'post' and have content.
[ "Loops", "over", "items", "and", "find", "entry", "to", "import", "an", "entry", "need", "to", "have", "post_type", "set", "to", "post", "and", "have", "content", "." ]
python
train
44.935484
thorgate/django-esteid
esteid/digidocservice/service.py
https://github.com/thorgate/django-esteid/blob/407ae513e357fedea0e3e42198df8eb9d9ff0646/esteid/digidocservice/service.py#L299-L334
def mobile_sign(self, id_code, country, phone_nr, language=None, signing_profile='LT_TM'): """ This can be used to add a signature to existing data files WARNING: Must have at least one datafile in the session """ if not (self.container and isinstance(self.container, PreviouslyCreatedContainer)): assert self.data_files, 'To use MobileSign endpoint the application must ' \ 'add at least one data file to users session' response = self.__invoke('MobileSign', { 'SignerIDCode': id_code, 'SignersCountry': country, 'SignerPhoneNo': phone_nr, 'Language': self.parse_language(language), 'Role': SkipValue, 'City': SkipValue, 'StateOrProvince': SkipValue, 'PostalCode': SkipValue, 'CountryName': SkipValue, 'ServiceName': self.service_name, 'AdditionalDataToBeDisplayed': self.mobile_message, # Either LT or LT_TM, see: http://sk-eid.github.io/dds-documentation/api/api_docs/#mobilesign 'SigningProfile': signing_profile, 'MessagingMode': 'asynchClientServer', 'AsyncConfiguration': SkipValue, 'ReturnDocInfo': SkipValue, 'ReturnDocData': SkipValue, }) return response
[ "def", "mobile_sign", "(", "self", ",", "id_code", ",", "country", ",", "phone_nr", ",", "language", "=", "None", ",", "signing_profile", "=", "'LT_TM'", ")", ":", "if", "not", "(", "self", ".", "container", "and", "isinstance", "(", "self", ".", "container", ",", "PreviouslyCreatedContainer", ")", ")", ":", "assert", "self", ".", "data_files", ",", "'To use MobileSign endpoint the application must '", "'add at least one data file to users session'", "response", "=", "self", ".", "__invoke", "(", "'MobileSign'", ",", "{", "'SignerIDCode'", ":", "id_code", ",", "'SignersCountry'", ":", "country", ",", "'SignerPhoneNo'", ":", "phone_nr", ",", "'Language'", ":", "self", ".", "parse_language", "(", "language", ")", ",", "'Role'", ":", "SkipValue", ",", "'City'", ":", "SkipValue", ",", "'StateOrProvince'", ":", "SkipValue", ",", "'PostalCode'", ":", "SkipValue", ",", "'CountryName'", ":", "SkipValue", ",", "'ServiceName'", ":", "self", ".", "service_name", ",", "'AdditionalDataToBeDisplayed'", ":", "self", ".", "mobile_message", ",", "# Either LT or LT_TM, see: http://sk-eid.github.io/dds-documentation/api/api_docs/#mobilesign", "'SigningProfile'", ":", "signing_profile", ",", "'MessagingMode'", ":", "'asynchClientServer'", ",", "'AsyncConfiguration'", ":", "SkipValue", ",", "'ReturnDocInfo'", ":", "SkipValue", ",", "'ReturnDocData'", ":", "SkipValue", ",", "}", ")", "return", "response" ]
This can be used to add a signature to existing data files. WARNING: Must have at least one data file in the session.
[ "This", "can", "be", "used", "to", "add", "a", "signature", "to", "existing", "data", "files" ]
python
train
37.5
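A hedged usage sketch for mobile_sign, assuming `service` is an initialized DigiDocService with an open session that already holds at least one data file; the ID code and phone number are placeholders.

# `service` is assumed to be a DigiDocService instance with an open session
# and at least one data file already added.
response = service.mobile_sign(
    id_code='51001091072',       # placeholder personal ID code
    country='EE',
    phone_nr='+37260000007',     # placeholder phone number
    language='EST',
    signing_profile='LT_TM',     # LT or LT_TM, per the DDS docs linked in the code
)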
nicolargo/glances
glances/outputs/glances_curses.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/outputs/glances_curses.py#L627-L652
def __display_header(self, stat_display): """Display the firsts lines (header) in the Curses interface. system + ip + uptime (cloud) """ # First line self.new_line() self.space_between_column = 0 l_uptime = (self.get_stats_display_width(stat_display["system"]) + self.get_stats_display_width(stat_display["ip"]) + self.get_stats_display_width(stat_display["uptime"]) + 1) self.display_plugin( stat_display["system"], display_optional=(self.screen.getmaxyx()[1] >= l_uptime)) self.space_between_column = 3 self.new_column() self.display_plugin(stat_display["ip"]) self.new_column() self.display_plugin( stat_display["uptime"], add_space=-(self.get_stats_display_width(stat_display["cloud"]) != 0)) # Second line (optional) self.init_column() self.new_line() self.display_plugin(stat_display["cloud"])
[ "def", "__display_header", "(", "self", ",", "stat_display", ")", ":", "# First line", "self", ".", "new_line", "(", ")", "self", ".", "space_between_column", "=", "0", "l_uptime", "=", "(", "self", ".", "get_stats_display_width", "(", "stat_display", "[", "\"system\"", "]", ")", "+", "self", ".", "get_stats_display_width", "(", "stat_display", "[", "\"ip\"", "]", ")", "+", "self", ".", "get_stats_display_width", "(", "stat_display", "[", "\"uptime\"", "]", ")", "+", "1", ")", "self", ".", "display_plugin", "(", "stat_display", "[", "\"system\"", "]", ",", "display_optional", "=", "(", "self", ".", "screen", ".", "getmaxyx", "(", ")", "[", "1", "]", ">=", "l_uptime", ")", ")", "self", ".", "space_between_column", "=", "3", "self", ".", "new_column", "(", ")", "self", ".", "display_plugin", "(", "stat_display", "[", "\"ip\"", "]", ")", "self", ".", "new_column", "(", ")", "self", ".", "display_plugin", "(", "stat_display", "[", "\"uptime\"", "]", ",", "add_space", "=", "-", "(", "self", ".", "get_stats_display_width", "(", "stat_display", "[", "\"cloud\"", "]", ")", "!=", "0", ")", ")", "# Second line (optional)", "self", ".", "init_column", "(", ")", "self", ".", "new_line", "(", ")", "self", ".", "display_plugin", "(", "stat_display", "[", "\"cloud\"", "]", ")" ]
Display the first lines (header) in the Curses interface. system + ip + uptime (cloud)
[ "Display", "the", "firsts", "lines", "(", "header", ")", "in", "the", "Curses", "interface", "." ]
python
train
38.730769
dereneaton/ipyrad
ipyrad/core/assembly.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/core/assembly.py#L1069-L1089
def _step4func(self, samples, force, ipyclient): """ hidden wrapped function to start step 4 """ if self._headers: print("\n Step 4: Joint estimation of error rate and heterozygosity") ## Get sample objects from list of strings samples = _get_samples(self, samples) ## Check if all/none in the right state if not self._samples_precheck(samples, 4, force): raise IPyradError(FIRST_RUN_3) elif not force: ## skip if all are finished if all([i.stats.state >= 4 for i in samples]): print(JOINTS_EXIST.format(len(samples))) return ## send to function assemble.jointestimate.run(self, samples, force, ipyclient)
[ "def", "_step4func", "(", "self", ",", "samples", ",", "force", ",", "ipyclient", ")", ":", "if", "self", ".", "_headers", ":", "print", "(", "\"\\n Step 4: Joint estimation of error rate and heterozygosity\"", ")", "## Get sample objects from list of strings", "samples", "=", "_get_samples", "(", "self", ",", "samples", ")", "## Check if all/none in the right state", "if", "not", "self", ".", "_samples_precheck", "(", "samples", ",", "4", ",", "force", ")", ":", "raise", "IPyradError", "(", "FIRST_RUN_3", ")", "elif", "not", "force", ":", "## skip if all are finished", "if", "all", "(", "[", "i", ".", "stats", ".", "state", ">=", "4", "for", "i", "in", "samples", "]", ")", ":", "print", "(", "JOINTS_EXIST", ".", "format", "(", "len", "(", "samples", ")", ")", ")", "return", "## send to function", "assemble", ".", "jointestimate", ".", "run", "(", "self", ",", "samples", ",", "force", ",", "ipyclient", ")" ]
hidden wrapper function to start step 4
[ "hidden", "wrapped", "function", "to", "start", "step", "4" ]
python
valid
35.380952
jonathf/chaospy
chaospy/distributions/operators/power.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/power.py#L149-L186
def _ppf(self, q, left, right, cache): """ Point percentile function. Example: >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9])) [0.1 0.2 0.9] >>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9])) [0.01 0.04 0.81] >>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9])) [0.52631579 0.55555556 0.90909091] >>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9])) [1.07177346 1.14869835 1.86606598] >>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9])) [0.53588673 0.57434918 0.93303299] >>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9])) [8. 8. 8.] """ left = evaluation.get_inverse_cache(left, cache) right = evaluation.get_inverse_cache(right, cache) if isinstance(left, Dist): if isinstance(right, Dist): raise StochasticallyDependentError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): return left**right else: out = evaluation.evaluate_inverse(right, q, cache=cache) out = numpy.where(left < 0, 1-out, out) out = left**out return out right = right + numpy.zeros(q.shape) q = numpy.where(right < 0, 1-q, q) out = evaluation.evaluate_inverse(left, q, cache=cache)**right return out
[ "def", "_ppf", "(", "self", ",", "q", ",", "left", ",", "right", ",", "cache", ")", ":", "left", "=", "evaluation", ".", "get_inverse_cache", "(", "left", ",", "cache", ")", "right", "=", "evaluation", ".", "get_inverse_cache", "(", "right", ",", "cache", ")", "if", "isinstance", "(", "left", ",", "Dist", ")", ":", "if", "isinstance", "(", "right", ",", "Dist", ")", ":", "raise", "StochasticallyDependentError", "(", "\"under-defined distribution {} or {}\"", ".", "format", "(", "left", ",", "right", ")", ")", "elif", "not", "isinstance", "(", "right", ",", "Dist", ")", ":", "return", "left", "**", "right", "else", ":", "out", "=", "evaluation", ".", "evaluate_inverse", "(", "right", ",", "q", ",", "cache", "=", "cache", ")", "out", "=", "numpy", ".", "where", "(", "left", "<", "0", ",", "1", "-", "out", ",", "out", ")", "out", "=", "left", "**", "out", "return", "out", "right", "=", "right", "+", "numpy", ".", "zeros", "(", "q", ".", "shape", ")", "q", "=", "numpy", ".", "where", "(", "right", "<", "0", ",", "1", "-", "q", ",", "q", ")", "out", "=", "evaluation", ".", "evaluate_inverse", "(", "left", ",", "q", ",", "cache", "=", "cache", ")", "**", "right", "return", "out" ]
Point percentile function. Example: >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9])) [0.1 0.2 0.9] >>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9])) [0.01 0.04 0.81] >>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9])) [0.52631579 0.55555556 0.90909091] >>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9])) [1.07177346 1.14869835 1.86606598] >>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9])) [0.53588673 0.57434918 0.93303299] >>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9])) [8. 8. 8.]
[ "Point", "percentile", "function", "." ]
python
train
39.605263
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L641-L647
def decode(self, data): """ Decode an armoured string into a chunk. The decoded output is null-terminated, so it may be treated as a string, if that's what it was prior to encoding. """ return Zchunk(lib.zarmour_decode(self._as_parameter_, data), True)
[ "def", "decode", "(", "self", ",", "data", ")", ":", "return", "Zchunk", "(", "lib", ".", "zarmour_decode", "(", "self", ".", "_as_parameter_", ",", "data", ")", ",", "True", ")" ]
Decode an armoured string into a chunk. The decoded output is null-terminated, so it may be treated as a string, if that's what it was prior to encoding.
[ "Decode", "an", "armoured", "string", "into", "a", "chunk", ".", "The", "decoded", "output", "is", "null", "-", "terminated", "so", "it", "may", "be", "treated", "as", "a", "string", "if", "that", "s", "what", "it", "was", "prior", "to", "encoding", "." ]
python
train
39.714286
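A round-trip sketch for decode; that this binding also exposes a Zarmour() constructor and a matching encode() wrapper (mirroring czmq's zarmour_encode) is an assumption, not confirmed by this record.

armour = Zarmour()                         # constructor assumed from the binding
encoded = armour.encode(b'hello world')    # encode() is an assumption here
chunk = armour.decode(encoded)             # Zchunk holding the original bytes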
jedie/DragonPy
dragonpy/core/machine.py
https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/dragonpy/core/machine.py#L95-L111
def inject_basic_program(self, ascii_listing): """ save the given ASCII BASIC program listing into the emulator RAM. """ program_start = self.cpu.memory.read_word( self.machine_api.PROGRAM_START_ADDR ) tokens = self.machine_api.ascii_listing2program_dump(ascii_listing) self.cpu.memory.load(program_start, tokens) log.critical("BASIC program injected into Memory.") # Update the BASIC addresses: program_end = program_start + len(tokens) self.cpu.memory.write_word(self.machine_api.VARIABLES_START_ADDR, program_end) self.cpu.memory.write_word(self.machine_api.ARRAY_START_ADDR, program_end) self.cpu.memory.write_word(self.machine_api.FREE_SPACE_START_ADDR, program_end) log.critical("BASIC addresses updated.")
[ "def", "inject_basic_program", "(", "self", ",", "ascii_listing", ")", ":", "program_start", "=", "self", ".", "cpu", ".", "memory", ".", "read_word", "(", "self", ".", "machine_api", ".", "PROGRAM_START_ADDR", ")", "tokens", "=", "self", ".", "machine_api", ".", "ascii_listing2program_dump", "(", "ascii_listing", ")", "self", ".", "cpu", ".", "memory", ".", "load", "(", "program_start", ",", "tokens", ")", "log", ".", "critical", "(", "\"BASIC program injected into Memory.\"", ")", "# Update the BASIC addresses:", "program_end", "=", "program_start", "+", "len", "(", "tokens", ")", "self", ".", "cpu", ".", "memory", ".", "write_word", "(", "self", ".", "machine_api", ".", "VARIABLES_START_ADDR", ",", "program_end", ")", "self", ".", "cpu", ".", "memory", ".", "write_word", "(", "self", ".", "machine_api", ".", "ARRAY_START_ADDR", ",", "program_end", ")", "self", ".", "cpu", ".", "memory", ".", "write_word", "(", "self", ".", "machine_api", ".", "FREE_SPACE_START_ADDR", ",", "program_end", ")", "log", ".", "critical", "(", "\"BASIC addresses updated.\"", ")" ]
save the given ASCII BASIC program listing into the emulator RAM.
[ "save", "the", "given", "ASCII", "BASIC", "program", "listing", "into", "the", "emulator", "RAM", "." ]
python
train
48.294118
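A short sketch of injecting a listing, assuming `machine` is a running machine instance from this module with CPU and memory already set up.

listing = '10 PRINT "HELLO WORLD"\n20 GOTO 10\n'
machine.inject_basic_program(listing)      # tokenized and written into RAM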
pyapi-gitlab/pyapi-gitlab
gitlab/__init__.py
https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L1134-L1161
def createmergerequest(self, project_id, sourcebranch, targetbranch, title, target_project_id=None, assignee_id=None): """ Create a new merge request. :param project_id: ID of the project originating the merge request :param sourcebranch: name of the branch to merge from :param targetbranch: name of the branch to merge to :param title: Title of the merge request :param assignee_id: Assignee user ID :return: dict of the new merge request """ data = { 'source_branch': sourcebranch, 'target_branch': targetbranch, 'title': title, 'assignee_id': assignee_id, 'target_project_id': target_project_id } request = requests.post( '{0}/{1}/merge_requests'.format(self.projects_url, project_id), data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout) if request.status_code == 201: return request.json() else: return False
[ "def", "createmergerequest", "(", "self", ",", "project_id", ",", "sourcebranch", ",", "targetbranch", ",", "title", ",", "target_project_id", "=", "None", ",", "assignee_id", "=", "None", ")", ":", "data", "=", "{", "'source_branch'", ":", "sourcebranch", ",", "'target_branch'", ":", "targetbranch", ",", "'title'", ":", "title", ",", "'assignee_id'", ":", "assignee_id", ",", "'target_project_id'", ":", "target_project_id", "}", "request", "=", "requests", ".", "post", "(", "'{0}/{1}/merge_requests'", ".", "format", "(", "self", ".", "projects_url", ",", "project_id", ")", ",", "data", "=", "data", ",", "headers", "=", "self", ".", "headers", ",", "verify", "=", "self", ".", "verify_ssl", ",", "auth", "=", "self", ".", "auth", ",", "timeout", "=", "self", ".", "timeout", ")", "if", "request", ".", "status_code", "==", "201", ":", "return", "request", ".", "json", "(", ")", "else", ":", "return", "False" ]
Create a new merge request. :param project_id: ID of the project originating the merge request :param sourcebranch: name of the branch to merge from :param targetbranch: name of the branch to merge to :param title: Title of the merge request :param target_project_id: ID of the target project (optional) :param assignee_id: Assignee user ID :return: dict of the new merge request, or False on failure
[ "Create", "a", "new", "merge", "request", "." ]
python
train
38.535714
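A hedged usage sketch for createmergerequest; the host, token and project id are placeholders, and the connection style is assumed from pyapi-gitlab's usual pattern.

import gitlab   # pyapi-gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', token='SECRET')   # placeholders
mr = gl.createmergerequest(42, 'feature/login', 'master', 'Add login form')
if mr is False:
    print('merge request creation failed')   # the method returns False on non-201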
AASHE/django-constant-contact
django_constant_contact/models.py
https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L216-L219
def pre_save(cls, sender, instance, *args, **kwargs): """Pull constant_contact_id out of data. """ instance.constant_contact_id = str(instance.data['id'])
[ "def", "pre_save", "(", "cls", ",", "sender", ",", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "instance", ".", "constant_contact_id", "=", "str", "(", "instance", ".", "data", "[", "'id'", "]", ")" ]
Pull constant_contact_id out of data.
[ "Pull", "constant_contact_id", "out", "of", "data", "." ]
python
train
43.75
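The classmethod above reads like a Django pre_save receiver; a wiring sketch follows, with the model name purely hypothetical.

from django.db.models.signals import pre_save

# ConstantContactEmail is a hypothetical stand-in for the model class that
# defines the pre_save() handler above.
pre_save.connect(ConstantContactEmail.pre_save, sender=ConstantContactEmail)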
SeabornGames/Meta
seaborn_meta/calling_function.py
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L223-L249
def func_frame(function_index, function_name=None): """ This will return the class_name and function_name of the function traced back two functions. :param function_index: int of how many frames back the program should look (2 will give the parent of the caller) :param function_name: str of what function to look for (should not be used with function_index :return frame: this will return the frame of the calling function """ frm = inspect.currentframe() if function_name is not None: function_name = function_name.split('*')[0] # todo replace this # todo with regex for i in range(1000): if frm.f_code.co_name.startswith(function_name): break frm = frm.f_back else: for i in range(function_index): frm = frm.f_back try: # this is pycharm debugger inserting middleware if frm.f_code.co_name == 'run_code': frm = frm.f_back except: pass return frm
[ "def", "func_frame", "(", "function_index", ",", "function_name", "=", "None", ")", ":", "frm", "=", "inspect", ".", "currentframe", "(", ")", "if", "function_name", "is", "not", "None", ":", "function_name", "=", "function_name", ".", "split", "(", "'*'", ")", "[", "0", "]", "# todo replace this", "# todo with regex", "for", "i", "in", "range", "(", "1000", ")", ":", "if", "frm", ".", "f_code", ".", "co_name", ".", "startswith", "(", "function_name", ")", ":", "break", "frm", "=", "frm", ".", "f_back", "else", ":", "for", "i", "in", "range", "(", "function_index", ")", ":", "frm", "=", "frm", ".", "f_back", "try", ":", "# this is pycharm debugger inserting middleware", "if", "frm", ".", "f_code", ".", "co_name", "==", "'run_code'", ":", "frm", "=", "frm", ".", "f_back", "except", ":", "pass", "return", "frm" ]
This will return the frame of the function traced back function_index frames. :param function_index: int of how many frames back the program should look (2 will give the parent of the caller) :param function_name: str of what function to look for (should not be used with function_index) :return frame: this will return the frame of the calling function
[ "This", "will", "return", "the", "class_name", "and", "function_name", "of", "the", "function", "traced", "back", "two", "functions", "." ]
python
train
40.259259
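A small sketch showing the function_index semantics described in the docstring, assuming func_frame is imported into scope.

def inner():
    # function_index=2 walks back past inner() to its parent, outer()
    frame = func_frame(2)
    return frame.f_code.co_name

def outer():
    return inner()

print(outer())   # expected to print 'outer'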
Jaymon/pout
pout/value.py
https://github.com/Jaymon/pout/blob/fa71b64384ddeb3b538855ed93e785d9985aad05/pout/value.py#L393-L407
def _add_indent(self, val, indent_count): ''' add whitespace to the beginning of each line of val link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/ val -- string indent -- integer -- how much whitespace we want in front of each line of val return -- string -- val with more whitespace ''' if isinstance(val, Value): val = val.string_value() return String(val).indent(indent_count)
[ "def", "_add_indent", "(", "self", ",", "val", ",", "indent_count", ")", ":", "if", "isinstance", "(", "val", ",", "Value", ")", ":", "val", "=", "val", ".", "string_value", "(", ")", "return", "String", "(", "val", ")", ".", "indent", "(", "indent_count", ")" ]
add whitespace to the beginning of each line of val link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/ val -- string indent -- integer -- how much whitespace we want in front of each line of val return -- string -- val with more whitespace
[ "add", "whitespace", "to", "the", "beginning", "of", "each", "line", "of", "val" ]
python
train
33.533333
iotile/coretools
iotilecore/iotile/core/hw/transport/adapter/mixin_notifications.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapter/mixin_notifications.py#L152-L196
async def _notify_event_internal(self, conn_string, name, event): """Notify that an event has occured. This method will send a notification and ensure that all callbacks registered for it have completed by the time it returns. In particular, if the callbacks are awaitable, this method will await them before returning. The order in which the callbacks are called is undefined. This is a low level method that is not intended to be called directly. You should use the high level public notify_* methods for each of the types of events to ensure consistency in how the event objects are created. Args: conn_string (str): The connection string for the device that the event is associated with. name (str): The name of the event. Must be in SUPPORTED_EVENTS. event (object): The event object. The type of this object will depend on what is being notified. """ try: self._currently_notifying = True conn_id = self._get_conn_id(conn_string) event_maps = self._monitors.get(conn_string, {}) wildcard_maps = self._monitors.get(None, {}) wildcard_handlers = wildcard_maps.get(name, {}) event_handlers = event_maps.get(name, {}) for handler, func in itertools.chain(event_handlers.items(), wildcard_handlers.items()): try: result = func(conn_string, conn_id, name, event) if inspect.isawaitable(result): await result except: #pylint:disable=bare-except;This is a background function and we are logging exceptions self._logger.warning("Error calling notification callback id=%s, func=%s", handler, func, exc_info=True) finally: for action in self._deferred_adjustments: self._adjust_monitor_internal(*action) self._deferred_adjustments = [] self._currently_notifying = False
[ "async", "def", "_notify_event_internal", "(", "self", ",", "conn_string", ",", "name", ",", "event", ")", ":", "try", ":", "self", ".", "_currently_notifying", "=", "True", "conn_id", "=", "self", ".", "_get_conn_id", "(", "conn_string", ")", "event_maps", "=", "self", ".", "_monitors", ".", "get", "(", "conn_string", ",", "{", "}", ")", "wildcard_maps", "=", "self", ".", "_monitors", ".", "get", "(", "None", ",", "{", "}", ")", "wildcard_handlers", "=", "wildcard_maps", ".", "get", "(", "name", ",", "{", "}", ")", "event_handlers", "=", "event_maps", ".", "get", "(", "name", ",", "{", "}", ")", "for", "handler", ",", "func", "in", "itertools", ".", "chain", "(", "event_handlers", ".", "items", "(", ")", ",", "wildcard_handlers", ".", "items", "(", ")", ")", ":", "try", ":", "result", "=", "func", "(", "conn_string", ",", "conn_id", ",", "name", ",", "event", ")", "if", "inspect", ".", "isawaitable", "(", "result", ")", ":", "await", "result", "except", ":", "#pylint:disable=bare-except;This is a background function and we are logging exceptions", "self", ".", "_logger", ".", "warning", "(", "\"Error calling notification callback id=%s, func=%s\"", ",", "handler", ",", "func", ",", "exc_info", "=", "True", ")", "finally", ":", "for", "action", "in", "self", ".", "_deferred_adjustments", ":", "self", ".", "_adjust_monitor_internal", "(", "*", "action", ")", "self", ".", "_deferred_adjustments", "=", "[", "]", "self", ".", "_currently_notifying", "=", "False" ]
Notify that an event has occurred. This method will send a notification and ensure that all callbacks registered for it have completed by the time it returns. In particular, if the callbacks are awaitable, this method will await them before returning. The order in which the callbacks are called is undefined. This is a low level method that is not intended to be called directly. You should use the high level public notify_* methods for each of the types of events to ensure consistency in how the event objects are created. Args: conn_string (str): The connection string for the device that the event is associated with. name (str): The name of the event. Must be in SUPPORTED_EVENTS. event (object): The event object. The type of this object will depend on what is being notified.
[ "Notify", "that", "an", "event", "has", "occured", "." ]
python
train
45.866667
ssalentin/plip
plip/modules/preparation.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/preparation.py#L43-L117
def parse_pdb(self): """Extracts additional information from PDB files. I. When reading in a PDB file, OpenBabel numbers ATOMS and HETATOMS continously. In PDB files, TER records are also counted, leading to a different numbering system. This functions reads in a PDB file and provides a mapping as a dictionary. II. Additionally, it returns a list of modified residues. III. Furthermore, covalent linkages between ligands and protein residues/other ligands are identified IV. Alternative conformations """ if self.as_string: fil = self.pdbpath.rstrip('\n').split('\n') # Removing trailing newline character else: f = read(self.pdbpath) fil = f.readlines() f.close() corrected_lines = [] i, j = 0, 0 # idx and PDB numbering d = {} modres = set() covalent = [] alt = [] previous_ter = False # Standard without fixing if not config.NOFIX: if not config.PLUGIN_MODE: lastnum = 0 # Atom numbering (has to be consecutive) other_models = False for line in fil: if not other_models: # Only consider the first model in an NRM structure corrected_line, newnum = self.fix_pdbline(line, lastnum) if corrected_line is not None: if corrected_line.startswith('MODEL'): try: # Get number of MODEL (1,2,3) model_num = int(corrected_line[10:14]) if model_num > 1: # MODEL 2,3,4 etc. other_models = True except ValueError: write_message("Ignoring invalid MODEL entry: %s\n" % corrected_line, mtype='debug') corrected_lines.append(corrected_line) lastnum = newnum corrected_pdb = ''.join(corrected_lines) else: corrected_pdb = self.pdbpath corrected_lines = fil else: corrected_pdb = self.pdbpath corrected_lines = fil for line in corrected_lines: if line.startswith(("ATOM", "HETATM")): # Retrieve alternate conformations atomid, location = int(line[6:11]), line[16] location = 'A' if location == ' ' else location if location != 'A': alt.append(atomid) if not previous_ter: i += 1 j += 1 else: i += 1 j += 2 d[i] = j previous_ter = False # Numbering Changes at TER records if line.startswith("TER"): previous_ter = True # Get modified residues if line.startswith("MODRES"): modres.add(line[12:15].strip()) # Get covalent linkages between ligands if line.startswith("LINK"): covalent.append(self.get_linkage(line)) return d, modres, covalent, alt, corrected_pdb
[ "def", "parse_pdb", "(", "self", ")", ":", "if", "self", ".", "as_string", ":", "fil", "=", "self", ".", "pdbpath", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "'\\n'", ")", "# Removing trailing newline character", "else", ":", "f", "=", "read", "(", "self", ".", "pdbpath", ")", "fil", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "corrected_lines", "=", "[", "]", "i", ",", "j", "=", "0", ",", "0", "# idx and PDB numbering", "d", "=", "{", "}", "modres", "=", "set", "(", ")", "covalent", "=", "[", "]", "alt", "=", "[", "]", "previous_ter", "=", "False", "# Standard without fixing", "if", "not", "config", ".", "NOFIX", ":", "if", "not", "config", ".", "PLUGIN_MODE", ":", "lastnum", "=", "0", "# Atom numbering (has to be consecutive)", "other_models", "=", "False", "for", "line", "in", "fil", ":", "if", "not", "other_models", ":", "# Only consider the first model in an NRM structure", "corrected_line", ",", "newnum", "=", "self", ".", "fix_pdbline", "(", "line", ",", "lastnum", ")", "if", "corrected_line", "is", "not", "None", ":", "if", "corrected_line", ".", "startswith", "(", "'MODEL'", ")", ":", "try", ":", "# Get number of MODEL (1,2,3)", "model_num", "=", "int", "(", "corrected_line", "[", "10", ":", "14", "]", ")", "if", "model_num", ">", "1", ":", "# MODEL 2,3,4 etc.", "other_models", "=", "True", "except", "ValueError", ":", "write_message", "(", "\"Ignoring invalid MODEL entry: %s\\n\"", "%", "corrected_line", ",", "mtype", "=", "'debug'", ")", "corrected_lines", ".", "append", "(", "corrected_line", ")", "lastnum", "=", "newnum", "corrected_pdb", "=", "''", ".", "join", "(", "corrected_lines", ")", "else", ":", "corrected_pdb", "=", "self", ".", "pdbpath", "corrected_lines", "=", "fil", "else", ":", "corrected_pdb", "=", "self", ".", "pdbpath", "corrected_lines", "=", "fil", "for", "line", "in", "corrected_lines", ":", "if", "line", ".", "startswith", "(", "(", "\"ATOM\"", ",", "\"HETATM\"", ")", ")", ":", "# Retrieve alternate conformations", "atomid", ",", "location", "=", "int", "(", "line", "[", "6", ":", "11", "]", ")", ",", "line", "[", "16", "]", "location", "=", "'A'", "if", "location", "==", "' '", "else", "location", "if", "location", "!=", "'A'", ":", "alt", ".", "append", "(", "atomid", ")", "if", "not", "previous_ter", ":", "i", "+=", "1", "j", "+=", "1", "else", ":", "i", "+=", "1", "j", "+=", "2", "d", "[", "i", "]", "=", "j", "previous_ter", "=", "False", "# Numbering Changes at TER records", "if", "line", ".", "startswith", "(", "\"TER\"", ")", ":", "previous_ter", "=", "True", "# Get modified residues", "if", "line", ".", "startswith", "(", "\"MODRES\"", ")", ":", "modres", ".", "add", "(", "line", "[", "12", ":", "15", "]", ".", "strip", "(", ")", ")", "# Get covalent linkages between ligands", "if", "line", ".", "startswith", "(", "\"LINK\"", ")", ":", "covalent", ".", "append", "(", "self", ".", "get_linkage", "(", "line", ")", ")", "return", "d", ",", "modres", ",", "covalent", ",", "alt", ",", "corrected_pdb" ]
Extracts additional information from PDB files. I. When reading in a PDB file, OpenBabel numbers ATOMS and HETATOMS continuously. In PDB files, TER records are also counted, leading to a different numbering system. This function reads in a PDB file and provides a mapping as a dictionary. II. Additionally, it returns a list of modified residues. III. Furthermore, covalent linkages between ligands and protein residues/other ligands are identified. IV. Alternative conformations
[ "Extracts", "additional", "information", "from", "PDB", "files", ".", "I", ".", "When", "reading", "in", "a", "PDB", "file", "OpenBabel", "numbers", "ATOMS", "and", "HETATOMS", "continously", ".", "In", "PDB", "files", "TER", "records", "are", "also", "counted", "leading", "to", "a", "different", "numbering", "system", ".", "This", "functions", "reads", "in", "a", "PDB", "file", "and", "provides", "a", "mapping", "as", "a", "dictionary", ".", "II", ".", "Additionally", "it", "returns", "a", "list", "of", "modified", "residues", ".", "III", ".", "Furthermore", "covalent", "linkages", "between", "ligands", "and", "protein", "residues", "/", "other", "ligands", "are", "identified", "IV", ".", "Alternative", "conformations" ]
python
train
43.6
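A sketch of consuming the five-tuple parse_pdb returns; `prep` stands in for an instance of the preparation class this method belongs to, already pointed at a PDB file.

mapping, modres, covalent, alt_locations, corrected_pdb = prep.parse_pdb()

print(mapping[1])                # OpenBabel atom index 1 -> PDB serial number
print(sorted(modres))            # names of modified residues, if any
print(len(covalent), 'covalent linkage records')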
ejeschke/ginga
ginga/BaseImage.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L167-L183
def set_data(self, data_np, metadata=None, order=None, astype=None): """Use this method to SHARE (not copy) the incoming array. """ if astype: data = data_np.astype(astype, copy=False) else: data = data_np self._data = data self._calc_order(order) if metadata: self.update_metadata(metadata) self._set_minmax() self.make_callback('modified')
[ "def", "set_data", "(", "self", ",", "data_np", ",", "metadata", "=", "None", ",", "order", "=", "None", ",", "astype", "=", "None", ")", ":", "if", "astype", ":", "data", "=", "data_np", ".", "astype", "(", "astype", ",", "copy", "=", "False", ")", "else", ":", "data", "=", "data_np", "self", ".", "_data", "=", "data", "self", ".", "_calc_order", "(", "order", ")", "if", "metadata", ":", "self", ".", "update_metadata", "(", "metadata", ")", "self", ".", "_set_minmax", "(", ")", "self", ".", "make_callback", "(", "'modified'", ")" ]
Use this method to SHARE (not copy) the incoming array.
[ "Use", "this", "method", "to", "SHARE", "(", "not", "copy", ")", "the", "incoming", "array", "." ]
python
train
25.705882
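A usage sketch for set_data, assuming `img` is a ginga BaseImage (or subclass) instance.

import numpy as np

data = np.random.randint(0, 256, size=(512, 512))
img.set_data(data)                        # the image now SHARES `data`
img.set_data(data, astype=np.float32)     # astype forces a converting copy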
cggh/scikit-allel
allel/stats/sf.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L37-L66
def sfs(dac, n=None): """Compute the site frequency spectrum given derived allele counts at a set of biallelic variants. Parameters ---------- dac : array_like, int, shape (n_variants,) Array of derived allele counts. n : int, optional The total number of chromosomes called. Returns ------- sfs : ndarray, int, shape (n_chromosomes,) Array where the kth element is the number of variant sites with k derived alleles. """ # check input dac, n = _check_dac_n(dac, n) # need platform integer for bincount dac = dac.astype(int, copy=False) # compute site frequency spectrum x = n + 1 s = np.bincount(dac, minlength=x) return s
[ "def", "sfs", "(", "dac", ",", "n", "=", "None", ")", ":", "# check input", "dac", ",", "n", "=", "_check_dac_n", "(", "dac", ",", "n", ")", "# need platform integer for bincount", "dac", "=", "dac", ".", "astype", "(", "int", ",", "copy", "=", "False", ")", "# compute site frequency spectrum", "x", "=", "n", "+", "1", "s", "=", "np", ".", "bincount", "(", "dac", ",", "minlength", "=", "x", ")", "return", "s" ]
Compute the site frequency spectrum given derived allele counts at a set of biallelic variants. Parameters ---------- dac : array_like, int, shape (n_variants,) Array of derived allele counts. n : int, optional The total number of chromosomes called. Returns ------- sfs : ndarray, int, shape (n_chromosomes,) Array where the kth element is the number of variant sites with k derived alleles.
[ "Compute", "the", "site", "frequency", "spectrum", "given", "derived", "allele", "counts", "at", "a", "set", "of", "biallelic", "variants", "." ]
python
train
23.5
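A worked sketch for sfs, assuming the function is exposed as allel.sfs as in released scikit-allel.

import numpy as np
import allel

# Derived allele counts at six biallelic sites, called in n=4 chromosomes.
dac = np.array([1, 1, 2, 3, 1, 2])
print(allel.sfs(dac, n=4))   # -> [0 3 2 1 0]: 3 singletons, 2 doubletons, 1 tripleton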
apache/incubator-mxnet
example/speech_recognition/stt_datagenerator.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/speech_recognition/stt_datagenerator.py#L80-L128
def load_metadata_from_desc_file(self, desc_file, partition='train', max_duration=16.0,): """ Read metadata from the description file (possibly takes long, depending on the filesize) Params: desc_file (str): Path to a JSON-line file that contains labels and paths to the audio files partition (str): One of 'train', 'validation' or 'test' max_duration (float): In seconds, the maximum duration of utterances to train or test on """ logger = logUtil.getlogger() logger.info('Reading description file: {} for partition: {}' .format(desc_file, partition)) audio_paths, durations, texts = [], [], [] with open(desc_file) as json_line_file: for line_num, json_line in enumerate(json_line_file): try: spec = json.loads(json_line) if float(spec['duration']) > max_duration: continue audio_paths.append(spec['key']) durations.append(float(spec['duration'])) texts.append(spec['text']) except Exception as e: # Change to (KeyError, ValueError) or # (KeyError,json.decoder.JSONDecodeError), depending on # json module version logger.warn('Error reading line #{}: {}' .format(line_num, json_line)) logger.warn(str(e)) if partition == 'train': self.count = len(audio_paths) self.train_audio_paths = audio_paths self.train_durations = durations self.train_texts = texts elif partition == 'validation': self.val_audio_paths = audio_paths self.val_durations = durations self.val_texts = texts self.val_count = len(audio_paths) elif partition == 'test': self.test_audio_paths = audio_paths self.test_durations = durations self.test_texts = texts else: raise Exception("Invalid partition to load metadata. " "Must be train/validation/test")
[ "def", "load_metadata_from_desc_file", "(", "self", ",", "desc_file", ",", "partition", "=", "'train'", ",", "max_duration", "=", "16.0", ",", ")", ":", "logger", "=", "logUtil", ".", "getlogger", "(", ")", "logger", ".", "info", "(", "'Reading description file: {} for partition: {}'", ".", "format", "(", "desc_file", ",", "partition", ")", ")", "audio_paths", ",", "durations", ",", "texts", "=", "[", "]", ",", "[", "]", ",", "[", "]", "with", "open", "(", "desc_file", ")", "as", "json_line_file", ":", "for", "line_num", ",", "json_line", "in", "enumerate", "(", "json_line_file", ")", ":", "try", ":", "spec", "=", "json", ".", "loads", "(", "json_line", ")", "if", "float", "(", "spec", "[", "'duration'", "]", ")", ">", "max_duration", ":", "continue", "audio_paths", ".", "append", "(", "spec", "[", "'key'", "]", ")", "durations", ".", "append", "(", "float", "(", "spec", "[", "'duration'", "]", ")", ")", "texts", ".", "append", "(", "spec", "[", "'text'", "]", ")", "except", "Exception", "as", "e", ":", "# Change to (KeyError, ValueError) or", "# (KeyError,json.decoder.JSONDecodeError), depending on", "# json module version", "logger", ".", "warn", "(", "'Error reading line #{}: {}'", ".", "format", "(", "line_num", ",", "json_line", ")", ")", "logger", ".", "warn", "(", "str", "(", "e", ")", ")", "if", "partition", "==", "'train'", ":", "self", ".", "count", "=", "len", "(", "audio_paths", ")", "self", ".", "train_audio_paths", "=", "audio_paths", "self", ".", "train_durations", "=", "durations", "self", ".", "train_texts", "=", "texts", "elif", "partition", "==", "'validation'", ":", "self", ".", "val_audio_paths", "=", "audio_paths", "self", ".", "val_durations", "=", "durations", "self", ".", "val_texts", "=", "texts", "self", ".", "val_count", "=", "len", "(", "audio_paths", ")", "elif", "partition", "==", "'test'", ":", "self", ".", "test_audio_paths", "=", "audio_paths", "self", ".", "test_durations", "=", "durations", "self", ".", "test_texts", "=", "texts", "else", ":", "raise", "Exception", "(", "\"Invalid partition to load metadata. \"", "\"Must be train/validation/test\"", ")" ]
Read metadata from the description file (possibly takes a long time, depending on the file size) Params: desc_file (str): Path to a JSON-line file that contains labels and paths to the audio files partition (str): One of 'train', 'validation' or 'test' max_duration (float): In seconds, the maximum duration of utterances to train or test on
[ "Read", "metadata", "from", "the", "description", "file", "(", "possibly", "takes", "long", "depending", "on", "the", "filesize", ")", "Params", ":", "desc_file", "(", "str", ")", ":", "Path", "to", "a", "JSON", "-", "line", "file", "that", "contains", "labels", "and", "paths", "to", "the", "audio", "files", "partition", "(", "str", ")", ":", "One", "of", "train", "validation", "or", "test", "max_duration", "(", "float", ")", ":", "In", "seconds", "the", "maximum", "duration", "of", "utterances", "to", "train", "or", "test", "on" ]
python
train
46.408163
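A sketch of the JSON-lines description file this loader expects, and of calling it; the file name and the `gen` instance are hypothetical.

import json

# One object per line with 'key' (audio path), 'duration' (seconds), 'text'.
with open('train_corpus.json', 'w') as f:
    f.write(json.dumps({'key': 'utt1.wav', 'duration': 2.5,
                        'text': 'hello world'}) + '\n')

# `gen` stands in for the data-generator instance defining the method above.
gen.load_metadata_from_desc_file('train_corpus.json', partition='train')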
elastic/elasticsearch-py
elasticsearch/client/xpack/sql.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/sql.py#L33-L43
def translate(self, body, params=None): """ `<Translate SQL into Elasticsearch queries>`_ :arg body: Specify the query in the `query` element. """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "POST", "/_sql/translate", params=params, body=body )
[ "def", "translate", "(", "self", ",", "body", ",", "params", "=", "None", ")", ":", "if", "body", "in", "SKIP_IN_PATH", ":", "raise", "ValueError", "(", "\"Empty value passed for a required argument 'body'.\"", ")", "return", "self", ".", "transport", ".", "perform_request", "(", "\"POST\"", ",", "\"/_sql/translate\"", ",", "params", "=", "params", ",", "body", "=", "body", ")" ]
`<Translate SQL into Elasticsearch queries>`_ :arg body: Specify the query in the `query` element.
[ "<Translate", "SQL", "into", "Elasticsearch", "queries", ">", "_" ]
python
train
36.909091
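A hedged usage sketch for the SQL translate endpoint; the host is a placeholder, and the es.xpack.sql namespace is assumed from this file's module path.

from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])   # placeholder host
print(es.xpack.sql.translate(
    body={'query': 'SELECT name FROM library ORDER BY page_count DESC LIMIT 5'}
))   # prints the generated Elasticsearch query DSL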
klen/starter
starter/core.py
https://github.com/klen/starter/blob/24a65c10d4ac5a9ca8fc1d8b3d54b3fb13603f5f/starter/core.py#L180-L211
def copy(self): """ Prepare and paste self templates. """ templates = self.prepare_templates() if self.params.interactive: keys = list(self.parser.default) for key in keys: if key.startswith('_'): continue prompt = "{0} (default is \"{1}\")? ".format( key, self.parser.default[key]) if _compat.PY2: value = raw_input(prompt.encode('utf-8')).decode('utf-8') else: value = input(prompt.encode('utf-8')) value = value.strip() if value: self.parser.default[key] = value self.parser.default['templates'] = tt = ','.join( t.name for t in templates) logging.warning("Paste templates: {0}".format(tt)) self.make_directory(self.params.TARGET) logging.debug("\nDefault context:\n----------------") logging.debug( ''.join('{0:<15} {1}\n'.format(*v) for v in self.parser.default.items()) ) return [t.paste( **dict(self.parser.default.items())) for t in templates]
[ "def", "copy", "(", "self", ")", ":", "templates", "=", "self", ".", "prepare_templates", "(", ")", "if", "self", ".", "params", ".", "interactive", ":", "keys", "=", "list", "(", "self", ".", "parser", ".", "default", ")", "for", "key", "in", "keys", ":", "if", "key", ".", "startswith", "(", "'_'", ")", ":", "continue", "prompt", "=", "\"{0} (default is \\\"{1}\\\")? \"", ".", "format", "(", "key", ",", "self", ".", "parser", ".", "default", "[", "key", "]", ")", "if", "_compat", ".", "PY2", ":", "value", "=", "raw_input", "(", "prompt", ".", "encode", "(", "'utf-8'", ")", ")", ".", "decode", "(", "'utf-8'", ")", "else", ":", "value", "=", "input", "(", "prompt", ".", "encode", "(", "'utf-8'", ")", ")", "value", "=", "value", ".", "strip", "(", ")", "if", "value", ":", "self", ".", "parser", ".", "default", "[", "key", "]", "=", "value", "self", ".", "parser", ".", "default", "[", "'templates'", "]", "=", "tt", "=", "','", ".", "join", "(", "t", ".", "name", "for", "t", "in", "templates", ")", "logging", ".", "warning", "(", "\"Paste templates: {0}\"", ".", "format", "(", "tt", ")", ")", "self", ".", "make_directory", "(", "self", ".", "params", ".", "TARGET", ")", "logging", ".", "debug", "(", "\"\\nDefault context:\\n----------------\"", ")", "logging", ".", "debug", "(", "''", ".", "join", "(", "'{0:<15} {1}\\n'", ".", "format", "(", "*", "v", ")", "for", "v", "in", "self", ".", "parser", ".", "default", ".", "items", "(", ")", ")", ")", "return", "[", "t", ".", "paste", "(", "*", "*", "dict", "(", "self", ".", "parser", ".", "default", ".", "items", "(", ")", ")", ")", "for", "t", "in", "templates", "]" ]
Prepare and paste self templates.
[ "Prepare", "and", "paste", "self", "templates", "." ]
python
train
36.875
Geotab/mygeotab-python
mygeotab/api.py
https://github.com/Geotab/mygeotab-python/blob/baa678e7df90bdd15f5dc55c1374b5c048791a94/mygeotab/api.py#L336-L357
def server_call(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters): """Makes a call to an un-authenticated method on a server :param method: The method name. :type method: str :param server: The MyGeotab server. :type server: str :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :type timeout: float :param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this. :type verify_ssl: bool :param parameters: Additional parameters to send (for example, search=dict(id='b123') ). :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server. :raise TimeoutException: Raises when the request does not respond after some time. :return: The result from the server. """ if method is None: raise Exception("A method name must be specified") if server is None: raise Exception("A server (eg. my3.geotab.com) must be specified") parameters = process_parameters(parameters) return _query(server, method, parameters, timeout=timeout, verify_ssl=verify_ssl)
[ "def", "server_call", "(", "method", ",", "server", ",", "timeout", "=", "DEFAULT_TIMEOUT", ",", "verify_ssl", "=", "True", ",", "*", "*", "parameters", ")", ":", "if", "method", "is", "None", ":", "raise", "Exception", "(", "\"A method name must be specified\"", ")", "if", "server", "is", "None", ":", "raise", "Exception", "(", "\"A server (eg. my3.geotab.com) must be specified\"", ")", "parameters", "=", "process_parameters", "(", "parameters", ")", "return", "_query", "(", "server", ",", "method", ",", "parameters", ",", "timeout", "=", "timeout", ",", "verify_ssl", "=", "verify_ssl", ")" ]
Makes a call to an un-authenticated method on a server :param method: The method name. :type method: str :param server: The MyGeotab server. :type server: str :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :type timeout: float :param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this. :type verify_ssl: bool :param parameters: Additional parameters to send (for example, search=dict(id='b123') ). :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server. :raise TimeoutException: Raises when the request does not respond after some time. :return: The result from the server.
[ "Makes", "a", "call", "to", "an", "un", "-", "authenticated", "method", "on", "a", "server" ]
python
train
51.727273
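A minimal sketch for server_call; 'GetVersion' is a standard unauthenticated MyGeotab method, and the server name is a placeholder.

from mygeotab import api

print(api.server_call('GetVersion', server='my3.geotab.com'))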