Column schema for this dataset sample:

    column            dtype          values / range
    ----------------  -------------  --------------
    repo              stringlengths  7 - 55
    path              stringlengths  4 - 223
    url               stringlengths  87 - 315
    code              stringlengths  75 - 104k
    code_tokens       list           -
    docstring         stringlengths  1 - 46.9k
    docstring_tokens  list           -
    language          stringclasses  1 value
    partition         stringclasses  3 values
    avg_line_len      float64        7.91 - 980
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/rotmat.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/rotmat.py#L295-L310
def from_axis_angle(self, axis, angle):
    '''create a rotation matrix from axis and angle'''
    ux = axis.x
    uy = axis.y
    uz = axis.z
    ct = cos(angle)
    st = sin(angle)
    self.a.x = ct + (1-ct) * ux**2
    self.a.y = ux*uy*(1-ct) - uz*st
    self.a.z = ux*uz*(1-ct) + uy*st
    self.b.x = uy*ux*(1-ct) + uz*st
    self.b.y = ct + (1-ct) * uy**2
    self.b.z = uy*uz*(1-ct) - ux*st
    self.c.x = uz*ux*(1-ct) - uy*st
    self.c.y = uz*uy*(1-ct) + ux*st
    self.c.z = ct + (1-ct) * uz**2
[ "def", "from_axis_angle", "(", "self", ",", "axis", ",", "angle", ")", ":", "ux", "=", "axis", ".", "x", "uy", "=", "axis", ".", "y", "uz", "=", "axis", ".", "z", "ct", "=", "cos", "(", "angle", ")", "st", "=", "sin", "(", "angle", ")", "self", ".", "a", ".", "x", "=", "ct", "+", "(", "1", "-", "ct", ")", "*", "ux", "**", "2", "self", ".", "a", ".", "y", "=", "ux", "*", "uy", "*", "(", "1", "-", "ct", ")", "-", "uz", "*", "st", "self", ".", "a", ".", "z", "=", "ux", "*", "uz", "*", "(", "1", "-", "ct", ")", "+", "uy", "*", "st", "self", ".", "b", ".", "x", "=", "uy", "*", "ux", "*", "(", "1", "-", "ct", ")", "+", "uz", "*", "st", "self", ".", "b", ".", "y", "=", "ct", "+", "(", "1", "-", "ct", ")", "*", "uy", "**", "2", "self", ".", "b", ".", "z", "=", "uy", "*", "uz", "*", "(", "1", "-", "ct", ")", "-", "ux", "*", "st", "self", ".", "c", ".", "x", "=", "uz", "*", "ux", "*", "(", "1", "-", "ct", ")", "-", "uy", "*", "st", "self", ".", "c", ".", "y", "=", "uz", "*", "uy", "*", "(", "1", "-", "ct", ")", "+", "ux", "*", "st", "self", ".", "c", ".", "z", "=", "ct", "+", "(", "1", "-", "ct", ")", "*", "uz", "**", "2" ]
create a rotation matrix from axis and angle
[ "create", "a", "rotation", "matrix", "from", "axis", "and", "angle" ]
python
train
34.25
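A minimal usage sketch for the record above. Matrix3 and Vector3 are the classes defined in pymavlink's rotmat module; the flat import path and the identity-matrix default are assumptions.

# Hypothetical usage sketch; import path and constructor defaults are assumptions.
from math import pi
from rotmat import Matrix3, Vector3

m = Matrix3()                                # starts as the identity matrix (assumed default)
m.from_axis_angle(Vector3(0, 0, 1), pi / 2)  # 90-degree rotation about the z axis
# m.a, m.b, m.c now hold the rows of the Rodrigues rotation matrix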
mwouts/jupytext
jupytext/paired_paths.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/paired_paths.py#L13-L58
def base_path(main_path, fmt):
    """Given a path and options for a format (ext, suffix, prefix), return the corresponding base path"""
    if not fmt:
        return os.path.splitext(main_path)[0]

    fmt = long_form_one_format(fmt)
    fmt_ext = fmt['extension']
    suffix = fmt.get('suffix')
    prefix = fmt.get('prefix')

    base, ext = os.path.splitext(main_path)
    if ext != fmt_ext:
        raise InconsistentPath(u"Notebook path '{}' was expected to have extension '{}'".format(main_path, fmt_ext))

    if suffix:
        if not base.endswith(suffix):
            raise InconsistentPath(u"Notebook name '{}' was expected to end with suffix '{}'".format(base, suffix))
        base = base[:-len(suffix)]

    if not prefix:
        return base

    prefix_dir, prefix_file_name = os.path.split(prefix)
    notebook_dir, notebook_file_name = os.path.split(base)
    sep = base[len(notebook_dir):-len(notebook_file_name)]

    if prefix_file_name:
        if not notebook_file_name.startswith(prefix_file_name):
            raise InconsistentPath(u"Notebook name '{}' was expected to start with prefix '{}'"
                                   .format(notebook_file_name, prefix_file_name))
        notebook_file_name = notebook_file_name[len(prefix_file_name):]

    if prefix_dir:
        if not notebook_dir.endswith(prefix_dir):
            raise InconsistentPath(u"Notebook directory '{}' was expected to end with directory prefix '{}'"
                                   .format(notebook_dir, prefix_dir))
        notebook_dir = notebook_dir[:-len(prefix_dir)]

    if not notebook_dir:
        return notebook_file_name

    # Does notebook_dir ends with a path separator?
    if notebook_dir[-1:] == sep:
        return notebook_dir + notebook_file_name

    return notebook_dir + sep + notebook_file_name
[ "def", "base_path", "(", "main_path", ",", "fmt", ")", ":", "if", "not", "fmt", ":", "return", "os", ".", "path", ".", "splitext", "(", "main_path", ")", "[", "0", "]", "fmt", "=", "long_form_one_format", "(", "fmt", ")", "fmt_ext", "=", "fmt", "[", "'extension'", "]", "suffix", "=", "fmt", ".", "get", "(", "'suffix'", ")", "prefix", "=", "fmt", ".", "get", "(", "'prefix'", ")", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "main_path", ")", "if", "ext", "!=", "fmt_ext", ":", "raise", "InconsistentPath", "(", "u\"Notebook path '{}' was expected to have extension '{}'\"", ".", "format", "(", "main_path", ",", "fmt_ext", ")", ")", "if", "suffix", ":", "if", "not", "base", ".", "endswith", "(", "suffix", ")", ":", "raise", "InconsistentPath", "(", "u\"Notebook name '{}' was expected to end with suffix '{}'\"", ".", "format", "(", "base", ",", "suffix", ")", ")", "base", "=", "base", "[", ":", "-", "len", "(", "suffix", ")", "]", "if", "not", "prefix", ":", "return", "base", "prefix_dir", ",", "prefix_file_name", "=", "os", ".", "path", ".", "split", "(", "prefix", ")", "notebook_dir", ",", "notebook_file_name", "=", "os", ".", "path", ".", "split", "(", "base", ")", "sep", "=", "base", "[", "len", "(", "notebook_dir", ")", ":", "-", "len", "(", "notebook_file_name", ")", "]", "if", "prefix_file_name", ":", "if", "not", "notebook_file_name", ".", "startswith", "(", "prefix_file_name", ")", ":", "raise", "InconsistentPath", "(", "u\"Notebook name '{}' was expected to start with prefix '{}'\"", ".", "format", "(", "notebook_file_name", ",", "prefix_file_name", ")", ")", "notebook_file_name", "=", "notebook_file_name", "[", "len", "(", "prefix_file_name", ")", ":", "]", "if", "prefix_dir", ":", "if", "not", "notebook_dir", ".", "endswith", "(", "prefix_dir", ")", ":", "raise", "InconsistentPath", "(", "u\"Notebook directory '{}' was expected to end with directory prefix '{}'\"", ".", "format", "(", "notebook_dir", ",", "prefix_dir", ")", ")", "notebook_dir", "=", "notebook_dir", "[", ":", "-", "len", "(", "prefix_dir", ")", "]", "if", "not", "notebook_dir", ":", "return", "notebook_file_name", "# Does notebook_dir ends with a path separator?", "if", "notebook_dir", "[", "-", "1", ":", "]", "==", "sep", ":", "return", "notebook_dir", "+", "notebook_file_name", "return", "notebook_dir", "+", "sep", "+", "notebook_file_name" ]
Given a path and options for a format (ext, suffix, prefix), return the corresponding base path
[ "Given", "a", "path", "and", "options", "for", "a", "format", "(", "ext", "suffix", "prefix", ")", "return", "the", "corresponding", "base", "path" ]
python
train
38.695652
chaoss/grimoirelab-perceval
perceval/backends/core/googlehits.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/googlehits.py#L96-L111
def fetch_items(self, category, **kwargs):
    """Fetch Google hit items

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    logger.info("Fetching data for '%s'", self.keywords)

    hits_raw = self.client.hits(self.keywords)
    hits = self.__parse_hits(hits_raw)
    yield hits

    logger.info("Fetch process completed")
[ "def", "fetch_items", "(", "self", ",", "category", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "\"Fetching data for '%s'\"", ",", "self", ".", "keywords", ")", "hits_raw", "=", "self", ".", "client", ".", "hits", "(", "self", ".", "keywords", ")", "hits", "=", "self", ".", "__parse_hits", "(", "hits_raw", ")", "yield", "hits", "logger", ".", "info", "(", "\"Fetch process completed\"", ")" ]
Fetch Google hit items

:param category: the category of items to fetch
:param kwargs: backend arguments

:returns: a generator of items
[ "Fetch", "Google", "hit", "items" ]
python
test
27.1875
raiden-network/raiden
raiden/network/transport/matrix/transport.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/transport/matrix/transport.py#L1264-L1304
def _get_room_ids_for_address(
        self,
        address: Address,
        filter_private: bool = None,
) -> List[_RoomID]:
    """ Uses GMatrixClient.get_account_data to get updated mapping of address->rooms

    It'll filter only existing rooms.
    If filter_private=True, also filter out public rooms.
    If filter_private=None, filter according to self._private_rooms
    """
    address_hex: AddressHex = to_checksum_address(address)
    with self._account_data_lock:
        room_ids = self._client.account_data.get(
            'network.raiden.rooms',
            {},
        ).get(address_hex)

        self.log.debug('matrix get account data', room_ids=room_ids, for_address=address_hex)
        if not room_ids:  # None or empty
            room_ids = list()
        if not isinstance(room_ids, list):  # old version, single room
            room_ids = [room_ids]

        if filter_private is None:
            filter_private = self._private_rooms
        if not filter_private:
            # existing rooms
            room_ids = [
                room_id
                for room_id in room_ids
                if room_id in self._client.rooms
            ]
        else:
            # existing and private rooms
            room_ids = [
                room_id
                for room_id in room_ids
                if room_id in self._client.rooms and self._client.rooms[room_id].invite_only
            ]

        return room_ids
[ "def", "_get_room_ids_for_address", "(", "self", ",", "address", ":", "Address", ",", "filter_private", ":", "bool", "=", "None", ",", ")", "->", "List", "[", "_RoomID", "]", ":", "address_hex", ":", "AddressHex", "=", "to_checksum_address", "(", "address", ")", "with", "self", ".", "_account_data_lock", ":", "room_ids", "=", "self", ".", "_client", ".", "account_data", ".", "get", "(", "'network.raiden.rooms'", ",", "{", "}", ",", ")", ".", "get", "(", "address_hex", ")", "self", ".", "log", ".", "debug", "(", "'matrix get account data'", ",", "room_ids", "=", "room_ids", ",", "for_address", "=", "address_hex", ")", "if", "not", "room_ids", ":", "# None or empty", "room_ids", "=", "list", "(", ")", "if", "not", "isinstance", "(", "room_ids", ",", "list", ")", ":", "# old version, single room", "room_ids", "=", "[", "room_ids", "]", "if", "filter_private", "is", "None", ":", "filter_private", "=", "self", ".", "_private_rooms", "if", "not", "filter_private", ":", "# existing rooms", "room_ids", "=", "[", "room_id", "for", "room_id", "in", "room_ids", "if", "room_id", "in", "self", ".", "_client", ".", "rooms", "]", "else", ":", "# existing and private rooms", "room_ids", "=", "[", "room_id", "for", "room_id", "in", "room_ids", "if", "room_id", "in", "self", ".", "_client", ".", "rooms", "and", "self", ".", "_client", ".", "rooms", "[", "room_id", "]", ".", "invite_only", "]", "return", "room_ids" ]
Uses GMatrixClient.get_account_data to get updated mapping of address->rooms

It'll filter only existing rooms.
If filter_private=True, also filter out public rooms.
If filter_private=None, filter according to self._private_rooms
[ "Uses", "GMatrixClient", ".", "get_account_data", "to", "get", "updated", "mapping", "of", "address", "-", ">", "rooms" ]
python
train
38.02439
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py#L86-L101
def show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isl_linknumber(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    show_linkinfo = ET.Element("show_linkinfo")
    config = show_linkinfo
    output = ET.SubElement(show_linkinfo, "output")
    show_link_info = ET.SubElement(output, "show-link-info")
    linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
    linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
    linkinfo_isl = ET.SubElement(show_link_info, "linkinfo-isl")
    linkinfo_isl_linknumber = ET.SubElement(linkinfo_isl, "linkinfo-isl-linknumber")
    linkinfo_isl_linknumber.text = kwargs.pop('linkinfo_isl_linknumber')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isl_linknumber", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_linkinfo", "=", "ET", ".", "Element", "(", "\"show_linkinfo\"", ")", "config", "=", "show_linkinfo", "output", "=", "ET", ".", "SubElement", "(", "show_linkinfo", ",", "\"output\"", ")", "show_link_info", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-link-info\"", ")", "linkinfo_rbridgeid_key", "=", "ET", ".", "SubElement", "(", "show_link_info", ",", "\"linkinfo-rbridgeid\"", ")", "linkinfo_rbridgeid_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'linkinfo_rbridgeid'", ")", "linkinfo_isl", "=", "ET", ".", "SubElement", "(", "show_link_info", ",", "\"linkinfo-isl\"", ")", "linkinfo_isl_linknumber", "=", "ET", ".", "SubElement", "(", "linkinfo_isl", ",", "\"linkinfo-isl-linknumber\"", ")", "linkinfo_isl_linknumber", ".", "text", "=", "kwargs", ".", "pop", "(", "'linkinfo_isl_linknumber'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
52.8125
alexmojaki/outdated
outdated/__init__.py
https://github.com/alexmojaki/outdated/blob/565bb3fe1adc30da5e50249912cd2ac494662659/outdated/__init__.py#L68-L112
def warn_if_outdated(package,
                     version,
                     raise_exceptions=False,
                     background=True,
                     ):
    """
    Higher level convenience function using check_outdated.

    The package and version arguments are the same.

    If the package is outdated, a warning (OutdatedPackageWarning) will be emitted.

    Any exception in check_outdated will be converted to a warning (OutdatedCheckFailedWarning)
    unless raise_exceptions if True.

    If background is True (the default), the check will run in a background thread
    so this function will return immediately. In this case if an exception is raised
    and raise_exceptions if True the traceback will be printed to stderr
    but the program will not be interrupted.

    This function doesn't return anything.
    """

    def check():
        # noinspection PyUnusedLocal
        is_outdated = False
        with utils.exception_to_warning('check for latest version of package',
                                        OutdatedCheckFailedWarning,
                                        always_raise=raise_exceptions):
            is_outdated, latest = check_outdated(package, version)

        if is_outdated:
            warn_with_ignore(
                'The package %s is out of date. Your version is %s, the latest is %s.'
                % (package, version, latest),
                OutdatedPackageWarning,
            )

    if background:
        thread = Thread(target=check)
        thread.start()
    else:
        check()
[ "def", "warn_if_outdated", "(", "package", ",", "version", ",", "raise_exceptions", "=", "False", ",", "background", "=", "True", ",", ")", ":", "def", "check", "(", ")", ":", "# noinspection PyUnusedLocal", "is_outdated", "=", "False", "with", "utils", ".", "exception_to_warning", "(", "'check for latest version of package'", ",", "OutdatedCheckFailedWarning", ",", "always_raise", "=", "raise_exceptions", ")", ":", "is_outdated", ",", "latest", "=", "check_outdated", "(", "package", ",", "version", ")", "if", "is_outdated", ":", "warn_with_ignore", "(", "'The package %s is out of date. Your version is %s, the latest is %s.'", "%", "(", "package", ",", "version", ",", "latest", ")", ",", "OutdatedPackageWarning", ",", ")", "if", "background", ":", "thread", "=", "Thread", "(", "target", "=", "check", ")", "thread", ".", "start", "(", ")", "else", ":", "check", "(", ")" ]
Higher level convenience function using check_outdated. The package and version arguments are the same. If the package is outdated, a warning (OutdatedPackageWarning) will be emitted. Any exception in check_outdated will be converted to a warning (OutdatedCheckFailedWarning) unless raise_exceptions if True. If background is True (the default), the check will run in a background thread so this function will return immediately. In this case if an exception is raised and raise_exceptions if True the traceback will be printed to stderr but the program will not be interrupted. This function doesn't return anything.
[ "Higher", "level", "convenience", "function", "using", "check_outdated", "." ]
python
train
33.933333
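The record above matches the package's documented entry point; a short usage sketch (the package name and version string are placeholders):

# Usage sketch; 'my-package-name' and '1.0.0' are placeholders.
from outdated import warn_if_outdated

warn_if_outdated('my-package-name', '1.0.0')
# Emits OutdatedPackageWarning if PyPI has a newer release; check failures
# become OutdatedCheckFailedWarning unless raise_exceptions=True.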
twilio/twilio-python
twilio/rest/preview/deployed_devices/fleet/key.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/deployed_devices/fleet/key.py#L58-L80
def stream(self, device_sid=values.unset, limit=None, page_size=None):
    """
    Streams KeyInstance records from the API as a generator stream.
    This operation lazily loads records as efficiently as possible until the limit
    is reached.
    The results are returned as a generator, so this operation is memory efficient.

    :param unicode device_sid: Find all Keys authenticating specified Device.
    :param int limit: Upper limit for the number of records to return. stream()
                      guarantees to never return more than limit.  Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records.  If no page_size is defined
                          but a limit is defined, stream() will attempt to read the
                          limit with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.preview.deployed_devices.fleet.key.KeyInstance]
    """
    limits = self._version.read_limits(limit, page_size)

    page = self.page(device_sid=device_sid, page_size=limits['page_size'], )

    return self._version.stream(page, limits['limit'], limits['page_limit'])
[ "def", "stream", "(", "self", ",", "device_sid", "=", "values", ".", "unset", ",", "limit", "=", "None", ",", "page_size", "=", "None", ")", ":", "limits", "=", "self", ".", "_version", ".", "read_limits", "(", "limit", ",", "page_size", ")", "page", "=", "self", ".", "page", "(", "device_sid", "=", "device_sid", ",", "page_size", "=", "limits", "[", "'page_size'", "]", ",", ")", "return", "self", ".", "_version", ".", "stream", "(", "page", ",", "limits", "[", "'limit'", "]", ",", "limits", "[", "'page_limit'", "]", ")" ]
Streams KeyInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit is reached.
The results are returned as a generator, so this operation is memory efficient.

:param unicode device_sid: Find all Keys authenticating specified Device.
:param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000)

:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.key.KeyInstance]
[ "Streams", "KeyInstance", "records", "from", "the", "API", "as", "a", "generator", "stream", ".", "This", "operation", "lazily", "loads", "records", "as", "efficiently", "as", "possible", "until", "the", "limit", "is", "reached", ".", "The", "results", "are", "returned", "as", "a", "generator", "so", "this", "operation", "is", "memory", "efficient", "." ]
python
train
57.913043
RedHatInsights/insights-core
insights/parsers/alternatives.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/alternatives.py#L79-L127
def parse_content(self, content):
    """
    Parse the output of the ``alternatives`` command.
    """
    self.program = None
    self.status = None
    self.link = None
    self.best = None
    self.paths = []
    current_path = None  # Set up instance variable

    for line in content:
        words = line.split(None)
        if ' - status is' in line:
            # alternatives only displays one program, so finding
            # this line again is an error.
            if self.program:
                raise ParseException(
                    "Program line for {newprog} found in output for {oldprog}".format(
                        newprog=words[0], oldprog=self.program
                    )
                )
            # Set up new program data
            self.program = words[0]
            self.status = words[4][:-1]  # remove trailing .
            self.alternatives = []
            current_path = {}
        elif not self.program:
            # Lines before 'status is' line are ignored
            continue
        elif line.startswith(' link currently points to ') and len(words) == 5:
            # line: ' link currently points to /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64/jre/bin/java'
            self.link = words[4]
        elif ' - priority ' in line and len(words) == 4 and words[3].isdigit():
            # line: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java - priority 16091
            # New path - save current path if set
            self.paths.append({
                'path': words[0],
                'priority': int(words[3]),
                'slave': {},
            })
            current_path = self.paths[-1]
        elif line.startswith(' slave ') and len(words) == 3 and current_path:
            # line: ' slave ControlPanel: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel'
            current_path['slave'][words[1][:-1]] = words[2]  # remove final : from program
        elif line.startswith("Current `best' version is ") and len(words) == 5:
            # line: 'Current `best' version is /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java.'
            self.best = words[4][:-1]
[ "def", "parse_content", "(", "self", ",", "content", ")", ":", "self", ".", "program", "=", "None", "self", ".", "status", "=", "None", "self", ".", "link", "=", "None", "self", ".", "best", "=", "None", "self", ".", "paths", "=", "[", "]", "current_path", "=", "None", "# Set up instance variable", "for", "line", "in", "content", ":", "words", "=", "line", ".", "split", "(", "None", ")", "if", "' - status is'", "in", "line", ":", "# alternatives only displays one program, so finding", "# this line again is an error.", "if", "self", ".", "program", ":", "raise", "ParseException", "(", "\"Program line for {newprog} found in output for {oldprog}\"", ".", "format", "(", "newprog", "=", "words", "[", "0", "]", ",", "oldprog", "=", "self", ".", "program", ")", ")", "# Set up new program data", "self", ".", "program", "=", "words", "[", "0", "]", "self", ".", "status", "=", "words", "[", "4", "]", "[", ":", "-", "1", "]", "# remove trailing .", "self", ".", "alternatives", "=", "[", "]", "current_path", "=", "{", "}", "elif", "not", "self", ".", "program", ":", "# Lines before 'status is' line are ignored", "continue", "elif", "line", ".", "startswith", "(", "' link currently points to '", ")", "and", "len", "(", "words", ")", "==", "5", ":", "# line: ' link currently points to /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64/jre/bin/java'", "self", ".", "link", "=", "words", "[", "4", "]", "elif", "' - priority '", "in", "line", "and", "len", "(", "words", ")", "==", "4", "and", "words", "[", "3", "]", ".", "isdigit", "(", ")", ":", "# line: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java - priority 16091", "# New path - save current path if set", "self", ".", "paths", ".", "append", "(", "{", "'path'", ":", "words", "[", "0", "]", ",", "'priority'", ":", "int", "(", "words", "[", "3", "]", ")", ",", "'slave'", ":", "{", "}", ",", "}", ")", "current_path", "=", "self", ".", "paths", "[", "-", "1", "]", "elif", "line", ".", "startswith", "(", "' slave '", ")", "and", "len", "(", "words", ")", "==", "3", "and", "current_path", ":", "# line: ' slave ControlPanel: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel'", "current_path", "[", "'slave'", "]", "[", "words", "[", "1", "]", "[", ":", "-", "1", "]", "]", "=", "words", "[", "2", "]", "# remove final : from program", "elif", "line", ".", "startswith", "(", "\"Current `best' version is \"", ")", "and", "len", "(", "words", ")", "==", "5", ":", "# line: 'Current `best' version is /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java.'", "self", ".", "best", "=", "words", "[", "4", "]", "[", ":", "-", "1", "]" ]
Parse the output of the ``alternatives`` command.
[ "Parse", "the", "output", "of", "the", "alternatives", "command", "." ]
python
train
46.673469
pawelad/pymonzo
src/pymonzo/monzo_api.py
https://github.com/pawelad/pymonzo/blob/b5c8d4f46dcb3a2f475797a8b8ef1c15f6493fb9/src/pymonzo/monzo_api.py#L305-L329
def pots(self, refresh=False):
    """
    Returns a list of pots owned by the currently authorised user.

    Official docs:
        https://monzo.com/docs/#pots

    :param refresh: decides if the pots information should be refreshed.
    :type refresh: bool
    :returns: list of Monzo pots
    :rtype: list of MonzoPot
    """
    if not refresh and self._cached_pots:
        return self._cached_pots

    endpoint = '/pots/listV1'
    response = self._get_response(
        method='get', endpoint=endpoint,
    )

    pots_json = response.json()['pots']
    pots = [MonzoPot(data=pot) for pot in pots_json]
    self._cached_pots = pots

    return pots
[ "def", "pots", "(", "self", ",", "refresh", "=", "False", ")", ":", "if", "not", "refresh", "and", "self", ".", "_cached_pots", ":", "return", "self", ".", "_cached_pots", "endpoint", "=", "'/pots/listV1'", "response", "=", "self", ".", "_get_response", "(", "method", "=", "'get'", ",", "endpoint", "=", "endpoint", ",", ")", "pots_json", "=", "response", ".", "json", "(", ")", "[", "'pots'", "]", "pots", "=", "[", "MonzoPot", "(", "data", "=", "pot", ")", "for", "pot", "in", "pots_json", "]", "self", ".", "_cached_pots", "=", "pots", "return", "pots" ]
Returns a list of pots owned by the currently authorised user.

Official docs: https://monzo.com/docs/#pots

:param refresh: decides if the pots information should be refreshed.
:type refresh: bool
:returns: list of Monzo pots
:rtype: list of MonzoPot
[ "Returns", "a", "list", "of", "pots", "owned", "by", "the", "currently", "authorised", "user", "." ]
python
train
28.4
jrief/django-sass-processor
sass_processor/management/commands/compilescss.py
https://github.com/jrief/django-sass-processor/blob/3ca746258432b1428daee9a2b2f7e05a1e327492/sass_processor/management/commands/compilescss.py#L199-L214
def parse_source(self, filename):
    """
    Extract the statements from the given file, look for function calls
    `sass_processor(scss_file)` and compile the filename into CSS.
    """
    callvisitor = FuncCallVisitor('sass_processor')
    tree = ast.parse(open(filename, 'rb').read())
    callvisitor.visit(tree)
    for sass_fileurl in callvisitor.sass_files:
        sass_filename = find_file(sass_fileurl)
        if not sass_filename or sass_filename in self.processed_files:
            continue
        if self.delete_files:
            self.delete_file(sass_filename, sass_fileurl)
        else:
            self.compile_sass(sass_filename, sass_fileurl)
[ "def", "parse_source", "(", "self", ",", "filename", ")", ":", "callvisitor", "=", "FuncCallVisitor", "(", "'sass_processor'", ")", "tree", "=", "ast", ".", "parse", "(", "open", "(", "filename", ",", "'rb'", ")", ".", "read", "(", ")", ")", "callvisitor", ".", "visit", "(", "tree", ")", "for", "sass_fileurl", "in", "callvisitor", ".", "sass_files", ":", "sass_filename", "=", "find_file", "(", "sass_fileurl", ")", "if", "not", "sass_filename", "or", "sass_filename", "in", "self", ".", "processed_files", ":", "continue", "if", "self", ".", "delete_files", ":", "self", ".", "delete_file", "(", "sass_filename", ",", "sass_fileurl", ")", "else", ":", "self", ".", "compile_sass", "(", "sass_filename", ",", "sass_fileurl", ")" ]
Extract the statements from the given file, look for function calls `sass_processor(scss_file)` and compile the filename into CSS.
[ "Extract", "the", "statements", "from", "the", "given", "file", "look", "for", "function", "calls", "sass_processor", "(", "scss_file", ")", "and", "compile", "the", "filename", "into", "CSS", "." ]
python
train
44.5
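parse_source above delegates the actual scan to FuncCallVisitor, which this record does not include. Here is a self-contained sketch of what such a visitor plausibly does, using only the standard-library ast module; the collector below is an assumption about FuncCallVisitor's behavior, not its source.

import ast

class FuncCallCollector(ast.NodeVisitor):
    """Collect string literals passed as the first argument to calls of `funcname`."""
    def __init__(self, funcname):
        self.funcname = funcname
        self.sass_files = []

    def visit_Call(self, node):
        # Match both `sass_processor(...)` and `module.sass_processor(...)`.
        name = getattr(node.func, 'id', None) or getattr(node.func, 'attr', None)
        if name == self.funcname and node.args:
            arg = node.args[0]
            if isinstance(arg, ast.Constant) and isinstance(arg.value, str):
                self.sass_files.append(arg.value)
        self.generic_visit(node)

tree = ast.parse("url = sass_processor('myapp/css/mystyle.scss')")
visitor = FuncCallCollector('sass_processor')
visitor.visit(tree)
print(visitor.sass_files)  # ['myapp/css/mystyle.scss']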
dbcli/athenacli
athenacli/packages/filepaths.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/packages/filepaths.py#L5-L14
def list_path(root_dir):
    """List directory if exists.

    :param dir: str
    :return: list
    """
    res = []
    if os.path.isdir(root_dir):
        for name in os.listdir(root_dir):
            res.append(name)
    return res
[ "def", "list_path", "(", "root_dir", ")", ":", "res", "=", "[", "]", "if", "os", ".", "path", ".", "isdir", "(", "root_dir", ")", ":", "for", "name", "in", "os", ".", "listdir", "(", "root_dir", ")", ":", "res", ".", "append", "(", "name", ")", "return", "res" ]
List directory if exists. :param dir: str :return: list
[ "List", "directory", "if", "exists", ".", ":", "param", "dir", ":", "str", ":", "return", ":", "list" ]
python
train
22.5
explosion/spaCy
spacy/cli/train.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/cli/train.py#L371-L382
def _score_for_model(meta):
    """ Returns mean score between tasks in pipeline that can be used for early stopping. """
    mean_acc = list()
    pipes = meta["pipeline"]
    acc = meta["accuracy"]
    if "tagger" in pipes:
        mean_acc.append(acc["tags_acc"])
    if "parser" in pipes:
        mean_acc.append((acc["uas"] + acc["las"]) / 2)
    if "ner" in pipes:
        mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3)
    return sum(mean_acc) / len(mean_acc)
[ "def", "_score_for_model", "(", "meta", ")", ":", "mean_acc", "=", "list", "(", ")", "pipes", "=", "meta", "[", "\"pipeline\"", "]", "acc", "=", "meta", "[", "\"accuracy\"", "]", "if", "\"tagger\"", "in", "pipes", ":", "mean_acc", ".", "append", "(", "acc", "[", "\"tags_acc\"", "]", ")", "if", "\"parser\"", "in", "pipes", ":", "mean_acc", ".", "append", "(", "(", "acc", "[", "\"uas\"", "]", "+", "acc", "[", "\"las\"", "]", ")", "/", "2", ")", "if", "\"ner\"", "in", "pipes", ":", "mean_acc", ".", "append", "(", "(", "acc", "[", "\"ents_p\"", "]", "+", "acc", "[", "\"ents_r\"", "]", "+", "acc", "[", "\"ents_f\"", "]", ")", "/", "3", ")", "return", "sum", "(", "mean_acc", ")", "/", "len", "(", "mean_acc", ")" ]
Returns mean score between tasks in pipeline that can be used for early stopping.
[ "Returns", "mean", "score", "between", "tasks", "in", "pipeline", "that", "can", "be", "used", "for", "early", "stopping", "." ]
python
train
39.75
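A worked example of the averaging _score_for_model performs, with a hand-made meta dict (all numbers are illustrative):

# Illustrative meta dict; the numbers are made up.
meta = {
    "pipeline": ["tagger", "parser"],
    "accuracy": {"tags_acc": 95.0, "uas": 90.0, "las": 88.0},
}
tagger_score = meta["accuracy"]["tags_acc"]                             # 95.0
parser_score = (meta["accuracy"]["uas"] + meta["accuracy"]["las"]) / 2  # 89.0
print((tagger_score + parser_score) / 2)                                # 92.0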
KelSolaar/Umbra
umbra/ui/visual_accelerators.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/visual_accelerators.py#L42-L65
def highlight_current_line(editor):
    """
    Highlights given editor current line.

    :param editor: Document editor.
    :type editor: QWidget
    :return: Method success.
    :rtype: bool
    """
    format = editor.language.theme.get("accelerator.line")
    if not format:
        return False

    extra_selections = editor.extraSelections() or []
    if not editor.isReadOnly():
        selection = QTextEdit.ExtraSelection()
        selection.format.setBackground(format.background())
        selection.format.setProperty(QTextFormat.FullWidthSelection, True)
        selection.cursor = editor.textCursor()
        selection.cursor.clearSelection()
        extra_selections.append(selection)
    editor.setExtraSelections(extra_selections)
    return True
[ "def", "highlight_current_line", "(", "editor", ")", ":", "format", "=", "editor", ".", "language", ".", "theme", ".", "get", "(", "\"accelerator.line\"", ")", "if", "not", "format", ":", "return", "False", "extra_selections", "=", "editor", ".", "extraSelections", "(", ")", "or", "[", "]", "if", "not", "editor", ".", "isReadOnly", "(", ")", ":", "selection", "=", "QTextEdit", ".", "ExtraSelection", "(", ")", "selection", ".", "format", ".", "setBackground", "(", "format", ".", "background", "(", ")", ")", "selection", ".", "format", ".", "setProperty", "(", "QTextFormat", ".", "FullWidthSelection", ",", "True", ")", "selection", ".", "cursor", "=", "editor", ".", "textCursor", "(", ")", "selection", ".", "cursor", ".", "clearSelection", "(", ")", "extra_selections", ".", "append", "(", "selection", ")", "editor", ".", "setExtraSelections", "(", "extra_selections", ")", "return", "True" ]
Highlights given editor current line.

:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
[ "Highlights", "given", "editor", "current", "line", "." ]
python
train
31
inorton/junit2html
junit2htmlreport/parser.py
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L439-L529
def process(self):
    """
    populate the report from the xml
    :return:
    """
    suites = None
    if isinstance(self.tree, ET.Element):
        root = self.tree
    else:
        root = self.tree.getroot()

    if root.tag == "testrun":
        root = root[0]

    if root.tag == "testsuite":
        suites = [root]

    if root.tag == "testsuites":
        suites = [x for x in root]

    assert suites, "could not find test suites in results xml"

    for suite in suites:
        cursuite = Suite()
        self.suites.append(cursuite)
        cursuite.name = suite.attrib["name"]
        if "package" in suite.attrib:
            cursuite.package = suite.attrib["package"]
        cursuite.duration = float(suite.attrib.get("time", '0').replace(',', ''))

        for element in suite:
            if element.tag == "error":
                # top level error?
                errtag = {
                    "message": element.attrib.get("message", ""),
                    "type": element.attrib.get("type", ""),
                    "text": element.text
                }
                cursuite.errors.append(errtag)

            if element.tag == "system-out":
                cursuite.stdout = element.text

            if element.tag == "system-err":
                cursuite.stderr = element.text

            if element.tag == "properties":
                for prop in element:
                    if prop.tag == "property":
                        cursuite.properties[prop.attrib["name"]] = prop.attrib["value"]

            if element.tag == "testcase":
                testcase = element

                if not testcase.attrib.get("classname", None):
                    testcase.attrib["classname"] = NO_CLASSNAME

                if testcase.attrib["classname"] not in cursuite:
                    testclass = Class()
                    testclass.name = testcase.attrib["classname"]
                    cursuite[testclass.name] = testclass

                testclass = cursuite[testcase.attrib["classname"]]
                newcase = Case()
                newcase.name = testcase.attrib["name"]
                newcase.testclass = testclass
                newcase.duration = float(testcase.attrib.get("time", '0').replace(',', ''))
                testclass.cases.append(newcase)

                # does this test case have any children?
                for child in testcase:
                    if child.tag == "skipped":
                        newcase.skipped = child.text
                        if "message" in child.attrib:
                            newcase.skipped_msg = child.attrib["message"]
                    elif child.tag == "system-out":
                        newcase.stdout = child.text
                    elif child.tag == "system-err":
                        newcase.stderr = child.text
                    elif child.tag == "failure":
                        newcase.failure = child.text
                        if "message" in child.attrib:
                            newcase.failure_msg = child.attrib["message"]
                    elif child.tag == "error":
                        newcase.failure = child.text
                        if "message" in child.attrib:
                            newcase.failure_msg = child.attrib["message"]
                    elif child.tag == "properties":
                        for property in child:
                            newproperty = Property()
                            newproperty.name = property.attrib["name"]
                            newproperty.value = property.attrib["value"]
                            newcase.properties.append(newproperty)
[ "def", "process", "(", "self", ")", ":", "suites", "=", "None", "if", "isinstance", "(", "self", ".", "tree", ",", "ET", ".", "Element", ")", ":", "root", "=", "self", ".", "tree", "else", ":", "root", "=", "self", ".", "tree", ".", "getroot", "(", ")", "if", "root", ".", "tag", "==", "\"testrun\"", ":", "root", "=", "root", "[", "0", "]", "if", "root", ".", "tag", "==", "\"testsuite\"", ":", "suites", "=", "[", "root", "]", "if", "root", ".", "tag", "==", "\"testsuites\"", ":", "suites", "=", "[", "x", "for", "x", "in", "root", "]", "assert", "suites", ",", "\"could not find test suites in results xml\"", "for", "suite", "in", "suites", ":", "cursuite", "=", "Suite", "(", ")", "self", ".", "suites", ".", "append", "(", "cursuite", ")", "cursuite", ".", "name", "=", "suite", ".", "attrib", "[", "\"name\"", "]", "if", "\"package\"", "in", "suite", ".", "attrib", ":", "cursuite", ".", "package", "=", "suite", ".", "attrib", "[", "\"package\"", "]", "cursuite", ".", "duration", "=", "float", "(", "suite", ".", "attrib", ".", "get", "(", "\"time\"", ",", "'0'", ")", ".", "replace", "(", "','", ",", "''", ")", ")", "for", "element", "in", "suite", ":", "if", "element", ".", "tag", "==", "\"error\"", ":", "# top level error?", "errtag", "=", "{", "\"message\"", ":", "element", ".", "attrib", ".", "get", "(", "\"message\"", ",", "\"\"", ")", ",", "\"type\"", ":", "element", ".", "attrib", ".", "get", "(", "\"type\"", ",", "\"\"", ")", ",", "\"text\"", ":", "element", ".", "text", "}", "cursuite", ".", "errors", ".", "append", "(", "errtag", ")", "if", "element", ".", "tag", "==", "\"system-out\"", ":", "cursuite", ".", "stdout", "=", "element", ".", "text", "if", "element", ".", "tag", "==", "\"system-err\"", ":", "cursuite", ".", "stderr", "=", "element", ".", "text", "if", "element", ".", "tag", "==", "\"properties\"", ":", "for", "prop", "in", "element", ":", "if", "prop", ".", "tag", "==", "\"property\"", ":", "cursuite", ".", "properties", "[", "prop", ".", "attrib", "[", "\"name\"", "]", "]", "=", "prop", ".", "attrib", "[", "\"value\"", "]", "if", "element", ".", "tag", "==", "\"testcase\"", ":", "testcase", "=", "element", "if", "not", "testcase", ".", "attrib", ".", "get", "(", "\"classname\"", ",", "None", ")", ":", "testcase", ".", "attrib", "[", "\"classname\"", "]", "=", "NO_CLASSNAME", "if", "testcase", ".", "attrib", "[", "\"classname\"", "]", "not", "in", "cursuite", ":", "testclass", "=", "Class", "(", ")", "testclass", ".", "name", "=", "testcase", ".", "attrib", "[", "\"classname\"", "]", "cursuite", "[", "testclass", ".", "name", "]", "=", "testclass", "testclass", "=", "cursuite", "[", "testcase", ".", "attrib", "[", "\"classname\"", "]", "]", "newcase", "=", "Case", "(", ")", "newcase", ".", "name", "=", "testcase", ".", "attrib", "[", "\"name\"", "]", "newcase", ".", "testclass", "=", "testclass", "newcase", ".", "duration", "=", "float", "(", "testcase", ".", "attrib", ".", "get", "(", "\"time\"", ",", "'0'", ")", ".", "replace", "(", "','", ",", "''", ")", ")", "testclass", ".", "cases", ".", "append", "(", "newcase", ")", "# does this test case have any children?", "for", "child", "in", "testcase", ":", "if", "child", ".", "tag", "==", "\"skipped\"", ":", "newcase", ".", "skipped", "=", "child", ".", "text", "if", "\"message\"", "in", "child", ".", "attrib", ":", "newcase", ".", "skipped_msg", "=", "child", ".", "attrib", "[", "\"message\"", "]", "elif", "child", ".", "tag", "==", "\"system-out\"", ":", "newcase", ".", "stdout", "=", "child", ".", "text", "elif", 
"child", ".", "tag", "==", "\"system-err\"", ":", "newcase", ".", "stderr", "=", "child", ".", "text", "elif", "child", ".", "tag", "==", "\"failure\"", ":", "newcase", ".", "failure", "=", "child", ".", "text", "if", "\"message\"", "in", "child", ".", "attrib", ":", "newcase", ".", "failure_msg", "=", "child", ".", "attrib", "[", "\"message\"", "]", "elif", "child", ".", "tag", "==", "\"error\"", ":", "newcase", ".", "failure", "=", "child", ".", "text", "if", "\"message\"", "in", "child", ".", "attrib", ":", "newcase", ".", "failure_msg", "=", "child", ".", "attrib", "[", "\"message\"", "]", "elif", "child", ".", "tag", "==", "\"properties\"", ":", "for", "property", "in", "child", ":", "newproperty", "=", "Property", "(", ")", "newproperty", ".", "name", "=", "property", ".", "attrib", "[", "\"name\"", "]", "newproperty", ".", "value", "=", "property", ".", "attrib", "[", "\"value\"", "]", "newcase", ".", "properties", ".", "append", "(", "newproperty", ")" ]
populate the report from the xml :return:
[ "populate", "the", "report", "from", "the", "xml", ":", "return", ":" ]
python
train
42.659341
xmartlabs/benderthon
benderthon/tf_freeze.py
https://github.com/xmartlabs/benderthon/blob/810b6fb90f56136257e7ed12e5a30d17ad7ce6ba/benderthon/tf_freeze.py#L42-L48
def save_graph_only(sess, output_file_path, output_node_names, as_text=False):
    """Save a small version of the graph based on a session and the output node names."""
    for node in sess.graph_def.node:
        node.device = ''

    graph_def = graph_util.extract_sub_graph(sess.graph_def, output_node_names)
    output_dir, output_filename = os.path.split(output_file_path)
    graph_io.write_graph(graph_def, output_dir, output_filename, as_text=as_text)
[ "def", "save_graph_only", "(", "sess", ",", "output_file_path", ",", "output_node_names", ",", "as_text", "=", "False", ")", ":", "for", "node", "in", "sess", ".", "graph_def", ".", "node", ":", "node", ".", "device", "=", "''", "graph_def", "=", "graph_util", ".", "extract_sub_graph", "(", "sess", ".", "graph_def", ",", "output_node_names", ")", "output_dir", ",", "output_filename", "=", "os", ".", "path", ".", "split", "(", "output_file_path", ")", "graph_io", ".", "write_graph", "(", "graph_def", ",", "output_dir", ",", "output_filename", ",", "as_text", "=", "as_text", ")" ]
Save a small version of the graph based on a session and the output node names.
[ "Save", "a", "small", "version", "of", "the", "graph", "based", "on", "a", "session", "and", "the", "output", "node", "names", "." ]
python
test
64.571429
datacamp/sqlwhat
sqlwhat/checks/check_funcs.py
https://github.com/datacamp/sqlwhat/blob/9ae798c63124f994607a0e2c120b24ebbb2bdbe9/sqlwhat/checks/check_funcs.py#L11-L29
def has_no_error(
    state, incorrect_msg="Your code generated an error. Fix it and try again!"
):
    """Check whether the submission did not generate a runtime error.

    Simply use ``Ex().has_no_error()`` in your SCT whenever you want to check for errors.
    By default, after the entire SCT finished executing, ``sqlwhat`` will check
    for errors before marking the exercise as correct. You can disable this
    behavior by using ``Ex().allow_error()``.

    Args:
        incorrect_msg: If specified, this overrides the automatically generated feedback
                       message in case the student's query did not return a result.
    """
    if state.reporter.get_errors():
        state.do_test(incorrect_msg)

    return state
[ "def", "has_no_error", "(", "state", ",", "incorrect_msg", "=", "\"Your code generated an error. Fix it and try again!\"", ")", ":", "if", "state", ".", "reporter", ".", "get_errors", "(", ")", ":", "state", ".", "do_test", "(", "incorrect_msg", ")", "return", "state" ]
Check whether the submission did not generate a runtime error.

Simply use ``Ex().has_no_error()`` in your SCT whenever you want to check for errors. By default, after the entire SCT finished executing, ``sqlwhat`` will check for errors before marking the exercise as correct. You can disable this behavior by using ``Ex().allow_error()``.

Args:
    incorrect_msg: If specified, this overrides the automatically generated feedback message in case the student's query did not return a result.
[ "Check", "whether", "the", "submission", "did", "not", "generate", "a", "runtime", "error", "." ]
python
train
38.315789
crossbario/txaio
txaio/tx.py
https://github.com/crossbario/txaio/blob/29c77ff1210cabd4cc03f16f34672612e7eef704/txaio/tx.py#L437-L462
def make_batched_timer(self, bucket_seconds, chunk_size=100):
    """
    Creates and returns an object implementing
    :class:`txaio.IBatchedTimer`.

    :param bucket_seconds: the number of seconds in each bucket. That is,
        a value of 5 means that any timeout within a 5 second window will
        be in the same bucket, and get notified at the same time. This is
        only accurate to "milliseconds".

    :param chunk_size: when "doing" the callbacks in a particular bucket,
        this controls how many we do at once before yielding to the
        reactor.
    """

    def get_seconds():
        return self._get_loop().seconds()

    def create_delayed_call(delay, fun, *args, **kwargs):
        return self._get_loop().callLater(delay, fun, *args, **kwargs)

    return _BatchedTimer(
        bucket_seconds * 1000.0, chunk_size,
        seconds_provider=get_seconds,
        delayed_call_creator=create_delayed_call,
    )
[ "def", "make_batched_timer", "(", "self", ",", "bucket_seconds", ",", "chunk_size", "=", "100", ")", ":", "def", "get_seconds", "(", ")", ":", "return", "self", ".", "_get_loop", "(", ")", ".", "seconds", "(", ")", "def", "create_delayed_call", "(", "delay", ",", "fun", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_loop", "(", ")", ".", "callLater", "(", "delay", ",", "fun", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_BatchedTimer", "(", "bucket_seconds", "*", "1000.0", ",", "chunk_size", ",", "seconds_provider", "=", "get_seconds", ",", "delayed_call_creator", "=", "create_delayed_call", ",", ")" ]
Creates and returns an object implementing :class:`txaio.IBatchedTimer`.

:param bucket_seconds: the number of seconds in each bucket. That is, a value of 5 means that any timeout within a 5 second window will be in the same bucket, and get notified at the same time. This is only accurate to "milliseconds".

:param chunk_size: when "doing" the callbacks in a particular bucket, this controls how many we do at once before yielding to the reactor.
[ "Creates", "and", "returns", "an", "object", "implementing", ":", "class", ":", "txaio", ".", "IBatchedTimer", "." ]
python
train
38.423077
saltstack/salt
salt/modules/influxdb08mod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdb08mod.py#L93-L122
def db_exists(name, user=None, password=None, host=None, port=None):
    '''
    Checks if a database exists in Influxdb

    name
        Database name to create

    user
        The user to connect as

    password
        The password of the user

    host
        The host to connect to

    port
        The port to connect to

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb08.db_exists <name>
        salt '*' influxdb08.db_exists <name> <user> <password> <host> <port>
    '''
    dbs = db_list(user, password, host, port)
    if not isinstance(dbs, list):
        return False
    return name in [db['name'] for db in dbs]
[ "def", "db_exists", "(", "name", ",", "user", "=", "None", ",", "password", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "dbs", "=", "db_list", "(", "user", ",", "password", ",", "host", ",", "port", ")", "if", "not", "isinstance", "(", "dbs", ",", "list", ")", ":", "return", "False", "return", "name", "in", "[", "db", "[", "'name'", "]", "for", "db", "in", "dbs", "]" ]
Checks if a database exists in Influxdb

name
    Database name to create
user
    The user to connect as
password
    The password of the user
host
    The host to connect to
port
    The port to connect to

CLI Example:

.. code-block:: bash

    salt '*' influxdb08.db_exists <name>
    salt '*' influxdb08.db_exists <name> <user> <password> <host> <port>
[ "Checks", "if", "a", "database", "exists", "in", "Influxdb" ]
python
train
20.833333
ejhigson/nestcheck
nestcheck/dummy_data.py
https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/dummy_data.py#L50-L96
def get_dummy_run(nthread, nsamples, **kwargs):
    """Generate dummy data for a nested sampling run.

    Log-likelihood values of points are generated from a uniform distribution
    in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it
    is not -np.inf). Theta values of each point are each generated from a
    uniform distribution in (0, 1).

    Parameters
    ----------
    nthreads: int
        Number of threads in the run.
    nsamples: int
        Number of samples in thread.
    ndim: int, optional
        Number of dimensions.
    seed: int, optional
        If not False, the seed is set with np.random.seed(seed).
    logl_start: float, optional
        logl at which thread starts.
    logl_range: float, optional
        Scale factor applied to logl values.
    """
    seed = kwargs.pop('seed', False)
    ndim = kwargs.pop('ndim', 2)
    logl_start = kwargs.pop('logl_start', -np.inf)
    logl_range = kwargs.pop('logl_range', 1)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    threads = []
    # set seed before generating any threads and do not reset for each thread
    if seed is not False:
        np.random.seed(seed)
    threads = []
    for _ in range(nthread):
        threads.append(get_dummy_thread(
            nsamples, ndim=ndim, seed=False,
            logl_start=logl_start, logl_range=logl_range))
    # Sort threads in order of starting logl so labels match labels that would
    # have been given processing a dead points array. N.B. this only works when
    # all threads have same start_logl
    threads = sorted(threads, key=lambda th: th['logl'][0])
    for i, _ in enumerate(threads):
        threads[i]['thread_labels'] = np.full(nsamples, i)
    # Use combine_ns_runs rather than combine threads as this relabels the
    # threads according to their order
    return nestcheck.ns_run_utils.combine_threads(threads)
[ "def", "get_dummy_run", "(", "nthread", ",", "nsamples", ",", "*", "*", "kwargs", ")", ":", "seed", "=", "kwargs", ".", "pop", "(", "'seed'", ",", "False", ")", "ndim", "=", "kwargs", ".", "pop", "(", "'ndim'", ",", "2", ")", "logl_start", "=", "kwargs", ".", "pop", "(", "'logl_start'", ",", "-", "np", ".", "inf", ")", "logl_range", "=", "kwargs", ".", "pop", "(", "'logl_range'", ",", "1", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "'Unexpected **kwargs: {0}'", ".", "format", "(", "kwargs", ")", ")", "threads", "=", "[", "]", "# set seed before generating any threads and do not reset for each thread", "if", "seed", "is", "not", "False", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "threads", "=", "[", "]", "for", "_", "in", "range", "(", "nthread", ")", ":", "threads", ".", "append", "(", "get_dummy_thread", "(", "nsamples", ",", "ndim", "=", "ndim", ",", "seed", "=", "False", ",", "logl_start", "=", "logl_start", ",", "logl_range", "=", "logl_range", ")", ")", "# Sort threads in order of starting logl so labels match labels that would", "# have been given processing a dead points array. N.B. this only works when", "# all threads have same start_logl", "threads", "=", "sorted", "(", "threads", ",", "key", "=", "lambda", "th", ":", "th", "[", "'logl'", "]", "[", "0", "]", ")", "for", "i", ",", "_", "in", "enumerate", "(", "threads", ")", ":", "threads", "[", "i", "]", "[", "'thread_labels'", "]", "=", "np", ".", "full", "(", "nsamples", ",", "i", ")", "# Use combine_ns_runs rather than combine threads as this relabels the", "# threads according to their order", "return", "nestcheck", ".", "ns_run_utils", ".", "combine_threads", "(", "threads", ")" ]
Generate dummy data for a nested sampling run.

Log-likelihood values of points are generated from a uniform distribution in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is not -np.inf). Theta values of each point are each generated from a uniform distribution in (0, 1).

Parameters
----------
nthreads: int
    Number of threads in the run.
nsamples: int
    Number of samples in thread.
ndim: int, optional
    Number of dimensions.
seed: int, optional
    If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
    logl at which thread starts.
logl_range: float, optional
    Scale factor applied to logl values.
[ "Generate", "dummy", "data", "for", "a", "nested", "sampling", "run", "." ]
python
train
39.893617
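Going by the signature and docstring above, a call looks like this; the key names of the returned run dict are an assumption based on the combine_threads call:

# Usage sketch; the 'logl' key of the returned dict is assumed.
import nestcheck.dummy_data

run = nestcheck.dummy_data.get_dummy_run(2, 10, ndim=3, seed=0)
print(run['logl'].shape)  # (20,): 2 threads x 10 samples each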
BernardFW/bernard
src/bernard/engine/fsm.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L221-L245
async def _build_state(self,
                       request: Request,
                       message: BaseMessage,
                       responder: Responder) \
        -> Tuple[
            Optional[BaseState],
            Optional[BaseTrigger],
            Optional[bool],
        ]:
    """
    Build the state for this request.
    """
    trigger, state_class, dnr = await self._find_trigger(request)

    if trigger is None:
        if not message.should_confuse():
            return None, None, None
        state_class = self._confused_state(request)
        logger.debug('Next state: %s (confused)', state_class.name())
    else:
        logger.debug('Next state: %s', state_class.name())

    state = state_class(request, responder, trigger, trigger)

    return state, trigger, dnr
[ "async", "def", "_build_state", "(", "self", ",", "request", ":", "Request", ",", "message", ":", "BaseMessage", ",", "responder", ":", "Responder", ")", "->", "Tuple", "[", "Optional", "[", "BaseState", "]", ",", "Optional", "[", "BaseTrigger", "]", ",", "Optional", "[", "bool", "]", ",", "]", ":", "trigger", ",", "state_class", ",", "dnr", "=", "await", "self", ".", "_find_trigger", "(", "request", ")", "if", "trigger", "is", "None", ":", "if", "not", "message", ".", "should_confuse", "(", ")", ":", "return", "None", ",", "None", ",", "None", "state_class", "=", "self", ".", "_confused_state", "(", "request", ")", "logger", ".", "debug", "(", "'Next state: %s (confused)'", ",", "state_class", ".", "name", "(", ")", ")", "else", ":", "logger", ".", "debug", "(", "'Next state: %s'", ",", "state_class", ".", "name", "(", ")", ")", "state", "=", "state_class", "(", "request", ",", "responder", ",", "trigger", ",", "trigger", ")", "return", "state", ",", "trigger", ",", "dnr" ]
Build the state for this request.
[ "Build", "the", "state", "for", "this", "request", "." ]
python
train
34.16
cs50/check50
check50/__main__.py
https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/__main__.py#L147-L173
def await_results(url, pings=45, sleep=2):
    """
    Ping {url} until it returns a results payload, timing out after
    {pings} pings and waiting {sleep} seconds between pings.
    """

    print("Checking...", end="", flush=True)
    for _ in range(pings):
        # Query for check results.
        res = requests.post(url)
        if res.status_code != 200:
            continue
        payload = res.json()
        if payload["complete"]:
            break
        print(".", end="", flush=True)
        time.sleep(sleep)
    else:
        # Terminate if no response
        print()
        raise Error(
            _("check50 is taking longer than normal!\nSee https://cs50.me/checks/{} for more detail.").format(commit_hash))
    print()

    # TODO: Should probably check payload["checks"]["version"] here to make sure major version is same as __version__
    # (otherwise we may not be able to parse results)
    return (CheckResult(**result) for result in payload["checks"]["results"])
[ "def", "await_results", "(", "url", ",", "pings", "=", "45", ",", "sleep", "=", "2", ")", ":", "print", "(", "\"Checking...\"", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "for", "_", "in", "range", "(", "pings", ")", ":", "# Query for check results.", "res", "=", "requests", ".", "post", "(", "url", ")", "if", "res", ".", "status_code", "!=", "200", ":", "continue", "payload", "=", "res", ".", "json", "(", ")", "if", "payload", "[", "\"complete\"", "]", ":", "break", "print", "(", "\".\"", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "time", ".", "sleep", "(", "sleep", ")", "else", ":", "# Terminate if no response", "print", "(", ")", "raise", "Error", "(", "_", "(", "\"check50 is taking longer than normal!\\nSee https://cs50.me/checks/{} for more detail.\"", ")", ".", "format", "(", "commit_hash", ")", ")", "print", "(", ")", "# TODO: Should probably check payload[\"checks\"][\"version\"] here to make sure major version is same as __version__", "# (otherwise we may not be able to parse results)", "return", "(", "CheckResult", "(", "*", "*", "result", ")", "for", "result", "in", "payload", "[", "\"checks\"", "]", "[", "\"results\"", "]", ")" ]
Ping {url} until it returns a results payload, timing out after {pings} pings and waiting {sleep} seconds between pings.
[ "Ping", "{", "url", "}", "until", "it", "returns", "a", "results", "payload", "timing", "out", "after", "{", "pings", "}", "pings", "and", "waiting", "{", "sleep", "}", "seconds", "between", "pings", "." ]
python
train
35.962963
saltstack/salt
salt/modules/smartos_imgadm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smartos_imgadm.py#L104-L114
def _split_docker_uuid(uuid):
    '''
    Split a smartos docker uuid into repo and tag
    '''
    if uuid:
        uuid = uuid.split(':')
        if len(uuid) == 2:
            tag = uuid[1]
            repo = uuid[0]
            return repo, tag
    return None, None
[ "def", "_split_docker_uuid", "(", "uuid", ")", ":", "if", "uuid", ":", "uuid", "=", "uuid", ".", "split", "(", "':'", ")", "if", "len", "(", "uuid", ")", "==", "2", ":", "tag", "=", "uuid", "[", "1", "]", "repo", "=", "uuid", "[", "0", "]", "return", "repo", ",", "tag", "return", "None", ",", "None" ]
Split a smartos docker uuid into repo and tag
[ "Split", "a", "smartos", "docker", "uuid", "into", "repo", "and", "tag" ]
python
train
23.636364
google/openhtf
openhtf/core/phase_group.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/phase_group.py#L183-L189
def load_code_info(self):
    """Load coded info for all contained phases."""
    return PhaseGroup(
        setup=load_code_info(self.setup),
        main=load_code_info(self.main),
        teardown=load_code_info(self.teardown),
        name=self.name)
[ "def", "load_code_info", "(", "self", ")", ":", "return", "PhaseGroup", "(", "setup", "=", "load_code_info", "(", "self", ".", "setup", ")", ",", "main", "=", "load_code_info", "(", "self", ".", "main", ")", ",", "teardown", "=", "load_code_info", "(", "self", ".", "teardown", ")", ",", "name", "=", "self", ".", "name", ")" ]
Load coded info for all contained phases.
[ "Load", "coded", "info", "for", "all", "contained", "phases", "." ]
python
train
35.428571
geophysics-ubonn/reda
lib/reda/importers/legacy/eit160.py
https://github.com/geophysics-ubonn/reda/blob/46a939729e40c7c4723315c03679c40761152e9e/lib/reda/importers/legacy/eit160.py#L148-L168
def _read_mat_mnu0(filename):
    """Import a .mat file with single potentials (a b m) into a pandas
    DataFrame

    Also export some variables of the MD struct into a separate structure
    """
    print('read_mag_single_file: {0}'.format(filename))

    mat = sio.loadmat(filename, squeeze_me=True)
    # check the version
    version = mat['MP']['Version'].item()
    if version != 'FZJ-EZ-2017':
        raise Exception(
            'This data format is not supported (expected: FZJ-EZ-2017)' +
            ' got: {}'.format(version)
        )

    df_emd = _extract_emd(mat, filename=filename)
    df_md = _extract_md(mat)

    return df_emd, df_md
[ "def", "_read_mat_mnu0", "(", "filename", ")", ":", "print", "(", "'read_mag_single_file: {0}'", ".", "format", "(", "filename", ")", ")", "mat", "=", "sio", ".", "loadmat", "(", "filename", ",", "squeeze_me", "=", "True", ")", "# check the version", "version", "=", "mat", "[", "'MP'", "]", "[", "'Version'", "]", ".", "item", "(", ")", "if", "version", "!=", "'FZJ-EZ-2017'", ":", "raise", "Exception", "(", "'This data format is not supported (expected: FZJ-EZ-2017)'", "+", "' got: {}'", ".", "format", "(", "version", ")", ")", "df_emd", "=", "_extract_emd", "(", "mat", ",", "filename", "=", "filename", ")", "df_md", "=", "_extract_md", "(", "mat", ")", "return", "df_emd", ",", "df_md" ]
Import a .mat file with single potentials (a b m) into a pandas DataFrame Also export some variables of the MD struct into a separate structure
[ "Import", "a", ".", "mat", "file", "with", "single", "potentials", "(", "a", "b", "m", ")", "into", "a", "pandas", "DataFrame" ]
python
train
30.285714
bcbio/bcbio-nextgen
bcbio/utils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/utils.py#L344-L349
def copy_plus(orig, new): """Copy a file, including biological index files. """ for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]: if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)): shutil.copyfile(orig + ext, new + ext)
[ "def", "copy_plus", "(", "orig", ",", "new", ")", ":", "for", "ext", "in", "[", "\"\"", ",", "\".idx\"", ",", "\".gbi\"", ",", "\".tbi\"", ",", "\".bai\"", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "orig", "+", "ext", ")", "and", "(", "not", "os", ".", "path", ".", "lexists", "(", "new", "+", "ext", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "new", "+", "ext", ")", ")", ":", "shutil", ".", "copyfile", "(", "orig", "+", "ext", ",", "new", "+", "ext", ")" ]
Copy a file, including biological index files.
[ "Copy", "a", "fils", "including", "biological", "index", "files", "." ]
python
train
49.166667
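A hedged sketch of calling copy_plus; the paths are hypothetical, and the destination directory is assumed to exist already since the function only copies files:

    from bcbio.utils import copy_plus

    # Hypothetical paths: copies sample.vcf.gz plus any sibling index files
    # that exist, e.g. sample.vcf.gz.tbi -> backup/sample.vcf.gz.tbi.
    copy_plus('sample.vcf.gz', 'backup/sample.vcf.gz')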
heroku/sf-suds
suds/sax/enc.py
https://github.com/heroku/sf-suds/blob/44b6743a45ff4447157605d6fecc9bf5922ce68a/suds/sax/enc.py#L55-L66
def encode(self, s): """ Encode special characters found in string I{s}. @param s: A string to encode. @type s: str @return: The encoded string. @rtype: str """ if isinstance(s, basestring) and self.needsEncoding(s): for x in self.encodings: s = s.replace(x[0], x[1]) return s
[ "def", "encode", "(", "self", ",", "s", ")", ":", "if", "isinstance", "(", "s", ",", "basestring", ")", "and", "self", ".", "needsEncoding", "(", "s", ")", ":", "for", "x", "in", "self", ".", "encodings", ":", "s", "=", "s", ".", "replace", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ")", "return", "s" ]
Encode special characters found in string I{s}. @param s: A string to encode. @type s: str @return: The encoded string. @rtype: str
[ "Encode", "special", "characters", "found", "in", "string", "I", "{", "s", "}", "." ]
python
train
30.416667
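encode walks an instance-level table of (special, escaped) pairs; the standalone sketch below reproduces the idea with an assumed XML entity table, not the exact self.encodings from suds:

    # Assumed entity table; '&' is replaced first so the entities produced
    # by later replacements are not themselves re-escaped.
    encodings = [('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'), ('"', '&quot;')]

    def encode(s):
        # Replace each special character with its XML entity.
        for raw, escaped in encodings:
            s = s.replace(raw, escaped)
        return s

    print(encode('a < b & "c"'))  # a &lt; b &amp; &quot;c&quot;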
ARMmbed/icetea
icetea_lib/tools/tools.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/tools.py#L92-L103
def check_int(integer): """ Check if number is integer or not. :param integer: Number as str :return: Boolean """ if not isinstance(integer, str): return False if integer[0] in ('-', '+'): return integer[1:].isdigit() return integer.isdigit()
[ "def", "check_int", "(", "integer", ")", ":", "if", "not", "isinstance", "(", "integer", ",", "str", ")", ":", "return", "False", "if", "integer", "[", "0", "]", "in", "(", "'-'", ",", "'+'", ")", ":", "return", "integer", "[", "1", ":", "]", ".", "isdigit", "(", ")", "return", "integer", ".", "isdigit", "(", ")" ]
Check if number is integer or not. :param integer: Number as str :return: Boolean
[ "Check", "if", "number", "is", "integer", "or", "not", "." ]
python
train
23.333333
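A few example calls for check_int, matching the behavior shown above (the import path follows the file path in this record):

    from icetea_lib.tools.tools import check_int

    assert check_int('42') is True
    assert check_int('-7') is True      # a leading sign is accepted
    assert check_int('3.14') is False   # isdigit() rejects the dot
    assert check_int(42) is False       # non-str input is rejected up front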
apache/incubator-heron
heronpy/api/bolt/window_bolt.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heronpy/api/bolt/window_bolt.py#L175-L183
def process_tick(self, tup): """Called every window_duration """ curtime = int(time.time()) window_info = WindowContext(curtime - self.window_duration, curtime) self.processWindow(window_info, list(self.current_tuples)) for tup in self.current_tuples: self.ack(tup) self.current_tuples.clear()
[ "def", "process_tick", "(", "self", ",", "tup", ")", ":", "curtime", "=", "int", "(", "time", ".", "time", "(", ")", ")", "window_info", "=", "WindowContext", "(", "curtime", "-", "self", ".", "window_duration", ",", "curtime", ")", "self", ".", "processWindow", "(", "window_info", ",", "list", "(", "self", ".", "current_tuples", ")", ")", "for", "tup", "in", "self", ".", "current_tuples", ":", "self", ".", "ack", "(", "tup", ")", "self", ".", "current_tuples", ".", "clear", "(", ")" ]
Called every window_duration
[ "Called", "every", "window_duration" ]
python
valid
35.444444
stevearc/dynamo3
dynamo3/connection.py
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L570-L604
def get_item(self, tablename, key, attributes=None, consistent=False, return_capacity=None): """ Fetch a single item from a table This uses the older version of the DynamoDB API. See also: :meth:`~.get_item2`. Parameters ---------- tablename : str Name of the table to fetch from key : dict Primary key dict specifying the hash key and, if applicable, the range key of the item. attributes : list, optional If present, only fetch these attributes from the item consistent : bool, optional Perform a strongly consistent read of the data (default False) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) """ kwargs = { 'TableName': tablename, 'Key': self.dynamizer.encode_keys(key), 'ConsistentRead': consistent, 'ReturnConsumedCapacity': self._default_capacity(return_capacity), } if attributes is not None: kwargs['AttributesToGet'] = attributes data = self.call('get_item', **kwargs) return Result(self.dynamizer, data, 'Item')
[ "def", "get_item", "(", "self", ",", "tablename", ",", "key", ",", "attributes", "=", "None", ",", "consistent", "=", "False", ",", "return_capacity", "=", "None", ")", ":", "kwargs", "=", "{", "'TableName'", ":", "tablename", ",", "'Key'", ":", "self", ".", "dynamizer", ".", "encode_keys", "(", "key", ")", ",", "'ConsistentRead'", ":", "consistent", ",", "'ReturnConsumedCapacity'", ":", "self", ".", "_default_capacity", "(", "return_capacity", ")", ",", "}", "if", "attributes", "is", "not", "None", ":", "kwargs", "[", "'AttributesToGet'", "]", "=", "attributes", "data", "=", "self", ".", "call", "(", "'get_item'", ",", "*", "*", "kwargs", ")", "return", "Result", "(", "self", ".", "dynamizer", ",", "data", ",", "'Item'", ")" ]
Fetch a single item from a table This uses the older version of the DynamoDB API. See also: :meth:`~.get_item2`. Parameters ---------- tablename : str Name of the table to fetch from key : dict Primary key dict specifying the hash key and, if applicable, the range key of the item. attributes : list, optional If present, only fetch these attributes from the item consistent : bool, optional Perform a strongly consistent read of the data (default False) return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE)
[ "Fetch", "a", "single", "item", "from", "a", "table" ]
python
train
38.514286
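A hedged usage sketch for get_item; the table name, key, and region are placeholders, and the DynamoDBConnection.connect factory is an assumption about dynamo3's public API:

    from dynamo3 import DynamoDBConnection

    conn = DynamoDBConnection.connect(region='us-east-1')  # assumed factory
    # 'users' and the key dict are hypothetical.
    item = conn.get_item('users', {'id': 'abc123'},
                         attributes=['id', 'email'], consistent=True)
    print(item)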
minhhoit/yacms
yacms/blog/management/base.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/blog/management/base.py#L146-L240
def handle(self, *args, **options): """ Processes the converted data into the yacms database correctly. Attributes: yacms_user: the user to put this data in against date_format: the format the dates are in for posts and comments """ yacms_user = options.get("yacms_user") site = Site.objects.get_current() verbosity = int(options.get("verbosity", 1)) prompt = options.get("interactive") # Validate the yacms user. if yacms_user is None: raise CommandError("No yacms user has been specified") try: yacms_user = User.objects.get(username=yacms_user) except User.DoesNotExist: raise CommandError("Invalid yacms user: %s" % yacms_user) # Run the subclassed ``handle_import`` and save posts, tags, # categories, and comments to the DB. self.handle_import(options) for post_data in self.posts: categories = post_data.pop("categories") tags = post_data.pop("tags") comments = post_data.pop("comments") old_url = post_data.pop("old_url") post_data = self.trunc(BlogPost, prompt, **post_data) initial = { "title": post_data.pop("title"), "user": yacms_user, } if post_data["publish_date"] is None: post_data["status"] = CONTENT_STATUS_DRAFT post, created = BlogPost.objects.get_or_create(**initial) for k, v in post_data.items(): setattr(post, k, v) post.save() if created and verbosity >= 1: print("Imported post: %s" % post) for name in categories: cat = self.trunc(BlogCategory, prompt, title=name) if not cat["title"]: continue cat, created = BlogCategory.objects.get_or_create(**cat) if created and verbosity >= 1: print("Imported category: %s" % cat) post.categories.add(cat) for comment in comments: comment = self.trunc(ThreadedComment, prompt, **comment) comment["site"] = site post.comments.create(**comment) if verbosity >= 1: print("Imported comment by: %s" % comment["user_name"]) self.add_meta(post, tags, prompt, verbosity, old_url) # Create any pages imported (Wordpress can include pages) in_menus = [] footer = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES if menu[-1] == "pages/menus/footer.html"] if options["in_navigation"]: in_menus = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES] if footer and not options["in_footer"]: in_menus.remove(footer[0]) elif footer and options["in_footer"]: in_menus = footer parents = [] for page in self.pages: tags = page.pop("tags") old_url = page.pop("old_url") old_id = page.pop("old_id") old_parent_id = page.pop("old_parent_id") page = self.trunc(RichTextPage, prompt, **page) page["status"] = CONTENT_STATUS_PUBLISHED page["in_menus"] = in_menus page, created = RichTextPage.objects.get_or_create(**page) if created and verbosity >= 1: print("Imported page: %s" % page) self.add_meta(page, tags, prompt, verbosity, old_url) parents.append({ 'old_id': old_id, 'old_parent_id': old_parent_id, 'page': page, }) for obj in parents: if obj['old_parent_id']: for parent in parents: if parent['old_id'] == obj['old_parent_id']: obj['page'].parent = parent['page'] obj['page'].save() break
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "yacms_user", "=", "options", ".", "get", "(", "\"yacms_user\"", ")", "site", "=", "Site", ".", "objects", ".", "get_current", "(", ")", "verbosity", "=", "int", "(", "options", ".", "get", "(", "\"verbosity\"", ",", "1", ")", ")", "prompt", "=", "options", ".", "get", "(", "\"interactive\"", ")", "# Validate the yacms user.", "if", "yacms_user", "is", "None", ":", "raise", "CommandError", "(", "\"No yacms user has been specified\"", ")", "try", ":", "yacms_user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "yacms_user", ")", "except", "User", ".", "DoesNotExist", ":", "raise", "CommandError", "(", "\"Invalid yacms user: %s\"", "%", "yacms_user", ")", "# Run the subclassed ``handle_import`` and save posts, tags,", "# categories, and comments to the DB.", "self", ".", "handle_import", "(", "options", ")", "for", "post_data", "in", "self", ".", "posts", ":", "categories", "=", "post_data", ".", "pop", "(", "\"categories\"", ")", "tags", "=", "post_data", ".", "pop", "(", "\"tags\"", ")", "comments", "=", "post_data", ".", "pop", "(", "\"comments\"", ")", "old_url", "=", "post_data", ".", "pop", "(", "\"old_url\"", ")", "post_data", "=", "self", ".", "trunc", "(", "BlogPost", ",", "prompt", ",", "*", "*", "post_data", ")", "initial", "=", "{", "\"title\"", ":", "post_data", ".", "pop", "(", "\"title\"", ")", ",", "\"user\"", ":", "yacms_user", ",", "}", "if", "post_data", "[", "\"publish_date\"", "]", "is", "None", ":", "post_data", "[", "\"status\"", "]", "=", "CONTENT_STATUS_DRAFT", "post", ",", "created", "=", "BlogPost", ".", "objects", ".", "get_or_create", "(", "*", "*", "initial", ")", "for", "k", ",", "v", "in", "post_data", ".", "items", "(", ")", ":", "setattr", "(", "post", ",", "k", ",", "v", ")", "post", ".", "save", "(", ")", "if", "created", "and", "verbosity", ">=", "1", ":", "print", "(", "\"Imported post: %s\"", "%", "post", ")", "for", "name", "in", "categories", ":", "cat", "=", "self", ".", "trunc", "(", "BlogCategory", ",", "prompt", ",", "title", "=", "name", ")", "if", "not", "cat", "[", "\"title\"", "]", ":", "continue", "cat", ",", "created", "=", "BlogCategory", ".", "objects", ".", "get_or_create", "(", "*", "*", "cat", ")", "if", "created", "and", "verbosity", ">=", "1", ":", "print", "(", "\"Imported category: %s\"", "%", "cat", ")", "post", ".", "categories", ".", "add", "(", "cat", ")", "for", "comment", "in", "comments", ":", "comment", "=", "self", ".", "trunc", "(", "ThreadedComment", ",", "prompt", ",", "*", "*", "comment", ")", "comment", "[", "\"site\"", "]", "=", "site", "post", ".", "comments", ".", "create", "(", "*", "*", "comment", ")", "if", "verbosity", ">=", "1", ":", "print", "(", "\"Imported comment by: %s\"", "%", "comment", "[", "\"user_name\"", "]", ")", "self", ".", "add_meta", "(", "post", ",", "tags", ",", "prompt", ",", "verbosity", ",", "old_url", ")", "# Create any pages imported (Wordpress can include pages)", "in_menus", "=", "[", "]", "footer", "=", "[", "menu", "[", "0", "]", "for", "menu", "in", "settings", ".", "PAGE_MENU_TEMPLATES", "if", "menu", "[", "-", "1", "]", "==", "\"pages/menus/footer.html\"", "]", "if", "options", "[", "\"in_navigation\"", "]", ":", "in_menus", "=", "[", "menu", "[", "0", "]", "for", "menu", "in", "settings", ".", "PAGE_MENU_TEMPLATES", "]", "if", "footer", "and", "not", "options", "[", "\"in_footer\"", "]", ":", "in_menus", ".", "remove", "(", "footer", "[", "0", "]", ")", "elif", "footer", "and", 
"options", "[", "\"in_footer\"", "]", ":", "in_menus", "=", "footer", "parents", "=", "[", "]", "for", "page", "in", "self", ".", "pages", ":", "tags", "=", "page", ".", "pop", "(", "\"tags\"", ")", "old_url", "=", "page", ".", "pop", "(", "\"old_url\"", ")", "old_id", "=", "page", ".", "pop", "(", "\"old_id\"", ")", "old_parent_id", "=", "page", ".", "pop", "(", "\"old_parent_id\"", ")", "page", "=", "self", ".", "trunc", "(", "RichTextPage", ",", "prompt", ",", "*", "*", "page", ")", "page", "[", "\"status\"", "]", "=", "CONTENT_STATUS_PUBLISHED", "page", "[", "\"in_menus\"", "]", "=", "in_menus", "page", ",", "created", "=", "RichTextPage", ".", "objects", ".", "get_or_create", "(", "*", "*", "page", ")", "if", "created", "and", "verbosity", ">=", "1", ":", "print", "(", "\"Imported page: %s\"", "%", "page", ")", "self", ".", "add_meta", "(", "page", ",", "tags", ",", "prompt", ",", "verbosity", ",", "old_url", ")", "parents", ".", "append", "(", "{", "'old_id'", ":", "old_id", ",", "'old_parent_id'", ":", "old_parent_id", ",", "'page'", ":", "page", ",", "}", ")", "for", "obj", "in", "parents", ":", "if", "obj", "[", "'old_parent_id'", "]", ":", "for", "parent", "in", "parents", ":", "if", "parent", "[", "'old_id'", "]", "==", "obj", "[", "'old_parent_id'", "]", ":", "obj", "[", "'page'", "]", ".", "parent", "=", "parent", "[", "'page'", "]", "obj", "[", "'page'", "]", ".", "save", "(", ")", "break" ]
Processes the converted data into the yacms database correctly. Attributes: yacms_user: the user to put this data in against date_format: the format the dates are in for posts and comments
[ "Processes", "the", "converted", "data", "into", "the", "yacms", "database", "correctly", "." ]
python
train
41.747368
datacamp/shellwhat
shellwhat/checks/check_funcs.py
https://github.com/datacamp/shellwhat/blob/ee2f875e3db0eb06d69cc946c8e9700e0edceea2/shellwhat/checks/check_funcs.py#L15-L60
def has_code(state, text, incorrect_msg="The checker expected to find `{{text}}` in your command.", fixed=False): """Check whether the student code contains text. This function is a simpler override of the `has_code` function in protowhat, because ``ast_node._get_text()`` is not implemented in the OSH parser Using ``has_code()`` should be a last resort. It is always better to look at the result of code or the side effects they had on the state of your program. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). text : text that student code must contain. Can be a regex pattern or a simple string. incorrect_msg: if specified, this overrides the automatically generated feedback message in case ``text`` is not found in the student code. fixed: whether to match ``text`` exactly, rather than using regular expressions. :Example: Suppose the solution requires you to do: :: git push origin master The following SCT can be written: :: Ex().has_code(r'git\\s+push\\s+origin\\s+master') Submissions that would pass: :: git push origin master git push origin master Submissions that would fail: :: git push --force origin master """ stu_code = state.student_code # either simple text matching or regex test res = text in stu_code if fixed else re.search(text, stu_code) if not res: _msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text }) state.do_test(_msg) return state
[ "def", "has_code", "(", "state", ",", "text", ",", "incorrect_msg", "=", "\"The checker expected to find `{{text}}` in your command.\"", ",", "fixed", "=", "False", ")", ":", "stu_code", "=", "state", ".", "student_code", "# either simple text matching or regex test", "res", "=", "text", "in", "stu_code", "if", "fixed", "else", "re", ".", "search", "(", "text", ",", "stu_code", ")", "if", "not", "res", ":", "_msg", "=", "state", ".", "build_message", "(", "incorrect_msg", ",", "fmt_kwargs", "=", "{", "'text'", ":", "text", "}", ")", "state", ".", "do_test", "(", "_msg", ")", "return", "state" ]
Check whether the student code contains text. This function is a simpler override of the `has_code` function in protowhat, because ``ast_node._get_text()`` is not implemented in the OSH parser Using ``has_code()`` should be a last resort. It is always better to look at the result of code or the side effects they had on the state of your program. Args: state: State instance describing student and solution code. Can be omitted if used with Ex(). text : text that student code must contain. Can be a regex pattern or a simple string. incorrect_msg: if specified, this overrides the automatically generated feedback message in case ``text`` is not found in the student code. fixed: whether to match ``text`` exactly, rather than using regular expressions. :Example: Suppose the solution requires you to do: :: git push origin master The following SCT can be written: :: Ex().has_code(r'git\\s+push\\s+origin\\s+master') Submissions that would pass: :: git push origin master git push origin master Submissions that would fail: :: git push --force origin master
[ "Check", "whether", "the", "student", "code", "contains", "text", "." ]
python
train
35.391304
Capitains/flask-capitains-nemo
flask_nemo/__init__.py
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L309-L351
def transform(self, work, xml, objectId, subreference=None): """ Transform input according to potentially registered XSLT .. note:: Since 1.0.0, transform takes an objectId parameter which represent the passage which is called .. note:: Due to XSLT not being able to be used twice, we rexsltise the xml at every call of xslt .. warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \ it is not possible to use strip tags in the xslt given to this application :param work: Work object containing metadata about the xml :type work: MyCapytains.resources.inventory.Text :param xml: XML to transform :type xml: etree._Element :param objectId: Object Identifier :type objectId: str :param subreference: Subreference :type subreference: str :return: String representation of transformed resource :rtype: str """ # We check first that we don't have if str(objectId) in self._transform: func = self._transform[str(objectId)] else: func = self._transform["default"] # If we have a string, it means we get a XSL filepath if isinstance(func, str): with open(func) as f: xslt = etree.XSLT(etree.parse(f)) return etree.tostring( xslt(xml), encoding=str, method="html", xml_declaration=None, pretty_print=False, with_tail=True, standalone=None ) # If we have a function, it means we return the result of the function elif isinstance(func, Callable): return func(work, xml, objectId, subreference) # If we have None, it means we just give back the xml elif func is None: return etree.tostring(xml, encoding=str)
[ "def", "transform", "(", "self", ",", "work", ",", "xml", ",", "objectId", ",", "subreference", "=", "None", ")", ":", "# We check first that we don't have", "if", "str", "(", "objectId", ")", "in", "self", ".", "_transform", ":", "func", "=", "self", ".", "_transform", "[", "str", "(", "objectId", ")", "]", "else", ":", "func", "=", "self", ".", "_transform", "[", "\"default\"", "]", "# If we have a string, it means we get a XSL filepath", "if", "isinstance", "(", "func", ",", "str", ")", ":", "with", "open", "(", "func", ")", "as", "f", ":", "xslt", "=", "etree", ".", "XSLT", "(", "etree", ".", "parse", "(", "f", ")", ")", "return", "etree", ".", "tostring", "(", "xslt", "(", "xml", ")", ",", "encoding", "=", "str", ",", "method", "=", "\"html\"", ",", "xml_declaration", "=", "None", ",", "pretty_print", "=", "False", ",", "with_tail", "=", "True", ",", "standalone", "=", "None", ")", "# If we have a function, it means we return the result of the function", "elif", "isinstance", "(", "func", ",", "Callable", ")", ":", "return", "func", "(", "work", ",", "xml", ",", "objectId", ",", "subreference", ")", "# If we have None, it means we just give back the xml", "elif", "func", "is", "None", ":", "return", "etree", ".", "tostring", "(", "xml", ",", "encoding", "=", "str", ")" ]
Transform input according to potentially registered XSLT .. note:: Since 1.0.0, transform takes an objectId parameter which represent the passage which is called .. note:: Due to XSLT not being able to be used twice, we rexsltise the xml at every call of xslt .. warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \ it is not possible to use strip tags in the xslt given to this application :param work: Work object containing metadata about the xml :type work: MyCapytains.resources.inventory.Text :param xml: XML to transform :type xml: etree._Element :param objectId: Object Identifier :type objectId: str :param subreference: Subreference :type subreference: str :return: String representation of transformed resource :rtype: str
[ "Transform", "input", "according", "to", "potentially", "registered", "XSLT" ]
python
valid
43.069767
Opentrons/opentrons
api/src/opentrons/deck_calibration/endpoints.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/deck_calibration/endpoints.py#L209-L245
async def attach_tip(data): """ Attach a tip to the current pipette :param data: Information obtained from a POST request. The content type is application/json. The correct packet form should be as follows: { 'token': UUID token from current session start 'command': 'attach tip' 'tipLength': a float representing how much the length of a pipette increases when a tip is added } """ global session tip_length = data.get('tipLength') if not tip_length: message = 'Error: "tipLength" must be specified in request' status = 400 else: if not feature_flags.use_protocol_api_v2(): pipette = session.pipettes[session.current_mount] if pipette.tip_attached: log.warning('attach tip called while tip already attached') pipette._remove_tip(pipette._tip_length) pipette._add_tip(tip_length) else: session.adapter.add_tip(session.current_mount, tip_length) if session.cp: session.cp = CriticalPoint.FRONT_NOZZLE session.tip_length = tip_length message = "Tip length set: {}".format(tip_length) status = 200 return web.json_response({'message': message}, status=status)
[ "async", "def", "attach_tip", "(", "data", ")", ":", "global", "session", "tip_length", "=", "data", ".", "get", "(", "'tipLength'", ")", "if", "not", "tip_length", ":", "message", "=", "'Error: \"tipLength\" must be specified in request'", "status", "=", "400", "else", ":", "if", "not", "feature_flags", ".", "use_protocol_api_v2", "(", ")", ":", "pipette", "=", "session", ".", "pipettes", "[", "session", ".", "current_mount", "]", "if", "pipette", ".", "tip_attached", ":", "log", ".", "warning", "(", "'attach tip called while tip already attached'", ")", "pipette", ".", "_remove_tip", "(", "pipette", ".", "_tip_length", ")", "pipette", ".", "_add_tip", "(", "tip_length", ")", "else", ":", "session", ".", "adapter", ".", "add_tip", "(", "session", ".", "current_mount", ",", "tip_length", ")", "if", "session", ".", "cp", ":", "session", ".", "cp", "=", "CriticalPoint", ".", "FRONT_NOZZLE", "session", ".", "tip_length", "=", "tip_length", "message", "=", "\"Tip length set: {}\"", ".", "format", "(", "tip_length", ")", "status", "=", "200", "return", "web", ".", "json_response", "(", "{", "'message'", ":", "message", "}", ",", "status", "=", "status", ")" ]
Attach a tip to the current pipette :param data: Information obtained from a POST request. The content type is application/json. The correct packet form should be as follows: { 'token': UUID token from current session start 'command': 'attach tip' 'tipLength': a float representing how much the length of a pipette increases when a tip is added }
[ "Attach", "a", "tip", "to", "the", "current", "pipette" ]
python
train
34.216216
CodyKochmann/generators
generators/inline_tools.py
https://github.com/CodyKochmann/generators/blob/e4ca4dd25d5023a94b0349c69d6224070cc2526f/generators/inline_tools.py#L14-L33
def asserts(input_value, rule, message=''): """ this function allows you to write asserts in generators since there are moments where you actually want the program to halt when certain values are seen. """ assert callable(rule) or type(rule)==bool, 'asserts needs rule to be a callable function or a test boolean' assert isinstance(message, str), 'asserts needs message to be a string' # if the message is empty and rule is callable, fill message with rule's source code if len(message)==0 and callable(rule): try: s = getsource(rule).splitlines()[0].strip() except: s = repr(rule).strip() message = 'illegal input of {} breaks - {}'.format(input_value, s) if callable(rule): # if rule is a function, run the function and assign it to rule rule = rule(input_value) # now, assert the rule and return the input value assert rule, message return input_value
[ "def", "asserts", "(", "input_value", ",", "rule", ",", "message", "=", "''", ")", ":", "assert", "callable", "(", "rule", ")", "or", "type", "(", "rule", ")", "==", "bool", ",", "'asserts needs rule to be a callable function or a test boolean'", "assert", "isinstance", "(", "message", ",", "str", ")", ",", "'asserts needs message to be a string'", "# if the message is empty and rule is callable, fill message with rule's source code", "if", "len", "(", "message", ")", "==", "0", "and", "callable", "(", "rule", ")", ":", "try", ":", "s", "=", "getsource", "(", "rule", ")", ".", "splitlines", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "except", ":", "s", "=", "repr", "(", "rule", ")", ".", "strip", "(", ")", "message", "=", "'illegal input of {} breaks - {}'", ".", "format", "(", "input_value", ",", "s", ")", "if", "callable", "(", "rule", ")", ":", "# if rule is a function, run the function and assign it to rule", "rule", "=", "rule", "(", "input_value", ")", "# now, assert the rule and return the input value", "assert", "rule", ",", "message", "return", "input_value" ]
this function allows you to write asserts in generators since there are moments where you actually want the program to halt when certain values are seen.
[ "this", "function", "allows", "you", "to", "write", "asserts", "in", "generators", "since", "there", "are", "moments", "where", "you", "actually", "want", "the", "program", "to", "halt", "when", "certain", "values", "are", "seen", "." ]
python
train
47.75
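asserts is easy to exercise standalone; a minimal sketch with made-up values:

    from generators.inline_tools import asserts

    # A passing rule returns the input value unchanged.
    assert asserts(5, lambda i: i > 0) == 5

    # A failing rule raises AssertionError carrying the custom message.
    try:
        asserts(-1, lambda i: i > 0, 'need a positive number')
    except AssertionError as e:
        print(e)  # need a positive number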
spyder-ide/spyder
spyder/preferences/shortcuts.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/shortcuts.py#L478-L484
def accept_override(self): """Unbind all conflicted shortcuts, and accept the new one""" conflicts = self.check_conflicts() if conflicts: for shortcut in conflicts: shortcut.key = '' self.accept()
[ "def", "accept_override", "(", "self", ")", ":", "conflicts", "=", "self", ".", "check_conflicts", "(", ")", "if", "conflicts", ":", "for", "shortcut", "in", "conflicts", ":", "shortcut", ".", "key", "=", "''", "self", ".", "accept", "(", ")" ]
Unbind all conflicted shortcuts, and accept the new one
[ "Unbind", "all", "conflicted", "shortcuts", "and", "accept", "the", "new", "one" ]
python
train
36.571429
RedHatInsights/insights-core
insights/core/ls_parser.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/ls_parser.py#L181-L224
def parse(lines, root=None): """ Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output. It's keyed by the path containing each ls stanza. """ doc = {} entries = [] name = None total = None for line in lines: line = line.strip() if not line: continue if line and line[0] == "/" and line[-1] == ":": if name is None: name = line[:-1] if entries: d = Directory(name, total or len(entries), entries) doc[root] = d total = None entries = [] else: d = Directory(name, total or len(entries), entries) doc[name or root] = d total = None entries = [] name = line[:-1] continue if line.startswith("total"): total = int(line.split(None, 1)[1]) continue entries.append(line) name = name or root doc[name] = Directory(name, total or len(entries), entries) return doc
[ "def", "parse", "(", "lines", ",", "root", "=", "None", ")", ":", "doc", "=", "{", "}", "entries", "=", "[", "]", "name", "=", "None", "total", "=", "None", "for", "line", "in", "lines", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "continue", "if", "line", "and", "line", "[", "0", "]", "==", "\"/\"", "and", "line", "[", "-", "1", "]", "==", "\":\"", ":", "if", "name", "is", "None", ":", "name", "=", "line", "[", ":", "-", "1", "]", "if", "entries", ":", "d", "=", "Directory", "(", "name", ",", "total", "or", "len", "(", "entries", ")", ",", "entries", ")", "doc", "[", "root", "]", "=", "d", "total", "=", "None", "entries", "=", "[", "]", "else", ":", "d", "=", "Directory", "(", "name", ",", "total", "or", "len", "(", "entries", ")", ",", "entries", ")", "doc", "[", "name", "or", "root", "]", "=", "d", "total", "=", "None", "entries", "=", "[", "]", "name", "=", "line", "[", ":", "-", "1", "]", "continue", "if", "line", ".", "startswith", "(", "\"total\"", ")", ":", "total", "=", "int", "(", "line", ".", "split", "(", "None", ",", "1", ")", "[", "1", "]", ")", "continue", "entries", ".", "append", "(", "line", ")", "name", "=", "name", "or", "root", "doc", "[", "name", "]", "=", "Directory", "(", "name", ",", "total", "or", "len", "(", "entries", ")", ",", "entries", ")", "return", "doc" ]
Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output. It's keyed by the path containing each ls stanza.
[ "Parses", "a", "list", "of", "lines", "from", "ls", "into", "dictionaries", "representing", "their", "components", "." ]
python
train
30.272727
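A hedged sketch of the ls output shape parse expects; the listing below is invented, and per the docstring the returned dict is keyed by directory path:

    from insights.core.ls_parser import parse

    # Invented `ls -la` stanza for a single directory.
    lines = [
        '/etc/pki:',
        'total 8',
        'drwxr-xr-x. 2 0 0 4096 Jun  1 10:00 tls',
        'drwxr-xr-x. 2 0 0 4096 Jun  1 10:00 rpm-gpg',
    ]
    result = parse(lines, root='/etc/pki')
    assert '/etc/pki' in result  # one Directory entry per stanza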
grycap/RADL
radl/radl_parse.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl_parse.py#L362-L383
def parse_radl(data): """ Parse a RADL document. Args: - data(str): filepath to a RADL content or a string with content. Return: RADL object. """ if data is None: return None elif os.path.isfile(data): f = open(data) data = "".join(f.readlines()) f.close() elif data.strip() == "": return RADL() data = data + "\n" parser = RADLParser(lextab='radl') return parser.parse(data)
[ "def", "parse_radl", "(", "data", ")", ":", "if", "data", "is", "None", ":", "return", "None", "elif", "os", ".", "path", ".", "isfile", "(", "data", ")", ":", "f", "=", "open", "(", "data", ")", "data", "=", "\"\"", ".", "join", "(", "f", ".", "readlines", "(", ")", ")", "f", ".", "close", "(", ")", "elif", "data", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "RADL", "(", ")", "data", "=", "data", "+", "\"\\n\"", "parser", "=", "RADLParser", "(", "lextab", "=", "'radl'", ")", "return", "parser", ".", "parse", "(", "data", ")" ]
Parse a RADL document. Args: - data(str): filepath to a RADL content or a string with content. Return: RADL object.
[ "Parse", "a", "RADL", "document", "." ]
python
train
20.272727
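A hedged example of parse_radl; the inline snippet is only an assumption about RADL syntax, made for illustration:

    from radl.radl_parse import parse_radl

    # Invented RADL document; consult the RADL docs for real syntax.
    radl_text = '''
    system node (
        cpu.count >= 2 and
        memory.size >= 2048m
    )
    '''
    r = parse_radl(radl_text)   # parsed RADL object
    empty = parse_radl('   ')   # whitespace-only input yields an empty RADL()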
willkg/socorro-siggen
siggen/utils.py
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/utils.py#L91-L102
def drop_bad_characters(text): """Takes a text and drops all non-printable and non-ascii characters and also any whitespace characters that aren't space. :arg str text: the text to fix :returns: text with all bad characters dropped """ # Strip all non-ascii and non-printable characters text = ''.join([c for c in text if c in ALLOWED_CHARS]) return text
[ "def", "drop_bad_characters", "(", "text", ")", ":", "# Strip all non-ascii and non-printable characters", "text", "=", "''", ".", "join", "(", "[", "c", "for", "c", "in", "text", "if", "c", "in", "ALLOWED_CHARS", "]", ")", "return", "text" ]
Takes a text and drops all non-printable and non-ascii characters and also any whitespace characters that aren't space. :arg str text: the text to fix :returns: text with all bad characters dropped
[ "Takes", "a", "text", "and", "drops", "all", "non", "-", "printable", "and", "non", "-", "ascii", "characters", "and", "also", "any", "whitespace", "characters", "that", "aren", "t", "space", "." ]
python
train
31.5
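drop_bad_characters leans on a module-level ALLOWED_CHARS; the standalone sketch below supplies its own whitelist as an assumption (siggen's real constant may differ):

    import string

    # Assumed whitelist: printable ASCII plus the space character.
    ALLOWED_CHARS = set(string.ascii_letters + string.digits
                        + string.punctuation + ' ')

    def drop_bad_characters(text):
        # Keep whitelisted characters; drop the rest (including tabs).
        return ''.join(c for c in text if c in ALLOWED_CHARS)

    print(drop_bad_characters('caf\u00e9\tname'))  # cafname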
ff0000/scarlet
scarlet/versioning/models.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/versioning/models.py#L221-L229
def save(self, *args, **kwargs): """ Takes an optional last_save keyword argument otherwise last_save will be set to timezone.now() Calls super to actually save the object. """ self.last_save = kwargs.pop('last_save', timezone.now()) super(Cloneable, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "last_save", "=", "kwargs", ".", "pop", "(", "'last_save'", ",", "timezone", ".", "now", "(", ")", ")", "super", "(", "Cloneable", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Takes an optional last_save keyword argument otherwise last_save will be set to timezone.now() Calls super to actually save the object.
[ "Takes", "an", "optional", "last_save", "keyword", "argument", "other", "wise", "last_save", "will", "be", "set", "to", "timezone", ".", "now", "()" ]
python
train
36.444444
pmacosta/pexdoc
pexdoc/exh.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/exh.py#L1065-L1070
def _get_ex_data(self): """Return hierarchical function name.""" func_id, func_name = self._get_callable_path() if self._full_cname: func_name = self.encode_call(func_name) return func_id, func_name
[ "def", "_get_ex_data", "(", "self", ")", ":", "func_id", ",", "func_name", "=", "self", ".", "_get_callable_path", "(", ")", "if", "self", ".", "_full_cname", ":", "func_name", "=", "self", ".", "encode_call", "(", "func_name", ")", "return", "func_id", ",", "func_name" ]
Return hierarchical function name.
[ "Return", "hierarchical", "function", "name", "." ]
python
train
39.5
CityOfZion/neo-python
neo/Core/Blockchain.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/Blockchain.py#L414-L425
def GetSysFeeAmountByHeight(self, height): """ Get the system fee for the specified block. Args: height (int): block height. Returns: int: """ hash = self.GetBlockHash(height) return self.GetSysFeeAmount(hash)
[ "def", "GetSysFeeAmountByHeight", "(", "self", ",", "height", ")", ":", "hash", "=", "self", ".", "GetBlockHash", "(", "height", ")", "return", "self", ".", "GetSysFeeAmount", "(", "hash", ")" ]
Get the system fee for the specified block. Args: height (int): block height. Returns: int:
[ "Get", "the", "system", "fee", "for", "the", "specified", "block", "." ]
python
train
23.333333
thewca/wca-regulations-compiler
wrc/codegen/cghtml.py
https://github.com/thewca/wca-regulations-compiler/blob/3ebbd8fe8fec7c9167296f59b2677696fe61a954/wrc/codegen/cghtml.py#L30-L53
def special_links_replace(text, urls): ''' Replace simplified Regulations and Guidelines links into actual links. 'urls' dictionary is expected to provide actual links to the targeted Regulations and Guidelines, as well as to the PDF file. ''' match_number = r'([A-Za-z0-9]+)' + r'(\+*)' reference_list = [(r'regulations:article:' + match_number, urls['regulations']), (r'regulations:regulation:' + match_number, urls['regulations']), (r'guidelines:article:' + match_number, urls['guidelines']), (r'guidelines:guideline:' + match_number, urls['guidelines']), ] anchor_list = [(r'regulations:contents', urls['regulations'] + r'#contents'), (r'guidelines:contents', urls['guidelines'] + r'#contents'), (r'regulations:top', urls['regulations'] + r'#'), (r'guidelines:top', urls['guidelines'] + r'#'), (r'link:pdf', urls['pdf'] + '.pdf'), ] retval = text for match, repl in reference_list: retval = re.sub(match, repl + r'#\1\2', retval) for match, repl in anchor_list: retval = re.sub(match, repl, retval) return retval
[ "def", "special_links_replace", "(", "text", ",", "urls", ")", ":", "match_number", "=", "r'([A-Za-z0-9]+)'", "+", "r'(\\+*)'", "reference_list", "=", "[", "(", "r'regulations:article:'", "+", "match_number", ",", "urls", "[", "'regulations'", "]", ")", ",", "(", "r'regulations:regulation:'", "+", "match_number", ",", "urls", "[", "'regulations'", "]", ")", ",", "(", "r'guidelines:article:'", "+", "match_number", ",", "urls", "[", "'guidelines'", "]", ")", ",", "(", "r'guidelines:guideline:'", "+", "match_number", ",", "urls", "[", "'guidelines'", "]", ")", ",", "]", "anchor_list", "=", "[", "(", "r'regulations:contents'", ",", "urls", "[", "'regulations'", "]", "+", "r'#contents'", ")", ",", "(", "r'guidelines:contents'", ",", "urls", "[", "'guidelines'", "]", "+", "r'#contents'", ")", ",", "(", "r'regulations:top'", ",", "urls", "[", "'regulations'", "]", "+", "r'#'", ")", ",", "(", "r'guidelines:top'", ",", "urls", "[", "'guidelines'", "]", "+", "r'#'", ")", ",", "(", "r'link:pdf'", ",", "urls", "[", "'pdf'", "]", "+", "'.pdf'", ")", ",", "]", "retval", "=", "text", "for", "match", ",", "repl", "in", "reference_list", ":", "retval", "=", "re", ".", "sub", "(", "match", ",", "repl", "+", "r'#\\1\\2'", ",", "retval", ")", "for", "match", ",", "repl", "in", "anchor_list", ":", "retval", "=", "re", ".", "sub", "(", "match", ",", "repl", ",", "retval", ")", "return", "retval" ]
Replace simplified Regulations and Guidelines links into actual links. 'urls' dictionary is expected to provide actual links to the targeted Regulations and Guidelines, as well as to the PDF file.
[ "Replace", "simplified", "Regulations", "and", "Guidelines", "links", "into", "actual", "links", ".", "urls", "dictionary", "is", "expected", "to", "provide", "actual", "links", "to", "the", "targeted", "Regulations", "and", "Guidelines", "as", "well", "as", "to", "the", "PDF", "file", "." ]
python
train
51.583333
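A hedged call to special_links_replace; the urls mapping and tag text are invented, shaped after the keys the function reads:

    from wrc.codegen.cghtml import special_links_replace

    # Placeholder URLs for each target the function knows about.
    urls = {
        'regulations': 'https://example.org/regulations',
        'guidelines': 'https://example.org/guidelines',
        'pdf': 'https://example.org/wca-regulations',
    }
    text = 'See regulations:regulation:9f1 and link:pdf for details.'
    print(special_links_replace(text, urls))
    # See https://example.org/regulations#9f1 and
    # https://example.org/wca-regulations.pdf for details.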
openvax/topiary
topiary/filters.py
https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/filters.py#L109-L151
def apply_effect_expression_filters( effects, gene_expression_dict, gene_expression_threshold, transcript_expression_dict, transcript_expression_threshold): """ Filter collection of varcode effects by given gene and transcript expression thresholds. Parameters ---------- effects : varcode.EffectCollection gene_expression_dict : dict gene_expression_threshold : float transcript_expression_dict : dict transcript_expression_threshold : float """ if gene_expression_dict: effects = apply_filter( lambda effect: ( gene_expression_dict.get(effect.gene_id, 0.0) >= gene_expression_threshold), effects, result_fn=effects.clone_with_new_elements, filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold) if transcript_expression_dict: effects = apply_filter( lambda effect: ( transcript_expression_dict.get(effect.transcript_id, 0.0) >= transcript_expression_threshold ), effects, result_fn=effects.clone_with_new_elements, filter_name=( "Effect transcript expression (min=%0.4f)" % ( transcript_expression_threshold,))) return effects
[ "def", "apply_effect_expression_filters", "(", "effects", ",", "gene_expression_dict", ",", "gene_expression_threshold", ",", "transcript_expression_dict", ",", "transcript_expression_threshold", ")", ":", "if", "gene_expression_dict", ":", "effects", "=", "apply_filter", "(", "lambda", "effect", ":", "(", "gene_expression_dict", ".", "get", "(", "effect", ".", "gene_id", ",", "0.0", ")", ">=", "gene_expression_threshold", ")", ",", "effects", ",", "result_fn", "=", "effects", ".", "clone_with_new_elements", ",", "filter_name", "=", "\"Effect gene expression (min = %0.4f)\"", "%", "gene_expression_threshold", ")", "if", "transcript_expression_dict", ":", "effects", "=", "apply_filter", "(", "lambda", "effect", ":", "(", "transcript_expression_dict", ".", "get", "(", "effect", ".", "transcript_id", ",", "0.0", ")", ">=", "transcript_expression_threshold", ")", ",", "effects", ",", "result_fn", "=", "effects", ".", "clone_with_new_elements", ",", "filter_name", "=", "(", "\"Effect transcript expression (min=%0.4f)\"", "%", "(", "transcript_expression_threshold", ",", ")", ")", ")", "return", "effects" ]
Filter collection of varcode effects by given gene and transcript expression thresholds. Parameters ---------- effects : varcode.EffectCollection gene_expression_dict : dict gene_expression_threshold : float transcript_expression_dict : dict transcript_expression_threshold : float
[ "Filter", "collection", "of", "varcode", "effects", "by", "given", "gene", "and", "transcript", "expression", "thresholds", "." ]
python
train
31.093023
tensorlayer/tensorlayer
tensorlayer/layers/utils.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/utils.py#L64-L99
def flatten_reshape(variable, name='flatten'): """Reshapes a high-dimension vector input. [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask] Parameters ---------- variable : TensorFlow variable or tensor The variable or tensor to be flatten. name : str A unique layer name. Returns ------- Tensor Flatten Tensor Examples -------- >>> import tensorflow as tf >>> import tensorlayer as tl >>> x = tf.placeholder(tf.float32, [None, 128, 128, 3]) >>> # Convolution Layer with 32 filters and a kernel size of 5 >>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu) >>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 >>> network = tf.layers.max_pooling2d(network, 2, 2) >>> print(network.get_shape()[:].as_list()) >>> [None, 62, 62, 32] >>> network = tl.layers.flatten_reshape(network) >>> print(network.get_shape()[:].as_list()[1:]) >>> [None, 123008] """ dim = 1 for d in variable.get_shape()[1:].as_list(): dim *= d return tf.reshape(variable, shape=[-1, dim], name=name)
[ "def", "flatten_reshape", "(", "variable", ",", "name", "=", "'flatten'", ")", ":", "dim", "=", "1", "for", "d", "in", "variable", ".", "get_shape", "(", ")", "[", "1", ":", "]", ".", "as_list", "(", ")", ":", "dim", "*=", "d", "return", "tf", ".", "reshape", "(", "variable", ",", "shape", "=", "[", "-", "1", ",", "dim", "]", ",", "name", "=", "name", ")" ]
Reshapes a high-dimension vector input. [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask] Parameters ---------- variable : TensorFlow variable or tensor The variable or tensor to be flatten. name : str A unique layer name. Returns ------- Tensor Flatten Tensor Examples -------- >>> import tensorflow as tf >>> import tensorlayer as tl >>> x = tf.placeholder(tf.float32, [None, 128, 128, 3]) >>> # Convolution Layer with 32 filters and a kernel size of 5 >>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu) >>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2 >>> network = tf.layers.max_pooling2d(network, 2, 2) >>> print(network.get_shape()[:].as_list()) >>> [None, 62, 62, 32] >>> network = tl.layers.flatten_reshape(network) >>> print(network.get_shape()[:].as_list()[1:]) >>> [None, 123008]
[ "Reshapes", "a", "high", "-", "dimension", "vector", "input", "." ]
python
valid
31.861111
raff/dynash
dynash/dynash.py
https://github.com/raff/dynash/blob/a2b4fab67dd85ceaa9c1bb7604ebc1768a7fc28e/dynash/dynash.py#L298-L309
def do_login(self, line): "login aws-access-key aws-secret" if line: args = self.getargs(line) self.conn = boto.connect_dynamodb( aws_access_key_id=args[0], aws_secret_access_key=args[1]) else: self.conn = boto.connect_dynamodb() self.do_tables('')
[ "def", "do_login", "(", "self", ",", "line", ")", ":", "if", "line", ":", "args", "=", "self", ".", "getargs", "(", "line", ")", "self", ".", "conn", "=", "boto", ".", "connect_dynamodb", "(", "aws_access_key_id", "=", "args", "[", "0", "]", ",", "aws_secret_access_key", "=", "args", "[", "1", "]", ")", "else", ":", "self", ".", "conn", "=", "boto", ".", "connect_dynamodb", "(", ")", "self", ".", "do_tables", "(", "''", ")" ]
login aws-access-key aws-secret
[ "login", "aws", "-", "acces", "-", "key", "aws", "-", "secret" ]
python
train
28.166667
mbedmicro/pyOCD
pyocd/coresight/ap.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/ap.py#L136-L144
def _locked(func): """! Decorator to automatically lock an AccessPort method.""" def _locking(self, *args, **kwargs): try: self.lock() return func(self, *args, **kwargs) finally: self.unlock() return _locking
[ "def", "_locked", "(", "func", ")", ":", "def", "_locking", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "lock", "(", ")", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "self", ".", "unlock", "(", ")", "return", "_locking" ]
! Decorator to automatically lock an AccessPort method.
[ "!", "Decorator", "to", "automatically", "lock", "an", "AccessPort", "method", "." ]
python
train
29.333333
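A toy sketch of the _locked decorator pattern; the threading-based Port class is an assumption for demonstration, not pyOCD's real AccessPort:

    import threading

    def _locked(func):
        # Hold the object's lock for the duration of the wrapped call.
        def _locking(self, *args, **kwargs):
            try:
                self.lock()
                return func(self, *args, **kwargs)
            finally:
                self.unlock()
        return _locking

    class Port:
        # Stand-in class; pyOCD's AccessPort supplies its own lock/unlock.
        def __init__(self):
            self._lock = threading.RLock()

        def lock(self):
            self._lock.acquire()

        def unlock(self):
            self._lock.release()

        @_locked
        def read(self):
            return 42

    print(Port().read())  # 42, computed with the lock held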
SpriteLink/NIPAP
nipap/nipap/authlib.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap/nipap/authlib.py#L105-L119
def _init_backends(self): """ Initialize auth backends. """ # fetch auth backends from config file self._backends = {} for section in self._config.sections(): # does the section define an auth backend? section_components = section.rsplit('.', 1) if section_components[0] == 'auth.backends': auth_backend = section_components[1] self._backends[auth_backend] = eval(self._config.get(section, 'type')) self._logger.debug("Registered auth backends %s" % str(self._backends))
[ "def", "_init_backends", "(", "self", ")", ":", "# fetch auth backends from config file", "self", ".", "_backends", "=", "{", "}", "for", "section", "in", "self", ".", "_config", ".", "sections", "(", ")", ":", "# does the section define an auth backend?", "section_components", "=", "section", ".", "rsplit", "(", "'.'", ",", "1", ")", "if", "section_components", "[", "0", "]", "==", "'auth.backends'", ":", "auth_backend", "=", "section_components", "[", "1", "]", "self", ".", "_backends", "[", "auth_backend", "]", "=", "eval", "(", "self", ".", "_config", ".", "get", "(", "section", ",", "'type'", ")", ")", "self", ".", "_logger", ".", "debug", "(", "\"Registered auth backends %s\"", "%", "str", "(", "self", ".", "_backends", ")", ")" ]
Initialize auth backends.
[ "Initialize", "auth", "backends", "." ]
python
train
38.333333
wummel/linkchecker
linkcheck/ansicolor.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/ansicolor.py#L269-L274
def _write_color (self, text, color=None): """Print text with given color. If color is None, print text as-is.""" if color is None: self.fp.write(text) else: write_color(self.fp, text, color)
[ "def", "_write_color", "(", "self", ",", "text", ",", "color", "=", "None", ")", ":", "if", "color", "is", "None", ":", "self", ".", "fp", ".", "write", "(", "text", ")", "else", ":", "write_color", "(", "self", ".", "fp", ",", "text", ",", "color", ")" ]
Print text with given color. If color is None, print text as-is.
[ "Print", "text", "with", "given", "color", ".", "If", "color", "is", "None", "print", "text", "as", "-", "is", "." ]
python
train
39
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L3690-L3701
def tickets_update_many(self, data, ids=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/tickets#update-many-tickets" api_path = "/api/v2/tickets/update_many.json" api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if ids: api_query.update({ "ids": ids, }) return self.call(api_path, query=api_query, method="PUT", data=data, **kwargs)
[ "def", "tickets_update_many", "(", "self", ",", "data", ",", "ids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/tickets/update_many.json\"", "api_query", "=", "{", "}", "if", "\"query\"", "in", "kwargs", ".", "keys", "(", ")", ":", "api_query", ".", "update", "(", "kwargs", "[", "\"query\"", "]", ")", "del", "kwargs", "[", "\"query\"", "]", "if", "ids", ":", "api_query", ".", "update", "(", "{", "\"ids\"", ":", "ids", ",", "}", ")", "return", "self", ".", "call", "(", "api_path", ",", "query", "=", "api_query", ",", "method", "=", "\"PUT\"", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/tickets#update-many-tickets
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "tickets#update", "-", "many", "-", "tickets" ]
python
train
41.75
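A hedged usage sketch for tickets_update_many; the Zendesk instance, credentials, and ticket ids are placeholders, and the Zendesk(url, email, token, True) constructor follows the zdesk README but is an assumption here:

    from zdesk import Zendesk

    # Placeholder subdomain, email, and API token.
    zd = Zendesk('https://example.zendesk.com', 'agent@example.com',
                 'api_token_here', True)
    payload = {'ticket': {'status': 'solved'}}
    zd.tickets_update_many(data=payload, ids='1,2,3')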
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L30840-L30856
def wait_processed(self, timeout): """Wait until the timeout expires, or this event is processed. Event must be waitable for this operation to have the described semantics; for non-waitable events it returns true immediately. in timeout of type int Maximum time to wait for event processing, in ms; 0 = no wait, -1 = indefinite wait. return result of type bool If this event was processed before timeout. """ if not isinstance(timeout, baseinteger): raise TypeError("timeout can only be an instance of type baseinteger") result = self._call("waitProcessed", in_p=[timeout]) return result
[ "def", "wait_processed", "(", "self", ",", "timeout", ")", ":", "if", "not", "isinstance", "(", "timeout", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"timeout can only be an instance of type baseinteger\"", ")", "result", "=", "self", ".", "_call", "(", "\"waitProcessed\"", ",", "in_p", "=", "[", "timeout", "]", ")", "return", "result" ]
Wait until the timeout expires, or this event is processed. Event must be waitable for this operation to have the described semantics; for non-waitable events it returns true immediately. in timeout of type int Maximum time to wait for event processing, in ms; 0 = no wait, -1 = indefinite wait. return result of type bool If this event was processed before timeout.
[ "Wait", "until", "time", "outs", "or", "this", "event", "is", "processed", ".", "Event", "must", "be", "waitable", "for", "this", "operation", "to", "have", "described", "semantics", "for", "non", "-", "waitable", "returns", "true", "immediately", "." ]
python
train
40.176471
saltstack/salt
salt/engines/libvirt_events.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/libvirt_events.py#L453-L459
def _domain_event_job_completed_cb(conn, domain, params, opaque): ''' Domain job completion events handler ''' _salt_send_domain_event(opaque, conn, domain, opaque['event'], { 'params': params })
[ "def", "_domain_event_job_completed_cb", "(", "conn", ",", "domain", ",", "params", ",", "opaque", ")", ":", "_salt_send_domain_event", "(", "opaque", ",", "conn", ",", "domain", ",", "opaque", "[", "'event'", "]", ",", "{", "'params'", ":", "params", "}", ")" ]
Domain job completion events handler
[ "Domain", "job", "completion", "events", "handler" ]
python
train
31
kyuupichan/aiorpcX
aiorpcx/session.py
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/session.py#L631-L634
async def send_request(self, method, args=()): '''Send an RPC request over the network.''' message, event = self.connection.send_request(Request(method, args)) return await self._send_concurrent(message, event, 1)
[ "async", "def", "send_request", "(", "self", ",", "method", ",", "args", "=", "(", ")", ")", ":", "message", ",", "event", "=", "self", ".", "connection", ".", "send_request", "(", "Request", "(", "method", ",", "args", ")", ")", "return", "await", "self", ".", "_send_concurrent", "(", "message", ",", "event", ",", "1", ")" ]
Send an RPC request over the network.
[ "Send", "an", "RPC", "request", "over", "the", "network", "." ]
python
train
58.5
lsst-epo/vela
astropixie-widgets/astropixie_widgets/visual.py
https://github.com/lsst-epo/vela/blob/8e17ebec509be5c3cc2063f4645dfe9e26b49c18/astropixie-widgets/astropixie_widgets/visual.py#L164-L172
def hr_diagram(cluster_name, output=None): """Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R diagram using the cluster_name; then show it. """ cluster = get_hr_data(cluster_name) pf = hr_diagram_figure(cluster) show_with_bokeh_server(pf)
[ "def", "hr_diagram", "(", "cluster_name", ",", "output", "=", "None", ")", ":", "cluster", "=", "get_hr_data", "(", "cluster_name", ")", "pf", "=", "hr_diagram_figure", "(", "cluster", ")", "show_with_bokeh_server", "(", "pf", ")" ]
Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R diagram using the cluster_name; then show it.
[ "Create", "a", ":", "class", ":", "~bokeh", ".", "plotting", ".", "figure", ".", "Figure", "to", "create", "an", "H", "-", "R", "diagram", "using", "the", "cluster_name", ";", "then", "show", "it", "." ]
python
valid
31
aws/chalice
chalice/awsclient.py
https://github.com/aws/chalice/blob/10d7fb52e68bd1c52aae251c97e3939fc0190412/chalice/awsclient.py#L515-L528
def get_function_policy(self, function_name): # type: (str) -> Dict[str, Any] """Return the function policy for a lambda function. This function will extract the policy string as a json document and return the json.loads(...) version of the policy. """ client = self._client('lambda') try: policy = client.get_policy(FunctionName=function_name) return json.loads(policy['Policy']) except client.exceptions.ResourceNotFoundException: return {'Statement': []}
[ "def", "get_function_policy", "(", "self", ",", "function_name", ")", ":", "# type: (str) -> Dict[str, Any]", "client", "=", "self", ".", "_client", "(", "'lambda'", ")", "try", ":", "policy", "=", "client", ".", "get_policy", "(", "FunctionName", "=", "function_name", ")", "return", "json", ".", "loads", "(", "policy", "[", "'Policy'", "]", ")", "except", "client", ".", "exceptions", ".", "ResourceNotFoundException", ":", "return", "{", "'Statement'", ":", "[", "]", "}" ]
Return the function policy for a lambda function. This function will extract the policy string as a json document and return the json.loads(...) version of the policy.
[ "Return", "the", "function", "policy", "for", "a", "lambda", "function", "." ]
python
train
39
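A hedged sketch of calling get_function_policy; TypedAWSClient is the class this method lives on in chalice, the botocore session argument is an assumption, and the Lambda function name is made up:

    import botocore.session
    from chalice.awsclient import TypedAWSClient

    # Constructor argument is an assumption about chalice's client wrapper.
    client = TypedAWSClient(botocore.session.get_session())
    policy = client.get_function_policy('my-lambda-function')
    print(policy.get('Statement', []))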
mongodb/mongo-python-driver
pymongo/collection.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/collection.py#L1847-L1875
def __create_index(self, keys, index_options, session, **kwargs): """Internal create index helper. :Parameters: - `keys`: a list of tuples [(key, type), (key, type), ...] - `index_options`: a dict of index options. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. """ index_doc = helpers._index_document(keys) index = {"key": index_doc} collation = validate_collation_or_none( index_options.pop('collation', None)) index.update(index_options) with self._socket_for_writes(session) as sock_info: if collation is not None: if sock_info.max_wire_version < 5: raise ConfigurationError( 'Must be connected to MongoDB 3.4+ to use collations.') else: index['collation'] = collation cmd = SON([('createIndexes', self.name), ('indexes', [index])]) cmd.update(kwargs) self._command( sock_info, cmd, read_preference=ReadPreference.PRIMARY, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, write_concern=self._write_concern_for(session), session=session)
[ "def", "__create_index", "(", "self", ",", "keys", ",", "index_options", ",", "session", ",", "*", "*", "kwargs", ")", ":", "index_doc", "=", "helpers", ".", "_index_document", "(", "keys", ")", "index", "=", "{", "\"key\"", ":", "index_doc", "}", "collation", "=", "validate_collation_or_none", "(", "index_options", ".", "pop", "(", "'collation'", ",", "None", ")", ")", "index", ".", "update", "(", "index_options", ")", "with", "self", ".", "_socket_for_writes", "(", "session", ")", "as", "sock_info", ":", "if", "collation", "is", "not", "None", ":", "if", "sock_info", ".", "max_wire_version", "<", "5", ":", "raise", "ConfigurationError", "(", "'Must be connected to MongoDB 3.4+ to use collations.'", ")", "else", ":", "index", "[", "'collation'", "]", "=", "collation", "cmd", "=", "SON", "(", "[", "(", "'createIndexes'", ",", "self", ".", "name", ")", ",", "(", "'indexes'", ",", "[", "index", "]", ")", "]", ")", "cmd", ".", "update", "(", "kwargs", ")", "self", ".", "_command", "(", "sock_info", ",", "cmd", ",", "read_preference", "=", "ReadPreference", ".", "PRIMARY", ",", "codec_options", "=", "_UNICODE_REPLACE_CODEC_OPTIONS", ",", "write_concern", "=", "self", ".", "_write_concern_for", "(", "session", ")", ",", "session", "=", "session", ")" ]
Internal create index helper. :Parameters: - `keys`: a list of tuples [(key, type), (key, type), ...] - `index_options`: a dict of index options. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`.
[ "Internal", "create", "index", "helper", "." ]
python
train
43.586207
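__create_index is a private helper; a sketch of the public pymongo entry point that feeds it, assuming a mongod listening on localhost:

from pymongo import MongoClient, ASCENDING

coll = MongoClient('mongodb://localhost:27017')['test']['events']
# create_index() assembles the same [(key, direction), ...] list that
# the private helper above turns into a createIndexes command document.
coll.create_index([('user_id', ASCENDING)])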
haifengat/hf_ctp_py_proxy
py_ctp/trade.py
https://github.com/haifengat/hf_ctp_py_proxy/blob/c2dc6dbde45aa6b097f75380474e91510d3f5d12/py_ctp/trade.py#L515-L533
def ReqOrderAction(self, OrderID: str):
    """Cancel an order (撤单).

    :param OrderID:
    """
    of = self.orders[OrderID]
    if not of:
        return -1
    else:
        pOrderId = of.OrderID
        return self.t.ReqOrderAction(
            self.broker,
            self.investor,
            OrderRef=pOrderId.split('|')[2],
            FrontID=int(pOrderId.split('|')[1]),
            SessionID=int(pOrderId.split('|')[0]),
            InstrumentID=of.InstrumentID,
            ActionFlag=TThostFtdcActionFlagType.THOST_FTDC_AF_Delete)
[ "def", "ReqOrderAction", "(", "self", ",", "OrderID", ":", "str", ")", ":", "of", "=", "self", ".", "orders", "[", "OrderID", "]", "if", "not", "of", ":", "return", "-", "1", "else", ":", "pOrderId", "=", "of", ".", "OrderID", "return", "self", ".", "t", ".", "ReqOrderAction", "(", "self", ".", "broker", ",", "self", ".", "investor", ",", "OrderRef", "=", "pOrderId", ".", "split", "(", "'|'", ")", "[", "2", "]", ",", "FrontID", "=", "int", "(", "pOrderId", ".", "split", "(", "'|'", ")", "[", "1", "]", ")", ",", "SessionID", "=", "int", "(", "pOrderId", ".", "split", "(", "'|'", ")", "[", "0", "]", ")", ",", "InstrumentID", "=", "of", ".", "InstrumentID", ",", "ActionFlag", "=", "TThostFtdcActionFlagType", ".", "THOST_FTDC_AF_Delete", ")" ]
Cancel an order (撤单).

:param OrderID:
[ "Cancel", "an", "order", "(", "撤单", ")", "." ]
python
train
30.263158
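ReqOrderAction above unpacks the stored order id by splitting on '|' in the order SessionID|FrontID|OrderRef; a standalone sketch of just that parsing step, with a made-up id:

# Hypothetical composite id in the "SessionID|FrontID|OrderRef" layout
# the method expects (indices 0, 1 and 2 after the split).
p_order_id = '12345|1|000007'
session_id, front_id, order_ref = p_order_id.split('|')
print(int(session_id), int(front_id), order_ref)  # 12345 1 000007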
sorgerlab/indra
indra/tools/assemble_corpus.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L1042-L1097
def filter_human_only(stmts_in, **kwargs): """Filter out statements that are grounded, but not to a human gene. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes all bound conditions that are grounded but not to human genes. If false (default), filters out statements with boundary conditions that are grounded to non-human genes. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ from indra.databases import uniprot_client if 'remove_bound' in kwargs and kwargs['remove_bound']: remove_bound = True else: remove_bound = False dump_pkl = kwargs.get('save') logger.info('Filtering %d statements for human genes only...' % len(stmts_in)) stmts_out = [] def criterion(agent): upid = agent.db_refs.get('UP') if upid and not uniprot_client.is_human(upid): return False else: return True for st in stmts_in: human_genes = True for agent in st.agent_list(): if agent is not None: if not criterion(agent): human_genes = False break if remove_bound: _remove_bound_conditions(agent, criterion) elif _any_bound_condition_fails_criterion(agent, criterion): human_genes = False break if human_genes: stmts_out.append(st) logger.info('%d statements after filter...' % len(stmts_out)) if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_human_only", "(", "stmts_in", ",", "*", "*", "kwargs", ")", ":", "from", "indra", ".", "databases", "import", "uniprot_client", "if", "'remove_bound'", "in", "kwargs", "and", "kwargs", "[", "'remove_bound'", "]", ":", "remove_bound", "=", "True", "else", ":", "remove_bound", "=", "False", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "logger", ".", "info", "(", "'Filtering %d statements for human genes only...'", "%", "len", "(", "stmts_in", ")", ")", "stmts_out", "=", "[", "]", "def", "criterion", "(", "agent", ")", ":", "upid", "=", "agent", ".", "db_refs", ".", "get", "(", "'UP'", ")", "if", "upid", "and", "not", "uniprot_client", ".", "is_human", "(", "upid", ")", ":", "return", "False", "else", ":", "return", "True", "for", "st", "in", "stmts_in", ":", "human_genes", "=", "True", "for", "agent", "in", "st", ".", "agent_list", "(", ")", ":", "if", "agent", "is", "not", "None", ":", "if", "not", "criterion", "(", "agent", ")", ":", "human_genes", "=", "False", "break", "if", "remove_bound", ":", "_remove_bound_conditions", "(", "agent", ",", "criterion", ")", "elif", "_any_bound_condition_fails_criterion", "(", "agent", ",", "criterion", ")", ":", "human_genes", "=", "False", "break", "if", "human_genes", ":", "stmts_out", ".", "append", "(", "st", ")", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter out statements that are grounded, but not to a human gene. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. remove_bound: Optional[bool] If true, removes all bound conditions that are grounded but not to human genes. If false (default), filters out statements with boundary conditions that are grounded to non-human genes. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
[ "Filter", "out", "statements", "that", "are", "grounded", "but", "not", "to", "a", "human", "gene", "." ]
python
train
32.642857
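A hedged usage sketch for filter_human_only above, assuming indra and its UniProt resources are installed; Q02750 and P28482 are the human UniProt ids for MAP2K1 and MAPK1:

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

mek = Agent('MAP2K1', db_refs={'UP': 'Q02750'})  # human UniProt id
erk = Agent('MAPK1', db_refs={'UP': 'P28482'})   # human UniProt id
stmts = [Phosphorylation(mek, erk)]
# Both agents ground to human genes, so the statement survives the filter.
print(ac.filter_human_only(stmts))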
TurboGears/backlash
backlash/utils.py
https://github.com/TurboGears/backlash/blob/b8c73a6c8a203843f5a52c43b858ae5907fb2a4f/backlash/utils.py#L8-L32
def escape(s, quote=False):
    """Replace special characters "&", "<" and ">" with HTML-safe sequences.
    If the optional flag `quote` is `True`, the quotation mark
    character is also translated.

    There is a special handling for `None` which escapes to an empty
    string.

    :param s: the string to escape.
    :param quote: set to true to also escape double quotes.
    """
    if s is None:
        return ''

    if hasattr(s, '__html__'):
        return s.__html__()

    if not isinstance(s, (text_type, binary_type)):
        s = text_type(s)

    if isinstance(s, binary_type):
        try:
            s.decode('ascii')
        except:
            s = s.decode('utf-8', 'replace')

    s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    if quote:
        s = s.replace('"', "&quot;")
    return s
[ "def", "escape", "(", "s", ",", "quote", "=", "False", ")", ":", "if", "s", "is", "None", ":", "return", "''", "if", "hasattr", "(", "s", ",", "'__html__'", ")", ":", "return", "s", ".", "__html__", "(", ")", "if", "not", "isinstance", "(", "s", ",", "(", "text_type", ",", "binary_type", ")", ")", ":", "s", "=", "text_type", "(", "s", ")", "if", "isinstance", "(", "s", ",", "binary_type", ")", ":", "try", ":", "s", ".", "decode", "(", "'ascii'", ")", "except", ":", "s", "=", "s", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", "s", "=", "s", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", ".", "replace", "(", "'>'", ",", "'&gt;'", ")", "if", "quote", ":", "s", "=", "s", ".", "replace", "(", "'\"'", ",", "\"&quot;\"", ")", "return", "s" ]
Replace special characters "&", "<" and ">" with HTML-safe sequences. If the optional flag `quote` is `True`, the quotation mark character is also translated.

There is a special handling for `None` which escapes to an empty string.

:param s: the string to escape. :param quote: set to true to also escape double quotes.
[ "Replace", "special", "characters", "&", "<", "and", ">", "with", "HTML", "-", "safe", "sequences", ".", "If", "the", "optional", "flag", "quote", "is", "True", "the", "quotation", "mark", "character", "is", "also", "translated", "." ]
python
train
32.32
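A quick behavioral sketch for escape() above; note that '&' is replaced first, so already-escaped input would be double-escaped:

from backlash.utils import escape

print(escape('<b>"Tom" & Jerry</b>', quote=True))
# -> &lt;b&gt;&quot;Tom&quot; &amp; Jerry&lt;/b&gt;
print(repr(escape(None)))  # -> '' (None is special-cased)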
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L402-L410
def ProbGreater(self, x): """Probability that a sample from this Pmf exceeds x. x: number returns: float probability """ t = [prob for (val, prob) in self.d.iteritems() if val > x] return sum(t)
[ "def", "ProbGreater", "(", "self", ",", "x", ")", ":", "t", "=", "[", "prob", "for", "(", "val", ",", "prob", ")", "in", "self", ".", "d", ".", "iteritems", "(", ")", "if", "val", ">", "x", "]", "return", "sum", "(", "t", ")" ]
Probability that a sample from this Pmf exceeds x. x: number returns: float probability
[ "Probability", "that", "a", "sample", "from", "this", "Pmf", "exceeds", "x", "." ]
python
train
26.222222
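A worked sketch for ProbGreater above, run under Python 2 (the iteritems call is py2-only) and assuming the Pmf.Set API this vendored ThinkBayes module provides:

from seqcluster.libs.thinkbayes import Pmf

pmf = Pmf()
for face in range(1, 7):
    pmf.Set(face, 1 / 6.0)  # fair six-sided die
# P(X > 4) sums the mass on 5 and 6: 2/6, about 0.333
print(pmf.ProbGreater(4))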
saltstack/salt
salt/proxy/bluecoat_sslv.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/bluecoat_sslv.py#L201-L213
def logout(session, cookies, csrf_token): ''' Closes the session with the device. ''' payload = {"jsonrpc": "2.0", "id": "ID0", "method": "logout", "params": [] } session.post(DETAILS['url'], data=json.dumps(payload), cookies=cookies, headers={'X-CSRF-Token': csrf_token})
[ "def", "logout", "(", "session", ",", "cookies", ",", "csrf_token", ")", ":", "payload", "=", "{", "\"jsonrpc\"", ":", "\"2.0\"", ",", "\"id\"", ":", "\"ID0\"", ",", "\"method\"", ":", "\"logout\"", ",", "\"params\"", ":", "[", "]", "}", "session", ".", "post", "(", "DETAILS", "[", "'url'", "]", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ",", "cookies", "=", "cookies", ",", "headers", "=", "{", "'X-CSRF-Token'", ":", "csrf_token", "}", ")" ]
Closes the session with the device.
[ "Closes", "the", "session", "with", "the", "device", "." ]
python
train
30.076923
DataKitchen/DKCloudCommand
DKCloudCommand/modules/DKCloudAPIMock.py
https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/modules/DKCloudAPIMock.py#L37-L48
def delete_orderrun(self, orderrun_id):
    """
    :param self: self
    :param orderrun_id: string; 'good' returns a success code, anything else returns a failure code
    :rtype: DKReturnCode
    """
    rc = DKReturnCode()
    if orderrun_id == 'good':
        rc.set(rc.DK_SUCCESS, None, None)
    else:
        rc.set(rc.DK_FAIL, 'ServingDeleteV2: unable to delete OrderRun')
    return rc
[ "def", "delete_orderrun", "(", "self", ",", "orderrun_id", ")", ":", "rc", "=", "DKReturnCode", "(", ")", "if", "orderrun_id", "==", "'good'", ":", "rc", ".", "set", "(", "rc", ".", "DK_SUCCESS", ",", "None", ",", "None", ")", "else", ":", "rc", ".", "set", "(", "rc", ".", "DK_FAIL", ",", "'ServingDeleteV2: unable to delete OrderRun'", ")", "return", "rc" ]
:param self: self
:param orderrun_id: string; 'good' returns a success code, anything else returns a failure code
:rtype: DKReturnCode
[ ":", "param", "self", ":", "self", ":", "param", "orderrun_id", ":", "string", ";", "good", "returns", "a", "success", "code", "anything", "else", "returns", "a", "failure", "code", ":", "rtype", ":", "DKReturnCode" ]
python
train
34.583333
pyvisa/pyvisa
pyvisa/ctwrapper/functions.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L1336-L1357
def parse_resource(library, session, resource_name): """Parse a resource string to get the interface information. Corresponds to viParseRsrc function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Resource Manager session (should always be the Default Resource Manager for VISA returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :return: Resource information with interface type and board number, return value of the library call. :rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode` """ interface_type = ViUInt16() interface_board_number = ViUInt16() # [ViSession, ViRsrc, ViPUInt16, ViPUInt16] # ViRsrc converts from (str, unicode, bytes) to bytes ret = library.viParseRsrc(session, resource_name, byref(interface_type), byref(interface_board_number)) return ResourceInfo(constants.InterfaceType(interface_type.value), interface_board_number.value, None, None, None), ret
[ "def", "parse_resource", "(", "library", ",", "session", ",", "resource_name", ")", ":", "interface_type", "=", "ViUInt16", "(", ")", "interface_board_number", "=", "ViUInt16", "(", ")", "# [ViSession, ViRsrc, ViPUInt16, ViPUInt16]", "# ViRsrc converts from (str, unicode, bytes) to bytes", "ret", "=", "library", ".", "viParseRsrc", "(", "session", ",", "resource_name", ",", "byref", "(", "interface_type", ")", ",", "byref", "(", "interface_board_number", ")", ")", "return", "ResourceInfo", "(", "constants", ".", "InterfaceType", "(", "interface_type", ".", "value", ")", ",", "interface_board_number", ".", "value", ",", "None", ",", "None", ",", "None", ")", ",", "ret" ]
Parse a resource string to get the interface information. Corresponds to viParseRsrc function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Resource Manager session (should always be the Default Resource Manager for VISA returned from open_default_resource_manager()). :param resource_name: Unique symbolic name of a resource. :return: Resource information with interface type and board number, return value of the library call. :rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
[ "Parse", "a", "resource", "string", "to", "get", "the", "interface", "information", "." ]
python
train
51.727273
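A hedged sketch of reaching parse_resource through pyvisa's high-level wrapper, assuming an installed VISA backend; the resource string is illustrative:

import pyvisa

rm = pyvisa.ResourceManager()
info, status = rm.visalib.parse_resource(rm.session, b'GPIB0::12::INSTR')
# ResourceInfo carries the decoded interface type and board number.
print(info.interface_type, info.interface_board_number)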
cyface/django-termsandconditions
termsandconditions/views.py
https://github.com/cyface/django-termsandconditions/blob/e18f06d0bad1e047f99222d1153f6e2b3bd5224f/termsandconditions/views.py#L28-L41
def get_terms(self, kwargs): """Checks URL parameters for slug and/or version to pull the right TermsAndConditions object""" slug = kwargs.get("slug") version = kwargs.get("version") if slug and version: terms = [TermsAndConditions.objects.filter(slug=slug, version_number=version).latest('date_active')] elif slug: terms = [TermsAndConditions.get_active(slug)] else: # Return a list of not agreed to terms for the current user for the list view terms = TermsAndConditions.get_active_terms_not_agreed_to(self.request.user) return terms
[ "def", "get_terms", "(", "self", ",", "kwargs", ")", ":", "slug", "=", "kwargs", ".", "get", "(", "\"slug\"", ")", "version", "=", "kwargs", ".", "get", "(", "\"version\"", ")", "if", "slug", "and", "version", ":", "terms", "=", "[", "TermsAndConditions", ".", "objects", ".", "filter", "(", "slug", "=", "slug", ",", "version_number", "=", "version", ")", ".", "latest", "(", "'date_active'", ")", "]", "elif", "slug", ":", "terms", "=", "[", "TermsAndConditions", ".", "get_active", "(", "slug", ")", "]", "else", ":", "# Return a list of not agreed to terms for the current user for the list view", "terms", "=", "TermsAndConditions", ".", "get_active_terms_not_agreed_to", "(", "self", ".", "request", ".", "user", ")", "return", "terms" ]
Checks URL parameters for slug and/or version to pull the right TermsAndConditions object
[ "Checks", "URL", "parameters", "for", "slug", "and", "/", "or", "version", "to", "pull", "the", "right", "TermsAndConditions", "object" ]
python
train
44.857143
kislyuk/aegea
aegea/packages/github3/issues/issue.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/issues/issue.py#L224-L233
def iter_events(self, number=-1): """Iterate over events associated with this issue only. :param int number: (optional), number of events to return. Default: -1 returns all events available. :returns: generator of :class:`IssueEvent <github3.issues.event.IssueEvent>`\ s """ url = self._build_url('events', base_url=self._api) return self._iter(int(number), url, IssueEvent)
[ "def", "iter_events", "(", "self", ",", "number", "=", "-", "1", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'events'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "IssueEvent", ")" ]
Iterate over events associated with this issue only. :param int number: (optional), number of events to return. Default: -1 returns all events available. :returns: generator of :class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
[ "Iterate", "over", "events", "associated", "with", "this", "issue", "only", "." ]
python
train
43.8
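A hedged usage sketch against the vintage github3.py API that aegea vendors (later github3.py versions renamed these iterators); the token is a placeholder:

from github3 import login

gh = login(token='<personal-access-token>')  # placeholder credential
issue = gh.issue('kislyuk', 'aegea', 1)
for event in issue.iter_events(number=5):    # at most five events
    print(event.event)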
vertical-knowledge/ripozo-sqlalchemy
ripozo_sqlalchemy/alchemymanager.py
https://github.com/vertical-knowledge/ripozo-sqlalchemy/blob/4bcc57ec6db1b39b84b50553bb264e4950ce4ec2/ripozo_sqlalchemy/alchemymanager.py#L316-L333
def _set_values_on_model(self, model, values, fields=None):
    """
    Updates the model with the specified values.

    :param Model model: The sqlalchemy model instance
    :param dict values: The dictionary of attributes and the values to set.
    :param list fields: A list of strings indicating the valid fields.
        Defaults to self.fields.
    :return: The model with the updated values
    :rtype: Model
    """
    fields = fields or self.fields
    for name, val in six.iteritems(values):
        if name not in fields:
            continue
        setattr(model, name, val)
    return model
[ "def", "_set_values_on_model", "(", "self", ",", "model", ",", "values", ",", "fields", "=", "None", ")", ":", "fields", "=", "fields", "or", "self", ".", "fields", "for", "name", ",", "val", "in", "six", ".", "iteritems", "(", "values", ")", ":", "if", "name", "not", "in", "fields", ":", "continue", "setattr", "(", "model", ",", "name", ",", "val", ")", "return", "model" ]
Updates the model with the specified values.

:param Model model: The sqlalchemy model instance
:param dict values: The dictionary of attributes and the values to set.
:param list fields: A list of strings indicating the valid fields.
    Defaults to self.fields.
:return: The model with the updated values
:rtype: Model
[ "Updates", "the", "model", "with", "the", "specified", "values", "." ]
python
train
36.388889
Nic30/hwtGraph
hwtGraph/elk/fromHwt/statementRenderer.py
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/statementRenderer.py#L42-L67
def detectRamPorts(stm: IfContainer, current_en: RtlSignalBase):
    """
    Detect RAM ports in If statement

    :param stm: statement to detect the ram ports in
    :param current_en: current en/clk signal
    """
    if stm.ifFalse or stm.elIfs:
        return
    for _stm in stm.ifTrue:
        if isinstance(_stm, IfContainer):
            yield from detectRamPorts(_stm, _stm.cond & current_en)
        elif isinstance(_stm, Assignment):
            if isinstance(_stm.dst._dtype, HArray):
                assert len(_stm.indexes) == 1, "one address per RAM port"
                w_addr = _stm.indexes[0]
                mem = _stm.dst
                yield (RAM_WRITE, mem, w_addr, current_en, _stm.src)
            elif _stm.src.hidden and len(_stm.src.drivers) == 1:
                op = _stm.src.drivers[0]
                mem = op.operands[0]
                if isinstance(mem._dtype, HArray) and op.operator == AllOps.INDEX:
                    r_addr = op.operands[1]
                    if _stm.indexes:
                        raise NotImplementedError()
                    yield (RAM_READ, mem, r_addr, current_en, _stm.dst)
[ "def", "detectRamPorts", "(", "stm", ":", "IfContainer", ",", "current_en", ":", "RtlSignalBase", ")", ":", "if", "stm", ".", "ifFalse", "or", "stm", ".", "elIfs", ":", "return", "for", "_stm", "in", "stm", ".", "ifTrue", ":", "if", "isinstance", "(", "_stm", ",", "IfContainer", ")", ":", "yield", "from", "detectRamPorts", "(", "_stm", ",", "_stm", ".", "cond", "&", "current_en", ")", "elif", "isinstance", "(", "_stm", ",", "Assignment", ")", ":", "if", "isinstance", "(", "_stm", ".", "dst", ".", "_dtype", ",", "HArray", ")", ":", "assert", "len", "(", "_stm", ".", "indexes", ")", "==", "1", ",", "\"one address per RAM port\"", "w_addr", "=", "_stm", ".", "indexes", "[", "0", "]", "mem", "=", "_stm", ".", "dst", "yield", "(", "RAM_WRITE", ",", "mem", ",", "w_addr", ",", "current_en", ",", "_stm", ".", "src", ")", "elif", "_stm", ".", "src", ".", "hidden", "and", "len", "(", "_stm", ".", "src", ".", "drivers", ")", "==", "1", ":", "op", "=", "_stm", ".", "src", ".", "drivers", "[", "0", "]", "mem", "=", "op", ".", "operands", "[", "0", "]", "if", "isinstance", "(", "mem", ".", "_dtype", ",", "HArray", ")", "and", "op", ".", "operator", "==", "AllOps", ".", "INDEX", ":", "r_addr", "=", "op", ".", "operands", "[", "1", "]", "if", "_stm", ".", "indexes", ":", "raise", "NotImplementedError", "(", ")", "yield", "(", "RAM_READ", ",", "mem", ",", "r_addr", ",", "current_en", ",", "_stm", ".", "dst", ")" ]
Detect RAM ports in If statement

:param stm: statement to detect the ram ports in
:param current_en: current en/clk signal
[ "Detect", "RAM", "ports", "in", "If", "statement" ]
python
train
42.961538
bcbio/bcbio-nextgen
bcbio/qc/qualimap.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/qc/qualimap.py#L141-L149
def _parse_qualimap_coverage(table): """Parse summary qualimap coverage metrics. """ out = {} for row in table.find_all("tr"): col, val = [x.text for x in row.find_all("td")] if col == "Mean": out["Coverage (Mean)"] = val return out
[ "def", "_parse_qualimap_coverage", "(", "table", ")", ":", "out", "=", "{", "}", "for", "row", "in", "table", ".", "find_all", "(", "\"tr\"", ")", ":", "col", ",", "val", "=", "[", "x", ".", "text", "for", "x", "in", "row", ".", "find_all", "(", "\"td\"", ")", "]", "if", "col", "==", "\"Mean\"", ":", "out", "[", "\"Coverage (Mean)\"", "]", "=", "val", "return", "out" ]
Parse summary qualimap coverage metrics.
[ "Parse", "summary", "qualimap", "coverage", "metrics", "." ]
python
train
30.222222
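_parse_qualimap_coverage above expects a BeautifulSoup table element; a standalone re-run of the same parsing logic with bs4 and a minimal stand-in for qualimap's HTML:

from bs4 import BeautifulSoup

html = "<table><tr><td>Mean</td><td>48.6X</td></tr></table>"
table = BeautifulSoup(html, "html.parser").find("table")
out = {}
for row in table.find_all("tr"):
    col, val = [x.text for x in row.find_all("td")]
    if col == "Mean":
        out["Coverage (Mean)"] = val
print(out)  # {'Coverage (Mean)': '48.6X'}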
Nixiware/viper
nx/viper/application.py
https://github.com/Nixiware/viper/blob/fbe6057facd8d46103e9955880dfd99e63b7acb3/nx/viper/application.py#L155-L171
def addModel(self, moduleName, modelName, model): """ Add a model instance to the application model pool. :param moduleName: <str> module name in which the model is located :param modelName: <str> model name :param model: <object> model instance :return: <void> """ modelIdentifier = "{}.{}".format(moduleName, modelName) if modelIdentifier not in self._models: self._models[modelIdentifier] = model else: message = "Application - addModel() - " \ "A model with the identifier {} already exists." \ .format(modelIdentifier) raise Exception(message)
[ "def", "addModel", "(", "self", ",", "moduleName", ",", "modelName", ",", "model", ")", ":", "modelIdentifier", "=", "\"{}.{}\"", ".", "format", "(", "moduleName", ",", "modelName", ")", "if", "modelIdentifier", "not", "in", "self", ".", "_models", ":", "self", ".", "_models", "[", "modelIdentifier", "]", "=", "model", "else", ":", "message", "=", "\"Application - addModel() - \"", "\"A model with the identifier {} already exists.\"", ".", "format", "(", "modelIdentifier", ")", "raise", "Exception", "(", "message", ")" ]
Add a model instance to the application model pool. :param moduleName: <str> module name in which the model is located :param modelName: <str> model name :param model: <object> model instance :return: <void>
[ "Add", "a", "model", "instance", "to", "the", "application", "model", "pool", "." ]
python
train
40.764706
RiotGames/cloud-inquisitor
plugins/public/cinq-auditor-cloudtrail/cinq_auditor_cloudtrail/__init__.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-auditor-cloudtrail/cinq_auditor_cloudtrail/__init__.py#L132-L210
def run(self):
    """Configures and enables a CloudTrail trail and logging on a single AWS Account.

    Has the capability to create both single region and multi-region trails. Will automatically create SNS topics,
    subscribe to SQS queues and turn on logging for the account in question, as well as reverting any manual changes
    to the trails if applicable.

    Returns:
        None
    """
    for aws_region in AWS_REGIONS:
        self.log.debug('Checking trails for {}/{}'.format(
            self.account.account_name,
            aws_region
        ))

        ct = self.session.client('cloudtrail', region_name=aws_region)
        trails = ct.describe_trails()

        if len(trails['trailList']) == 0:
            if aws_region == self.global_ct_region:
                self.create_cloudtrail(aws_region)
        else:
            for trail in trails['trailList']:
                if trail['Name'] in ('Default', self.trail_name):
                    if not trail['IsMultiRegionTrail']:
                        if trail['Name'] == self.trail_name and self.global_ct_region == aws_region:
                            ct.update_trail(
                                Name=trail['Name'],
                                IncludeGlobalServiceEvents=True,
                                IsMultiRegionTrail=True
                            )
                            auditlog(
                                event='cloudtrail.update_trail',
                                actor=self.ns,
                                data={
                                    'trailName': trail['Name'],
                                    'account': self.account.account_name,
                                    'region': aws_region,
                                    'changes': [
                                        {
                                            'setting': 'IsMultiRegionTrail',
                                            'oldValue': False,
                                            'newValue': True
                                        }
                                    ]
                                }
                            )
                        else:
                            ct.delete_trail(Name=trail['Name'])
                            auditlog(
                                event='cloudtrail.delete_trail',
                                actor=self.ns,
                                data={
                                    'trailName': trail['Name'],
                                    'account': self.account.account_name,
                                    'region': aws_region,
                                    'reason': 'Incorrect region, name or not multi-regional'
                                }
                            )
                else:
                    if trail['HomeRegion'] == aws_region:
                        if self.global_ct_region != aws_region or trail['Name'] == 'Default':
                            ct.delete_trail(Name=trail['Name'])
                            auditlog(
                                event='cloudtrail.delete_trail',
                                actor=self.ns,
                                data={
                                    'trailName': trail['Name'],
                                    'account': self.account.account_name,
                                    'region': aws_region,
                                    'reason': 'Incorrect name or region for multi-region trail'
                                }
                            )

        trails = ct.describe_trails()
        for trail in trails['trailList']:
            if trail['Name'] == self.trail_name and trail['HomeRegion'] == aws_region:
                self.validate_trail_settings(ct, aws_region, trail)
[ "def", "run", "(", "self", ")", ":", "for", "aws_region", "in", "AWS_REGIONS", ":", "self", ".", "log", ".", "debug", "(", "'Checking trails for {}/{}'", ".", "format", "(", "self", ".", "account", ".", "account_name", ",", "aws_region", ")", ")", "ct", "=", "self", ".", "session", ".", "client", "(", "'cloudtrail'", ",", "region_name", "=", "aws_region", ")", "trails", "=", "ct", ".", "describe_trails", "(", ")", "if", "len", "(", "trails", "[", "'trailList'", "]", ")", "==", "0", ":", "if", "aws_region", "==", "self", ".", "global_ct_region", ":", "self", ".", "create_cloudtrail", "(", "aws_region", ")", "else", ":", "for", "trail", "in", "trails", "[", "'trailList'", "]", ":", "if", "trail", "[", "'Name'", "]", "in", "(", "'Default'", ",", "self", ".", "trail_name", ")", ":", "if", "not", "trail", "[", "'IsMultiRegionTrail'", "]", ":", "if", "trail", "[", "'Name'", "]", "==", "self", ".", "trail_name", "and", "self", ".", "global_ct_region", "==", "aws_region", ":", "ct", ".", "update_trail", "(", "Name", "=", "trail", "[", "'Name'", "]", ",", "IncludeGlobalServiceEvents", "=", "True", ",", "IsMultiRegionTrail", "=", "True", ")", "auditlog", "(", "event", "=", "'cloudtrail.update_trail'", ",", "actor", "=", "self", ".", "ns", ",", "data", "=", "{", "'trailName'", ":", "trail", "[", "'Name'", "]", ",", "'account'", ":", "self", ".", "account", ".", "account_name", ",", "'region'", ":", "aws_region", ",", "'changes'", ":", "[", "{", "'setting'", ":", "'IsMultiRegionTrail'", ",", "'oldValue'", ":", "False", ",", "'newValue'", ":", "True", "}", "]", "}", ")", "else", ":", "ct", ".", "delete_trail", "(", "name", "=", "trail", "[", "'Name'", "]", ")", "auditlog", "(", "event", "=", "'cloudtrail.delete_trail'", ",", "actor", "=", "self", ".", "ns", ",", "data", "=", "{", "'trailName'", ":", "trail", "[", "'Name'", "]", ",", "'account'", ":", "self", ".", "account", ".", "account_name", ",", "'region'", ":", "aws_region", ",", "'reason'", ":", "'Incorrect region, name or not multi-regional'", "}", ")", "else", ":", "if", "trail", "[", "'HomeRegion'", "]", "==", "aws_region", ":", "if", "self", ".", "global_ct_region", "!=", "aws_region", "or", "trail", "[", "'Name'", "]", "==", "'Default'", ":", "ct", ".", "delete_trail", "(", "Name", "=", "trail", "[", "'Name'", "]", ")", "auditlog", "(", "event", "=", "'cloudtrail.delete_trail'", ",", "actor", "=", "self", ".", "ns", ",", "data", "=", "{", "'trailName'", ":", "trail", "[", "'Name'", "]", ",", "'account'", ":", "self", ".", "account", ".", "account_name", ",", "'region'", ":", "aws_region", ",", "'reason'", ":", "'Incorrect name or region for multi-region trail'", "}", ")", "trails", "=", "ct", ".", "describe_trails", "(", ")", "for", "trail", "in", "trails", "[", "'trailList'", "]", ":", "if", "trail", "[", "'Name'", "]", "==", "self", ".", "trail_name", "and", "trail", "[", "'HomeRegion'", "]", "==", "aws_region", ":", "self", ".", "validate_trail_settings", "(", "ct", ",", "aws_region", ",", "trail", ")" ]
Configures and enables a CloudTrail trail and logging on a single AWS Account. Has the capability to create both single region and multi-region trails. Will automatically create SNS topics, subscribe to SQS queues and turn on logging for the account in question, as well as reverting any manual changes to the trails if applicable. Returns: None
[ "Configures", "and", "enables", "a", "CloudTrail", "trail", "and", "logging", "on", "a", "single", "AWS", "Account", "." ]
python
train
51.911392
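A hedged sketch of the boto3 surface the auditor above drives, assuming AWS credentials are configured; the region is illustrative:

import boto3

ct = boto3.client('cloudtrail', region_name='us-east-1')
# The auditor keys its create/update/delete decisions on these fields.
for trail in ct.describe_trails()['trailList']:
    print(trail['Name'], trail['HomeRegion'], trail['IsMultiRegionTrail'])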
adamziel/python_translate
python_translate/glue.py
https://github.com/adamziel/python_translate/blob/0aee83f434bd2d1b95767bcd63adb7ac7036c7df/python_translate/glue.py#L100-L123
def write_translations(self, catalogue, format, options={}):
    """
    Writes translation from the catalogue according to the selected format.

    @type catalogue: MessageCatalogue
    @param catalogue: The message catalogue to dump

    @type format: string
    @param format: The format to use to dump the messages

    @type options: dict
    @param options: Options that are passed to the dumper

    @raises: ValueError
    """
    if format not in self.dumpers:
        raise ValueError(
            'There is no dumper associated with format "{0}"'.format(format))

    dumper = self.dumpers[format]

    if "path" in options and not os.path.isdir(options['path']):
        os.mkdir(options['path'])

    dumper.dump(catalogue, options)
[ "def", "write_translations", "(", "self", ",", "catalogue", ",", "format", ",", "options", "=", "{", "}", ")", ":", "if", "format", "not", "in", "self", ".", "dumpers", ":", "raise", "ValueError", "(", "'There is no dumper associated with format \"{0}\"'", ".", "format", "(", "format", ")", ")", "dumper", "=", "self", ".", "dumpers", "[", "format", "]", "if", "\"path\"", "in", "options", "and", "not", "os", ".", "path", ".", "isdir", "(", "options", "[", "'path'", "]", ")", ":", "os", ".", "mkdir", "(", "options", "[", "'path'", "]", ")", "dumper", ".", "dump", "(", "catalogue", ",", "options", ")" ]
Writes translation from the catalogue according to the selected format.

@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump

@type format: string
@param format: The format to use to dump the messages

@type options: dict
@param options: Options that are passed to the dumper

@raises: ValueError
[ "Writes", "translation", "from", "the", "catalogue", "according", "to", "the", "selected", "format", "." ]
python
train
32.958333
quodlibet/mutagen
mutagen/id3/_id3v1.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/id3/_id3v1.py#L163-L218
def MakeID3v1(id3): """Return an ID3v1.1 tag string from a dict of ID3v2.4 frames.""" v1 = {} for v2id, name in {"TIT2": "title", "TPE1": "artist", "TALB": "album"}.items(): if v2id in id3: text = id3[v2id].text[0].encode('latin1', 'replace')[:30] else: text = b"" v1[name] = text + (b"\x00" * (30 - len(text))) if "COMM" in id3: cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28] else: cmnt = b"" v1["comment"] = cmnt + (b"\x00" * (29 - len(cmnt))) if "TRCK" in id3: try: v1["track"] = chr_(+id3["TRCK"]) except ValueError: v1["track"] = b"\x00" else: v1["track"] = b"\x00" if "TCON" in id3: try: genre = id3["TCON"].genres[0] except IndexError: pass else: if genre in TCON.GENRES: v1["genre"] = chr_(TCON.GENRES.index(genre)) if "genre" not in v1: v1["genre"] = b"\xff" if "TDRC" in id3: year = text_type(id3["TDRC"]).encode('ascii') elif "TYER" in id3: year = text_type(id3["TYER"]).encode('ascii') else: year = b"" v1["year"] = (year + b"\x00\x00\x00\x00")[:4] return ( b"TAG" + v1["title"] + v1["artist"] + v1["album"] + v1["year"] + v1["comment"] + v1["track"] + v1["genre"] )
[ "def", "MakeID3v1", "(", "id3", ")", ":", "v1", "=", "{", "}", "for", "v2id", ",", "name", "in", "{", "\"TIT2\"", ":", "\"title\"", ",", "\"TPE1\"", ":", "\"artist\"", ",", "\"TALB\"", ":", "\"album\"", "}", ".", "items", "(", ")", ":", "if", "v2id", "in", "id3", ":", "text", "=", "id3", "[", "v2id", "]", ".", "text", "[", "0", "]", ".", "encode", "(", "'latin1'", ",", "'replace'", ")", "[", ":", "30", "]", "else", ":", "text", "=", "b\"\"", "v1", "[", "name", "]", "=", "text", "+", "(", "b\"\\x00\"", "*", "(", "30", "-", "len", "(", "text", ")", ")", ")", "if", "\"COMM\"", "in", "id3", ":", "cmnt", "=", "id3", "[", "\"COMM\"", "]", ".", "text", "[", "0", "]", ".", "encode", "(", "'latin1'", ",", "'replace'", ")", "[", ":", "28", "]", "else", ":", "cmnt", "=", "b\"\"", "v1", "[", "\"comment\"", "]", "=", "cmnt", "+", "(", "b\"\\x00\"", "*", "(", "29", "-", "len", "(", "cmnt", ")", ")", ")", "if", "\"TRCK\"", "in", "id3", ":", "try", ":", "v1", "[", "\"track\"", "]", "=", "chr_", "(", "+", "id3", "[", "\"TRCK\"", "]", ")", "except", "ValueError", ":", "v1", "[", "\"track\"", "]", "=", "b\"\\x00\"", "else", ":", "v1", "[", "\"track\"", "]", "=", "b\"\\x00\"", "if", "\"TCON\"", "in", "id3", ":", "try", ":", "genre", "=", "id3", "[", "\"TCON\"", "]", ".", "genres", "[", "0", "]", "except", "IndexError", ":", "pass", "else", ":", "if", "genre", "in", "TCON", ".", "GENRES", ":", "v1", "[", "\"genre\"", "]", "=", "chr_", "(", "TCON", ".", "GENRES", ".", "index", "(", "genre", ")", ")", "if", "\"genre\"", "not", "in", "v1", ":", "v1", "[", "\"genre\"", "]", "=", "b\"\\xff\"", "if", "\"TDRC\"", "in", "id3", ":", "year", "=", "text_type", "(", "id3", "[", "\"TDRC\"", "]", ")", ".", "encode", "(", "'ascii'", ")", "elif", "\"TYER\"", "in", "id3", ":", "year", "=", "text_type", "(", "id3", "[", "\"TYER\"", "]", ")", ".", "encode", "(", "'ascii'", ")", "else", ":", "year", "=", "b\"\"", "v1", "[", "\"year\"", "]", "=", "(", "year", "+", "b\"\\x00\\x00\\x00\\x00\"", ")", "[", ":", "4", "]", "return", "(", "b\"TAG\"", "+", "v1", "[", "\"title\"", "]", "+", "v1", "[", "\"artist\"", "]", "+", "v1", "[", "\"album\"", "]", "+", "v1", "[", "\"year\"", "]", "+", "v1", "[", "\"comment\"", "]", "+", "v1", "[", "\"track\"", "]", "+", "v1", "[", "\"genre\"", "]", ")" ]
Return an ID3v1.1 tag string from a dict of ID3v2.4 frames.
[ "Return", "an", "ID3v1", ".", "1", "tag", "string", "from", "a", "dict", "of", "ID3v2", ".", "4", "frames", "." ]
python
train
25.25
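A hedged sketch of driving MakeID3v1 with mutagen's public frame classes; the title and artist strings are made up. Whatever frames are present, the result is the fixed 128-byte ID3v1 block:

from mutagen.id3 import ID3, TIT2, TPE1
from mutagen.id3._id3v1 import MakeID3v1  # private module, per the path above

tag = ID3()
tag.add(TIT2(encoding=3, text=['Song Title']))
tag.add(TPE1(encoding=3, text=['Artist']))
block = MakeID3v1(tag)
print(len(block), block[:3])  # 128 b'TAG'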
peshay/tpm
tpm.py
https://github.com/peshay/tpm/blob/8e64a4d8b89d54bdd2c92d965463a7508aa3d0bc/tpm.py#L510-L514
def change_user_password(self, ID, data): """Change password of a User.""" # http://teampasswordmanager.com/docs/api-users/#change_password log.info('Change user %s password' % ID) self.put('users/%s/change_password.json' % ID, data)
[ "def", "change_user_password", "(", "self", ",", "ID", ",", "data", ")", ":", "# http://teampasswordmanager.com/docs/api-users/#change_password", "log", ".", "info", "(", "'Change user %s password'", "%", "ID", ")", "self", ".", "put", "(", "'users/%s/change_password.json'", "%", "ID", ",", "data", ")" ]
Change password of a User.
[ "Change", "password", "of", "a", "User", "." ]
python
train
52.2
pmelchior/proxmin
proxmin/utils.py
https://github.com/pmelchior/proxmin/blob/60e49d90c67c46329cc1d3b5c484951dc8bd2c3f/proxmin/utils.py#L300-L310
def get_step_f(step_f, lR2, lS2):
    """Update the stepsize given the primal and dual errors.

    See Boyd (2011), section 3.4.1
    """
    mu, tau = 10, 2
    if lR2 > mu*lS2:
        return step_f * tau
    elif lS2 > mu*lR2:
        return step_f / tau
    return step_f
[ "def", "get_step_f", "(", "step_f", ",", "lR2", ",", "lS2", ")", ":", "mu", ",", "tau", "=", "10", ",", "2", "if", "lR2", ">", "mu", "*", "lS2", ":", "return", "step_f", "*", "tau", "elif", "lS2", ">", "mu", "*", "lR2", ":", "return", "step_f", "/", "tau", "return", "step_f" ]
Update the stepsize given the primal and dual errors.

See Boyd (2011), section 3.4.1
[ "Update", "the", "stepsize", "given", "the", "primal", "and", "dual", "errors", "." ]
python
train
24.454545
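The residual-balancing rule above (mu=10, tau=2, per Boyd 2011 sec. 3.4.1), evaluated on sample primal/dual errors:

from proxmin.utils import get_step_f

print(get_step_f(1.0, lR2=25.0, lS2=2.0))  # primal error dominates -> 2.0
print(get_step_f(1.0, lR2=2.0, lS2=25.0))  # dual error dominates -> 0.5
print(get_step_f(1.0, lR2=5.0, lS2=4.0))   # roughly balanced -> 1.0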
TrafficSenseMSD/SumoTools
traci/_vehicle.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicle.py#L1121-L1127
def setEmergencyDecel(self, vehID, decel): """setEmergencyDecel(string, double) -> None Sets the maximal physically possible deceleration in m/s^2 for this vehicle. """ self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_EMERGENCY_DECEL, vehID, decel)
[ "def", "setEmergencyDecel", "(", "self", ",", "vehID", ",", "decel", ")", ":", "self", ".", "_connection", ".", "_sendDoubleCmd", "(", "tc", ".", "CMD_SET_VEHICLE_VARIABLE", ",", "tc", ".", "VAR_EMERGENCY_DECEL", ",", "vehID", ",", "decel", ")" ]
setEmergencyDecel(string, double) -> None Sets the maximal physically possible deceleration in m/s^2 for this vehicle.
[ "setEmergencyDecel", "(", "string", "double", ")", "-", ">", "None" ]
python
train
43.857143
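A hedged sketch for setEmergencyDecel above, assuming SUMO is installed and the config path exists; all values are illustrative:

import traci

traci.start(["sumo", "-c", "scenario.sumocfg"])  # hypothetical config file
# Caps the physically possible deceleration of vehicle "veh0" at 9 m/s^2.
traci.vehicle.setEmergencyDecel("veh0", 9.0)
traci.close()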
belbio/bel
bel/db/arangodb.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/db/arangodb.py#L247-L279
def get_belapi_handle(client, username=None, password=None): """Get BEL API arango db handle""" (username, password) = get_user_creds(username, password) sys_db = client.db("_system", username=username, password=password) # Create a new database named "belapi" try: if username and password: belapi_db = sys_db.create_database( name=belapi_db_name, users=[{"username": username, "password": password, "active": True}], ) else: belapi_db = sys_db.create_database(name=belapi_db_name) except arango.exceptions.DatabaseCreateError: if username and password: belapi_db = client.db(belapi_db_name, username=username, password=password) else: belapi_db = client.db(belapi_db_name) try: belapi_db.create_collection(belapi_settings_name) except Exception: pass try: belapi_db.create_collection(belapi_statemgmt_name) except Exception: pass return belapi_db
[ "def", "get_belapi_handle", "(", "client", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "(", "username", ",", "password", ")", "=", "get_user_creds", "(", "username", ",", "password", ")", "sys_db", "=", "client", ".", "db", "(", "\"_system\"", ",", "username", "=", "username", ",", "password", "=", "password", ")", "# Create a new database named \"belapi\"", "try", ":", "if", "username", "and", "password", ":", "belapi_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "belapi_db_name", ",", "users", "=", "[", "{", "\"username\"", ":", "username", ",", "\"password\"", ":", "password", ",", "\"active\"", ":", "True", "}", "]", ",", ")", "else", ":", "belapi_db", "=", "sys_db", ".", "create_database", "(", "name", "=", "belapi_db_name", ")", "except", "arango", ".", "exceptions", ".", "DatabaseCreateError", ":", "if", "username", "and", "password", ":", "belapi_db", "=", "client", ".", "db", "(", "belapi_db_name", ",", "username", "=", "username", ",", "password", "=", "password", ")", "else", ":", "belapi_db", "=", "client", ".", "db", "(", "belapi_db_name", ")", "try", ":", "belapi_db", ".", "create_collection", "(", "belapi_settings_name", ")", "except", "Exception", ":", "pass", "try", ":", "belapi_db", ".", "create_collection", "(", "belapi_statemgmt_name", ")", "except", "Exception", ":", "pass", "return", "belapi_db" ]
Get BEL API arango db handle
[ "Get", "BEL", "API", "arango", "db", "handle" ]
python
train
30.969697
BlueBrain/nat
nat/gitManager.py
https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/gitManager.py#L141-L167
def push(self):
    """
    Adding the no_thin argument to the GIT push because we had some issues pushing previously.
    According to http://stackoverflow.com/questions/16586642/git-unpack-error-on-push-to-gerrit#comment42953435_23610917,
    "a new optimization which causes git to send as little data as possible over the network caused this bug
    to manifest, so my guess is --no-thin just turns these optimizations off. From git push --help:
    "A thin transfer significantly reduces the amount of sent data when the sender and receiver share many
    of the same objects in common." (--thin is the default)."
    """
    if not self.canRunRemoteCmd():
        return None

    try:
        fetchInfo = self.repo.remotes.origin.push(no_thin=True)[0]
    except exc.GitCommandError as e:
        print(dir(e))
        print(e)
        raise

    if fetchInfo.flags & fetchInfo.ERROR:
        try:
            msg = ("An error occurred while trying to push the GIT repository to the server. Error flag: '" +
                   str(fetchInfo.flags) + "', message: '" + str(fetchInfo.note) + "'.")
        except Exception:
            msg = "An error occurred while trying to push the GIT repository to the server."
        raise IOError(msg)

    return fetchInfo
[ "def", "push", "(", "self", ")", ":", "if", "not", "self", ".", "canRunRemoteCmd", "(", ")", ":", "return", "None", "try", ":", "fetchInfo", "=", "self", ".", "repo", ".", "remotes", ".", "origin", ".", "push", "(", "no_thin", "=", "True", ")", "[", "0", "]", "except", "exc", ".", "GitCommandError", "as", "e", ":", "print", "(", "dir", "(", "e", ")", ")", "print", "(", "e", ")", "raise", "if", "fetchInfo", ".", "flags", "&", "fetchInfo", ".", "ERROR", ":", "try", ":", "msg", "=", "(", "\"An error occurred while trying to push the GIT repository to the server. Error flag: '\"", "+", "str", "(", "fetchInfo", ".", "flags", ")", "+", "\"', message: '\"", "+", "str", "(", "fetchInfo", ".", "note", ")", "+", "\"'.\"", ")", "except", "Exception", ":", "msg", "=", "\"An error occurred while trying to push the GIT repository to the server.\"", "raise", "IOError", "(", "msg", ")", "return", "fetchInfo" ]
Adding the no_thin argument to the GIT push because we had some issues pushing previously. According to http://stackoverflow.com/questions/16586642/git-unpack-error-on-push-to-gerrit#comment42953435_23610917, "a new optimization which causes git to send as little data as possible over the network caused this bug to manifest, so my guess is --no-thin just turns these optimizations off. From git push --help: "A thin transfer significantly reduces the amount of sent data when the sender and receiver share many of the same objects in common." (--thin is the default)."
[ "Adding", "the", "no_thin", "argument", "to", "the", "GIT", "push", "because", "we", "had", "some", "issues", "pushing", "previously", ".", "According", "to", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "16586642", "/", "git", "-", "unpack", "-", "error", "-", "on", "-", "push", "-", "to", "-", "gerrit#comment42953435_23610917", "a", "new", "optimization", "which", "causes", "git", "to", "send", "as", "little", "data", "as", "possible", "over", "the", "network", "caused", "this", "bug", "to", "manifest", "so", "my", "guess", "is", "--", "no", "-", "thin", "just", "turns", "these", "optimizations", "off", ".", "From", "git", "push", "--", "help", ":", "A", "thin", "transfer", "significantly", "reduces", "the", "amount", "of", "sent", "data", "when", "the", "sender", "and", "receiver", "share", "many", "of", "the", "same", "objects", "in", "common", ".", "(", "--", "thin", "is", "the", "default", ")", "." ]
python
train
49.111111
ngmarchant/oasis
oasis/kad.py
https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/kad.py#L215-L241
def _update_cov_model(self, strata_to_update='all'): """ strata_to_update : array-like or 'all' array containing stratum indices to update """ if strata_to_update == 'all': strata_to_update = self.strata.indices_ #: Otherwise assume strata_to_update is valid (no duplicates etc.) #: Update covariance matrices #: We usually update only one stratum at a time, so for loop is ok n_sampled = np.clip(self.strata._n_sampled, 2, np.inf) #: adding 2 avoids undef. cov factor = n_sampled/(n_sampled - 1) for k in strata_to_update: TP = self._BB_TP.theta_[k] PP = self._BB_PP.theta_[k] P = self._BB_P.theta_[k] self.cov_model_[k,0,0] = factor[k] * TP * (1 - TP) self.cov_model_[k,0,1] = factor[k] * TP * (1 - PP) self.cov_model_[k,0,2] = factor[k] * TP * (1 - P) self.cov_model_[k,1,1] = factor[k] * PP * (1 - PP) self.cov_model_[k,1,2] = factor[k] * (TP - PP * P) self.cov_model_[k,2,2] = factor[k] * P * (1 - P) self.cov_model_[k,1,0] = self.cov_model_[k,0,1] self.cov_model_[k,2,0] = self.cov_model_[k,0,2] self.cov_model_[k,2,1] = self.cov_model_[k,1,2]
[ "def", "_update_cov_model", "(", "self", ",", "strata_to_update", "=", "'all'", ")", ":", "if", "strata_to_update", "==", "'all'", ":", "strata_to_update", "=", "self", ".", "strata", ".", "indices_", "#: Otherwise assume strata_to_update is valid (no duplicates etc.)", "#: Update covariance matrices", "#: We usually update only one stratum at a time, so for loop is ok", "n_sampled", "=", "np", ".", "clip", "(", "self", ".", "strata", ".", "_n_sampled", ",", "2", ",", "np", ".", "inf", ")", "#: adding 2 avoids undef. cov", "factor", "=", "n_sampled", "/", "(", "n_sampled", "-", "1", ")", "for", "k", "in", "strata_to_update", ":", "TP", "=", "self", ".", "_BB_TP", ".", "theta_", "[", "k", "]", "PP", "=", "self", ".", "_BB_PP", ".", "theta_", "[", "k", "]", "P", "=", "self", ".", "_BB_P", ".", "theta_", "[", "k", "]", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "0", "]", "=", "factor", "[", "k", "]", "*", "TP", "*", "(", "1", "-", "TP", ")", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "1", "]", "=", "factor", "[", "k", "]", "*", "TP", "*", "(", "1", "-", "PP", ")", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "2", "]", "=", "factor", "[", "k", "]", "*", "TP", "*", "(", "1", "-", "P", ")", "self", ".", "cov_model_", "[", "k", ",", "1", ",", "1", "]", "=", "factor", "[", "k", "]", "*", "PP", "*", "(", "1", "-", "PP", ")", "self", ".", "cov_model_", "[", "k", ",", "1", ",", "2", "]", "=", "factor", "[", "k", "]", "*", "(", "TP", "-", "PP", "*", "P", ")", "self", ".", "cov_model_", "[", "k", ",", "2", ",", "2", "]", "=", "factor", "[", "k", "]", "*", "P", "*", "(", "1", "-", "P", ")", "self", ".", "cov_model_", "[", "k", ",", "1", ",", "0", "]", "=", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "1", "]", "self", ".", "cov_model_", "[", "k", ",", "2", ",", "0", "]", "=", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "2", "]", "self", ".", "cov_model_", "[", "k", ",", "2", ",", "1", "]", "=", "self", ".", "cov_model_", "[", "k", ",", "1", ",", "2", "]" ]
strata_to_update : array-like or 'all' array containing stratum indices to update
[ "strata_to_update", ":", "array", "-", "like", "or", "all", "array", "containing", "stratum", "indices", "to", "update" ]
python
train
47.148148
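The small-sample correction above multiplies each Bernoulli (co)variance by n/(n-1), with n clipped at 2 so the factor stays defined; a worked line for one diagonal entry such as cov[k, 2, 2]:

n, theta = 5.0, 0.4
print((n / (n - 1.0)) * theta * (1.0 - theta))  # 1.25 * 0.24 = 0.3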
apache/airflow
airflow/task/task_runner/base_task_runner.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/task/task_runner/base_task_runner.py#L101-L135
def run_command(self, run_with=None, join_args=False):
    """
    Run the task command.

    :param run_with: list of tokens to run the task command with
        e.g. ``['bash', '-c']``
    :type run_with: list
    :param join_args: whether to concatenate the list of command tokens
        e.g. ``['airflow', 'run']`` vs ``['airflow run']``
    :type join_args: bool
    :return: the process that was run
    :rtype: subprocess.Popen
    """
    run_with = run_with or []
    cmd = [" ".join(self._command)] if join_args else self._command
    full_cmd = run_with + cmd

    self.log.info('Running: %s', full_cmd)
    proc = subprocess.Popen(
        full_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        close_fds=True,
        env=os.environ.copy(),
        preexec_fn=os.setsid
    )

    # Start daemon thread to read subprocess logging output
    log_reader = threading.Thread(
        target=self._read_task_logs,
        args=(proc.stdout,),
    )
    log_reader.daemon = True
    log_reader.start()
    return proc
[ "def", "run_command", "(", "self", ",", "run_with", "=", "None", ",", "join_args", "=", "False", ")", ":", "run_with", "=", "run_with", "or", "[", "]", "cmd", "=", "[", "\" \"", ".", "join", "(", "self", ".", "_command", ")", "]", "if", "join_args", "else", "self", ".", "_command", "full_cmd", "=", "run_with", "+", "cmd", "self", ".", "log", ".", "info", "(", "'Running: %s'", ",", "full_cmd", ")", "proc", "=", "subprocess", ".", "Popen", "(", "full_cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "universal_newlines", "=", "True", ",", "close_fds", "=", "True", ",", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", ",", "preexec_fn", "=", "os", ".", "setsid", ")", "# Start daemon thread to read subprocess logging output", "log_reader", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_read_task_logs", ",", "args", "=", "(", "proc", ".", "stdout", ",", ")", ",", ")", "log_reader", ".", "daemon", "=", "True", "log_reader", ".", "start", "(", ")", "return", "proc" ]
Run the task command.

:param run_with: list of tokens to run the task command with
    e.g. ``['bash', '-c']``
:type run_with: list
:param join_args: whether to concatenate the list of command tokens
    e.g. ``['airflow', 'run']`` vs ``['airflow run']``
:type join_args: bool
:return: the process that was run
:rtype: subprocess.Popen
[ "Run", "the", "task", "command", "." ]
python
test
33.428571
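The command-assembly rule in run_command above, standalone: join_args collapses the token list into one shell string before run_with is prepended:

command = ['airflow', 'run', 'dag_id', 'task_id']
run_with = ['bash', '-c']
print(run_with + [" ".join(command)])  # join_args=True
print(run_with + command)              # join_args=False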
sethmlarson/virtualbox-python
virtualbox/library_ext/appliance.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library_ext/appliance.py#L28-L37
def find_description(self, name): "Find a description for the given appliance name." for desc in self.virtual_system_descriptions: values = desc.get_values_by_type(DescType.name, DescValueType.original) if name in values: break else: raise Exception("Failed to find description for %s" % name) return desc
[ "def", "find_description", "(", "self", ",", "name", ")", ":", "for", "desc", "in", "self", ".", "virtual_system_descriptions", ":", "values", "=", "desc", ".", "get_values_by_type", "(", "DescType", ".", "name", ",", "DescValueType", ".", "original", ")", "if", "name", "in", "values", ":", "break", "else", ":", "raise", "Exception", "(", "\"Failed to find description for %s\"", "%", "name", ")", "return", "desc" ]
Find a description for the given appliance name.
[ "Find", "a", "description", "for", "the", "given", "appliance", "name", "." ]
python
train
42.5
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1744-L1756
def del_all_host_downtimes(self, host): """Delete all host downtimes Format of the line that triggers function call:: DEL_ALL_HOST_DOWNTIMES;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ for downtime in host.downtimes: self.del_host_downtime(downtime) self.send_an_element(host.get_update_status_brok())
[ "def", "del_all_host_downtimes", "(", "self", ",", "host", ")", ":", "for", "downtime", "in", "host", ".", "downtimes", ":", "self", ".", "del_host_downtime", "(", "downtime", ")", "self", ".", "send_an_element", "(", "host", ".", "get_update_status_brok", "(", ")", ")" ]
Delete all host downtimes Format of the line that triggers function call:: DEL_ALL_HOST_DOWNTIMES;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None
[ "Delete", "all", "host", "downtimes", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
32.692308
joke2k/faker
faker/providers/isbn/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/isbn/__init__.py#L23-L42
def _body(self): """ Generate the information required to create an ISBN-10 or ISBN-13. """ ean = self.random_element(RULES.keys()) reg_group = self.random_element(RULES[ean].keys()) # Given the chosen ean/group, decide how long the # registrant/publication string may be. # We must allocate for the calculated check digit, so # subtract 1 reg_pub_len = ISBN.MAX_LENGTH - len(ean) - len(reg_group) - 1 # Generate a registrant/publication combination reg_pub = self.numerify('#' * reg_pub_len) # Use rules to separate the registrant from the publication rules = RULES[ean][reg_group] registrant, publication = self._registrant_publication(reg_pub, rules) return [ean, reg_group, registrant, publication]
[ "def", "_body", "(", "self", ")", ":", "ean", "=", "self", ".", "random_element", "(", "RULES", ".", "keys", "(", ")", ")", "reg_group", "=", "self", ".", "random_element", "(", "RULES", "[", "ean", "]", ".", "keys", "(", ")", ")", "# Given the chosen ean/group, decide how long the", "# registrant/publication string may be.", "# We must allocate for the calculated check digit, so", "# subtract 1", "reg_pub_len", "=", "ISBN", ".", "MAX_LENGTH", "-", "len", "(", "ean", ")", "-", "len", "(", "reg_group", ")", "-", "1", "# Generate a registrant/publication combination", "reg_pub", "=", "self", ".", "numerify", "(", "'#'", "*", "reg_pub_len", ")", "# Use rules to separate the registrant from the publication", "rules", "=", "RULES", "[", "ean", "]", "[", "reg_group", "]", "registrant", ",", "publication", "=", "self", ".", "_registrant_publication", "(", "reg_pub", ",", "rules", ")", "return", "[", "ean", ",", "reg_group", ",", "registrant", ",", "publication", "]" ]
Generate the information required to create an ISBN-10 or ISBN-13.
[ "Generate", "the", "information", "required", "to", "create", "an", "ISBN", "-", "10", "or", "ISBN", "-", "13", "." ]
python
train
40.9
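_body() feeds Faker's public ISBN provider; a sketch through that documented surface:

from faker import Faker

fake = Faker()
# Each call draws an EAN prefix, registration group and a random
# registrant/publication split via _body(), then adds the check digit.
print(fake.isbn10())
print(fake.isbn13())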
iterative/dvc
dvc/config.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/config.py#L491-L539
def get_remote_settings(self, name):
    """
    Args:
        name (str): The name of the remote that we want to retrieve

    Returns:
        dict: The content beneath the given remote name.

    Example:
        >>> config = {'remote "server"': {'url': 'ssh://localhost/'}}
        >>> get_remote_settings("server")
        {'url': 'ssh://localhost/'}
    """
    import posixpath

    settings = self.config[self.SECTION_REMOTE_FMT.format(name)]
    parsed = urlparse(settings["url"])

    # Support for cross referenced remotes.
    # This will merge the settings, giving priority to the outer reference.
    # For example, having:
    #
    #       dvc remote add server ssh://localhost
    #       dvc remote modify server user root
    #       dvc remote modify server ask_password true
    #
    #       dvc remote add images remote://server/tmp/pictures
    #       dvc remote modify images user alice
    #       dvc remote modify images ask_password false
    #       dvc remote modify images password asdf1234
    #
    # Results on a config dictionary like:
    #
    #       {
    #           "url": "ssh://localhost/tmp/pictures",
    #           "user": "alice",
    #           "password": "asdf1234",
    #           "ask_password": False,
    #       }
    #
    if parsed.scheme == "remote":
        reference = self.get_remote_settings(parsed.netloc)
        url = posixpath.join(reference["url"], parsed.path.lstrip("/"))
        merged = reference.copy()
        merged.update(settings)
        merged["url"] = url
        return merged

    return settings
[ "def", "get_remote_settings", "(", "self", ",", "name", ")", ":", "import", "posixpath", "settings", "=", "self", ".", "config", "[", "self", ".", "SECTION_REMOTE_FMT", ".", "format", "(", "name", ")", "]", "parsed", "=", "urlparse", "(", "settings", "[", "\"url\"", "]", ")", "# Support for cross referenced remotes.", "# This will merge the settings, giving priority to the outer reference.", "# For example, having:", "#", "# dvc remote add server ssh://localhost", "# dvc remote modify server user root", "# dvc remote modify server ask_password true", "#", "# dvc remote add images remote://server/tmp/pictures", "# dvc remote modify images user alice", "# dvc remote modify images ask_password false", "# dvc remote modify images password asdf1234", "#", "# Results on a config dictionary like:", "#", "# {", "# \"url\": \"ssh://localhost/tmp/pictures\",", "# \"user\": \"alice\",", "# \"password\": \"asdf1234\",", "# \"ask_password\": False,", "# }", "#", "if", "parsed", ".", "scheme", "==", "\"remote\"", ":", "reference", "=", "self", ".", "get_remote_settings", "(", "parsed", ".", "netloc", ")", "url", "=", "posixpath", ".", "join", "(", "reference", "[", "\"url\"", "]", ",", "parsed", ".", "path", ".", "lstrip", "(", "\"/\"", ")", ")", "merged", "=", "reference", ".", "copy", "(", ")", "merged", ".", "update", "(", "settings", ")", "merged", "[", "\"url\"", "]", "=", "url", "return", "merged", "return", "settings" ]
Args: name (str): The name of the remote that we want to retrieve Returns: dict: The content beneath the given remote name. Example: >>> config = {'remote "server"': {'url': 'ssh://localhost/'}} >>> get_remote_settings("server") {'url': 'ssh://localhost/'}
[ "Args", ":", "name", "(", "str", ")", ":", "The", "name", "of", "the", "remote", "that", "we", "want", "to", "retrieve" ]
python
train
34.816327
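The cross-reference resolution in `get_remote_settings` above can be exercised outside of DVC. A minimal standalone sketch, with a hypothetical in-memory dict standing in for `self.config`:

# Hypothetical remotes dict standing in for DVC's parsed config.
import posixpath
from urllib.parse import urlparse

remotes = {
    "server": {"url": "ssh://localhost", "user": "root", "ask_password": True},
    "images": {"url": "remote://server/tmp/pictures", "user": "alice"},
}

def resolve(name):
    settings = remotes[name]
    parsed = urlparse(settings["url"])
    if parsed.scheme == "remote":
        # Merge the outer settings over the referenced remote's settings.
        reference = resolve(parsed.netloc)
        merged = reference.copy()
        merged.update(settings)
        merged["url"] = posixpath.join(reference["url"], parsed.path.lstrip("/"))
        return merged
    return settings

print(resolve("images"))
# -> {'url': 'ssh://localhost/tmp/pictures', 'user': 'alice', 'ask_password': True}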
cthorey/pdsimage
pdsimage/PDS_Extractor.py
https://github.com/cthorey/pdsimage/blob/f71de6dfddd3d538d76da229b4b9605c40f3fbac/pdsimage/PDS_Extractor.py#L946-L963
def _map_center(self, coord, val):
    ''' Identify the center of the image corresponding to one coordinate. '''
    if self.ppd in [4, 16, 64, 128]:
        res = {'lat': 0, 'long': 360}
        return res[coord] / 2.0
    elif self.ppd in [256]:
        res = {'lat': 90, 'long': 180}
        c = (val // res[coord] + 1) * res[coord]
        return c - res[coord], c
    elif self.ppd in [512]:
        res = {'lat': 45, 'long': 90}
        c = (val // res[coord] + 1) * res[coord]
        return c - res[coord], c
    elif self.ppd in [1024]:
        res = {'lat': 15, 'long': 30}
        c = (val // res[coord] + 1) * res[coord]
        return c - res[coord], c
[ "def", "_map_center", "(", "self", ",", "coord", ",", "val", ")", ":", "if", "self", ".", "ppd", "in", "[", "4", ",", "16", ",", "64", ",", "128", "]", ":", "res", "=", "{", "'lat'", ":", "0", ",", "'long'", ":", "360", "}", "return", "res", "[", "coord", "]", "/", "2.0", "elif", "self", ".", "ppd", "in", "[", "256", "]", ":", "res", "=", "{", "'lat'", ":", "90", ",", "'long'", ":", "180", "}", "c", "=", "(", "val", "//", "res", "[", "coord", "]", "+", "1", ")", "*", "res", "[", "coord", "]", "return", "c", "-", "res", "[", "coord", "]", ",", "c", "elif", "self", ".", "ppd", "in", "[", "512", "]", ":", "res", "=", "{", "'lat'", ":", "45", ",", "'long'", ":", "90", "}", "c", "=", "(", "val", "//", "res", "[", "coord", "]", "+", "1", ")", "*", "res", "[", "coord", "]", "return", "c", "-", "res", "[", "coord", "]", ",", "c", "elif", "self", ".", "ppd", "in", "[", "1024", "]", ":", "res", "=", "{", "'lat'", ":", "15", ",", "'long'", ":", "30", "}", "c", "=", "(", "val", "//", "res", "[", "coord", "]", "+", "1", ")", "*", "res", "[", "coord", "]", "return", "c", "-", "res", "[", "coord", "]", ",", "c" ]
Identify the center of the image corresponding to one coordinate.
[ "Identitify", "the", "center", "of", "the", "Image", "correspond", "to", "one", "coordinate", "." ]
python
train
39.555556
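Note the asymmetry in `_map_center` above: the 4-128 ppd branch returns a single half-range value, while the higher-resolution branches return a (lower, upper) tile-boundary pair. A worked example of the tile arithmetic for a 512 ppd map, where latitude tiles span 45 degrees:

# Worked example of the tiling arithmetic for ppd=512, coord='lat'.
res = 45           # tile span in degrees of latitude at 512 ppd
val = 30.5         # latitude of interest
c = (val // res + 1) * res
print(c - res, c)  # -> 0.0 45.0  (the tile covering 30.5N spans 0..45)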
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/client/client.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/client.py#L1093-L1139
def abort(self, jobs=None, targets=None, block=None): """Abort specific jobs from the execution queues of target(s). This is a mechanism to prevent jobs that have already been submitted from executing. Parameters ---------- jobs : msg_id, list of msg_ids, or AsyncResult The jobs to be aborted If unspecified/None: abort all outstanding jobs. """ block = self.block if block is None else block jobs = jobs if jobs is not None else list(self.outstanding) targets = self._build_targets(targets)[0] msg_ids = [] if isinstance(jobs, (basestring,AsyncResult)): jobs = [jobs] bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs) if bad_ids: raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0]) for j in jobs: if isinstance(j, AsyncResult): msg_ids.extend(j.msg_ids) else: msg_ids.append(j) content = dict(msg_ids=msg_ids) for t in targets: self.session.send(self._control_socket, 'abort_request', content=content, ident=t) error = False if block: self._flush_ignored_control() for i in range(len(targets)): idents,msg = self.session.recv(self._control_socket,0) if self.debug: pprint(msg) if msg['content']['status'] != 'ok': error = self._unwrap_exception(msg['content']) else: self._ignored_control_replies += len(targets) if error: raise error
[ "def", "abort", "(", "self", ",", "jobs", "=", "None", ",", "targets", "=", "None", ",", "block", "=", "None", ")", ":", "block", "=", "self", ".", "block", "if", "block", "is", "None", "else", "block", "jobs", "=", "jobs", "if", "jobs", "is", "not", "None", "else", "list", "(", "self", ".", "outstanding", ")", "targets", "=", "self", ".", "_build_targets", "(", "targets", ")", "[", "0", "]", "msg_ids", "=", "[", "]", "if", "isinstance", "(", "jobs", ",", "(", "basestring", ",", "AsyncResult", ")", ")", ":", "jobs", "=", "[", "jobs", "]", "bad_ids", "=", "filter", "(", "lambda", "obj", ":", "not", "isinstance", "(", "obj", ",", "(", "basestring", ",", "AsyncResult", ")", ")", ",", "jobs", ")", "if", "bad_ids", ":", "raise", "TypeError", "(", "\"Invalid msg_id type %r, expected str or AsyncResult\"", "%", "bad_ids", "[", "0", "]", ")", "for", "j", "in", "jobs", ":", "if", "isinstance", "(", "j", ",", "AsyncResult", ")", ":", "msg_ids", ".", "extend", "(", "j", ".", "msg_ids", ")", "else", ":", "msg_ids", ".", "append", "(", "j", ")", "content", "=", "dict", "(", "msg_ids", "=", "msg_ids", ")", "for", "t", "in", "targets", ":", "self", ".", "session", ".", "send", "(", "self", ".", "_control_socket", ",", "'abort_request'", ",", "content", "=", "content", ",", "ident", "=", "t", ")", "error", "=", "False", "if", "block", ":", "self", ".", "_flush_ignored_control", "(", ")", "for", "i", "in", "range", "(", "len", "(", "targets", ")", ")", ":", "idents", ",", "msg", "=", "self", ".", "session", ".", "recv", "(", "self", ".", "_control_socket", ",", "0", ")", "if", "self", ".", "debug", ":", "pprint", "(", "msg", ")", "if", "msg", "[", "'content'", "]", "[", "'status'", "]", "!=", "'ok'", ":", "error", "=", "self", ".", "_unwrap_exception", "(", "msg", "[", "'content'", "]", ")", "else", ":", "self", ".", "_ignored_control_replies", "+=", "len", "(", "targets", ")", "if", "error", ":", "raise", "error" ]
Abort specific jobs from the execution queues of target(s). This is a mechanism to prevent jobs that have already been submitted from executing. Parameters ---------- jobs : msg_id, list of msg_ids, or AsyncResult The jobs to be aborted If unspecified/None: abort all outstanding jobs.
[ "Abort", "specific", "jobs", "from", "the", "execution", "queues", "of", "target", "(", "s", ")", "." ]
python
test
36.574468
aws/aws-encryption-sdk-python
examples/src/basic_file_encryption_with_multiple_providers.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/examples/src/basic_file_encryption_with_multiple_providers.py#L39-L60
def _get_raw_key(self, key_id): """Retrieves a static, randomly generated, RSA key for the specified key id. :param str key_id: User-defined ID for the static key :returns: Wrapping key that contains the specified static key :rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey` """ try: static_key = self._static_keys[key_id] except KeyError: private_key = rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend()) static_key = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ) self._static_keys[key_id] = static_key return WrappingKey( wrapping_algorithm=WrappingAlgorithm.RSA_OAEP_SHA1_MGF1, wrapping_key=static_key, wrapping_key_type=EncryptionKeyType.PRIVATE, )
[ "def", "_get_raw_key", "(", "self", ",", "key_id", ")", ":", "try", ":", "static_key", "=", "self", ".", "_static_keys", "[", "key_id", "]", "except", "KeyError", ":", "private_key", "=", "rsa", ".", "generate_private_key", "(", "public_exponent", "=", "65537", ",", "key_size", "=", "4096", ",", "backend", "=", "default_backend", "(", ")", ")", "static_key", "=", "private_key", ".", "private_bytes", "(", "encoding", "=", "serialization", ".", "Encoding", ".", "PEM", ",", "format", "=", "serialization", ".", "PrivateFormat", ".", "PKCS8", ",", "encryption_algorithm", "=", "serialization", ".", "NoEncryption", "(", ")", ",", ")", "self", ".", "_static_keys", "[", "key_id", "]", "=", "static_key", "return", "WrappingKey", "(", "wrapping_algorithm", "=", "WrappingAlgorithm", ".", "RSA_OAEP_SHA1_MGF1", ",", "wrapping_key", "=", "static_key", ",", "wrapping_key_type", "=", "EncryptionKeyType", ".", "PRIVATE", ",", ")" ]
Retrieves a static, randomly generated, RSA key for the specified key id. :param str key_id: User-defined ID for the static key :returns: Wrapping key that contains the specified static key :rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey`
[ "Retrieves", "a", "static", "randomly", "generated", "RSA", "key", "for", "the", "specified", "key", "id", "." ]
python
train
46.045455
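The key-generation path in `_get_raw_key` above uses only the `cryptography` package and can be run on its own. A minimal sketch with the same calls, using a smaller key size than the 4096 bits above just to keep it fast:

# Standalone key generation with the same 'cryptography' calls.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)
pem = private_key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)
print(pem.splitlines()[0])  # -> b'-----BEGIN PRIVATE KEY-----'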
bfontaine/crosswords
crosswords/words.py
https://github.com/bfontaine/crosswords/blob/042b3cdd00a59d193ee559368910a8faa54565f5/crosswords/words.py#L16-L39
def get_matches(pattern, language, max_count=8): """ take a word pattern or a Python regexp and a language name, and return a list of all matching words. """ if str(pattern) == pattern: pattern = compile_pattern(pattern) results = [] if not dicts.exists(language): print("The language '%s' is not available locally." % language) return [] with open(dicts.filepath(language), 'r') as f: for word in f: if max_count <= 0: break w = word.strip() if pattern.match(w) and w not in results: results.append(w) max_count -= 1 return results
[ "def", "get_matches", "(", "pattern", ",", "language", ",", "max_count", "=", "8", ")", ":", "if", "str", "(", "pattern", ")", "==", "pattern", ":", "pattern", "=", "compile_pattern", "(", "pattern", ")", "results", "=", "[", "]", "if", "not", "dicts", ".", "exists", "(", "language", ")", ":", "print", "(", "\"The language '%s' is not available locally.\"", "%", "language", ")", "return", "[", "]", "with", "open", "(", "dicts", ".", "filepath", "(", "language", ")", ",", "'r'", ")", "as", "f", ":", "for", "word", "in", "f", ":", "if", "max_count", "<=", "0", ":", "break", "w", "=", "word", ".", "strip", "(", ")", "if", "pattern", ".", "match", "(", "w", ")", "and", "w", "not", "in", "results", ":", "results", ".", "append", "(", "w", ")", "max_count", "-=", "1", "return", "results" ]
take a word pattern or a Python regexp and a language name, and return a list of all matching words.
[ "take", "a", "word", "pattern", "or", "a", "Python", "regexp", "and", "a", "language", "name", "and", "return", "a", "list", "of", "all", "matching", "words", "." ]
python
train
27.708333
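The core loop of `get_matches` above, as a standalone sketch with an in-memory word list standing in for the on-disk dictionary files and a plain regex standing in for `compile_pattern`:

# In-memory stand-ins for the dictionary file and compiled pattern.
import re

words = ["cross", "crass", "class", "chess", "crash"]
pattern = re.compile(r"c..ss$")
max_count = 3

results = []
for w in words:
    if max_count <= 0:
        break
    if pattern.match(w) and w not in results:
        results.append(w)
        max_count -= 1
print(results)  # -> ['cross', 'crass', 'class']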
bcbio/bcbio-nextgen
bcbio/pipeline/rnaseq.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/rnaseq.py#L159-L184
def quantitate(data): """CWL target for quantitation. XXX Needs to be split and parallelized by expression caller, with merging of multiple calls. """ data = to_single_data(to_single_data(data)) data = generate_transcript_counts(data)[0][0] data["quant"] = {} if "sailfish" in dd.get_expression_caller(data): data = to_single_data(sailfish.run_sailfish(data)[0]) data["quant"]["tsv"] = data["sailfish"] data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["sailfish"]), "abundance.h5") if ("kallisto" in dd.get_expression_caller(data) or "pizzly" in dd.get_fusion_caller(data, [])): data = to_single_data(kallisto.run_kallisto_rnaseq(data)[0]) data["quant"]["tsv"] = os.path.join(data["kallisto_quant"], "abundance.tsv") data["quant"]["hdf5"] = os.path.join(data["kallisto_quant"], "abundance.h5") if (os.path.exists(os.path.join(data["kallisto_quant"], "fusion.txt"))): data["quant"]["fusion"] = os.path.join(data["kallisto_quant"], "fusion.txt") else: data["quant"]["fusion"] = None if "salmon" in dd.get_expression_caller(data): data = to_single_data(salmon.run_salmon_reads(data)[0]) data["quant"]["tsv"] = data["salmon"] data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["salmon"]), "abundance.h5") return [[data]]
[ "def", "quantitate", "(", "data", ")", ":", "data", "=", "to_single_data", "(", "to_single_data", "(", "data", ")", ")", "data", "=", "generate_transcript_counts", "(", "data", ")", "[", "0", "]", "[", "0", "]", "data", "[", "\"quant\"", "]", "=", "{", "}", "if", "\"sailfish\"", "in", "dd", ".", "get_expression_caller", "(", "data", ")", ":", "data", "=", "to_single_data", "(", "sailfish", ".", "run_sailfish", "(", "data", ")", "[", "0", "]", ")", "data", "[", "\"quant\"", "]", "[", "\"tsv\"", "]", "=", "data", "[", "\"sailfish\"", "]", "data", "[", "\"quant\"", "]", "[", "\"hdf5\"", "]", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "data", "[", "\"sailfish\"", "]", ")", ",", "\"abundance.h5\"", ")", "if", "(", "\"kallisto\"", "in", "dd", ".", "get_expression_caller", "(", "data", ")", "or", "\"pizzly\"", "in", "dd", ".", "get_fusion_caller", "(", "data", ",", "[", "]", ")", ")", ":", "data", "=", "to_single_data", "(", "kallisto", ".", "run_kallisto_rnaseq", "(", "data", ")", "[", "0", "]", ")", "data", "[", "\"quant\"", "]", "[", "\"tsv\"", "]", "=", "os", ".", "path", ".", "join", "(", "data", "[", "\"kallisto_quant\"", "]", ",", "\"abundance.tsv\"", ")", "data", "[", "\"quant\"", "]", "[", "\"hdf5\"", "]", "=", "os", ".", "path", ".", "join", "(", "data", "[", "\"kallisto_quant\"", "]", ",", "\"abundance.h5\"", ")", "if", "(", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "data", "[", "\"kallisto_quant\"", "]", ",", "\"fusion.txt\"", ")", ")", ")", ":", "data", "[", "\"quant\"", "]", "[", "\"fusion\"", "]", "=", "os", ".", "path", ".", "join", "(", "data", "[", "\"kallisto_quant\"", "]", ",", "\"fusion.txt\"", ")", "else", ":", "data", "[", "\"quant\"", "]", "[", "\"fusion\"", "]", "=", "None", "if", "\"salmon\"", "in", "dd", ".", "get_expression_caller", "(", "data", ")", ":", "data", "=", "to_single_data", "(", "salmon", ".", "run_salmon_reads", "(", "data", ")", "[", "0", "]", ")", "data", "[", "\"quant\"", "]", "[", "\"tsv\"", "]", "=", "data", "[", "\"salmon\"", "]", "data", "[", "\"quant\"", "]", "[", "\"hdf5\"", "]", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "data", "[", "\"salmon\"", "]", ")", ",", "\"abundance.h5\"", ")", "return", "[", "[", "data", "]", "]" ]
CWL target for quantitation. XXX Needs to be split and parallelized by expression caller, with merging of multiple calls.
[ "CWL", "target", "for", "quantitation", "." ]
python
train
51.846154
p1c2u/openapi-core
openapi_core/schema/schemas/models.py
https://github.com/p1c2u/openapi-core/blob/f274836c4dd45729b1634aff8758c63323173947/openapi_core/schema/schemas/models.py#L191-L205
def unmarshal(self, value, custom_formatters=None, strict=True): """Unmarshal parameter from the value.""" if self.deprecated: warnings.warn("The schema is deprecated", DeprecationWarning) casted = self.cast(value, custom_formatters=custom_formatters, strict=strict) if casted is None and not self.required: return None if self.enum and casted not in self.enum: raise InvalidSchemaValue( "Value {value} not in enum choices: {type}", value, self.enum) return casted
[ "def", "unmarshal", "(", "self", ",", "value", ",", "custom_formatters", "=", "None", ",", "strict", "=", "True", ")", ":", "if", "self", ".", "deprecated", ":", "warnings", ".", "warn", "(", "\"The schema is deprecated\"", ",", "DeprecationWarning", ")", "casted", "=", "self", ".", "cast", "(", "value", ",", "custom_formatters", "=", "custom_formatters", ",", "strict", "=", "strict", ")", "if", "casted", "is", "None", "and", "not", "self", ".", "required", ":", "return", "None", "if", "self", ".", "enum", "and", "casted", "not", "in", "self", ".", "enum", ":", "raise", "InvalidSchemaValue", "(", "\"Value {value} not in enum choices: {type}\"", ",", "value", ",", "self", ".", "enum", ")", "return", "casted" ]
Unmarshal parameter from the value.
[ "Unmarshal", "parameter", "from", "the", "value", "." ]
python
train
36.933333
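The None/enum handling in `Schema.unmarshal` above, isolated into a standalone sketch; the identity cast and the ValueError are hypothetical stand-ins for `self.cast` and `InvalidSchemaValue`:

# Stand-ins: identity cast, ValueError instead of InvalidSchemaValue.
def unmarshal(value, enum=None, required=True):
    casted = value  # stands in for self.cast(value, ...)
    if casted is None and not required:
        return None
    if enum and casted not in enum:
        raise ValueError("Value {} not in enum choices: {}".format(casted, enum))
    return casted

print(unmarshal("red", enum=["red", "green"]))  # -> 'red'
print(unmarshal(None, required=False))          # -> None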
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/lib/mavmemlog.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/lib/mavmemlog.py#L68-L76
def rewind(self): '''rewind to start''' self._index = 0 self.percent = 0 self.messages = {} self._flightmode_index = 0 self._timestamp = None self.flightmode = None self.params = {}
[ "def", "rewind", "(", "self", ")", ":", "self", ".", "_index", "=", "0", "self", ".", "percent", "=", "0", "self", ".", "messages", "=", "{", "}", "self", ".", "_flightmode_index", "=", "0", "self", ".", "_timestamp", "=", "None", "self", ".", "flightmode", "=", "None", "self", ".", "params", "=", "{", "}" ]
rewind to start
[ "rewind", "to", "start" ]
python
train
26.333333
thefactory/marathon-python
marathon/client.py
https://github.com/thefactory/marathon-python/blob/592b253aa8edf2475c97ca438ad7b6936652caf2/marathon/client.py#L654-L664
def create_event_subscription(self, url): """Register a callback URL as an event subscriber. :param str url: callback URL :returns: the created event subscription :rtype: dict """ params = {'callbackUrl': url} response = self._do_request('POST', '/v2/eventSubscriptions', params) return response.json()
[ "def", "create_event_subscription", "(", "self", ",", "url", ")", ":", "params", "=", "{", "'callbackUrl'", ":", "url", "}", "response", "=", "self", ".", "_do_request", "(", "'POST'", ",", "'/v2/eventSubscriptions'", ",", "params", ")", "return", "response", ".", "json", "(", ")" ]
Register a callback URL as an event subscriber. :param str url: callback URL :returns: the created event subscription :rtype: dict
[ "Register", "a", "callback", "URL", "as", "an", "event", "subscriber", "." ]
python
train
32.545455
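On the wire, `create_event_subscription` above amounts to a single POST with a `callbackUrl` query parameter. A sketch using `requests` against a hypothetical Marathon instance on localhost; the path and parameter name come from the method itself:

# Hypothetical Marathon endpoint; requires a running Marathon to answer.
import requests

resp = requests.post(
    "http://localhost:8080/v2/eventSubscriptions",
    params={"callbackUrl": "http://callback.example.com/events"},
)
print(resp.json())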
Jajcus/pyxmpp2
pyxmpp2/ext/dataforms.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/dataforms.py#L441-L460
def _new_from_xml(cls, xmlnode): """Create a new `Item` object from an XML element. :Parameters: - `xmlnode`: the XML element. :Types: - `xmlnode`: `libxml2.xmlNode` :return: the object created. :returntype: `Item` """ child = xmlnode.children fields = [] while child: if child.type != "element" or child.ns().content != DATAFORM_NS: pass elif child.name == "field": fields.append(Field._new_from_xml(child)) child = child.next return cls(fields)
[ "def", "_new_from_xml", "(", "cls", ",", "xmlnode", ")", ":", "child", "=", "xmlnode", ".", "children", "fields", "=", "[", "]", "while", "child", ":", "if", "child", ".", "type", "!=", "\"element\"", "or", "child", ".", "ns", "(", ")", ".", "content", "!=", "DATAFORM_NS", ":", "pass", "elif", "child", ".", "name", "==", "\"field\"", ":", "fields", ".", "append", "(", "Field", ".", "_new_from_xml", "(", "child", ")", ")", "child", "=", "child", ".", "next", "return", "cls", "(", "fields", ")" ]
Create a new `Item` object from an XML element. :Parameters: - `xmlnode`: the XML element. :Types: - `xmlnode`: `libxml2.xmlNode` :return: the object created. :returntype: `Item`
[ "Create", "a", "new", "Item", "object", "from", "an", "XML", "element", "." ]
python
valid
30.05
saltstack/salt
salt/states/glance_image.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/glance_image.py#L37-L75
def present(name, auth=None, **kwargs): ''' Ensure image exists and is up-to-date name Name of the image enabled Boolean to control if image is enabled description An arbitrary description of the image ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} kwargs = __utils__['args.clean_kwargs'](**kwargs) __salt__['glanceng.setup_clouds'](auth) image = __salt__['glanceng.image_get'](name=name) if not image: if __opts__['test']: ret['result'] = None ret['changes'] = kwargs ret['comment'] = 'Image {} will be created.'.format(name) return ret kwargs['name'] = name image = __salt__['glanceng.image_create'](**kwargs) ret['changes'] = image ret['comment'] = 'Created image' return ret # TODO(SamYaple): Compare and update image properties here return ret
[ "def", "present", "(", "name", ",", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "kwargs", "=", "__utils__", "[", "'args.clean_kwargs'", "]", "(", "*", "*", "kwargs", ")", "__salt__", "[", "'glanceng.setup_clouds'", "]", "(", "auth", ")", "image", "=", "__salt__", "[", "'glanceng.image_get'", "]", "(", "name", "=", "name", ")", "if", "not", "image", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "kwargs", "ret", "[", "'comment'", "]", "=", "'Image {} will be created.'", ".", "format", "(", "name", ")", "return", "ret", "kwargs", "[", "'name'", "]", "=", "name", "image", "=", "__salt__", "[", "'glanceng.image_create'", "]", "(", "*", "*", "kwargs", ")", "ret", "[", "'changes'", "]", "=", "image", "ret", "[", "'comment'", "]", "=", "'Created image'", "return", "ret", "# TODO(SamYaple): Compare and update image properties here", "return", "ret" ]
Ensure image exists and is up-to-date name Name of the image enabled Boolean to control if image is enabled description An arbitrary description of the image
[ "Ensure", "image", "exists", "and", "is", "up", "-", "to", "-", "date" ]
python
train
24.307692
jobovy/galpy
galpy/actionAngle/actionAngleTorus_c.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleTorus_c.py#L38-L140
def actionAngleTorus_xvFreqs_c(pot,jr,jphi,jz, angler,anglephi,anglez, tol=0.003): """ NAME: actionAngleTorus_xvFreqs_c PURPOSE: compute configuration (x,v) and frequencies of a set of angles on a single torus INPUT: pot - Potential object or list thereof jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) angler - radial angle (array [N]) anglephi - azimuthal angle (array [N]) anglez - vertical angle (array [N]) tol= (0.003) goal for |dJ|/|J| along the torus OUTPUT: (R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag) HISTORY: 2015-08-05/07 - Written - Bovy (UofT) """ #Parse the potential from galpy.orbit.integrateFullOrbit import _parse_pot npot, pot_type, pot_args= _parse_pot(pot,potfortorus=True) #Set up result arrays R= numpy.empty(len(angler)) vR= numpy.empty(len(angler)) vT= numpy.empty(len(angler)) z= numpy.empty(len(angler)) vz= numpy.empty(len(angler)) phi= numpy.empty(len(angler)) Omegar= numpy.empty(1) Omegaphi= numpy.empty(1) Omegaz= numpy.empty(1) flag= ctypes.c_int(0) #Set up the C code ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE') actionAngleTorus_xvFreqsFunc= _lib.actionAngleTorus_xvFreqs actionAngleTorus_xvFreqsFunc.argtypes=\ [ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ctypes.c_int, ndpointer(dtype=numpy.int32,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ctypes.c_double, ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ctypes.POINTER(ctypes.c_int)] #Array requirements, first store old order f_cont= [angler.flags['F_CONTIGUOUS'], anglephi.flags['F_CONTIGUOUS'], anglez.flags['F_CONTIGUOUS']] angler= numpy.require(angler,dtype=numpy.float64,requirements=['C','W']) anglephi= numpy.require(anglephi,dtype=numpy.float64,requirements=['C','W']) anglez= numpy.require(anglez,dtype=numpy.float64,requirements=['C','W']) R= numpy.require(R,dtype=numpy.float64,requirements=['C','W']) vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W']) vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W']) z= numpy.require(z,dtype=numpy.float64,requirements=['C','W']) vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W']) phi= numpy.require(phi,dtype=numpy.float64,requirements=['C','W']) Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W']) Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,requirements=['C','W']) Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W']) #Run the C code actionAngleTorus_xvFreqsFunc(ctypes.c_double(jr), ctypes.c_double(jphi), ctypes.c_double(jz), ctypes.c_int(len(angler)), angler, anglephi, anglez, ctypes.c_int(npot), pot_type, pot_args, ctypes.c_double(tol), R,vR,vT,z,vz,phi, Omegar,Omegaphi,Omegaz, ctypes.byref(flag)) #Reset input arrays if f_cont[0]: angler= numpy.asfortranarray(angler) if f_cont[1]: anglephi= numpy.asfortranarray(anglephi) if f_cont[2]: anglez= numpy.asfortranarray(anglez) return (R,vR,vT,z,vz,phi,Omegar[0],Omegaphi[0],Omegaz[0],flag.value)
[ "def", "actionAngleTorus_xvFreqs_c", "(", "pot", ",", "jr", ",", "jphi", ",", "jz", ",", "angler", ",", "anglephi", ",", "anglez", ",", "tol", "=", "0.003", ")", ":", "#Parse the potential", "from", "galpy", ".", "orbit", ".", "integrateFullOrbit", "import", "_parse_pot", "npot", ",", "pot_type", ",", "pot_args", "=", "_parse_pot", "(", "pot", ",", "potfortorus", "=", "True", ")", "#Set up result arrays", "R", "=", "numpy", ".", "empty", "(", "len", "(", "angler", ")", ")", "vR", "=", "numpy", ".", "empty", "(", "len", "(", "angler", ")", ")", "vT", "=", "numpy", ".", "empty", "(", "len", "(", "angler", ")", ")", "z", "=", "numpy", ".", "empty", "(", "len", "(", "angler", ")", ")", "vz", "=", "numpy", ".", "empty", "(", "len", "(", "angler", ")", ")", "phi", "=", "numpy", ".", "empty", "(", "len", "(", "angler", ")", ")", "Omegar", "=", "numpy", ".", "empty", "(", "1", ")", "Omegaphi", "=", "numpy", ".", "empty", "(", "1", ")", "Omegaz", "=", "numpy", ".", "empty", "(", "1", ")", "flag", "=", "ctypes", ".", "c_int", "(", "0", ")", "#Set up the C code", "ndarrayFlags", "=", "(", "'C_CONTIGUOUS'", ",", "'WRITEABLE'", ")", "actionAngleTorus_xvFreqsFunc", "=", "_lib", ".", "actionAngleTorus_xvFreqs", "actionAngleTorus_xvFreqsFunc", ".", "argtypes", "=", "[", "ctypes", ".", "c_double", ",", "ctypes", ".", "c_double", ",", "ctypes", ".", "c_double", ",", "ctypes", ".", "c_int", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ctypes", ".", "c_int", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "int32", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ctypes", ".", "c_double", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_int", ")", "]", "#Array requirements, first store old order", "f_cont", "=", "[", "angler", ".", "flags", "[", "'F_CONTIGUOUS'", "]", ",", "anglephi", ".", "flags", "[", "'F_CONTIGUOUS'", "]", ",", "anglez", ".", "flags", "[", "'F_CONTIGUOUS'", "]", "]", "angler", "=", "numpy", ".", "require", "(", "angler", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "anglephi", "=", "numpy", ".", "require", "(", "anglephi", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "anglez", "=", "numpy", ".", "require", 
"(", "anglez", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "R", "=", "numpy", ".", "require", "(", "R", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "vR", "=", "numpy", ".", "require", "(", "vR", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "vT", "=", "numpy", ".", "require", "(", "vT", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "z", "=", "numpy", ".", "require", "(", "z", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "vz", "=", "numpy", ".", "require", "(", "vz", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "phi", "=", "numpy", ".", "require", "(", "phi", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "Omegar", "=", "numpy", ".", "require", "(", "Omegar", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "Omegaphi", "=", "numpy", ".", "require", "(", "Omegaphi", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "Omegaz", "=", "numpy", ".", "require", "(", "Omegaz", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "#Run the C code", "actionAngleTorus_xvFreqsFunc", "(", "ctypes", ".", "c_double", "(", "jr", ")", ",", "ctypes", ".", "c_double", "(", "jphi", ")", ",", "ctypes", ".", "c_double", "(", "jz", ")", ",", "ctypes", ".", "c_int", "(", "len", "(", "angler", ")", ")", ",", "angler", ",", "anglephi", ",", "anglez", ",", "ctypes", ".", "c_int", "(", "npot", ")", ",", "pot_type", ",", "pot_args", ",", "ctypes", ".", "c_double", "(", "tol", ")", ",", "R", ",", "vR", ",", "vT", ",", "z", ",", "vz", ",", "phi", ",", "Omegar", ",", "Omegaphi", ",", "Omegaz", ",", "ctypes", ".", "byref", "(", "flag", ")", ")", "#Reset input arrays", "if", "f_cont", "[", "0", "]", ":", "angler", "=", "numpy", ".", "asfortranarray", "(", "angler", ")", "if", "f_cont", "[", "1", "]", ":", "anglephi", "=", "numpy", ".", "asfortranarray", "(", "anglephi", ")", "if", "f_cont", "[", "2", "]", ":", "anglez", "=", "numpy", ".", "asfortranarray", "(", "anglez", ")", "return", "(", "R", ",", "vR", ",", "vT", ",", "z", ",", "vz", ",", "phi", ",", "Omegar", "[", "0", "]", ",", "Omegaphi", "[", "0", "]", ",", "Omegaz", "[", "0", "]", ",", "flag", ".", "value", ")" ]
NAME: actionAngleTorus_xvFreqs_c PURPOSE: compute configuration (x,v) and frequencies of a set of angles on a single torus INPUT: pot - Potential object or list thereof jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) angler - radial angle (array [N]) anglephi - azimuthal angle (array [N]) anglez - vertical angle (array [N]) tol= (0.003) goal for |dJ|/|J| along the torus OUTPUT: (R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag) HISTORY: 2015-08-05/07 - Written - Bovy (UofT)
[ "NAME", ":", "actionAngleTorus_xvFreqs_c", "PURPOSE", ":", "compute", "configuration", "(", "x", "v", ")", "and", "frequencies", "of", "a", "set", "of", "angles", "on", "a", "single", "torus", "INPUT", ":", "pot", "-", "Potential", "object", "or", "list", "thereof", "jr", "-", "radial", "action", "(", "scalar", ")", "jphi", "-", "azimuthal", "action", "(", "scalar", ")", "jz", "-", "vertical", "action", "(", "scalar", ")", "angler", "-", "radial", "angle", "(", "array", "[", "N", "]", ")", "anglephi", "-", "azimuthal", "angle", "(", "array", "[", "N", "]", ")", "anglez", "-", "vertical", "angle", "(", "array", "[", "N", "]", ")", "tol", "=", "(", "0", ".", "003", ")", "goal", "for", "|dJ|", "/", "|J|", "along", "the", "torus", "OUTPUT", ":", "(", "R", "vR", "vT", "z", "vz", "phi", "Omegar", "Omegaphi", "Omegaz", "flag", ")", "HISTORY", ":", "2015", "-", "08", "-", "05", "/", "07", "-", "Written", "-", "Bovy", "(", "UofT", ")" ]
python
train
42.941748
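The `numpy.require` idiom that dominates the wrapper above is worth seeing in isolation: it guarantees a C-contiguous, writable float64 buffer before the array is handed to C code, copying only when necessary.

# Start from a Fortran-ordered float32 array to force a conversion.
import numpy

a = numpy.asfortranarray(numpy.arange(6, dtype=numpy.float32).reshape(2, 3))
print(a.flags['C_CONTIGUOUS'])  # -> False

b = numpy.require(a, dtype=numpy.float64, requirements=['C', 'W'])
print(b.flags['C_CONTIGUOUS'], b.dtype)  # -> True float64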
guaix-ucm/numina
numina/array/interpolation.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/interpolation.py#L140-L144
def _create_p(s, h): """Parabolic derivative""" p = np.zeros_like(s) p[1:] = (s[:-1]*h[1:] + s[1:] * h[:-1]) / (h[1:] + h[:-1]) return p
[ "def", "_create_p", "(", "s", ",", "h", ")", ":", "p", "=", "np", ".", "zeros_like", "(", "s", ")", "p", "[", "1", ":", "]", "=", "(", "s", "[", ":", "-", "1", "]", "*", "h", "[", "1", ":", "]", "+", "s", "[", "1", ":", "]", "*", "h", "[", ":", "-", "1", "]", ")", "/", "(", "h", "[", "1", ":", "]", "+", "h", "[", ":", "-", "1", "]", ")", "return", "p" ]
Parabolic derivative
[ "Parabolic", "derivative" ]
python
train
32.8
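A worked example for `_create_p` above: each interior point gets the average of the two adjacent slopes, weighted by the neighbouring interval widths.

# Same vectorised expression as _create_p, on small sample arrays.
import numpy as np

s = np.array([1.0, 2.0, 4.0])  # slopes on three intervals
h = np.array([1.0, 1.0, 2.0])  # interval widths

p = np.zeros_like(s)
p[1:] = (s[:-1] * h[1:] + s[1:] * h[:-1]) / (h[1:] + h[:-1])
print(p)  # -> [0.  1.5  2.66666667]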
androguard/androguard
androguard/misc.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/misc.py#L180-L237
def clean_file_name(filename, unique=True, replace="_", force_nt=False): """ Return a filename version, which has no characters in it which are forbidden. On Windows these are for example <, /, ?, ... The intention of this function is to allow distribution of files to different OSes. :param filename: string to clean :param unique: check if the filename is already taken and append an integer to be unique (default: True) :param replace: replacement character. (default: '_') :param force_nt: Force shortening of paths like on NT systems (default: False) :return: clean string """ if re.match(r'[<>:"/\\|?* .\x00-\x1f]', replace): raise ValueError("replacement character is not allowed!") path, fname = os.path.split(filename) # For Windows see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx # Other operating systems seems to be more tolerant... # Not allowed filenames, attach replace character if necessary if re.match(r'(CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9])', fname): fname += replace # reserved characters fname = re.sub(r'[<>:"/\\|?*\x00-\x1f]', replace, fname) # Do not end with dot or space fname = re.sub(r'[ .]$', replace, fname) if force_nt or os.name == 'nt': PATH_MAX_LENGTH = 230 # give extra space for other stuff... # Check filename length limit, usually a problem on older Windows versions if len(fname) > PATH_MAX_LENGTH: if "." in fname: f, ext = fname.rsplit(".", 1) fname = "{}.{}".format(f[:PATH_MAX_LENGTH-(len(ext)+1)], ext) else: fname = fname[:PATH_MAX_LENGTH] # Special behaviour... On Windows, there is also a problem with the maximum path length in explorer.exe # maximum length is limited to 260 chars, so use 250 to have room for other stuff if len(os.path.abspath(os.path.join(path, fname))) > 250: fname = fname[:250 - (len(os.path.abspath(path)) + 1)] if unique: counter = 0 origname = fname while os.path.isfile(os.path.join(path, fname)): if "." in fname: # assume extension f, ext = origname.rsplit(".", 1) fname = "{}_{}.{}".format(f, counter, ext) else: fname = "{}_{}".format(origname, counter) counter += 1 return os.path.join(path, fname)
[ "def", "clean_file_name", "(", "filename", ",", "unique", "=", "True", ",", "replace", "=", "\"_\"", ",", "force_nt", "=", "False", ")", ":", "if", "re", ".", "match", "(", "r'[<>:\"/\\\\|?* .\\x00-\\x1f]'", ",", "replace", ")", ":", "raise", "ValueError", "(", "\"replacement character is not allowed!\"", ")", "path", ",", "fname", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "# For Windows see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx", "# Other operating systems seems to be more tolerant...", "# Not allowed filenames, attach replace character if necessary", "if", "re", ".", "match", "(", "r'(CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9])'", ",", "fname", ")", ":", "fname", "+=", "replace", "# reserved characters", "fname", "=", "re", ".", "sub", "(", "r'[<>:\"/\\\\|?*\\x00-\\x1f]'", ",", "replace", ",", "fname", ")", "# Do not end with dot or space", "fname", "=", "re", ".", "sub", "(", "r'[ .]$'", ",", "replace", ",", "fname", ")", "if", "force_nt", "or", "os", ".", "name", "==", "'nt'", ":", "PATH_MAX_LENGTH", "=", "230", "# give extra space for other stuff...", "# Check filename length limit, usually a problem on older Windows versions", "if", "len", "(", "fname", ")", ">", "PATH_MAX_LENGTH", ":", "if", "\".\"", "in", "fname", ":", "f", ",", "ext", "=", "fname", ".", "rsplit", "(", "\".\"", ",", "1", ")", "fname", "=", "\"{}.{}\"", ".", "format", "(", "f", "[", ":", "PATH_MAX_LENGTH", "-", "(", "len", "(", "ext", ")", "+", "1", ")", "]", ",", "ext", ")", "else", ":", "fname", "=", "fname", "[", ":", "PATH_MAX_LENGTH", "]", "# Special behaviour... On Windows, there is also a problem with the maximum path length in explorer.exe", "# maximum length is limited to 260 chars, so use 250 to have room for other stuff", "if", "len", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "fname", ")", ")", ")", ">", "250", ":", "fname", "=", "fname", "[", ":", "250", "-", "(", "len", "(", "os", ".", "path", ".", "abspath", "(", "path", ")", ")", "+", "1", ")", "]", "if", "unique", ":", "counter", "=", "0", "origname", "=", "fname", "while", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "fname", ")", ")", ":", "if", "\".\"", "in", "fname", ":", "# assume extension", "f", ",", "ext", "=", "origname", ".", "rsplit", "(", "\".\"", ",", "1", ")", "fname", "=", "\"{}_{}.{}\"", ".", "format", "(", "f", ",", "counter", ",", "ext", ")", "else", ":", "fname", "=", "\"{}_{}\"", ".", "format", "(", "origname", ",", "counter", ")", "counter", "+=", "1", "return", "os", ".", "path", ".", "join", "(", "path", ",", "fname", ")" ]
Return a filename version, which has no characters in it which are forbidden. On Windows these are for example <, /, ?, ... The intention of this function is to allow distribution of files to different OSes. :param filename: string to clean :param unique: check if the filename is already taken and append an integer to be unique (default: True) :param replace: replacement character. (default: '_') :param force_nt: Force shortening of paths like on NT systems (default: False) :return: clean string
[ "Return", "a", "filename", "version", "which", "has", "no", "characters", "in", "it", "which", "are", "forbidden", ".", "On", "Windows", "these", "are", "for", "example", "<", "/", "?", "..." ]
python
train
41.913793
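A hedged usage sketch for `clean_file_name` above, with the expected outputs read off the function body; run it in an empty directory so the `unique` check has nothing to collide with.

# Assumes androguard is installed and importable.
from androguard.misc import clean_file_name

print(clean_file_name('CON', unique=False))       # -> 'CON_' (reserved name)
print(clean_file_name('a<b>.txt', unique=False))  # -> 'a_b_.txt' (reserved chars)
print(clean_file_name('report.', unique=False))   # -> 'report_' (no trailing dot)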
oscarbranson/latools
latools/D_obj.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L155-L191
def setfocus(self, focus): """ Set the 'focus' attribute of the data file. The 'focus' attribute of the object points towards data from a particular stage of analysis. It is used to identify the 'working stage' of the data. Processing functions operate on the 'focus' stage, so if steps are done out of sequence, things will break. Names of analysis stages: * 'rawdata': raw data, loaded from csv file when object is initialised. * 'despiked': despiked data. * 'signal'/'background': isolated signal and background data, padded with np.nan. Created by self.separate, after signal and background regions have been identified by self.autorange. * 'bkgsub': background subtracted data, created by self.bkg_correct * 'ratios': element ratio data, created by self.ratio. * 'calibrated': ratio data calibrated to standards, created by self.calibrate. Parameters ---------- focus : str The name of the analysis stage desired. Returns ------- None """ self.focus = self.data[focus] self.focus_stage = focus self.__dict__.update(self.focus)
[ "def", "setfocus", "(", "self", ",", "focus", ")", ":", "self", ".", "focus", "=", "self", ".", "data", "[", "focus", "]", "self", ".", "focus_stage", "=", "focus", "self", ".", "__dict__", ".", "update", "(", "self", ".", "focus", ")" ]
Set the 'focus' attribute of the data file. The 'focus' attribute of the object points towards data from a particular stage of analysis. It is used to identify the 'working stage' of the data. Processing functions operate on the 'focus' stage, so if steps are done out of sequence, things will break. Names of analysis stages: * 'rawdata': raw data, loaded from csv file when object is initialised. * 'despiked': despiked data. * 'signal'/'background': isolated signal and background data, padded with np.nan. Created by self.separate, after signal and background regions have been identified by self.autorange. * 'bkgsub': background subtracted data, created by self.bkg_correct * 'ratios': element ratio data, created by self.ratio. * 'calibrated': ratio data calibrated to standards, created by self.calibrate. Parameters ---------- focus : str The name of the analysis stage desired. Returns ------- None
[ "Set", "the", "focus", "attribute", "of", "the", "data", "file", "." ]
python
test
33.972973
JnyJny/Geometry
Geometry/ellipse.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/ellipse.py#L132-L136
def yAxisIsMinor(self): ''' Returns True if the minor axis is parallel to the Y axis, boolean. ''' return min(self.radius.x, self.radius.y) == self.radius.y
[ "def", "yAxisIsMinor", "(", "self", ")", ":", "return", "min", "(", "self", ".", "radius", ".", "x", ",", "self", ".", "radius", ".", "y", ")", "==", "self", ".", "radius", ".", "y" ]
Returns True if the minor axis is parallel to the Y axis, boolean.
[ "Returns", "True", "if", "the", "minor", "axis", "is", "parallel", "to", "the", "Y", "axis", "boolean", "." ]
python
train
36.8
eng-tools/sfsimodels
sfsimodels/models/foundations.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L347-L357
def pad_position_w(self, i):
    """
    Determines the position of the ith pad in the width direction.
    Assumes equally spaced pads.

    :param i: index of the pad in the width direction (0-indexed)
    :return: position of the pad centre along the width direction
    """
    if i >= self.n_pads_w:
        raise ModelError("pad index out-of-bounds")
    return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2
[ "def", "pad_position_w", "(", "self", ",", "i", ")", ":", "if", "i", ">=", "self", ".", "n_pads_w", ":", "raise", "ModelError", "(", "\"pad index out-of-bounds\"", ")", "return", "(", "self", ".", "width", "-", "self", ".", "pad_width", ")", "/", "(", "self", ".", "n_pads_w", "-", "1", ")", "*", "i", "+", "self", ".", "pad_width", "/", "2" ]
Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: index of the pad in the width direction (0-indexed) :return: position of the pad centre along the width direction
[ "Determines", "the", "position", "of", "the", "ith", "pad", "in", "the", "width", "direction", ".", "Assumes", "equally", "spaced", "pads", "." ]
python
train
37.636364
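A worked example of the spacing formula in `pad_position_w` above: a hypothetical 10 m wide foundation carrying three 1 m pads across its width.

# Pad centres for width=10, pad_width=1, n_pads_w=3 (hypothetical values).
width, pad_width, n_pads_w = 10.0, 1.0, 3

for i in range(n_pads_w):
    pos = (width - pad_width) / (n_pads_w - 1) * i + pad_width / 2
    print(i, pos)  # -> 0 0.5, 1 5.0, 2 9.5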
facelessuser/backrefs
backrefs/_bregex_parse.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bregex_parse.py#L1290-L1332
def expand(self, m): """Using the template, expand the string.""" if m is None: raise ValueError("Match is None!") sep = m.string[:0] if isinstance(sep, bytes) != self._bytes: raise TypeError('Match string type does not match expander string type!') text = [] # Expand string for x in range(0, len(self.literals)): index = x l = self.literals[x] if l is None: g_index = self._get_group_index(index) span_case, single_case, capture = self._get_group_attributes(index) if not self.use_format: # Non format replace try: l = m.group(g_index) except IndexError: # pragma: no cover raise IndexError("'%d' is out of range!" % capture) else: # String format replace try: obj = m.captures(g_index) except IndexError: # pragma: no cover raise IndexError("'%d' is out of range!" % g_index) l = _util.format_string(m, obj, capture, self._bytes) if span_case is not None: if span_case == _LOWER: l = l.lower() else: l = l.upper() if single_case is not None: if single_case == _LOWER: l = l[0:1].lower() + l[1:] else: l = l[0:1].upper() + l[1:] text.append(l) return sep.join(text)
[ "def", "expand", "(", "self", ",", "m", ")", ":", "if", "m", "is", "None", ":", "raise", "ValueError", "(", "\"Match is None!\"", ")", "sep", "=", "m", ".", "string", "[", ":", "0", "]", "if", "isinstance", "(", "sep", ",", "bytes", ")", "!=", "self", ".", "_bytes", ":", "raise", "TypeError", "(", "'Match string type does not match expander string type!'", ")", "text", "=", "[", "]", "# Expand string", "for", "x", "in", "range", "(", "0", ",", "len", "(", "self", ".", "literals", ")", ")", ":", "index", "=", "x", "l", "=", "self", ".", "literals", "[", "x", "]", "if", "l", "is", "None", ":", "g_index", "=", "self", ".", "_get_group_index", "(", "index", ")", "span_case", ",", "single_case", ",", "capture", "=", "self", ".", "_get_group_attributes", "(", "index", ")", "if", "not", "self", ".", "use_format", ":", "# Non format replace", "try", ":", "l", "=", "m", ".", "group", "(", "g_index", ")", "except", "IndexError", ":", "# pragma: no cover", "raise", "IndexError", "(", "\"'%d' is out of range!\"", "%", "capture", ")", "else", ":", "# String format replace", "try", ":", "obj", "=", "m", ".", "captures", "(", "g_index", ")", "except", "IndexError", ":", "# pragma: no cover", "raise", "IndexError", "(", "\"'%d' is out of range!\"", "%", "g_index", ")", "l", "=", "_util", ".", "format_string", "(", "m", ",", "obj", ",", "capture", ",", "self", ".", "_bytes", ")", "if", "span_case", "is", "not", "None", ":", "if", "span_case", "==", "_LOWER", ":", "l", "=", "l", ".", "lower", "(", ")", "else", ":", "l", "=", "l", ".", "upper", "(", ")", "if", "single_case", "is", "not", "None", ":", "if", "single_case", "==", "_LOWER", ":", "l", "=", "l", "[", "0", ":", "1", "]", ".", "lower", "(", ")", "+", "l", "[", "1", ":", "]", "else", ":", "l", "=", "l", "[", "0", ":", "1", "]", ".", "upper", "(", ")", "+", "l", "[", "1", ":", "]", "text", ".", "append", "(", "l", ")", "return", "sep", ".", "join", "(", "text", ")" ]
Using the template, expand the string.
[ "Using", "the", "template", "expand", "the", "string", "." ]
python
train
38.930233
ChristianTremblay/BAC0
BAC0/core/functions/GetIPAddr.py
https://github.com/ChristianTremblay/BAC0/blob/8d95b065ea068524a08f5b0c34322ebeeba95d06/BAC0/core/functions/GetIPAddr.py#L101-L150
def _findSubnetMask(self, ip):
    """
    Retrieve the subnet mask associated with the given IP address by
    parsing the output of ipconfig (Windows) or ifconfig (Linux or MAC).

    :param ip: (str) IP address of the interface to look up
    :returns: subnet mask as a string (e.g. '255.255.255.0')
    """
    ip = ip
    if "win32" in sys.platform:
        try:
            proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE)
            while True:
                line = proc.stdout.readline()
                if ip.encode() in line:
                    break
            mask = (
                proc.stdout.readline()
                .rstrip()
                .split(b":")[-1]
                .replace(b" ", b"")
                .decode()
            )
        except:
            raise NetworkInterfaceException("Cannot read IP parameters from OS")

    else:
        """
        This procedure could use more direct way of obtaining the broadcast IP
        as it is really simple in Unix
        ifconfig gives Bcast directly for example
        or use something like :
        iface = "eth0"
        socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24])
        """
        pattern = re.compile(r"(255.\d{1,3}.\d{1,3}.\d{1,3})")
        try:
            proc = subprocess.Popen("ifconfig", stdout=subprocess.PIPE)
            while True:
                line = proc.stdout.readline()
                if ip.encode() in line:
                    break
            mask = re.findall(pattern, line.decode())[0]
        except:
            mask = "255.255.255.255"
    # self._log.debug('Mask found : %s' % mask)
    return mask
[ "def", "_findSubnetMask", "(", "self", ",", "ip", ")", ":", "ip", "=", "ip", "if", "\"win32\"", "in", "sys", ".", "platform", ":", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "\"ipconfig\"", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "while", "True", ":", "line", "=", "proc", ".", "stdout", ".", "readline", "(", ")", "if", "ip", ".", "encode", "(", ")", "in", "line", ":", "break", "mask", "=", "(", "proc", ".", "stdout", ".", "readline", "(", ")", ".", "rstrip", "(", ")", ".", "split", "(", "b\":\"", ")", "[", "-", "1", "]", ".", "replace", "(", "b\" \"", ",", "b\"\"", ")", ".", "decode", "(", ")", ")", "except", ":", "raise", "NetworkInterfaceException", "(", "\"Cannot read IP parameters from OS\"", ")", "else", ":", "\"\"\"\n This procedure could use more direct way of obtaining the broadcast IP\n as it is really simple in Unix\n ifconfig gives Bcast directly for example\n or use something like :\n iface = \"eth0\"\n socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24])\n \"\"\"", "pattern", "=", "re", ".", "compile", "(", "r\"(255.\\d{1,3}.\\d{1,3}.\\d{1,3})\"", ")", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "\"ifconfig\"", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "while", "True", ":", "line", "=", "proc", ".", "stdout", ".", "readline", "(", ")", "if", "ip", ".", "encode", "(", ")", "in", "line", ":", "break", "mask", "=", "re", ".", "findall", "(", "pattern", ",", "line", ".", "decode", "(", ")", ")", "[", "0", "]", "except", ":", "mask", "=", "\"255.255.255.255\"", "# self._log.debug('Mask found : %s' % mask)", "return", "mask" ]
Retrieve the subnet mask associated with the given IP address by
parsing the output of ipconfig (Windows) or ifconfig (Linux or MAC).

:param ip: (str) IP address of the interface to look up
:returns: subnet mask as a string (e.g. '255.255.255.0')
[ "Retrieve", "the", "broadcast", "IP", "address", "connected", "to", "internet", "...", "used", "as", "a", "default", "IP", "address", "when", "defining", "Script" ]
python
train
39.28
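The mask-extraction regex from the Unix branch of `_findSubnetMask` above can be tried on a canned line of ifconfig-style output, with no subprocess involved:

# Canned ifconfig-style line; the regex is copied from the method above.
import re

pattern = re.compile(r"(255.\d{1,3}.\d{1,3}.\d{1,3})")
line = "inet 192.168.1.10  netmask 255.255.255.0  broadcast 192.168.1.255"
print(re.findall(pattern, line)[0])  # -> '255.255.255.0'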