Dataset columns:
    text          string, lengths 89 to 104k
    code_tokens   list
    avg_line_len  float64, 7.91 to 980
    score         float64, 0 to 630
def update_fallbackserver(self, serverid, data):
    """Update a fallback server"""
    return self.api_call(
        ENDPOINTS['fallbackservers']['update'],
        dict(serverid=serverid),
        body=data)
[ "def", "update_fallbackserver", "(", "self", ",", "serverid", ",", "data", ")", ":", "return", "self", ".", "api_call", "(", "ENDPOINTS", "[", "'fallbackservers'", "]", "[", "'update'", "]", ",", "dict", "(", "serverid", "=", "serverid", ")", ",", "body", "=", "data", ")" ]
37
8.666667
def _extractReporterIons(ionArrays, reporterMz, mzTolerance):
    """Find reporter ions and return their mz and intensity values.

    Expected reporter mz values are searched in "ionArray['mz']" and reported
    if the observed relative deviation is less than specified by
    "mzTolerance". In the case of multiple matches, the one with the minimal
    deviation is picked. If no matching entries are found, numpy.nan is
    returned for the mz value and an intensity of 0. The returned arrays are
    in the order of "reporterMz" values.

    :param ionArrays: a dictionary containing two numpy arrays of equal size,
        {"i": an array of ion intensities, "mz": an array of ion mz values}
    :param reporterMz: a list of reporter mz values
    :param mzTolerance: maximum allowed relative mz deviation
    :returns: {'mz': numpy.array(), 'i': numpy.array()}
    """
    reporterIons = {'mz': [], 'i': []}
    for reporterMzValue in reporterMz:
        limHi = reporterMzValue * (1 + mzTolerance)
        limLo = reporterMzValue * (1 - mzTolerance)
        loPos = bisect.bisect_left(ionArrays['mz'], limLo)
        upPos = bisect.bisect_right(ionArrays['mz'], limHi)
        matchingValues = ionArrays['mz'][loPos:upPos]
        if matchingValues.size == 0:
            reporterIons['i'].append(0)
            reporterIons['mz'].append(numpy.nan)
        elif matchingValues.size == 1:
            reporterIons['i'].append(ionArrays['i'][loPos])
            reporterIons['mz'].append(ionArrays['mz'][loPos])
        else:
            mzDeviations = numpy.abs(matchingValues - reporterMzValue)
            minDeviationPos = numpy.argmin(mzDeviations)
            bestMatchArrayPos = range(loPos, upPos)[minDeviationPos]
            reporterIons['i'].append(ionArrays['i'][bestMatchArrayPos])
            reporterIons['mz'].append(ionArrays['mz'][bestMatchArrayPos])

    reporterIons['mz'] = numpy.array(reporterIons['mz'],
                                     dtype=ionArrays['mz'].dtype)
    reporterIons['i'] = numpy.array(reporterIons['i'],
                                    dtype=ionArrays['i'].dtype)

    return reporterIons
[ "def", "_extractReporterIons", "(", "ionArrays", ",", "reporterMz", ",", "mzTolerance", ")", ":", "reporterIons", "=", "{", "'mz'", ":", "[", "]", ",", "'i'", ":", "[", "]", "}", "for", "reporterMzValue", "in", "reporterMz", ":", "limHi", "=", "reporterMzValue", "*", "(", "1", "+", "mzTolerance", ")", "limLo", "=", "reporterMzValue", "*", "(", "1", "-", "mzTolerance", ")", "loPos", "=", "bisect", ".", "bisect_left", "(", "ionArrays", "[", "'mz'", "]", ",", "limLo", ")", "upPos", "=", "bisect", ".", "bisect_right", "(", "ionArrays", "[", "'mz'", "]", ",", "limHi", ")", "matchingValues", "=", "ionArrays", "[", "'mz'", "]", "[", "loPos", ":", "upPos", "]", "if", "matchingValues", ".", "size", "==", "0", ":", "reporterIons", "[", "'i'", "]", ".", "append", "(", "0", ")", "reporterIons", "[", "'mz'", "]", ".", "append", "(", "numpy", ".", "nan", ")", "elif", "matchingValues", ".", "size", "==", "1", ":", "reporterIons", "[", "'i'", "]", ".", "append", "(", "ionArrays", "[", "'i'", "]", "[", "loPos", "]", ")", "reporterIons", "[", "'mz'", "]", ".", "append", "(", "ionArrays", "[", "'mz'", "]", "[", "loPos", "]", ")", "else", ":", "mzDeviations", "=", "numpy", ".", "abs", "(", "matchingValues", "-", "reporterMzValue", ")", "minDeviationPos", "=", "numpy", ".", "argmin", "(", "mzDeviations", ")", "bestMatchArrayPos", "=", "range", "(", "loPos", ",", "upPos", ")", "[", "minDeviationPos", "]", "reporterIons", "[", "'i'", "]", ".", "append", "(", "ionArrays", "[", "'i'", "]", "[", "bestMatchArrayPos", "]", ")", "reporterIons", "[", "'mz'", "]", ".", "append", "(", "ionArrays", "[", "'mz'", "]", "[", "bestMatchArrayPos", "]", ")", "reporterIons", "[", "'mz'", "]", "=", "numpy", ".", "array", "(", "reporterIons", "[", "'mz'", "]", ",", "dtype", "=", "ionArrays", "[", "'mz'", "]", ".", "dtype", ")", "reporterIons", "[", "'i'", "]", "=", "numpy", ".", "array", "(", "reporterIons", "[", "'i'", "]", ",", "dtype", "=", "ionArrays", "[", "'i'", "]", ".", "dtype", ")", "return", "reporterIons" ]
48.266667
21.066667
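A minimal usage sketch for the _extractReporterIons row above; the spectrum values and the 20 ppm tolerance are invented for illustration, and the bisect/numpy imports are assumed to match what the function body uses:

import bisect  # used inside _extractReporterIons
import numpy

# Hypothetical centroided spectrum; 'mz' must be sorted for bisect to work.
ionArrays = {'mz': numpy.array([126.1277, 127.1311, 128.1344]),
             'i': numpy.array([1500.0, 980.0, 2100.0])}
reporterMz = [126.127726, 127.124761, 128.134436]

result = _extractReporterIons(ionArrays, reporterMz, mzTolerance=20e-6)
# result['mz'][1] is numpy.nan and result['i'][1] is 0: the second reporter
# has no peak within +/- 20 ppm; the other two match their nearest peak.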
def add_chapter(self, title):
    '''
    Adds a new chapter to the report.

    :param str title: Title of the chapter.
    '''
    chap_id = 'chap%s' % self.chap_counter
    self.chap_counter += 1

    self.sidebar += '<a href="#%s" class="list-group-item">%s</a>\n' % (
        chap_id, title)
    self.body += '<h1 id="%s">%s</h1>\n' % (chap_id, title)
[ "def", "add_chapter", "(", "self", ",", "title", ")", ":", "chap_id", "=", "'chap%s'", "%", "self", ".", "chap_counter", "self", ".", "chap_counter", "+=", "1", "self", ".", "sidebar", "+=", "'<a href=\"#%s\" class=\"list-group-item\">%s</a>\\n'", "%", "(", "chap_id", ",", "title", ")", "self", ".", "body", "+=", "'<h1 id=\"%s\">%s</h1>\\n'", "%", "(", "chap_id", ",", "title", ")" ]
34.636364
18.636364
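A hypothetical usage sketch; the Report class name is assumed, since the source shows only the method:

report = Report()                        # assumed host class exposing add_chapter()
report.add_chapter("Executive Summary")  # becomes chap0
report.add_chapter("Results")            # becomes chap1
# Each call appends a sidebar link <a href="#chapN"> and a matching
# <h1 id="chapN"> heading, so anchors and headings stay in sync.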
def items(self, limit=10000):
    """ Return all of the Items and Collections for this search """
    _limit = 500
    if 'ids' in self.kwargs:
        col = self.kwargs.get('query', {}).get('collection', {}).get('eq', None)
        if col is None:
            raise SatSearchError('Collection required when searching by id')
        return self.items_by_id(self.kwargs['ids'], col)

    items = []
    found = self.found()
    if found > limit:
        logger.warning('There are more items found (%s) than the limit (%s) provided.' % (found, limit))
    maxitems = min(found, limit)
    kwargs = {
        'page': 1,
        'limit': min(_limit, maxitems)
    }
    kwargs.update(self.kwargs)
    while len(items) < maxitems:
        items += [Item(i) for i in self.query(**kwargs)['features']]
        kwargs['page'] += 1

    # retrieve collections
    collections = []
    for c in set([item.properties['collection'] for item in items
                  if 'collection' in item.properties]):
        collections.append(self.collection(c))
        #del collections[c]['links']

    # merge collections into items
    #_items = []
    #for item in items:
    #    import pdb; pdb.set_trace()
    #    if 'collection' in item['properties']:
    #        item = dict_merge(item, collections[item['properties']['collection']])
    #    _items.append(Item(item))

    return Items(items, collections=collections, search=self.kwargs)
[ "def", "items", "(", "self", ",", "limit", "=", "10000", ")", ":", "_limit", "=", "500", "if", "'ids'", "in", "self", ".", "kwargs", ":", "col", "=", "self", ".", "kwargs", ".", "get", "(", "'query'", ",", "{", "}", ")", ".", "get", "(", "'collection'", ",", "{", "}", ")", ".", "get", "(", "'eq'", ",", "None", ")", "if", "col", "is", "None", ":", "raise", "SatSearchError", "(", "'Collection required when searching by id'", ")", "return", "self", ".", "items_by_id", "(", "self", ".", "kwargs", "[", "'ids'", "]", ",", "col", ")", "items", "=", "[", "]", "found", "=", "self", ".", "found", "(", ")", "if", "found", ">", "limit", ":", "logger", ".", "warning", "(", "'There are more items found (%s) than the limit (%s) provided.'", "%", "(", "found", ",", "limit", ")", ")", "maxitems", "=", "min", "(", "found", ",", "limit", ")", "kwargs", "=", "{", "'page'", ":", "1", ",", "'limit'", ":", "min", "(", "_limit", ",", "maxitems", ")", "}", "kwargs", ".", "update", "(", "self", ".", "kwargs", ")", "while", "len", "(", "items", ")", "<", "maxitems", ":", "items", "+=", "[", "Item", "(", "i", ")", "for", "i", "in", "self", ".", "query", "(", "*", "*", "kwargs", ")", "[", "'features'", "]", "]", "kwargs", "[", "'page'", "]", "+=", "1", "# retrieve collections", "collections", "=", "[", "]", "for", "c", "in", "set", "(", "[", "item", ".", "properties", "[", "'collection'", "]", "for", "item", "in", "items", "if", "'collection'", "in", "item", ".", "properties", "]", ")", ":", "collections", ".", "append", "(", "self", ".", "collection", "(", "c", ")", ")", "#del collections[c]['links']", "# merge collections into items", "#_items = []", "#for item in items:", "# import pdb; pdb.set_trace()", "# if 'collection' in item['properties']:", "# item = dict_merge(item, collections[item['properties']['collection']])", "# _items.append(Item(item))", "return", "Items", "(", "items", ",", "collections", "=", "collections", ",", "search", "=", "self", ".", "kwargs", ")" ]
39.631579
20.815789
def _start_primary(self):
    """Start as the primary"""
    self.em.start()
    self.em.set_secondary_state(_STATE_RUNNING)
    self._set_shared_instances()
[ "def", "_start_primary", "(", "self", ")", ":", "self", ".", "em", ".", "start", "(", ")", "self", ".", "em", ".", "set_secondary_state", "(", "_STATE_RUNNING", ")", "self", ".", "_set_shared_instances", "(", ")" ]
33.8
9.4
async def process_graph_input(graph, stream, value, rpc_executor):
    """Process an input through this sensor graph.

    The tick information in value should be correct and is transferred
    to all results produced by nodes acting on this tick.

    This coroutine is an asyncio compatible version of
    SensorGraph.process_input()

    Args:
        graph (SensorGraph): The sensor graph to process the input through
        stream (DataStream): The stream the input is part of
        value (IOTileReading): The value to process
        rpc_executor (RPCExecutor): An object capable of executing RPCs
            in case we need to do that.
    """
    graph.sensor_log.push(stream, value)

    # FIXME: This should be specified in our device model
    if stream.important:
        associated_output = stream.associated_stream()
        graph.sensor_log.push(associated_output, value)

    to_check = deque([x for x in graph.roots])

    while len(to_check) > 0:
        node = to_check.popleft()
        if node.triggered():
            try:
                results = node.process(rpc_executor, graph.mark_streamer)
                for result in results:
                    if inspect.iscoroutine(result.value):
                        result.value = await asyncio.ensure_future(result.value)

                    result.raw_time = value.raw_time
                    graph.sensor_log.push(node.stream, result)
            except:
                logging.getLogger(__name__).exception("Unhandled exception in graph node processing function for node %s", str(node))

            # If we generated any outputs, notify our downstream nodes
            # so that they are also checked to see if they should run.
            if len(results) > 0:
                to_check.extend(node.outputs)
[ "async", "def", "process_graph_input", "(", "graph", ",", "stream", ",", "value", ",", "rpc_executor", ")", ":", "graph", ".", "sensor_log", ".", "push", "(", "stream", ",", "value", ")", "# FIXME: This should be specified in our device model", "if", "stream", ".", "important", ":", "associated_output", "=", "stream", ".", "associated_stream", "(", ")", "graph", ".", "sensor_log", ".", "push", "(", "associated_output", ",", "value", ")", "to_check", "=", "deque", "(", "[", "x", "for", "x", "in", "graph", ".", "roots", "]", ")", "while", "len", "(", "to_check", ")", ">", "0", ":", "node", "=", "to_check", ".", "popleft", "(", ")", "if", "node", ".", "triggered", "(", ")", ":", "try", ":", "results", "=", "node", ".", "process", "(", "rpc_executor", ",", "graph", ".", "mark_streamer", ")", "for", "result", "in", "results", ":", "if", "inspect", ".", "iscoroutine", "(", "result", ".", "value", ")", ":", "result", ".", "value", "=", "await", "asyncio", ".", "ensure_future", "(", "result", ".", "value", ")", "result", ".", "raw_time", "=", "value", ".", "raw_time", "graph", ".", "sensor_log", ".", "push", "(", "node", ".", "stream", ",", "result", ")", "except", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "exception", "(", "\"Unhandled exception in graph node processing function for node %s\"", ",", "str", "(", "node", ")", ")", "# If we generated any outputs, notify our downstream nodes", "# so that they are also checked to see if they should run.", "if", "len", "(", "results", ")", ">", "0", ":", "to_check", ".", "extend", "(", "node", ".", "outputs", ")" ]
40.829268
23.536585
def update_event_hub(self, hub_name, hub=None):
    '''
    Updates an Event Hub.

    hub_name:
        Name of event hub.
    hub:
        Optional. Event hub properties. Instance of EventHub class.
    hub.message_retention_in_days:
        Number of days to retain the events for this Event Hub.
    '''
    _validate_not_none('hub_name', hub_name)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host = self._get_host()
    request.path = '/' + _str(hub_name) + '?api-version=2014-01'
    request.body = _get_request_body(_convert_event_hub_to_xml(hub))
    request.path, request.query = self._httpclient._update_request_uri_query(request)  # pylint: disable=protected-access
    request.headers.append(('If-Match', '*'))
    request.headers = self._update_service_bus_header(request)
    response = self._perform_request(request)

    return _convert_response_to_event_hub(response)
[ "def", "update_event_hub", "(", "self", ",", "hub_name", ",", "hub", "=", "None", ")", ":", "_validate_not_none", "(", "'hub_name'", ",", "hub_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'PUT'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "'/'", "+", "_str", "(", "hub_name", ")", "+", "'?api-version=2014-01'", "request", ".", "body", "=", "_get_request_body", "(", "_convert_event_hub_to_xml", "(", "hub", ")", ")", "request", ".", "path", ",", "request", ".", "query", "=", "self", ".", "_httpclient", ".", "_update_request_uri_query", "(", "request", ")", "# pylint: disable=protected-access", "request", ".", "headers", ".", "append", "(", "(", "'If-Match'", ",", "'*'", ")", ")", "request", ".", "headers", "=", "self", ".", "_update_service_bus_header", "(", "request", ")", "response", "=", "self", ".", "_perform_request", "(", "request", ")", "return", "_convert_response_to_event_hub", "(", "response", ")" ]
41.956522
22.130435
def dof(self, index=None):
    """The number of degrees of freedom"""
    if index is None:
        dof = 0
        for i in range(self.len):
            dof += self.A[i].shape[0] * self.F[i].shape[1]
        return dof
    else:
        return self.A[index].shape[0] * self.F[index].shape[1]
[ "def", "dof", "(", "self", ",", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "dof", "=", "0", "for", "i", "in", "range", "(", "self", ".", "len", ")", ":", "dof", "+=", "self", ".", "A", "[", "i", "]", ".", "shape", "[", "0", "]", "*", "self", ".", "F", "[", "i", "]", ".", "shape", "[", "1", "]", "return", "dof", "else", ":", "return", "self", ".", "A", "[", "index", "]", ".", "shape", "[", "0", "]", "*", "self", ".", "F", "[", "index", "]", ".", "shape", "[", "1", "]" ]
35.111111
16.222222
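A worked sketch of both code paths in dof; the host object and matrix shapes are made up, and the method is attached manually just for the demonstration:

import numpy

class _Host(object):
    pass

_Host.dof = dof                   # attach the method defined above
host = _Host()
host.A = [numpy.zeros((2, 4)), numpy.zeros((3, 4))]
host.F = [numpy.zeros((4, 5)), numpy.zeros((4, 6))]
host.len = 2

print(host.dof())         # 2*5 + 3*6 = 28 -> summed over all blocks
print(host.dof(index=1))  # 3*6 = 18 -> a single block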
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Read the data encoding the GetAttributes response payload and decode
    it into its constituent parts.

    Args:
        input_buffer (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.
    """
    super(GetAttributesResponsePayload, self).read(
        input_buffer,
        kmip_version=kmip_version
    )
    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))

    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):
        unique_identifier = primitives.TextString(
            tag=enums.Tags.UNIQUE_IDENTIFIER
        )
        unique_identifier.read(local_buffer, kmip_version=kmip_version)
        self.unique_identifier = unique_identifier.value
    else:
        raise exceptions.InvalidKmipEncoding(
            "The GetAttributes response payload encoding is missing the "
            "unique identifier."
        )

    if kmip_version < enums.KMIPVersion.KMIP_2_0:
        self._attributes = list()
        while self.is_tag_next(enums.Tags.ATTRIBUTE, local_buffer):
            attribute = objects.Attribute()
            attribute.read(local_buffer, kmip_version=kmip_version)
            self._attributes.append(attribute)
    else:
        if self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):
            attributes = objects.Attributes()
            attributes.read(local_buffer, kmip_version=kmip_version)
            # TODO (ph) Add a new utility to avoid using TemplateAttributes
            temp_attr = objects.convert_attributes_to_template_attribute(
                attributes
            )
            self._attributes = temp_attr.attributes
        else:
            raise exceptions.InvalidKmipEncoding(
                "The GetAttributes response payload encoding is missing "
                "the attributes structure."
            )

    self.is_oversized(local_buffer)
[ "def", "read", "(", "self", ",", "input_buffer", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "super", "(", "GetAttributesResponsePayload", ",", "self", ")", ".", "read", "(", "input_buffer", ",", "kmip_version", "=", "kmip_version", ")", "local_buffer", "=", "utils", ".", "BytearrayStream", "(", "input_buffer", ".", "read", "(", "self", ".", "length", ")", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "UNIQUE_IDENTIFIER", ",", "local_buffer", ")", ":", "unique_identifier", "=", "primitives", ".", "TextString", "(", "tag", "=", "enums", ".", "Tags", ".", "UNIQUE_IDENTIFIER", ")", "unique_identifier", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "unique_identifier", "=", "unique_identifier", ".", "value", "else", ":", "raise", "exceptions", ".", "InvalidKmipEncoding", "(", "\"The GetAttributes response payload encoding is missing the \"", "\"unique identifier.\"", ")", "if", "kmip_version", "<", "enums", ".", "KMIPVersion", ".", "KMIP_2_0", ":", "self", ".", "_attributes", "=", "list", "(", ")", "while", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "ATTRIBUTE", ",", "local_buffer", ")", ":", "attribute", "=", "objects", ".", "Attribute", "(", ")", "attribute", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "_attributes", ".", "append", "(", "attribute", ")", "else", ":", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "ATTRIBUTES", ",", "local_buffer", ")", ":", "attributes", "=", "objects", ".", "Attributes", "(", ")", "attributes", ".", "read", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "# TODO (ph) Add a new utility to avoid using TemplateAttributes", "temp_attr", "=", "objects", ".", "convert_attributes_to_template_attribute", "(", "attributes", ")", "self", ".", "_attributes", "=", "temp_attr", ".", "attributes", "else", ":", "raise", "exceptions", ".", "InvalidKmipEncoding", "(", "\"The GetAttributes response payload encoding is missing \"", "\"the attributes structure.\"", ")", "self", ".", "is_oversized", "(", "local_buffer", ")" ]
43.811321
22
async def send_message(self, segments, image_file=None, image_id=None,
                       image_user_id=None):
    """Send a message to this conversation.

    A per-conversation lock is acquired to ensure that messages are sent in
    the correct order when this method is called multiple times
    asynchronously.

    Args:
        segments: List of :class:`.ChatMessageSegment` objects to include
            in the message.
        image_file: (optional) File-like object containing an image to be
            attached to the message.
        image_id: (optional) ID of a Picasa photo to be attached to the
            message. If you specify both ``image_file`` and ``image_id``
            together, ``image_file`` takes precedence and ``image_id`` will
            be ignored.
        image_user_id: (optional) Picasa user ID, required only if
            ``image_id`` refers to an image from a different Picasa user,
            such as Google's sticker user.

    Raises:
        .NetworkError: If the message cannot be sent.
    """
    async with self._send_message_lock:
        if image_file:
            try:
                uploaded_image = await self._client.upload_image(
                    image_file, return_uploaded_image=True
                )
            except exceptions.NetworkError as e:
                logger.warning('Failed to upload image: {}'.format(e))
                raise
            image_id = uploaded_image.image_id
        try:
            request = hangouts_pb2.SendChatMessageRequest(
                request_header=self._client.get_request_header(),
                event_request_header=self._get_event_request_header(),
                message_content=hangouts_pb2.MessageContent(
                    segment=[seg.serialize() for seg in segments],
                ),
            )
            if image_id is not None:
                request.existing_media.photo.photo_id = image_id
            if image_user_id is not None:
                request.existing_media.photo.user_id = image_user_id
                request.existing_media.photo.is_custom_user_id = True
            await self._client.send_chat_message(request)
        except exceptions.NetworkError as e:
            logger.warning('Failed to send message: {}'.format(e))
            raise
[ "async", "def", "send_message", "(", "self", ",", "segments", ",", "image_file", "=", "None", ",", "image_id", "=", "None", ",", "image_user_id", "=", "None", ")", ":", "async", "with", "self", ".", "_send_message_lock", ":", "if", "image_file", ":", "try", ":", "uploaded_image", "=", "await", "self", ".", "_client", ".", "upload_image", "(", "image_file", ",", "return_uploaded_image", "=", "True", ")", "except", "exceptions", ".", "NetworkError", "as", "e", ":", "logger", ".", "warning", "(", "'Failed to upload image: {}'", ".", "format", "(", "e", ")", ")", "raise", "image_id", "=", "uploaded_image", ".", "image_id", "try", ":", "request", "=", "hangouts_pb2", ".", "SendChatMessageRequest", "(", "request_header", "=", "self", ".", "_client", ".", "get_request_header", "(", ")", ",", "event_request_header", "=", "self", ".", "_get_event_request_header", "(", ")", ",", "message_content", "=", "hangouts_pb2", ".", "MessageContent", "(", "segment", "=", "[", "seg", ".", "serialize", "(", ")", "for", "seg", "in", "segments", "]", ",", ")", ",", ")", "if", "image_id", "is", "not", "None", ":", "request", ".", "existing_media", ".", "photo", ".", "photo_id", "=", "image_id", "if", "image_user_id", "is", "not", "None", ":", "request", ".", "existing_media", ".", "photo", ".", "user_id", "=", "image_user_id", "request", ".", "existing_media", ".", "photo", ".", "is_custom_user_id", "=", "True", "await", "self", ".", "_client", ".", "send_chat_message", "(", "request", ")", "except", "exceptions", ".", "NetworkError", "as", "e", ":", "logger", ".", "warning", "(", "'Failed to send message: {}'", ".", "format", "(", "e", ")", ")", "raise" ]
47.823529
22.490196
def get_clan(self, tag: crtag, timeout: int=None):
    """Get information about a clan

    Parameters
    ----------
    tag: str
        A valid clan tag. Minimum length: 3
        Valid characters: 0289PYLQGRJCUV
    timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    url = self.api.CLAN + '/' + tag
    return self._get_model(url, FullClan, timeout=timeout)
[ "def", "get_clan", "(", "self", ",", "tag", ":", "crtag", ",", "timeout", ":", "int", "=", "None", ")", ":", "url", "=", "self", ".", "api", ".", "CLAN", "+", "'/'", "+", "tag", "return", "self", ".", "_get_model", "(", "url", ",", "FullClan", ",", "timeout", "=", "timeout", ")" ]
34.153846
13.692308
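A hypothetical call against get_clan; the client construction and the clan tag are illustrative, not from the source:

client = Client(token="my-api-token")        # assumed constructor
clan = client.get_clan("2CCCP")              # returns a FullClan model
clan = client.get_clan("2CCCP", timeout=5)   # per-request timeout override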
def get_object(self, pid=None, type=None, create=None):
    """
    Initialize a single object from Fedora, or create a new one, with the
    same Fedora configuration and credentials.

    :param pid: pid of the object to request, or a function that can be
                called to get one. if not specified, :meth:`get_next_pid`
                will be called if a pid is needed
    :param type: type of object to return; defaults to :class:`DigitalObject`
    :param create: boolean: create a new object? (if not specified, defaults
                   to False when pid is specified, and True when it is not)
    :rtype: single object of the type specified
    """
    objtype = type or self.default_object_type

    if pid is None:
        if create is None:
            create = True
    else:
        if create is None:
            create = False

    return objtype(self.api, pid, create,
                   default_pidspace=self.default_pidspace)
[ "def", "get_object", "(", "self", ",", "pid", "=", "None", ",", "type", "=", "None", ",", "create", "=", "None", ")", ":", "objtype", "=", "type", "or", "self", ".", "default_object_type", "if", "pid", "is", "None", ":", "if", "create", "is", "None", ":", "create", "=", "True", "else", ":", "if", "create", "is", "None", ":", "create", "=", "False", "return", "objtype", "(", "self", ".", "api", ",", "pid", ",", "create", ",", "default_pidspace", "=", "self", ".", "default_pidspace", ")" ]
41.666667
21.166667
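A sketch of how the create default flips with pid, assuming a repo object exposing get_object (the pids are invented):

obj = repo.get_object("demo:1")    # pid given -> create defaults to False
new_obj = repo.get_object()        # no pid -> create defaults to True;
                                   # the pid comes from get_next_pid()
forced = repo.get_object("demo:2", create=True)  # explicit flag wins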
def sum(self):
    """Sum the values of each non-grouping column within each group.

    Returns:
        GroupedDataFrameWeld: the grouped sums
    """
    return GroupedDataFrameWeld(
        grizzly_impl.groupby_sum(
            self.columns,
            self.column_types,
            self.grouping_columns,
            self.grouping_column_types
        ),
        self.grouping_column_names,
        self.column_names,
        self.grouping_column_types,
        self.column_types
    )
[ "def", "sum", "(", "self", ")", ":", "return", "GroupedDataFrameWeld", "(", "grizzly_impl", ".", "groupby_sum", "(", "self", ".", "columns", ",", "self", ".", "column_types", ",", "self", ".", "grouping_columns", ",", "self", ".", "grouping_column_types", ")", ",", "self", ".", "grouping_column_names", ",", "self", ".", "column_names", ",", "self", ".", "grouping_column_types", ",", "self", ".", "column_types", ")" ]
25.777778
11.611111
def _init_dates(self):
    """Initialize all dates properties
    """
    if self.total_transactions == 0:
        return None

    self.epoch_start = Result.select(Result.epoch).order_by(Result.epoch.asc()).limit(1).get().epoch
    self.epoch_finish = Result.select(Result.epoch).order_by(Result.epoch.desc()).limit(1).get().epoch
    self.start_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_start))
    self.finish_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_finish))
[ "def", "_init_dates", "(", "self", ")", ":", "if", "self", ".", "total_transactions", "==", "0", ":", "return", "None", "self", ".", "epoch_start", "=", "Result", ".", "select", "(", "Result", ".", "epoch", ")", ".", "order_by", "(", "Result", ".", "epoch", ".", "asc", "(", ")", ")", ".", "limit", "(", "1", ")", ".", "get", "(", ")", ".", "epoch", "self", ".", "epoch_finish", "=", "Result", ".", "select", "(", "Result", ".", "epoch", ")", ".", "order_by", "(", "Result", ".", "epoch", ".", "desc", "(", ")", ")", ".", "limit", "(", "1", ")", ".", "get", "(", ")", ".", "epoch", "self", ".", "start_datetime", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "time", ".", "localtime", "(", "self", ".", "epoch_start", ")", ")", "self", ".", "finish_datetime", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "time", ".", "localtime", "(", "self", ".", "epoch_finish", ")", ")" ]
60.666667
31.444444
def _iter(self):
    """
    Generate (name, est, weight) tuples excluding None transformers
    """
    get_weight = (self.transformer_weights or {}).get
    return ((name, trans, get_weight(name))
            for name, trans in self.transformer_list
            if trans is not None)
[ "def", "_iter", "(", "self", ")", ":", "get_weight", "=", "(", "self", ".", "transformer_weights", "or", "{", "}", ")", ".", "get", "return", "(", "(", "name", ",", "trans", ",", "get_weight", "(", "name", ")", ")", "for", "name", ",", "trans", "in", "self", ".", "transformer_list", "if", "trans", "is", "not", "None", ")" ]
42.571429
9.571429
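This resembles scikit-learn's FeatureUnion internals: .get on the (possibly absent) weights dict yields None for unweighted transformers. A standalone illustration with invented transformer names:

transformer_list = [('pca', 'PCA()'), ('dropped', None), ('kbest', 'SelectKBest()')]
transformer_weights = {'pca': 2.0}

get_weight = (transformer_weights or {}).get
triples = [(name, trans, get_weight(name))
           for name, trans in transformer_list
           if trans is not None]
# [('pca', 'PCA()', 2.0), ('kbest', 'SelectKBest()', None)]
# 'dropped' is skipped entirely; missing weights come back as None.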
def showImage(layout, imagePath="", imageObj=None, offset=(0, 0),
              bgcolor=COLORS.Off, brightness=255):
    """Display an image on the matrix"""
    if not isinstance(layout, Matrix):
        raise RuntimeError("Must use Matrix with showImage!")

    layout.all_off()

    return show_image(layout.set, layout.width, layout.height, imagePath,
                      imageObj, offset, bgcolor, brightness)
[ "def", "showImage", "(", "layout", ",", "imagePath", "=", "\"\"", ",", "imageObj", "=", "None", ",", "offset", "=", "(", "0", ",", "0", ")", ",", "bgcolor", "=", "COLORS", ".", "Off", ",", "brightness", "=", "255", ")", ":", "if", "not", "isinstance", "(", "layout", ",", "Matrix", ")", ":", "raise", "RuntimeError", "(", "\"Must use Matrix with showImage!\"", ")", "layout", ".", "all_off", "(", ")", "return", "show_image", "(", "layout", ".", "set", ",", "layout", ".", "width", ",", "layout", ".", "height", ",", "imagePath", ",", "imageObj", ",", "offset", ",", "bgcolor", ",", "brightness", ")" ]
40.7
21.1
def save(self, request, connect=False):
    """
    Saves a new account. Note that while the account is new,
    the user may be an existing one (when connecting accounts)
    """
    assert not self.is_existing
    user = self.user
    user.save()
    self.account.user = user
    self.account.save()
    if app_settings.STORE_TOKENS and self.token:
        self.token.account = self.account
        self.token.save()
    if connect:
        # TODO: Add any new email addresses automatically?
        pass
    else:
        setup_user_email(request, user, self.email_addresses)
[ "def", "save", "(", "self", ",", "request", ",", "connect", "=", "False", ")", ":", "assert", "not", "self", ".", "is_existing", "user", "=", "self", ".", "user", "user", ".", "save", "(", ")", "self", ".", "account", ".", "user", "=", "user", "self", ".", "account", ".", "save", "(", ")", "if", "app_settings", ".", "STORE_TOKENS", "and", "self", ".", "token", ":", "self", ".", "token", ".", "account", "=", "self", ".", "account", "self", ".", "token", ".", "save", "(", ")", "if", "connect", ":", "# TODO: Add any new email addresses automatically?", "pass", "else", ":", "setup_user_email", "(", "request", ",", "user", ",", "self", ".", "email_addresses", ")" ]
34.944444
14.5
def get_raw(self, url: str, _attempt=1) -> requests.Response:
    """Downloads a file anonymously.

    :raises QueryReturnedNotFoundException: When the server responds with a 404.
    :raises QueryReturnedForbiddenException: When the server responds with a 403.
    :raises ConnectionException: When download failed.

    .. versionadded:: 4.2.1"""
    with self.get_anonymous_session() as anonymous_session:
        resp = anonymous_session.get(url, stream=True)
    if resp.status_code == 200:
        resp.raw.decode_content = True
        return resp
    else:
        if resp.status_code == 403:
            # suspected invalid URL signature
            raise QueryReturnedForbiddenException("403 when accessing {}.".format(url))
        if resp.status_code == 404:
            # 404 not worth retrying.
            raise QueryReturnedNotFoundException("404 when accessing {}.".format(url))
        raise ConnectionException("HTTP error code {}.".format(resp.status_code))
[ "def", "get_raw", "(", "self", ",", "url", ":", "str", ",", "_attempt", "=", "1", ")", "->", "requests", ".", "Response", ":", "with", "self", ".", "get_anonymous_session", "(", ")", "as", "anonymous_session", ":", "resp", "=", "anonymous_session", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "if", "resp", ".", "status_code", "==", "200", ":", "resp", ".", "raw", ".", "decode_content", "=", "True", "return", "resp", "else", ":", "if", "resp", ".", "status_code", "==", "403", ":", "# suspected invalid URL signature", "raise", "QueryReturnedForbiddenException", "(", "\"403 when accessing {}.\"", ".", "format", "(", "url", ")", ")", "if", "resp", ".", "status_code", "==", "404", ":", "# 404 not worth retrying.", "raise", "QueryReturnedNotFoundException", "(", "\"404 when accessing {}.\"", ".", "format", "(", "url", ")", ")", "raise", "ConnectionException", "(", "\"HTTP error code {}.\"", ".", "format", "(", "resp", ".", "status_code", ")", ")" ]
49.047619
21.809524
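A hypothetical caller for get_raw showing the three failure modes; the loader instance and URL are made up:

try:
    resp = loader.get_raw("https://example.com/some/media.jpg")
    with open("media.jpg", "wb") as f:
        f.write(resp.content)
except QueryReturnedNotFoundException:
    pass  # 404: permanent, not retried
except QueryReturnedForbiddenException:
    pass  # 403: suspected expired/invalid URL signature
except ConnectionException:
    pass  # any other HTTP status code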
def blit_2x(
    self,
    console: tcod.console.Console,
    dest_x: int,
    dest_y: int,
    img_x: int = 0,
    img_y: int = 0,
    img_width: int = -1,
    img_height: int = -1,
) -> None:
    """Blit onto a Console with double resolution.

    Args:
        console (Console): Blit destination Console.
        dest_x (int): Console tile X position starting from the left at 0.
        dest_y (int): Console tile Y position starting from the top at 0.
        img_x (int): Left corner pixel of the Image to blit
        img_y (int): Top corner pixel of the Image to blit
        img_width (int): Width of the Image to blit.
            Use -1 for the full Image width.
        img_height (int): Height of the Image to blit.
            Use -1 for the full Image height.
    """
    lib.TCOD_image_blit_2x(
        self.image_c,
        _console(console),
        dest_x,
        dest_y,
        img_x,
        img_y,
        img_width,
        img_height,
    )
[ "def", "blit_2x", "(", "self", ",", "console", ":", "tcod", ".", "console", ".", "Console", ",", "dest_x", ":", "int", ",", "dest_y", ":", "int", ",", "img_x", ":", "int", "=", "0", ",", "img_y", ":", "int", "=", "0", ",", "img_width", ":", "int", "=", "-", "1", ",", "img_height", ":", "int", "=", "-", "1", ",", ")", "->", "None", ":", "lib", ".", "TCOD_image_blit_2x", "(", "self", ".", "image_c", ",", "_console", "(", "console", ")", ",", "dest_x", ",", "dest_y", ",", "img_x", ",", "img_y", ",", "img_width", ",", "img_height", ",", ")" ]
32.909091
19.606061
def cache_files(db, aid: int, anime_files: AnimeFiles) -> None:
    """Cache files for anime."""
    with db:
        cache_status(db, aid)
        db.cursor().execute(
            """UPDATE cache_anime
            SET anime_files=?
            WHERE aid=?""",
            (anime_files.to_json(), aid))
[ "def", "cache_files", "(", "db", ",", "aid", ":", "int", ",", "anime_files", ":", "AnimeFiles", ")", "->", "None", ":", "with", "db", ":", "cache_status", "(", "db", ",", "aid", ")", "db", ".", "cursor", "(", ")", ".", "execute", "(", "\"\"\"UPDATE cache_anime\n SET anime_files=?\n WHERE aid=?\"\"\"", ",", "(", "anime_files", ".", "to_json", "(", ")", ",", "aid", ")", ")" ]
32.666667
9.555556
def _chunk(self, response, size=4096):
    """ downloads a web response in pieces """
    method = response.headers.get("content-encoding")
    if method == "gzip":
        d = zlib.decompressobj(16 + zlib.MAX_WBITS)
        b = response.read(size)
        while b:
            data = d.decompress(b)
            yield data
            b = response.read(size)
            del data
    else:
        while True:
            chunk = response.read(size)
            if not chunk:
                break
            yield chunk
[ "def", "_chunk", "(", "self", ",", "response", ",", "size", "=", "4096", ")", ":", "method", "=", "response", ".", "headers", ".", "get", "(", "\"content-encoding\"", ")", "if", "method", "==", "\"gzip\"", ":", "d", "=", "zlib", ".", "decompressobj", "(", "16", "+", "zlib", ".", "MAX_WBITS", ")", "b", "=", "response", ".", "read", "(", "size", ")", "while", "b", ":", "data", "=", "d", ".", "decompress", "(", "b", ")", "yield", "data", "b", "=", "response", ".", "read", "(", "size", ")", "del", "data", "else", ":", "while", "True", ":", "chunk", "=", "response", ".", "read", "(", "size", ")", "if", "not", "chunk", ":", "break", "yield", "chunk" ]
34.3125
10.4375
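A consumption sketch for _chunk; the downloader instance is assumed, and a urllib response supplies the .read()/.headers interface the generator expects:

import urllib.request

response = urllib.request.urlopen("https://example.com/file.bin")
with open("file.bin", "wb") as out:
    for piece in downloader._chunk(response, size=8192):
        out.write(piece)  # gzip-encoded bodies arrive already decompressed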
def get_floating_ip(self, ip):
    """
    Returns a FloatingIP object by its IP address.
    """
    return FloatingIP.get_object(api_token=self.token, ip=ip)
[ "def", "get_floating_ip", "(", "self", ",", "ip", ")", ":", "return", "FloatingIP", ".", "get_object", "(", "api_token", "=", "self", ".", "token", ",", "ip", "=", "ip", ")" ]
35.6
11.2
def png_as_base64_str(self, scale=1, module_color=(0, 0, 0, 255),
                      background=(255, 255, 255, 255), quiet_zone=4):
    """This method uses the png renderer and returns the PNG image encoded as
    base64 string. This can be useful for creating dynamic PNG images for
    web development, since no file needs to be created.

    Example:
        >>> code = pyqrcode.create('Are you suggesting coconuts migrate?')
        >>> image_as_str = code.png_as_base64_str(scale=5)
        >>> html_img = '<img src="data:image/png;base64,{}">'.format(image_as_str)

    The parameters are passed directly to the :py:meth:`png` method. Refer
    to that method's documentation for the meaning behind the parameters.

    .. note::
        This method depends on the pypng module to actually create the
        PNG image.
    """
    import io
    import base64

    with io.BytesIO() as virtual_file:
        self.png(file=virtual_file, scale=scale, module_color=module_color,
                 background=background, quiet_zone=quiet_zone)
        image_as_str = base64.b64encode(virtual_file.getvalue()).decode("ascii")

    return image_as_str
[ "def", "png_as_base64_str", "(", "self", ",", "scale", "=", "1", ",", "module_color", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ",", "background", "=", "(", "255", ",", "255", ",", "255", ",", "255", ")", ",", "quiet_zone", "=", "4", ")", ":", "import", "io", "import", "base64", "with", "io", ".", "BytesIO", "(", ")", "as", "virtual_file", ":", "self", ".", "png", "(", "file", "=", "virtual_file", ",", "scale", "=", "scale", ",", "module_color", "=", "module_color", ",", "background", "=", "background", ",", "quiet_zone", "=", "quiet_zone", ")", "image_as_str", "=", "base64", ".", "b64encode", "(", "virtual_file", ".", "getvalue", "(", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "return", "image_as_str" ]
45.703704
27.259259
def action_stats(self, hostname=None):
    "Shows stats (possibly limited by hostname)"
    format = "%-35s %-11s %-11s %-11s %-11s"
    print format % ("HOST", "OPEN", "COMPLETED", "BYTES IN", "BYTES OUT")
    for host, details in sorted(self.client.stats(hostname).items()):
        print format % (
            host,
            details.get("open_requests", 0),
            details.get("completed_requests", 0),
            details.get("bytes_received", 0),
            details.get("bytes_sent", 0),
        )
[ "def", "action_stats", "(", "self", ",", "hostname", "=", "None", ")", ":", "format", "=", "\"%-35s %-11s %-11s %-11s %-11s\"", "print", "format", "%", "(", "\"HOST\"", ",", "\"OPEN\"", ",", "\"COMPLETED\"", ",", "\"BYTES IN\"", ",", "\"BYTES OUT\"", ")", "for", "host", ",", "details", "in", "sorted", "(", "self", ".", "client", ".", "stats", "(", "hostname", ")", ".", "items", "(", ")", ")", ":", "print", "format", "%", "(", "host", ",", "details", ".", "get", "(", "\"open_requests\"", ",", "0", ")", ",", "details", ".", "get", "(", "\"completed_requests\"", ",", "0", ")", ",", "details", ".", "get", "(", "\"bytes_received\"", ",", "0", ")", ",", "details", ".", "get", "(", "\"bytes_sent\"", ",", "0", ")", ",", ")" ]
45.416667
15.416667
def debug_application(self, environ, start_response):
    """Run the application and preserve the traceback frames.

    :param environ: The environment which is passed into the wsgi application
    :type environ: dict[str, object]
    :param start_response: The start_response function of the wsgi application
    :type start_response: (str, list[(str, str)]) -> None

    :rtype: generator[str]

    .. versionadded:: 0.1.0
    """
    adapter = self._debug_map.bind_to_environ(environ)
    if adapter.test():
        _, args = adapter.match()
        return self.handle_debug(environ, start_response, args["traceback_id"])
    else:
        return super(DebuggedJsonRpcApplication, self).debug_application(environ, start_response)
[ "def", "debug_application", "(", "self", ",", "environ", ",", "start_response", ")", ":", "adapter", "=", "self", ".", "_debug_map", ".", "bind_to_environ", "(", "environ", ")", "if", "adapter", ".", "test", "(", ")", ":", "_", ",", "args", "=", "adapter", ".", "match", "(", ")", "return", "self", ".", "handle_debug", "(", "environ", ",", "start_response", ",", "args", "[", "\"traceback_id\"", "]", ")", "else", ":", "return", "super", "(", "DebuggedJsonRpcApplication", ",", "self", ")", ".", "debug_application", "(", "environ", ",", "start_response", ")" ]
47.111111
23.222222
def directions(self, origin, destination, mode=None, alternatives=None,
               waypoints=None, optimize_waypoints=False, avoid=None,
               language=None, units=None, region=None, departure_time=None,
               arrival_time=None, sensor=None):
    """Get directions between locations

    :param origin: Origin location - string address; (latitude, longitude)
        two-tuple, dict with ("lat", "lon") keys or object with (lat, lon)
        attributes
    :param destination: Destination location - type same as origin
    :param mode: Travel mode as string, defaults to "driving".
        See `google docs details <https://developers.google.com/maps/documentation/directions/#TravelModes>`_
    :param alternatives: True if it has to return more than one route
        alternative
    :param waypoints: Iterable with set of intermediate stops,
        like ("Munich", "Dallas")
        See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_
    :param optimize_waypoints: if true will attempt to re-order supplied
        waypoints to minimize overall cost of the route. If waypoints are
        optimized, the route returned will show the optimized order under
        "waypoint_order".
        See `google docs details <https://developers.google.com/maps/documentation/javascript/reference#DirectionsRequest>`_
    :param avoid: Iterable with set of restrictions, like ("tolls", "highways").
        For full list refer to
        `google docs details <https://developers.google.com/maps/documentation/directions/#Restrictions>`_
    :param language: The language in which to return results.
        See `list of supported languages <https://developers.google.com/maps/faq#languagesupport>`_
    :param units: Unit system for result. Defaults to unit system of origin's
        country.
        See `google docs details <https://developers.google.com/maps/documentation/directions/#UnitSystems>`_
    :param region: The region code. Affects geocoding of origin and
        destination (see `gmaps.Geocoding.geocode` region parameter)
    :param departure_time: Desired time of departure as seconds since
        midnight, January 1, 1970 UTC
    :param arrival_time: Desired time of arrival for transit directions as
        seconds since midnight, January 1, 1970 UTC.
    """  # noqa
    if optimize_waypoints:
        waypoints.insert(0, "optimize:true")
    parameters = dict(
        origin=self.assume_latlon_or_address(origin),
        destination=self.assume_latlon_or_address(destination),
        mode=mode,
        alternatives=alternatives,
        waypoints=waypoints or [],
        avoid=avoid,
        language=language,
        units=units,
        region=region,
        departure_time=departure_time,
        arrival_time=arrival_time,
        sensor=sensor,
    )
    return self._make_request(self.DIRECTIONS_URL, parameters, "routes")
[ "def", "directions", "(", "self", ",", "origin", ",", "destination", ",", "mode", "=", "None", ",", "alternatives", "=", "None", ",", "waypoints", "=", "None", ",", "optimize_waypoints", "=", "False", ",", "avoid", "=", "None", ",", "language", "=", "None", ",", "units", "=", "None", ",", "region", "=", "None", ",", "departure_time", "=", "None", ",", "arrival_time", "=", "None", ",", "sensor", "=", "None", ")", ":", "# noqa", "if", "optimize_waypoints", ":", "waypoints", ".", "insert", "(", "0", ",", "\"optimize:true\"", ")", "parameters", "=", "dict", "(", "origin", "=", "self", ".", "assume_latlon_or_address", "(", "origin", ")", ",", "destination", "=", "self", ".", "assume_latlon_or_address", "(", "destination", ")", ",", "mode", "=", "mode", ",", "alternatives", "=", "alternatives", ",", "waypoints", "=", "waypoints", "or", "[", "]", ",", "avoid", "=", "avoid", ",", "language", "=", "language", ",", "units", "=", "units", ",", "region", "=", "region", ",", "departure_time", "=", "departure_time", ",", "arrival_time", "=", "arrival_time", ",", "sensor", "=", "sensor", ",", ")", "return", "self", ".", "_make_request", "(", "self", ".", "DIRECTIONS_URL", ",", "parameters", ",", "\"routes\"", ")" ]
57.648148
26.574074
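A hypothetical directions call; the api client object and place names are illustrative. One side effect is worth noting:

routes = api.directions("Munich", "Dallas",
                        mode="driving",
                        waypoints=["Frankfurt"],
                        optimize_waypoints=True)
# Caveat: optimize_waypoints=True mutates the caller's waypoints list in
# place by inserting "optimize:true" at index 0 before the request is built.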
def load_reference(name, element_type, version):
    """
    Look for an element of the given type, name and version and return its
    reference structure

    :type element_type: ``str``
    :param element_type: the element type to look for (e.g. 'Segment')

    :type name: ``str``
    :param name: the element name to look for (e.g. 'MSH')

    :type version: ``str``
    :param version: the version of the library where to search the element
        (e.g. '2.6')

    :rtype: ``dict``
    :return: a dictionary describing the element structure

    :raise: ``KeyError`` if the element has not been found

    The returned dictionary will contain the following keys:

    +--------------+--------------------------------------------+
    |Key           |Value                                       |
    +==============+============================================+
    |cls           |an :class:`hl7apy.core.Element` subclass    |
    +--------------+--------------------------------------------+
    |name          |the Element name (e.g. PID)                 |
    +--------------+--------------------------------------------+
    |ref           |a tuple of one of the following format:     |
    |              |                                            |
    |              |('leaf', <datatype>, <longName>, <table>)   |
    |              |('sequence', (<child>, (<min>, <max>), ...))|
    +--------------+--------------------------------------------+

    >>> load_reference('UNKNOWN', 'Segment', '2.5')  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ChildNotFound: No child named UNKNOWN
    >>> r = load_reference('ADT_A01', 'Message', '2.5')
    >>> print(r[0])
    sequence
    >>> r = load_reference('MSH_3', 'Field', '2.5')
    >>> print(r[0])
    sequence
    """
    lib = load_library(version)
    ref = lib.get(name, element_type)
    return ref
[ "def", "load_reference", "(", "name", ",", "element_type", ",", "version", ")", ":", "lib", "=", "load_library", "(", "version", ")", "ref", "=", "lib", ".", "get", "(", "name", ",", "element_type", ")", "return", "ref" ]
43.046512
22.767442
def find_node_group_membership(self, node):
    """
    Identifies the group to which a node belongs.
    """
    for group, nodelist in self.nodes.items():
        if node in nodelist:
            return group
[ "def", "find_node_group_membership", "(", "self", ",", "node", ")", ":", "for", "group", ",", "nodelist", "in", "self", ".", "nodes", ".", "items", "(", ")", ":", "if", "node", "in", "nodelist", ":", "return", "group" ]
33.142857
7.142857
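A standalone sketch of the lookup in find_node_group_membership; the group mapping mirrors the assumed shape of self.nodes:

nodes = {'providers': ['a', 'b'], 'consumers': ['c']}  # shape assumed for self.nodes

def find_group(node):
    for group, nodelist in nodes.items():
        if node in nodelist:
            return group

print(find_group('c'))  # 'consumers'
print(find_group('z'))  # None: the loop falls through without a match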
def work_once(self, free_pool_slots=1, max_jobs=None):
    """ Does one lookup for new jobs, inside the inner work loop """
    dequeued_jobs = 0

    available_queues = [
        queue for queue in self.queues
        if queue.root_id not in self.paused_queues and
        queue.id not in self.paused_queues
    ]

    for queue_i in range(len(available_queues)):
        queue = available_queues[(queue_i + self.queue_offset) % len(available_queues)]

        max_jobs_per_queue = free_pool_slots - dequeued_jobs

        if max_jobs_per_queue <= 0:
            queue_i -= 1
            break

        if self.config["dequeue_strategy"] == "parallel":
            max_jobs_per_queue = max(1, int(max_jobs_per_queue / (len(available_queues) - queue_i)))

        for job in queue.dequeue_jobs(
            max_jobs=max_jobs_per_queue, job_class=self.job_class, worker=self
        ):
            dequeued_jobs += 1
            self.gevent_pool.spawn(self.perform_job, job)

    # At the next pass, start at the next queue to avoid always dequeuing the same one
    if self.config["dequeue_strategy"] == "parallel":
        self.queue_offset = (self.queue_offset + queue_i + 1) % len(self.queues)

    # TODO consider this when dequeuing jobs to have strict limits
    if max_jobs and self.done_jobs >= max_jobs:
        self.log.info("Reached max_jobs=%s" % self.done_jobs)
        return "break", dequeued_jobs

    # We seem to have exhausted available jobs, we can sleep for a
    # while.
    if dequeued_jobs == 0:
        if self.config["dequeue_strategy"] == "burst":
            self.log.info("Burst mode: stopping now because queues were empty")
            return "break", dequeued_jobs

        return "wait", dequeued_jobs

    return None, dequeued_jobs
[ "def", "work_once", "(", "self", ",", "free_pool_slots", "=", "1", ",", "max_jobs", "=", "None", ")", ":", "dequeued_jobs", "=", "0", "available_queues", "=", "[", "queue", "for", "queue", "in", "self", ".", "queues", "if", "queue", ".", "root_id", "not", "in", "self", ".", "paused_queues", "and", "queue", ".", "id", "not", "in", "self", ".", "paused_queues", "]", "for", "queue_i", "in", "range", "(", "len", "(", "available_queues", ")", ")", ":", "queue", "=", "available_queues", "[", "(", "queue_i", "+", "self", ".", "queue_offset", ")", "%", "len", "(", "available_queues", ")", "]", "max_jobs_per_queue", "=", "free_pool_slots", "-", "dequeued_jobs", "if", "max_jobs_per_queue", "<=", "0", ":", "queue_i", "-=", "1", "break", "if", "self", ".", "config", "[", "\"dequeue_strategy\"", "]", "==", "\"parallel\"", ":", "max_jobs_per_queue", "=", "max", "(", "1", ",", "int", "(", "max_jobs_per_queue", "/", "(", "len", "(", "available_queues", ")", "-", "queue_i", ")", ")", ")", "for", "job", "in", "queue", ".", "dequeue_jobs", "(", "max_jobs", "=", "max_jobs_per_queue", ",", "job_class", "=", "self", ".", "job_class", ",", "worker", "=", "self", ")", ":", "dequeued_jobs", "+=", "1", "self", ".", "gevent_pool", ".", "spawn", "(", "self", ".", "perform_job", ",", "job", ")", "# At the next pass, start at the next queue to avoid always dequeuing the same one", "if", "self", ".", "config", "[", "\"dequeue_strategy\"", "]", "==", "\"parallel\"", ":", "self", ".", "queue_offset", "=", "(", "self", ".", "queue_offset", "+", "queue_i", "+", "1", ")", "%", "len", "(", "self", ".", "queues", ")", "# TODO consider this when dequeuing jobs to have strict limits", "if", "max_jobs", "and", "self", ".", "done_jobs", ">=", "max_jobs", ":", "self", ".", "log", ".", "info", "(", "\"Reached max_jobs=%s\"", "%", "self", ".", "done_jobs", ")", "return", "\"break\"", ",", "dequeued_jobs", "# We seem to have exhausted available jobs, we can sleep for a", "# while.", "if", "dequeued_jobs", "==", "0", ":", "if", "self", ".", "config", "[", "\"dequeue_strategy\"", "]", "==", "\"burst\"", ":", "self", ".", "log", ".", "info", "(", "\"Burst mode: stopping now because queues were empty\"", ")", "return", "\"break\"", ",", "dequeued_jobs", "return", "\"wait\"", ",", "dequeued_jobs", "return", "None", ",", "dequeued_jobs" ]
35.679245
24.320755
def apply_vcc(self, vcc):
    """
    Applies velocity contrast curve constraint to each population

    See :func:`vespa.stars.StarPopulation.apply_vcc`;
    all arguments passed to that function for each population.
    """
    if 'secondary spectrum' not in self.constraints:
        self.constraints.append('secondary spectrum')
    for pop in self.poplist:
        if not pop.is_specific:
            try:
                pop.apply_vcc(vcc)
            except:
                logging.info('VCC constraint not applied to %s model' % (pop.model))
[ "def", "apply_vcc", "(", "self", ",", "vcc", ")", ":", "if", "'secondary spectrum'", "not", "in", "self", ".", "constraints", ":", "self", ".", "constraints", ".", "append", "(", "'secondary spectrum'", ")", "for", "pop", "in", "self", ".", "poplist", ":", "if", "not", "pop", ".", "is_specific", ":", "try", ":", "pop", ".", "apply_vcc", "(", "vcc", ")", "except", ":", "logging", ".", "info", "(", "'VCC constraint not applied to %s model'", "%", "(", "pop", ".", "model", ")", ")" ]
36.6875
18.8125
def compile(manager, path, allow_naked_names, allow_nested,
            disallow_unqualified_translocations, no_identifier_validation,
            no_citation_clearing, required_annotations, skip_tqdm, verbose):
    """Compile a BEL script to a graph."""
    if verbose:
        logging.basicConfig(level=logging.DEBUG)
        log.setLevel(logging.DEBUG)

    log.debug('using connection: %s', manager.engine.url)

    click.secho('Compilation', fg='red', bold=True)
    if skip_tqdm:
        click.echo('```')
    graph = from_path(
        path,
        manager=manager,
        use_tqdm=(not (skip_tqdm or verbose)),
        allow_nested=allow_nested,
        allow_naked_names=allow_naked_names,
        disallow_unqualified_translocations=disallow_unqualified_translocations,
        citation_clearing=(not no_citation_clearing),
        required_annotations=required_annotations,
        no_identifier_validation=no_identifier_validation,
        allow_definition_failures=True,
    )
    if skip_tqdm:
        click.echo('```')

    to_pickle(graph, get_corresponding_pickle_path(path))

    click.echo('')
    _print_summary(graph, ticks=skip_tqdm)

    sys.exit(0 if 0 == graph.number_of_warnings() else 1)
[ "def", "compile", "(", "manager", ",", "path", ",", "allow_naked_names", ",", "allow_nested", ",", "disallow_unqualified_translocations", ",", "no_identifier_validation", ",", "no_citation_clearing", ",", "required_annotations", ",", "skip_tqdm", ",", "verbose", ")", ":", "if", "verbose", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "log", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "log", ".", "debug", "(", "'using connection: %s'", ",", "manager", ".", "engine", ".", "url", ")", "click", ".", "secho", "(", "'Compilation'", ",", "fg", "=", "'red'", ",", "bold", "=", "True", ")", "if", "skip_tqdm", ":", "click", ".", "echo", "(", "'```'", ")", "graph", "=", "from_path", "(", "path", ",", "manager", "=", "manager", ",", "use_tqdm", "=", "(", "not", "(", "skip_tqdm", "or", "verbose", ")", ")", ",", "allow_nested", "=", "allow_nested", ",", "allow_naked_names", "=", "allow_naked_names", ",", "disallow_unqualified_translocations", "=", "disallow_unqualified_translocations", ",", "citation_clearing", "=", "(", "not", "no_citation_clearing", ")", ",", "required_annotations", "=", "required_annotations", ",", "no_identifier_validation", "=", "no_identifier_validation", ",", "allow_definition_failures", "=", "True", ",", ")", "if", "skip_tqdm", ":", "click", ".", "echo", "(", "'```'", ")", "to_pickle", "(", "graph", ",", "get_corresponding_pickle_path", "(", "path", ")", ")", "click", ".", "echo", "(", "''", ")", "_print_summary", "(", "graph", ",", "ticks", "=", "skip_tqdm", ")", "sys", ".", "exit", "(", "0", "if", "0", "==", "graph", ".", "number_of_warnings", "(", ")", "else", "1", ")" ]
37.935484
20.516129
def check_version(version):
    """Takes a version string or tuple and raises ValueError in case
    the passed version is newer than the current version of pgi.

    Keep in mind that the pgi version is different from the pygobject one.
    """
    if isinstance(version, string_types):
        version = tuple(map(int, version.split(".")))

    if version > version_info:
        str_version = ".".join(map(str, version))
        raise ValueError("pgi version '%s' requested, '%s' available" %
                         (str_version, __version__))
[ "def", "check_version", "(", "version", ")", ":", "if", "isinstance", "(", "version", ",", "string_types", ")", ":", "version", "=", "tuple", "(", "map", "(", "int", ",", "version", ".", "split", "(", "\".\"", ")", ")", ")", "if", "version", ">", "version_info", ":", "str_version", "=", "\".\"", ".", "join", "(", "map", "(", "str", ",", "version", ")", ")", "raise", "ValueError", "(", "\"pgi version '%s' requested, '%s' available\"", "%", "(", "str_version", ",", "__version__", ")", ")" ]
38.285714
19.071429
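A usage sketch for check_version; the requested version numbers are invented:

check_version("0.0.1")    # returns silently when pgi is at least 0.0.1
check_version((0, 0, 1))  # tuples are accepted as-is
try:
    check_version("99.0")  # newer than installed -> ValueError
except ValueError as err:
    print(err)  # pgi version '99.0' requested, '<installed>' available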
def resolve_config_file_path(self, config_filepath):
    """ Determines whether given path is valid, and if so uses it.
        Otherwise searches both the working directory and nomenclate/core
        for the specified config file.

    :param config_filepath: str, file path or relative file name within package
    :return: str, resolved full file path to the config file
    """
    search_paths = [config_filepath,
                    os.path.normpath(os.path.join(os.getcwd(), config_filepath)),
                    os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                  config_filepath))]

    for search_path in search_paths:
        try:
            self.validate_config_file(search_path)
            return search_path
        except (IOError, OSError):
            pass

    raise errors.SourceError(
        'No config file found in current working directory or nomenclate/core and %s is not a valid YAML file')
[ "def", "resolve_config_file_path", "(", "self", ",", "config_filepath", ")", ":", "search_paths", "=", "[", "config_filepath", ",", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "config_filepath", ")", ")", ",", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "config_filepath", ")", ")", "]", "for", "search_path", "in", "search_paths", ":", "try", ":", "self", ".", "validate_config_file", "(", "search_path", ")", "return", "search_path", "except", "(", "IOError", ",", "OSError", ")", ":", "pass", "raise", "errors", ".", "SourceError", "(", "'No config file found in current working directory or nomenclate/core and %s is not a valid YAML file'", ")" ]
48.95
26.8
def rule_index(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    index = ET.SubElement(rule, "index")
    index.text = kwargs.pop('index')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "rule_index", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "rule", "=", "ET", ".", "SubElement", "(", "config", ",", "\"rule\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-aaa\"", ")", "index", "=", "ET", ".", "SubElement", "(", "rule", ",", "\"index\"", ")", "index", ".", "text", "=", "kwargs", ".", "pop", "(", "'index'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
36.7
12.8
def get_at(self, root_node, key):
    """
    Get value of a key when the root node was `root_node`

    :param root_node:
    :param key:
    :return:
    """
    return self._get(root_node, bin_to_nibbles(to_string(key)))
[ "def", "get_at", "(", "self", ",", "root_node", ",", "key", ")", ":", "return", "self", ".", "_get", "(", "root_node", ",", "bin_to_nibbles", "(", "to_string", "(", "key", ")", ")", ")" ]
30.375
14.375
def register(self, metadata):
    """
    Register a distribution on PyPI, using the provided metadata.

    :param metadata: A :class:`Metadata` instance defining at least a name
                     and version number for the distribution to be
                     registered.
    :return: The HTTP response received from PyPI upon submission of the
             request.
    """
    self.check_credentials()
    metadata.validate()
    d = metadata.todict()
    d[':action'] = 'verify'
    request = self.encode_request(d.items(), [])
    response = self.send_request(request)
    d[':action'] = 'submit'
    request = self.encode_request(d.items(), [])
    return self.send_request(request)
[ "def", "register", "(", "self", ",", "metadata", ")", ":", "self", ".", "check_credentials", "(", ")", "metadata", ".", "validate", "(", ")", "d", "=", "metadata", ".", "todict", "(", ")", "d", "[", "':action'", "]", "=", "'verify'", "request", "=", "self", ".", "encode_request", "(", "d", ".", "items", "(", ")", ",", "[", "]", ")", "response", "=", "self", ".", "send_request", "(", "request", ")", "d", "[", "':action'", "]", "=", "'submit'", "request", "=", "self", ".", "encode_request", "(", "d", ".", "items", "(", ")", ",", "[", "]", ")", "return", "self", ".", "send_request", "(", "request", ")" ]
39.157895
14.947368
def _exec(self, globals_dict=None):
    """exec compiled code"""
    globals_dict = globals_dict or {}
    globals_dict.setdefault('__builtins__', {})
    exec(self._code, globals_dict)
    return globals_dict
[ "def", "_exec", "(", "self", ",", "globals_dict", "=", "None", ")", ":", "globals_dict", "=", "globals_dict", "or", "{", "}", "globals_dict", ".", "setdefault", "(", "'__builtins__'", ",", "{", "}", ")", "exec", "(", "self", ".", "_code", ",", "globals_dict", ")", "return", "globals_dict" ]
37.333333
5.333333
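The same compile-and-exec pattern in isolation: run a snippet against a caller-supplied globals dict with builtins blanked, then read results back out of that dict:

code = compile("y = x * 2", "<snippet>", "exec")
globals_dict = {'x': 21}
globals_dict.setdefault('__builtins__', {})  # deny access to builtins, as above
exec(code, globals_dict)
assert globals_dict['y'] == 42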
def _convert_sam_function_resource(name, resource_properties, layers): """ Converts an AWS::Serverless::Function resource to a Function configuration usable by the provider. :param string name: LogicalID of the resource NOTE: This is *not* the function name because not all functions declare a name :param dict resource_properties: Properties of this resource :param list layers: List of layers attached to this function :return samcli.commands.local.lib.provider.Function: Function configuration """ codeuri = SamFunctionProvider._extract_sam_function_codeuri(name, resource_properties, "CodeUri") LOG.debug("Found Serverless function with name='%s' and CodeUri='%s'", name, codeuri) return Function( name=name, runtime=resource_properties.get("Runtime"), memory=resource_properties.get("MemorySize"), timeout=resource_properties.get("Timeout"), handler=resource_properties.get("Handler"), codeuri=codeuri, environment=resource_properties.get("Environment"), rolearn=resource_properties.get("Role"), layers=layers )
[ "def", "_convert_sam_function_resource", "(", "name", ",", "resource_properties", ",", "layers", ")", ":", "codeuri", "=", "SamFunctionProvider", ".", "_extract_sam_function_codeuri", "(", "name", ",", "resource_properties", ",", "\"CodeUri\"", ")", "LOG", ".", "debug", "(", "\"Found Serverless function with name='%s' and CodeUri='%s'\"", ",", "name", ",", "codeuri", ")", "return", "Function", "(", "name", "=", "name", ",", "runtime", "=", "resource_properties", ".", "get", "(", "\"Runtime\"", ")", ",", "memory", "=", "resource_properties", ".", "get", "(", "\"MemorySize\"", ")", ",", "timeout", "=", "resource_properties", ".", "get", "(", "\"Timeout\"", ")", ",", "handler", "=", "resource_properties", ".", "get", "(", "\"Handler\"", ")", ",", "codeuri", "=", "codeuri", ",", "environment", "=", "resource_properties", ".", "get", "(", "\"Environment\"", ")", ",", "rolearn", "=", "resource_properties", ".", "get", "(", "\"Role\"", ")", ",", "layers", "=", "layers", ")" ]
45.36
28.96
def receive(self, data): """ Create and return a message from data, also triggers the **receive** event Returns: - Message: message object """ self.log_debug("Received: %s" % (data)) message = self.make_message(data) self.trigger("receive", data=data, message=message) return message
[ "def", "receive", "(", "self", ",", "data", ")", ":", "self", ".", "log_debug", "(", "\"Received: %s\"", "%", "(", "data", ")", ")", "message", "=", "self", ".", "make_message", "(", "data", ")", "self", ".", "trigger", "(", "\"receive\"", ",", "data", "=", "data", ",", "message", "=", "message", ")", "return", "message" ]
27.538462
16
def CreateAllStaticRAPIDFiles(in_drainage_line, river_id, length_id, slope_id, next_down_id, rapid_output_folder, kfac_celerity=1000.0/3600.0, kfac_formula_type=3, kfac_length_units="km", lambda_k=0.35, x_value=0.3, nhdplus=False, taudem_network_connectivity_tree_file=None, file_geodatabase=None): """ To generate the static RAPID files (rapid_connect.csv, riv_bas_id.csv, kfac.csv, k.csv, x.csv, comid_lat_lon_z.csv) with default values. Parameters ---------- in_drainage_line: str Path to the stream network (i.e. Drainage Line) shapefile. river_id: str The name of the field with the river ID (Ex. 'HydroID', 'COMID', or 'LINKNO'). length_id: str The field name containing the length of the river segment (Ex. 'LENGTHKM' or 'Length'). slope_id: str The field name containing the slope of the river segment (Ex. 'Avg_Slope' or 'Slope'). next_down_id: str The name of the field with the river ID of the next downstream river segment (Ex. 'NextDownID' or 'DSLINKNO'). rapid_output_folder: str The path to the folder where all of the RAPID output will be generated. kfac_celerity: float, optional The flow wave celerity for the watershed in meters per second. 1 km/hr or 1000.0/3600.0 m/s is a reasonable value if unknown. kfac_formula_type: int, optional An integer representing the formula type to use when calculating kfac. Default is 3. kfac_length_units: str, optional The units for the length_id field. Supported types are "m" for meters and "km" for kilometers. Default is "km". lambda_k: float, optional The value for lambda given from RAPID after the calibration process. Default is 0.35. x_value: float, optional Value for the muskingum X parameter [0-0.5]. Default is 0.3. nhdplus: bool, optional If True, the drainage line is from the NHDPlus dataset with the VAA fields COMID, FROMNODE, TONODE, and DIVERGENCE. Default is False. taudem_network_connectivity_tree_file: str, optional If set, the connectivity file will be generated from the TauDEM connectivity tree file. file_geodatabase: str, optional Path to the file geodatabase. If you use this option, in_drainage_line is the name of the stream network feature class. (WARNING: Not always stable with GDAL.)
Example:: from RAPIDpy.gis.workflow import CreateAllStaticRAPIDFiles CreateAllStaticRAPIDFiles( in_drainage_line="/path/to/drainage_line.shp", river_id="HydroID", length_id="LENGTHKM", slope_id="SLOPE", next_down_id="NextDownID", rapid_output_folder="/path/to/rapid/output", ) """ # RAPID connect file rapid_connect_file = os.path.join(rapid_output_folder, 'rapid_connect.csv') if nhdplus: CreateNetworkConnectivityNHDPlus(in_drainage_line, rapid_connect_file, file_geodatabase) elif taudem_network_connectivity_tree_file: CreateNetworkConnectivityTauDEMTree( taudem_network_connectivity_tree_file, rapid_connect_file) else: CreateNetworkConnectivity(in_drainage_line, river_id, next_down_id, rapid_connect_file, file_geodatabase) # river basin id file riv_bas_id_file = os.path.join(rapid_output_folder, 'riv_bas_id.csv') CreateSubsetFile(in_drainage_line, river_id, riv_bas_id_file, file_geodatabase) # kfac file kfac_file = os.path.join(rapid_output_folder, 'kfac.csv') CreateMuskingumKfacFile(in_drainage_line, river_id, length_id, slope_id, kfac_celerity, kfac_formula_type, rapid_connect_file, kfac_file, length_units=kfac_length_units, file_geodatabase=file_geodatabase) # k file k_file = os.path.join(rapid_output_folder, 'k.csv') CreateMuskingumKFile(lambda_k, kfac_file, k_file) # x file x_file = os.path.join(rapid_output_folder, 'x.csv') CreateConstMuskingumXFile(x_value, rapid_connect_file, x_file) # comid lat lon z file comid_lat_lon_z_file = \ os.path.join(rapid_output_folder, 'comid_lat_lon_z.csv') FlowlineToPoint(in_drainage_line, river_id, comid_lat_lon_z_file, file_geodatabase)
[ "def", "CreateAllStaticRAPIDFiles", "(", "in_drainage_line", ",", "river_id", ",", "length_id", ",", "slope_id", ",", "next_down_id", ",", "rapid_output_folder", ",", "kfac_celerity", "=", "1000.0", "/", "3600.0", ",", "kfac_formula_type", "=", "3", ",", "kfac_length_units", "=", "\"km\"", ",", "lambda_k", "=", "0.35", ",", "x_value", "=", "0.3", ",", "nhdplus", "=", "False", ",", "taudem_network_connectivity_tree_file", "=", "None", ",", "file_geodatabase", "=", "None", ")", ":", "# RAPID connect file", "rapid_connect_file", "=", "os", ".", "path", ".", "join", "(", "rapid_output_folder", ",", "'rapid_connect.csv'", ")", "if", "nhdplus", ":", "CreateNetworkConnectivityNHDPlus", "(", "in_drainage_line", ",", "rapid_connect_file", ",", "file_geodatabase", ")", "elif", "taudem_network_connectivity_tree_file", ":", "CreateNetworkConnectivityTauDEMTree", "(", "taudem_network_connectivity_tree_file", ",", "rapid_connect_file", ")", "else", ":", "CreateNetworkConnectivity", "(", "in_drainage_line", ",", "river_id", ",", "next_down_id", ",", "rapid_connect_file", ",", "file_geodatabase", ")", "# river basin id file", "riv_bas_id_file", "=", "os", ".", "path", ".", "join", "(", "rapid_output_folder", ",", "'riv_bas_id.csv'", ")", "CreateSubsetFile", "(", "in_drainage_line", ",", "river_id", ",", "riv_bas_id_file", ",", "file_geodatabase", ")", "# kfac file", "kfac_file", "=", "os", ".", "path", ".", "join", "(", "rapid_output_folder", ",", "'kfac.csv'", ")", "CreateMuskingumKfacFile", "(", "in_drainage_line", ",", "river_id", ",", "length_id", ",", "slope_id", ",", "kfac_celerity", ",", "kfac_formula_type", ",", "rapid_connect_file", ",", "kfac_file", ",", "length_units", "=", "kfac_length_units", ",", "file_geodatabase", "=", "file_geodatabase", ")", "# k file", "k_file", "=", "os", ".", "path", ".", "join", "(", "rapid_output_folder", ",", "'k.csv'", ")", "CreateMuskingumKFile", "(", "lambda_k", ",", "kfac_file", ",", "k_file", ")", "# x file", "x_file", "=", "os", ".", "path", ".", "join", "(", "rapid_output_folder", ",", "'x.csv'", ")", "CreateConstMuskingumXFile", "(", "x_value", ",", "rapid_connect_file", ",", "x_file", ")", "# comid lat lon z file", "comid_lat_lon_z_file", "=", "os", ".", "path", ".", "join", "(", "rapid_output_folder", ",", "'comid_lat_lon_z.csv'", ")", "FlowlineToPoint", "(", "in_drainage_line", ",", "river_id", ",", "comid_lat_lon_z_file", ",", "file_geodatabase", ")" ]
41.582677
15.88189
def c_metadata(api, args, verbose=False): """ Set or get metadata associated with an object:: usage: cdstar metadata <URL> [<JSON>] <JSON> Path to metadata in JSON, or JSON literal. """ obj = api.get_object(args['<URL>'].split('/')[-1]) if not set_metadata(args['<JSON>'], obj): return json.dumps(obj.metadata.read(), indent=4)
[ "def", "c_metadata", "(", "api", ",", "args", ",", "verbose", "=", "False", ")", ":", "obj", "=", "api", ".", "get_object", "(", "args", "[", "'<URL>'", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", "if", "not", "set_metadata", "(", "args", "[", "'<JSON>'", "]", ",", "obj", ")", ":", "return", "json", ".", "dumps", "(", "obj", ".", "metadata", ".", "read", "(", ")", ",", "indent", "=", "4", ")" ]
31.636364
12.545455
def _compute_standard_dev(self, rup, imt, C): """ Compute the standard deviation in terms of magnitude described on page 744, eq. 4 """ sigma_mean = 0. if imt.name in "SA PGA": psi = -6.898E-3 else: psi = -3.054E-5 if rup.mag <= 6.5: sigma_mean = (C['c12'] * rup.mag) + C['c13'] elif rup.mag > 6.5: sigma_mean = (psi * rup.mag) + C['c14'] return sigma_mean
[ "def", "_compute_standard_dev", "(", "self", ",", "rup", ",", "imt", ",", "C", ")", ":", "sigma_mean", "=", "0.", "if", "imt", ".", "name", "in", "\"SA PGA\"", ":", "psi", "=", "-", "6.898E-3", "else", ":", "psi", "=", "-", "3.054E-5", "if", "rup", ".", "mag", "<=", "6.5", ":", "sigma_mean", "=", "(", "C", "[", "'c12'", "]", "*", "rup", ".", "mag", ")", "+", "C", "[", "'c13'", "]", "elif", "rup", ".", "mag", ">", "6.5", ":", "sigma_mean", "=", "(", "psi", "*", "rup", ".", "mag", ")", "+", "C", "[", "'c14'", "]", "return", "sigma_mean" ]
31.6
12
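A worked check of the two magnitude branches above; the coefficient values here are invented for illustration and do not come from the GMPE's coefficient table:

C = {'c12': -0.0471, 'c13': 0.8, 'c14': 0.7}  # hypothetical coefficients
psi = -6.898E-3                               # SA/PGA branch
sigma_low = C['c12'] * 6.0 + C['c13']         # mag <= 6.5 -> 0.5174
sigma_high = psi * 7.2 + C['c14']             # mag > 6.5  -> ~0.6503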
def extract(self, zip_archive, font_files): """ Extract files to install """ # Get a temp directory tmp_container = tempfile.mkdtemp(prefix='icomoon-tmp') self._debug("* Temporary dir for extracted archive: {}", tmp_container) # Extract manifest to temp directory zip_archive.extract(settings.ICOMOON_MANIFEST_FILENAME, tmp_container) # Then the font files for item in font_files: zip_archive.extract(item, tmp_container) # Get manifest for icon map webfont_store = WebfontStore(settings.ICOMOON_MANIFEST_FILENAME) webfont_store.get(self.webfont_name, { 'fontdir_path': tmp_container, }) icons = webfont_store.get_manifests()[self.webfont_name] #print json.dumps(icons, indent=4) # Render CSS icon part css_content = self.render_css(self.css_templatepath, icons) return tmp_container, css_content
[ "def", "extract", "(", "self", ",", "zip_archive", ",", "font_files", ")", ":", "# Get a temp directory", "tmp_container", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'icomoon-tmp'", ")", "self", ".", "_debug", "(", "\"* Temporary dir for extracted archive: {}\"", ",", "tmp_container", ")", "# Extract manifest to temp directory", "zip_archive", ".", "extract", "(", "settings", ".", "ICOMOON_MANIFEST_FILENAME", ",", "tmp_container", ")", "# Then the font files", "for", "item", "in", "font_files", ":", "zip_archive", ".", "extract", "(", "item", ",", "tmp_container", ")", "# Get manifest for icon map", "webfont_store", "=", "WebfontStore", "(", "settings", ".", "ICOMOON_MANIFEST_FILENAME", ")", "webfont_store", ".", "get", "(", "self", ".", "webfont_name", ",", "{", "'fontdir_path'", ":", "tmp_container", ",", "}", ")", "icons", "=", "webfont_store", ".", "get_manifests", "(", ")", "[", "self", ".", "webfont_name", "]", "#print json.dumps(icons, indent=4)", "# Render CSS icon part", "css_content", "=", "self", ".", "render_css", "(", "self", ".", "css_templatepath", ",", "icons", ")", "return", "tmp_container", ",", "css_content" ]
36.576923
17.5
def build_reply_markup(self, buttons, inline_only=False): """ Builds a :tl`ReplyInlineMarkup` or :tl:`ReplyKeyboardMarkup` for the given buttons, or does nothing if either no buttons are provided or the provided argument is already a reply markup. This will add any event handlers defined in the buttons and delete old ones not to call them twice, so you should probably call this method manually for serious bots instead re-adding handlers every time you send a message. Magic can only go so far. """ if buttons is None: return None try: if buttons.SUBCLASS_OF_ID == 0xe2e10ef2: return buttons # crc32(b'ReplyMarkup'): except AttributeError: pass if not utils.is_list_like(buttons): buttons = [[buttons]] elif not utils.is_list_like(buttons[0]): buttons = [buttons] is_inline = False is_normal = False resize = None single_use = None selective = None rows = [] for row in buttons: current = [] for button in row: if isinstance(button, custom.Button): if button.resize is not None: resize = button.resize if button.single_use is not None: single_use = button.single_use if button.selective is not None: selective = button.selective button = button.button elif isinstance(button, custom.MessageButton): button = button.button inline = custom.Button._is_inline(button) is_inline |= inline is_normal |= not inline if button.SUBCLASS_OF_ID == 0xbad74a3: # 0xbad74a3 == crc32(b'KeyboardButton') current.append(button) if current: rows.append(types.KeyboardButtonRow(current)) if inline_only and is_normal: raise ValueError('You cannot use non-inline buttons here') elif is_inline == is_normal and is_normal: raise ValueError('You cannot mix inline with normal buttons') elif is_inline: return types.ReplyInlineMarkup(rows) # elif is_normal: return types.ReplyKeyboardMarkup( rows, resize=resize, single_use=single_use, selective=selective)
[ "def", "build_reply_markup", "(", "self", ",", "buttons", ",", "inline_only", "=", "False", ")", ":", "if", "buttons", "is", "None", ":", "return", "None", "try", ":", "if", "buttons", ".", "SUBCLASS_OF_ID", "==", "0xe2e10ef2", ":", "return", "buttons", "# crc32(b'ReplyMarkup'):", "except", "AttributeError", ":", "pass", "if", "not", "utils", ".", "is_list_like", "(", "buttons", ")", ":", "buttons", "=", "[", "[", "buttons", "]", "]", "elif", "not", "utils", ".", "is_list_like", "(", "buttons", "[", "0", "]", ")", ":", "buttons", "=", "[", "buttons", "]", "is_inline", "=", "False", "is_normal", "=", "False", "resize", "=", "None", "single_use", "=", "None", "selective", "=", "None", "rows", "=", "[", "]", "for", "row", "in", "buttons", ":", "current", "=", "[", "]", "for", "button", "in", "row", ":", "if", "isinstance", "(", "button", ",", "custom", ".", "Button", ")", ":", "if", "button", ".", "resize", "is", "not", "None", ":", "resize", "=", "button", ".", "resize", "if", "button", ".", "single_use", "is", "not", "None", ":", "single_use", "=", "button", ".", "single_use", "if", "button", ".", "selective", "is", "not", "None", ":", "selective", "=", "button", ".", "selective", "button", "=", "button", ".", "button", "elif", "isinstance", "(", "button", ",", "custom", ".", "MessageButton", ")", ":", "button", "=", "button", ".", "button", "inline", "=", "custom", ".", "Button", ".", "_is_inline", "(", "button", ")", "is_inline", "|=", "inline", "is_normal", "|=", "not", "inline", "if", "button", ".", "SUBCLASS_OF_ID", "==", "0xbad74a3", ":", "# 0xbad74a3 == crc32(b'KeyboardButton')", "current", ".", "append", "(", "button", ")", "if", "current", ":", "rows", ".", "append", "(", "types", ".", "KeyboardButtonRow", "(", "current", ")", ")", "if", "inline_only", "and", "is_normal", ":", "raise", "ValueError", "(", "'You cannot use non-inline buttons here'", ")", "elif", "is_inline", "==", "is_normal", "and", "is_normal", ":", "raise", "ValueError", "(", "'You cannot mix inline with normal buttons'", ")", "elif", "is_inline", ":", "return", "types", ".", "ReplyInlineMarkup", "(", "rows", ")", "# elif is_normal:", "return", "types", ".", "ReplyKeyboardMarkup", "(", "rows", ",", "resize", "=", "resize", ",", "single_use", "=", "single_use", ",", "selective", "=", "selective", ")" ]
36.441176
18.117647
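A usage sketch for build_reply_markup, assuming an existing Telethon TelegramClient instance named `client`; the Button import path matches recent Telethon releases:

from telethon import Button

# Two rows of inline buttons; mixing these with normal keyboard
# buttons in one markup raises ValueError, per the checks above.
markup = client.build_reply_markup([
    [Button.inline('Yes', b'y'), Button.inline('No', b'n')],
    [Button.url('Docs', 'https://docs.telethon.dev')],
])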
def add_ace(path, objectType, user, permission, acetype, propagation): r''' add an ace to an object path: path to the object (i.e. c:\\temp\\file, HKEY_LOCAL_MACHINE\\SOFTWARE\\KEY, etc) user: user to add permission: permissions for the user acetype: either allow/deny for each user/permission (ALLOW, DENY) propagation: how the ACE applies to children for Registry Keys and Directories(KEY, KEY&SUBKEYS, SUBKEYS) CLI Example: .. code-block:: bash allow domain\fakeuser full control on HKLM\\SOFTWARE\\somekey, propagate to this key and subkeys salt 'myminion' win_dacl.add_ace 'HKEY_LOCAL_MACHINE\\SOFTWARE\\somekey' 'Registry' 'domain\fakeuser' 'FULLCONTROL' 'ALLOW' 'KEY&SUBKEYS' ''' ret = {'result': None, 'changes': {}, 'comment': ''} if (path and user and permission and acetype and propagation): if objectType.upper() == "FILE": propagation = "FILE" dc = daclConstants() objectTypeBit = dc.getObjectTypeBit(objectType) path = dc.processPath(path, objectTypeBit) user = user.strip() permission = permission.strip().upper() acetype = acetype.strip().upper() propagation = propagation.strip().upper() sidRet = _getUserSid(user) if not sidRet['result']: return sidRet permissionbit = dc.getPermissionBit(objectTypeBit, permission) acetypebit = dc.getAceTypeBit(acetype) propagationbit = dc.getPropagationBit(objectTypeBit, propagation) dacl = _get_dacl(path, objectTypeBit) if dacl: acesAdded = [] try: if acetypebit == 0: dacl.AddAccessAllowedAceEx(win32security.ACL_REVISION, propagationbit, permissionbit, sidRet['sid']) elif acetypebit == 1: dacl.AddAccessDeniedAceEx(win32security.ACL_REVISION, propagationbit, permissionbit, sidRet['sid']) win32security.SetNamedSecurityInfo( path, objectTypeBit, win32security.DACL_SECURITY_INFORMATION, None, None, dacl, None) acesAdded.append(( '{0} {1} {2} on {3}' ).format( user, dc.getAceTypeText(acetype), dc.getPermissionText(objectTypeBit, permission), dc.getPropagationText(objectTypeBit, propagation))) ret['result'] = True except Exception as e: ret['comment'] = 'An error occurred attempting to add the ace. The error was {0}'.format(e) ret['result'] = False return ret if acesAdded: ret['changes']['Added ACEs'] = acesAdded else: ret['comment'] = 'Unable to obtain the DACL of {0}'.format(path) else: ret['comment'] = 'An empty value was specified for a required item.' ret['result'] = False return ret
[ "def", "add_ace", "(", "path", ",", "objectType", ",", "user", ",", "permission", ",", "acetype", ",", "propagation", ")", ":", "ret", "=", "{", "'result'", ":", "None", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "if", "(", "path", "and", "user", "and", "permission", "and", "acetype", "and", "propagation", ")", ":", "if", "objectType", ".", "upper", "(", ")", "==", "\"FILE\"", ":", "propagation", "=", "\"FILE\"", "dc", "=", "daclConstants", "(", ")", "objectTypeBit", "=", "dc", ".", "getObjectTypeBit", "(", "objectType", ")", "path", "=", "dc", ".", "processPath", "(", "path", ",", "objectTypeBit", ")", "user", "=", "user", ".", "strip", "(", ")", "permission", "=", "permission", ".", "strip", "(", ")", ".", "upper", "(", ")", "acetype", "=", "acetype", ".", "strip", "(", ")", ".", "upper", "(", ")", "propagation", "=", "propagation", ".", "strip", "(", ")", ".", "upper", "(", ")", "sidRet", "=", "_getUserSid", "(", "user", ")", "if", "not", "sidRet", "[", "'result'", "]", ":", "return", "sidRet", "permissionbit", "=", "dc", ".", "getPermissionBit", "(", "objectTypeBit", ",", "permission", ")", "acetypebit", "=", "dc", ".", "getAceTypeBit", "(", "acetype", ")", "propagationbit", "=", "dc", ".", "getPropagationBit", "(", "objectTypeBit", ",", "propagation", ")", "dacl", "=", "_get_dacl", "(", "path", ",", "objectTypeBit", ")", "if", "dacl", ":", "acesAdded", "=", "[", "]", "try", ":", "if", "acetypebit", "==", "0", ":", "dacl", ".", "AddAccessAllowedAceEx", "(", "win32security", ".", "ACL_REVISION", ",", "propagationbit", ",", "permissionbit", ",", "sidRet", "[", "'sid'", "]", ")", "elif", "acetypebit", "==", "1", ":", "dacl", ".", "AddAccessDeniedAceEx", "(", "win32security", ".", "ACL_REVISION", ",", "propagationbit", ",", "permissionbit", ",", "sidRet", "[", "'sid'", "]", ")", "win32security", ".", "SetNamedSecurityInfo", "(", "path", ",", "objectTypeBit", ",", "win32security", ".", "DACL_SECURITY_INFORMATION", ",", "None", ",", "None", ",", "dacl", ",", "None", ")", "acesAdded", ".", "append", "(", "(", "'{0} {1} {2} on {3}'", ")", ".", "format", "(", "user", ",", "dc", ".", "getAceTypeText", "(", "acetype", ")", ",", "dc", ".", "getPermissionText", "(", "objectTypeBit", ",", "permission", ")", ",", "dc", ".", "getPropagationText", "(", "objectTypeBit", ",", "propagation", ")", ")", ")", "ret", "[", "'result'", "]", "=", "True", "except", "Exception", "as", "e", ":", "ret", "[", "'comment'", "]", "=", "'An error occurred attempting to add the ace. The error was {0}'", ".", "format", "(", "e", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "if", "acesAdded", ":", "ret", "[", "'changes'", "]", "[", "'Added ACEs'", "]", "=", "acesAdded", "else", ":", "ret", "[", "'comment'", "]", "=", "'Unable to obtain the DACL of {0}'", ".", "format", "(", "path", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'An empty value was specified for a required item.'", "ret", "[", "'result'", "]", "=", "False", "return", "ret" ]
42.2
24.457143
def execute(self): """ Execute the actions necessary to prepare the instances and return None. :return: None """ self.print_info() if (self._config.state.prepared and not self._config.command_args.get('force')): msg = 'Skipping, instances already prepared.' LOG.warn(msg) return if not self._config.provisioner.playbooks.prepare: msg = 'Skipping, prepare playbook not configured.' LOG.warn(msg) return self._config.provisioner.prepare() self._config.state.change_state('prepared', True)
[ "def", "execute", "(", "self", ")", ":", "self", ".", "print_info", "(", ")", "if", "(", "self", ".", "_config", ".", "state", ".", "prepared", "and", "not", "self", ".", "_config", ".", "command_args", ".", "get", "(", "'force'", ")", ")", ":", "msg", "=", "'Skipping, instances already prepared.'", "LOG", ".", "warn", "(", "msg", ")", "return", "if", "not", "self", ".", "_config", ".", "provisioner", ".", "playbooks", ".", "prepare", ":", "msg", "=", "'Skipping, prepare playbook not configured.'", "LOG", ".", "warn", "(", "msg", ")", "return", "self", ".", "_config", ".", "provisioner", ".", "prepare", "(", ")", "self", ".", "_config", ".", "state", ".", "change_state", "(", "'prepared'", ",", "True", ")" ]
29
20.545455
def add(self, key, content, **metadata): """ :param key: Document unique identifier. :param str content: Content to store and index for search. :param metadata: Arbitrary key/value pairs to store for document. Add a document to the search index. """ self.members.add(key) document_hash = self._get_hash(key) document_hash.update(content=content, **metadata) for word, score in self.tokenizer.tokenize(content).items(): word_key = self.get_key(word) word_key[key] = -score
[ "def", "add", "(", "self", ",", "key", ",", "content", ",", "*", "*", "metadata", ")", ":", "self", ".", "members", ".", "add", "(", "key", ")", "document_hash", "=", "self", ".", "_get_hash", "(", "key", ")", "document_hash", ".", "update", "(", "content", "=", "content", ",", "*", "*", "metadata", ")", "for", "word", ",", "score", "in", "self", ".", "tokenizer", ".", "tokenize", "(", "content", ")", ".", "items", "(", ")", ":", "word_key", "=", "self", ".", "get_key", "(", "word", ")", "word_key", "[", "key", "]", "=", "-", "score" ]
37.533333
14.333333
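A hypothetical usage of the search index above; the key name and metadata are illustrative:

index.add('doc:1', 'redis is an in-memory data store', author='antirez')
# After this call:
# - 'doc:1' is a member of index.members
# - the per-document hash holds the content plus the author field
# - each token key maps 'doc:1' to its negated score, presumably so an
#   ascending sort over the key returns the best-scoring documents first.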
def getEmptyCells(self): """return an (x, y) pair for each empty cell""" return [(x, y) for x in self.__size_range for y in self.__size_range if self.getCell(x, y) == 0]
[ "def", "getEmptyCells", "(", "self", ")", ":", "return", "[", "(", "x", ",", "y", ")", "for", "x", "in", "self", ".", "__size_range", "for", "y", "in", "self", ".", "__size_range", "if", "self", ".", "getCell", "(", "x", ",", "y", ")", "==", "0", "]" ]
42.4
13.2
def move_transition_point(self, fragment_index, value): """ Change the transition point between fragment ``fragment_index`` and the next fragment to the time value ``value``. This method fails silently (without changing the fragment list) if at least one of the following conditions holds: * ``fragment_index`` is negative * ``fragment_index`` is the last or the second-to-last * ``value`` is after the current end of the next fragment * the current fragment and the next one are not adjacent and both proper intervals (not zero length) The above conditions ensure that the move makes sense and that it keeps the list satisfying the constraints. :param int fragment_index: the fragment index whose end should be moved :param value: the new transition point :type value: :class:`~aeneas.exacttiming.TimeValue` """ self.log(u"Called move_transition_point with") self.log([u" fragment_index %d", fragment_index]) self.log([u" value %.3f", value]) if (fragment_index < 0) or (fragment_index > (len(self) - 3)): self.log(u"Bad fragment_index, returning") return current_interval = self[fragment_index].interval next_interval = self[fragment_index + 1].interval if value > next_interval.end: self.log(u"Bad value, returning") return if not current_interval.is_non_zero_before_non_zero(next_interval): self.log(u"Bad interval configuration, returning") return current_interval.end = value next_interval.begin = value self.log(u"Moved transition point")
[ "def", "move_transition_point", "(", "self", ",", "fragment_index", ",", "value", ")", ":", "self", ".", "log", "(", "u\"Called move_transition_point with\"", ")", "self", ".", "log", "(", "[", "u\" fragment_index %d\"", ",", "fragment_index", "]", ")", "self", ".", "log", "(", "[", "u\" value %.3f\"", ",", "value", "]", ")", "if", "(", "fragment_index", "<", "0", ")", "or", "(", "fragment_index", ">", "(", "len", "(", "self", ")", "-", "3", ")", ")", ":", "self", ".", "log", "(", "u\"Bad fragment_index, returning\"", ")", "return", "current_interval", "=", "self", "[", "fragment_index", "]", ".", "interval", "next_interval", "=", "self", "[", "fragment_index", "+", "1", "]", ".", "interval", "if", "value", ">", "next_interval", ".", "end", ":", "self", ".", "log", "(", "u\"Bad value, returning\"", ")", "return", "if", "not", "current_interval", ".", "is_non_zero_before_non_zero", "(", "next_interval", ")", ":", "self", ".", "log", "(", "u\"Bad interval configuration, returning\"", ")", "return", "current_interval", ".", "end", "=", "value", "next_interval", ".", "begin", "=", "value", "self", ".", "log", "(", "u\"Moved transition point\"", ")" ]
45.052632
19.421053
def uid_something_colon(self, node): """ Creates op_pos for node from uid to colon """ node.op_pos = [ NodeWithPosition(node.uid, (node.first_line, node.first_col)) ] position = (node.body[0].first_line, node.body[0].first_col) last, first = self.operators[':'].find_previous(position) node.op_pos.append(NodeWithPosition(last, first)) return last
[ "def", "uid_something_colon", "(", "self", ",", "node", ")", ":", "node", ".", "op_pos", "=", "[", "NodeWithPosition", "(", "node", ".", "uid", ",", "(", "node", ".", "first_line", ",", "node", ".", "first_col", ")", ")", "]", "position", "=", "(", "node", ".", "body", "[", "0", "]", ".", "first_line", ",", "node", ".", "body", "[", "0", "]", ".", "first_col", ")", "last", ",", "first", "=", "self", ".", "operators", "[", "':'", "]", ".", "find_previous", "(", "position", ")", "node", ".", "op_pos", ".", "append", "(", "NodeWithPosition", "(", "last", ",", "first", ")", ")", "return", "last" ]
45.222222
19.555556
def list(self, status=values.unset, unique_name=values.unset, date_created_after=values.unset, date_created_before=values.unset, limit=None, page_size=None): """ Lists RoomInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param RoomInstance.RoomStatus status: Only show Rooms with the given status. :param unicode unique_name: Only show Rooms with the provided Name. :param datetime date_created_after: Only show Rooms that started on or after this date, given as YYYY-MM-DD. :param datetime date_created_before: Only show Rooms that started before this date, given as YYYY-MM-DD. :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.video.v1.room.RoomInstance] """ return list(self.stream( status=status, unique_name=unique_name, date_created_after=date_created_after, date_created_before=date_created_before, limit=limit, page_size=page_size, ))
[ "def", "list", "(", "self", ",", "status", "=", "values", ".", "unset", ",", "unique_name", "=", "values", ".", "unset", ",", "date_created_after", "=", "values", ".", "unset", ",", "date_created_before", "=", "values", ".", "unset", ",", "limit", "=", "None", ",", "page_size", "=", "None", ")", ":", "return", "list", "(", "self", ".", "stream", "(", "status", "=", "status", ",", "unique_name", "=", "unique_name", ",", "date_created_after", "=", "date_created_after", ",", "date_created_before", "=", "date_created_before", ",", "limit", "=", "limit", ",", "page_size", "=", "page_size", ",", ")", ")" ]
55.933333
29.066667
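A usage sketch assuming an authenticated twilio-python Client; the keyword names mirror the signature above:

from datetime import datetime
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXX', 'auth_token')  # placeholder credentials
rooms = client.video.rooms.list(
    status='completed',
    date_created_after=datetime(2019, 1, 1),
    limit=20,  # stream() stops after 20 records
)
for room in rooms:
    print(room.sid, room.unique_name)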
def sparse_svd(sparse_matrix, num_values, max_iter): """Wrapper around SciPy's Singular Value Decomposition for sparse matrices. Args: sparse_matrix: a SciPy sparse matrix (typically large). num_values: the number of largest singular values to compute. max_iter: maximum number of iterations (>= 0) in the decomposition. If max_iter is None, runs FLAGS.max_iter_sparse_svd steps. If max_iter == 0, runs until convergence. Otherwise will run max_iter steps. Returns: A (u, s, v) tuple where s is an array entailing the singular values, and (u, v) the singular vector matrices. u is column orthogonal and v is row orthogonal. s is sorted in increasing order. """ if num_values <= 0: raise ValueError("num_values should be > 0 but instead is %d." % num_values) if max_iter is not None and max_iter < 0: raise ValueError("max_iter should be >= 0 but instead is %d." % max_iter) if max_iter is None: max_iter = FLAGS.max_iter_sparse_svd elif not max_iter: max_iter = None u, s, v = linalg.svds( sparse_matrix, k=num_values, maxiter=max_iter, return_singular_vectors=True) return (u, s, v)
[ "def", "sparse_svd", "(", "sparse_matrix", ",", "num_values", ",", "max_iter", ")", ":", "if", "num_values", "<=", "0", ":", "raise", "ValueError", "(", "\"num_values should be > 0 but instead is %d.\"", "%", "num_values", ")", "if", "max_iter", "is", "not", "None", "and", "max_iter", "<", "0", ":", "raise", "ValueError", "(", "\"max_iter should be >= 0 but instead is %d.\"", "%", "max_iter", ")", "if", "max_iter", "is", "None", ":", "max_iter", "=", "FLAGS", ".", "max_iter_sparse_svd", "elif", "not", "max_iter", ":", "max_iter", "=", "None", "u", ",", "s", ",", "v", "=", "linalg", ".", "svds", "(", "sparse_matrix", ",", "k", "=", "num_values", ",", "maxiter", "=", "max_iter", ",", "return_singular_vectors", "=", "True", ")", "return", "(", "u", ",", "s", ",", "v", ")" ]
35.84375
24.78125
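A quick usage sketch for the wrapper above, assuming it is importable from its module; num_values must stay below min(matrix.shape) for the underlying ARPACK solver:

import scipy.sparse

matrix = scipy.sparse.random(100, 80, density=0.05, format='csr', random_state=0)
u, s, v = sparse_svd(matrix, num_values=5, max_iter=0)  # max_iter=0 -> run to convergence
print(s)                  # five singular values, in increasing order
print(u.shape, v.shape)   # (100, 5) and (5, 80)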
def draw_route(self, df_route, cr, color=None, line_width=None): ''' Draw a line between electrodes listed in a route. Arguments --------- - `df_route`: * A `pandas.DataFrame` containing a column named `electrode_i`. * For each row, `electrode_i` corresponds to the integer index of the corresponding electrode. - `cr`: Cairo context. - `color`: Either an RGB or RGBA tuple, with each color channel in the range [0, 1]. If `color` is `None`, the electrode color is set to white. - `line_width`: Optional line width; computed from the endpoint marker size when `None`. ''' df_route_centers = (self.canvas.df_shape_centers .ix[df_route.electrode_i][['x_center', 'y_center']]) df_endpoint_marker = (.6 * self.get_endpoint_marker(df_route_centers) + df_route_centers.iloc[-1].values) # Save cairo context to restore after drawing route. cr.save() if color is None: # Colors from ["Show me the numbers"][1]. # # [1]: http://blog.axc.net/its-the-colors-you-have/ # LiteOrange = rgb(251,178,88); # MedOrange = rgb(250,164,58); # LiteGreen = rgb(144,205,151); # MedGreen = rgb(96,189,104); color_rgb_255 = np.array([96,189,104, .8 * 255]) color = (color_rgb_255 / 255.).tolist() if len(color) < 4: color += [1.] * (4 - len(color)) cr.set_source_rgba(*color) cr.move_to(*df_route_centers.iloc[0]) for electrode_i, center_i in df_route_centers.iloc[1:].iterrows(): cr.line_to(*center_i) if line_width is None: line_width = np.sqrt((df_endpoint_marker.max().values - df_endpoint_marker.min().values).prod()) * .1 cr.set_line_width(line_width) cr.stroke() cr.move_to(*df_endpoint_marker.iloc[0]) for electrode_i, center_i in df_endpoint_marker.iloc[1:].iterrows(): cr.line_to(*center_i) cr.close_path() cr.set_source_rgba(*color) cr.fill() # Restore cairo context after drawing route. cr.restore()
[ "def", "draw_route", "(", "self", ",", "df_route", ",", "cr", ",", "color", "=", "None", ",", "line_width", "=", "None", ")", ":", "df_route_centers", "=", "(", "self", ".", "canvas", ".", "df_shape_centers", ".", "ix", "[", "df_route", ".", "electrode_i", "]", "[", "[", "'x_center'", ",", "'y_center'", "]", "]", ")", "df_endpoint_marker", "=", "(", ".6", "*", "self", ".", "get_endpoint_marker", "(", "df_route_centers", ")", "+", "df_route_centers", ".", "iloc", "[", "-", "1", "]", ".", "values", ")", "# Save cairo context to restore after drawing route.", "cr", ".", "save", "(", ")", "if", "color", "is", "None", ":", "# Colors from [\"Show me the numbers\"][1].", "#", "# [1]: http://blog.axc.net/its-the-colors-you-have/", "# LiteOrange = rgb(251,178,88);", "# MedOrange = rgb(250,164,58);", "# LiteGreen = rgb(144,205,151);", "# MedGreen = rgb(96,189,104);", "color_rgb_255", "=", "np", ".", "array", "(", "[", "96", ",", "189", ",", "104", ",", ".8", "*", "255", "]", ")", "color", "=", "(", "color_rgb_255", "/", "255.", ")", ".", "tolist", "(", ")", "if", "len", "(", "color", ")", "<", "4", ":", "color", "+=", "[", "1.", "]", "*", "(", "4", "-", "len", "(", "color", ")", ")", "cr", ".", "set_source_rgba", "(", "*", "color", ")", "cr", ".", "move_to", "(", "*", "df_route_centers", ".", "iloc", "[", "0", "]", ")", "for", "electrode_i", ",", "center_i", "in", "df_route_centers", ".", "iloc", "[", "1", ":", "]", ".", "iterrows", "(", ")", ":", "cr", ".", "line_to", "(", "*", "center_i", ")", "if", "line_width", "is", "None", ":", "line_width", "=", "np", ".", "sqrt", "(", "(", "df_endpoint_marker", ".", "max", "(", ")", ".", "values", "-", "df_endpoint_marker", ".", "min", "(", ")", ".", "values", ")", ".", "prod", "(", ")", ")", "*", ".1", "cr", ".", "set_line_width", "(", "4", ")", "cr", ".", "stroke", "(", ")", "cr", ".", "move_to", "(", "*", "df_endpoint_marker", ".", "iloc", "[", "0", "]", ")", "for", "electrode_i", ",", "center_i", "in", "df_endpoint_marker", ".", "iloc", "[", "1", ":", "]", ".", "iterrows", "(", ")", ":", "cr", ".", "line_to", "(", "*", "center_i", ")", "cr", ".", "close_path", "(", ")", "cr", ".", "set_source_rgba", "(", "*", "color", ")", "cr", ".", "fill", "(", ")", "# Restore cairo context after drawing route.", "cr", ".", "restore", "(", ")" ]
41.166667
20.5
def popen(self, cmd): """Execute an external command and return (rc, output). """ process = Popen(cmd, shell=True, stdout=PIPE, env=self.env) stdoutdata, stderrdata = process.communicate() return process.returncode, stdoutdata
[ "def", "popen", "(", "self", ",", "cmd", ")", ":", "process", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "env", "=", "self", ".", "env", ")", "stdoutdata", ",", "stderrdata", "=", "process", ".", "communicate", "(", ")", "return", "process", ".", "returncode", ",", "stdoutdata" ]
43.5
10.833333
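A hypothetical call site for the helper above; `self` is whatever object carries the env used in the Popen call:

rc, output = self.popen('git rev-parse HEAD')  # hypothetical command
if rc != 0:
    raise RuntimeError('command failed with exit code %d' % rc)
# Note: stderr is not redirected, so error text goes straight to the
# parent process's stderr and stderrdata above is always None.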
def inconsistent(self): r""" Perform some consistency tests on the graph represented by this object Returns ------- consistent : bool or list False if consistent, else a list of inconsistency messages. Notes ----- This check is very time intensive and should not be executed on huge graphs, except for debugging purposes. """ messages = [] for node in list(self.__tweights.keys()): if not node <= self.__nodes: messages.append("Node {} in t-weights but not in nodes.".format(node)) for node in self.__snodes: if not node <= self.__nodes: messages.append("Node {} in s-nodes but not in nodes.".format(node)) for node in self.__tnodes: if not node <= self.__nodes: messages.append("Node {} in t-nodes but not in nodes.".format(node)) for e in list(self.__nweights.keys()): if not e[0] <= self.__nodes: messages.append("Node {} in edge {} but not in nodes.".format(e[0], e)) if not e[1] <= self.__nodes: messages.append("Node {} in edge {} but not in nodes.".format(e[1], e)) if (e[1], e[0]) in iter(list(self.__nweights.keys())): messages.append("The reversed edges of {} is also in the n-weights.".format(e)) if 0 == len(messages): return False else: return messages
[ "def", "inconsistent", "(", "self", ")", ":", "messages", "=", "[", "]", "for", "node", "in", "list", "(", "self", ".", "__tweights", ".", "keys", "(", ")", ")", ":", "if", "not", "node", "<=", "self", ".", "__nodes", ":", "messages", ".", "append", "(", "\"Node {} in t-weights but not in nodes.\"", ".", "format", "(", "node", ")", ")", "for", "node", "in", "self", ".", "__snodes", ":", "if", "not", "node", "<=", "self", ".", "__nodes", ":", "messages", ".", "append", "(", "\"Node {} in s-nodes but not in nodes.\"", ".", "format", "(", "node", ")", ")", "for", "node", "in", "self", ".", "__tnodes", ":", "if", "not", "node", "<=", "self", ".", "__nodes", ":", "messages", ".", "append", "(", "\"Node {} in t-nodes but not in nodes.\"", ".", "format", "(", "node", ")", ")", "for", "e", "in", "list", "(", "self", ".", "__nweights", ".", "keys", "(", ")", ")", ":", "if", "not", "e", "[", "0", "]", "<=", "self", ".", "__nodes", ":", "messages", ".", "append", "(", "\"Node {} in edge {} but not in nodes.\"", ".", "format", "(", "e", "[", "0", "]", ",", "e", ")", ")", "if", "not", "e", "[", "1", "]", "<=", "self", ".", "__nodes", ":", "messages", ".", "append", "(", "\"Node {} in edge {} but not in nodes.\"", ".", "format", "(", "e", "[", "1", "]", ",", "e", ")", ")", "if", "(", "e", "[", "1", "]", ",", "e", "[", "0", "]", ")", "in", "iter", "(", "list", "(", "self", ".", "__nweights", ".", "keys", "(", ")", ")", ")", ":", "messages", ".", "append", "(", "\"The reversed edges of {} is also in the n-weights.\"", ".", "format", "(", "e", ")", ")", "if", "0", "==", "len", "(", "messages", ")", ":", "return", "False", "else", ":", "return", "messages" ]
48.448276
30.137931
def print_fold(column_to_fold, total_columns, skips): """Print a row that removes the given column and shifts all the following columns.""" format_str = '{:<2}' * (total_columns - 1) cols = [] for i in range(column_to_fold): # print(i) if i in skips: cols.append(" ") else: cols.append("| ") for i in range(column_to_fold + 1, total_columns): # print(i) if i in skips: cols.append(" ") else: cols.append(" /") print(format_str.format(*cols))
[ "def", "print_fold", "(", "column_to_fold", ",", "total_columns", ",", "skips", ")", ":", "format_str", "=", "'{:<2}'", "*", "(", "total_columns", "-", "1", ")", "cols", "=", "[", "]", "for", "i", "in", "range", "(", "column_to_fold", ")", ":", "# print(i)", "if", "i", "in", "skips", ":", "cols", ".", "append", "(", "\" \"", ")", "else", ":", "cols", ".", "append", "(", "\"| \"", ")", "for", "i", "in", "range", "(", "column_to_fold", "+", "1", ",", "total_columns", ")", ":", "# print(i)", "if", "i", "in", "skips", ":", "cols", ".", "append", "(", "\" \"", ")", "else", ":", "cols", ".", "append", "(", "\" /\"", ")", "print", "(", "format_str", ".", "format", "(", "*", "cols", ")", ")" ]
30.555556
13.777778
def remove_elaborated(type_): """removes type-declaration class-binder :class:`elaborated_t` from the `type_` If `type_` is not :class:`elaborated_t`, it will be returned as is """ nake_type = remove_alias(type_) if not is_elaborated(nake_type): return type_ else: if isinstance(type_, cpptypes.elaborated_t): type_ = type_.base return type_
[ "def", "remove_elaborated", "(", "type_", ")", ":", "nake_type", "=", "remove_alias", "(", "type_", ")", "if", "not", "is_elaborated", "(", "nake_type", ")", ":", "return", "type_", "else", ":", "if", "isinstance", "(", "type_", ",", "cpptypes", ".", "elaborated_t", ")", ":", "type_", "=", "type_", ".", "base", "return", "type_" ]
30
16.307692
def get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None): """Return the length of a variant Args: alt_len(int) ref_len(int) category(str) pos(int) end(int) svtype(str) svlen(int) """ # -1 would indicate uncertain length length = -1 if category in ('snv', 'indel', 'cancer'): if ref_len == alt_len: length = alt_len else: length = abs(ref_len - alt_len) elif category == 'sv': if svtype == 'bnd': length = int(10e10) else: if svlen: length = abs(int(svlen)) # Some software does not give a length but they give END elif end: if end != pos: length = end - pos return length
[ "def", "get_length", "(", "alt_len", ",", "ref_len", ",", "category", ",", "pos", ",", "end", ",", "svtype", "=", "None", ",", "svlen", "=", "None", ")", ":", "# -1 would indicate uncertain length", "length", "=", "-", "1", "if", "category", "in", "(", "'snv'", ",", "'indel'", ",", "'cancer'", ")", ":", "if", "ref_len", "==", "alt_len", ":", "length", "=", "alt_len", "else", ":", "length", "=", "abs", "(", "ref_len", "-", "alt_len", ")", "elif", "category", "==", "'sv'", ":", "if", "svtype", "==", "'bnd'", ":", "length", "=", "int", "(", "10e10", ")", "else", ":", "if", "svlen", ":", "length", "=", "abs", "(", "int", "(", "svlen", ")", ")", "# Some software does not give a length but they give END", "elif", "end", ":", "if", "end", "!=", "pos", ":", "length", "=", "end", "-", "pos", "return", "length" ]
26.758621
17.172414
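Worked examples of the branch logic above (positions are arbitrary):

get_length(alt_len=1, ref_len=1, category='snv', pos=100, end=100)               # -> 1 (equal lengths)
get_length(alt_len=1, ref_len=3, category='indel', pos=100, end=102)             # -> 2 (two deleted bases)
get_length(alt_len=1, ref_len=1, category='sv', pos=100, end=100, svtype='bnd')  # -> 100000000000 (int(10e10))
get_length(alt_len=1, ref_len=1, category='sv', pos=100, end=1100, svtype='del') # -> 1000 (falls back to END - POS)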
def _raw_object_cache_notify(self, data): """ Low-level notify hook. """ if self._object_cache_notify is None: return module_ptr = data.contents.module_ptr buf_ptr = data.contents.buf_ptr buf_len = data.contents.buf_len buf = string_at(buf_ptr, buf_len) module = self._find_module_ptr(module_ptr) if module is None: # The LLVM EE should only give notifications for modules # known by us. raise RuntimeError("object compilation notification " "for unknown module %s" % (module_ptr,)) self._object_cache_notify(module, buf)
[ "def", "_raw_object_cache_notify", "(", "self", ",", "data", ")", ":", "if", "self", ".", "_object_cache_notify", "is", "None", ":", "return", "module_ptr", "=", "data", ".", "contents", ".", "module_ptr", "buf_ptr", "=", "data", ".", "contents", ".", "buf_ptr", "buf_len", "=", "data", ".", "contents", ".", "buf_len", "buf", "=", "string_at", "(", "buf_ptr", ",", "buf_len", ")", "module", "=", "self", ".", "_find_module_ptr", "(", "module_ptr", ")", "if", "module", "is", "None", ":", "# The LLVM EE should only give notifications for modules", "# known by us.", "raise", "RuntimeError", "(", "\"object compilation notification \"", "\"for unknown module %s\"", "%", "(", "module_ptr", ",", ")", ")", "self", ".", "_object_cache_notify", "(", "module", ",", "buf", ")" ]
39.529412
10.235294
def _print_map_dict(self, argkey, filename, append): """Prints a dictionary that has variable => value mappings.""" result = [] skeys = list(sorted(self.curargs[argkey].keys())) for key in skeys: result.append("'{}' => {}".format(key, self.curargs[argkey][key])) self._redirect_output('\n'.join(result), filename, append, msg.info)
[ "def", "_print_map_dict", "(", "self", ",", "argkey", ",", "filename", ",", "append", ")", ":", "result", "=", "[", "]", "skeys", "=", "list", "(", "sorted", "(", "self", ".", "curargs", "[", "argkey", "]", ".", "keys", "(", ")", ")", ")", "for", "key", "in", "skeys", ":", "result", ".", "append", "(", "\"'{}' => {}\"", ".", "format", "(", "key", ",", "self", ".", "curargs", "[", "argkey", "]", "[", "key", "]", ")", ")", "self", ".", "_redirect_output", "(", "'\\n'", ".", "join", "(", "result", ")", ",", "filename", ",", "append", ",", "msg", ".", "info", ")" ]
53.857143
19.857143
def sleep(self, seconds): """Lock the connection for a given number of seconds. :param seconds: Length of time to lock the connection. :type seconds: int """ try: self.lock() time.sleep(seconds) except compat.TimeoutException: _logger.debug("Connection %r timed out while waiting for lock acquisition.", self.container_id) finally: self.release()
[ "def", "sleep", "(", "self", ",", "seconds", ")", ":", "try", ":", "self", ".", "lock", "(", ")", "time", ".", "sleep", "(", "seconds", ")", "except", "compat", ".", "TimeoutException", ":", "_logger", ".", "debug", "(", "\"Connection %r timed out while waiting for lock acquisition.\"", ",", "self", ".", "container_id", ")", "finally", ":", "self", ".", "release", "(", ")" ]
33.769231
19.307692
def get_comment_form_for_create(self, reference_id, comment_record_types): """Gets the comment form for creating new comments. A new form should be requested for each create transaction. arg: reference_id (osid.id.Id): the ``Id`` for the reference object arg: comment_record_types (osid.type.Type[]): array of comment record types return: (osid.commenting.CommentForm) - the comment form raise: NullArgument - ``reference_id or comment_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.relationship.CommentAdminSession.get_comment_form_for_create_template # These really need to be in module imports: if not isinstance(reference_id, ABCId): raise errors.InvalidArgument('argument is not a valid OSID Id') for arg in comment_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if comment_record_types == []: # WHY are we passing book_id = self._catalog_id below, seems redundant: # Probably don't need to send effective_agent_id, since the form can get that from proxy. obj_form = objects.CommentForm( book_id=self._catalog_id, reference_id=reference_id, effective_agent_id=str(self.get_effective_agent_id()), catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy) else: obj_form = objects.CommentForm( book_id=self._catalog_id, record_types=comment_record_types, reference_id=reference_id, effective_agent_id=self.get_effective_agent_id(), catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy) obj_form._for_update = False self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
[ "def", "get_comment_form_for_create", "(", "self", ",", "reference_id", ",", "comment_record_types", ")", ":", "# Implemented from template for", "# osid.relationship.CommentAdminSession.get_comment_form_for_create_template", "# These really need to be in module imports:", "if", "not", "isinstance", "(", "reference_id", ",", "ABCId", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'argument is not a valid OSID Id'", ")", "for", "arg", "in", "comment_record_types", ":", "if", "not", "isinstance", "(", "arg", ",", "ABCType", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more argument array elements is not a valid OSID Type'", ")", "if", "comment_record_types", "==", "[", "]", ":", "# WHY are we passing book_id = self._catalog_id below, seems redundant:", "# Probably don't need to send effective_agent_id, since the form can get that from proxy.", "obj_form", "=", "objects", ".", "CommentForm", "(", "book_id", "=", "self", ".", "_catalog_id", ",", "reference_id", "=", "reference_id", ",", "effective_agent_id", "=", "str", "(", "self", ".", "get_effective_agent_id", "(", ")", ")", ",", "catalog_id", "=", "self", ".", "_catalog_id", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "else", ":", "obj_form", "=", "objects", ".", "CommentForm", "(", "book_id", "=", "self", ".", "_catalog_id", ",", "record_types", "=", "comment_record_types", ",", "reference_id", "=", "reference_id", ",", "effective_agent_id", "=", "self", ".", "get_effective_agent_id", "(", ")", ",", "catalog_id", "=", "self", ".", "_catalog_id", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "obj_form", ".", "_for_update", "=", "False", "self", ".", "_forms", "[", "obj_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "CREATED", "return", "obj_form" ]
48.183673
18.22449
def tf_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None): """ Creates the TensorFlow operations for calculating the full loss of a batch. Args: states: Dict of state tensors. internals: List of prior internal state tensors. actions: Dict of action tensors. terminal: Terminal boolean tensor. reward: Reward tensor. next_states: Dict of successor state tensors. next_internals: List of posterior internal state tensors. update: Boolean tensor indicating whether this call happens during an update. reference: Optional reference tensor(s), in case of a comparative loss. Returns: Loss tensor. """ # Mean loss per instance loss_per_instance = self.fn_loss_per_instance( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, next_states=next_states, next_internals=next_internals, update=update, reference=reference ) # Returns no-op. updated = self.memory.update_batch(loss_per_instance=loss_per_instance) with tf.control_dependencies(control_inputs=(updated,)): loss = tf.reduce_mean(input_tensor=loss_per_instance, axis=0) # Loss without regularization summary. if 'losses' in self.summary_labels: tf.contrib.summary.scalar(name='loss-without-regularization', tensor=loss) # Regularization losses. losses = self.fn_regularization_losses(states=states, internals=internals, update=update) if len(losses) > 0: loss += tf.add_n(inputs=[losses[name] for name in sorted(losses)]) if 'regularization' in self.summary_labels: for name in sorted(losses): tf.contrib.summary.scalar(name=('regularization/' + name), tensor=losses[name]) # Total loss summary. if 'losses' in self.summary_labels or 'total-loss' in self.summary_labels: tf.contrib.summary.scalar(name='total-loss', tensor=loss) return loss
[ "def", "tf_loss", "(", "self", ",", "states", ",", "internals", ",", "actions", ",", "terminal", ",", "reward", ",", "next_states", ",", "next_internals", ",", "update", ",", "reference", "=", "None", ")", ":", "# Mean loss per instance", "loss_per_instance", "=", "self", ".", "fn_loss_per_instance", "(", "states", "=", "states", ",", "internals", "=", "internals", ",", "actions", "=", "actions", ",", "terminal", "=", "terminal", ",", "reward", "=", "reward", ",", "next_states", "=", "next_states", ",", "next_internals", "=", "next_internals", ",", "update", "=", "update", ",", "reference", "=", "reference", ")", "# Returns no-op.", "updated", "=", "self", ".", "memory", ".", "update_batch", "(", "loss_per_instance", "=", "loss_per_instance", ")", "with", "tf", ".", "control_dependencies", "(", "control_inputs", "=", "(", "updated", ",", ")", ")", ":", "loss", "=", "tf", ".", "reduce_mean", "(", "input_tensor", "=", "loss_per_instance", ",", "axis", "=", "0", ")", "# Loss without regularization summary.", "if", "'losses'", "in", "self", ".", "summary_labels", ":", "tf", ".", "contrib", ".", "summary", ".", "scalar", "(", "name", "=", "'loss-without-regularization'", ",", "tensor", "=", "loss", ")", "# Regularization losses.", "losses", "=", "self", ".", "fn_regularization_losses", "(", "states", "=", "states", ",", "internals", "=", "internals", ",", "update", "=", "update", ")", "if", "len", "(", "losses", ")", ">", "0", ":", "loss", "+=", "tf", ".", "add_n", "(", "inputs", "=", "[", "losses", "[", "name", "]", "for", "name", "in", "sorted", "(", "losses", ")", "]", ")", "if", "'regularization'", "in", "self", ".", "summary_labels", ":", "for", "name", "in", "sorted", "(", "losses", ")", ":", "tf", ".", "contrib", ".", "summary", ".", "scalar", "(", "name", "=", "(", "'regularization/'", "+", "name", ")", ",", "tensor", "=", "losses", "[", "name", "]", ")", "# Total loss summary.", "if", "'losses'", "in", "self", ".", "summary_labels", "or", "'total-loss'", "in", "self", ".", "summary_labels", ":", "tf", ".", "contrib", ".", "summary", ".", "scalar", "(", "name", "=", "'total-loss'", ",", "tensor", "=", "loss", ")", "return", "loss" ]
42.867925
23.962264
def search(self, query, nid=None): """Search for posts with ``query`` :type nid: str :param nid: This is the ID of the network to get the feed from. This is optional and only to override the existing `network_id` entered when the class was created :type query: str :param query: The search query; should just be keywords for posts that you are looking for """ r = self.request( method="network.search", nid=nid, data=dict(query=query) ) return self._handle_error(r, "Search with query '{}' failed." .format(query))
[ "def", "search", "(", "self", ",", "query", ",", "nid", "=", "None", ")", ":", "r", "=", "self", ".", "request", "(", "method", "=", "\"network.search\"", ",", "nid", "=", "nid", ",", "data", "=", "dict", "(", "query", "=", "query", ")", ")", "return", "self", ".", "_handle_error", "(", "r", ",", "\"Search with query '{}' failed.\"", ".", "format", "(", "query", ")", ")" ]
37.388889
16.555556
def faulty(): ''' Display list of faulty resources CLI Example: .. code-block:: bash salt '*' fmadm.faulty ''' fmadm = _check_fmadm() cmd = '{cmd} faulty'.format( cmd=fmadm, ) res = __salt__['cmd.run_all'](cmd) result = {} if res['stdout'] == '': result = False else: result = _parse_fmadm_faulty(res['stdout']) return result
[ "def", "faulty", "(", ")", ":", "fmadm", "=", "_check_fmadm", "(", ")", "cmd", "=", "'{cmd} faulty'", ".", "format", "(", "cmd", "=", "fmadm", ",", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "result", "=", "{", "}", "if", "res", "[", "'stdout'", "]", "==", "''", ":", "result", "=", "False", "else", ":", "result", "=", "_parse_fmadm_faulty", "(", "res", "[", "'stdout'", "]", ")", "return", "result" ]
17.818182
23.181818
def do_denyaccess(self, line): """denyaccess <subject> Remove subject from access policy.""" subject, = self._split_args(line, 1, 0) self._command_processor.get_session().get_access_control().remove_allowed_subject( subject ) self._print_info_if_verbose( 'Removed subject "{}" from access policy'.format(subject) )
[ "def", "do_denyaccess", "(", "self", ",", "line", ")", ":", "subject", ",", "=", "self", ".", "_split_args", "(", "line", ",", "1", ",", "0", ")", "self", ".", "_command_processor", ".", "get_session", "(", ")", ".", "get_access_control", "(", ")", ".", "remove_allowed_subject", "(", "subject", ")", "self", ".", "_print_info_if_verbose", "(", "'Removed subject \"{}\" from access policy'", ".", "format", "(", "subject", ")", ")" ]
42
20.333333
def _hexdecode(hexstring): """Convert a hex encoded string to a byte string. For example '4A' will return 'J', and '04' will return ``'\\x04'`` (which has length 1). Args: hexstring (str): Can be for example 'A3' or 'A3B4'. Must be of even length. Allowed characters are '0' to '9', 'a' to 'f' and 'A' to 'F' (not space). Returns: A string of half the length, with characters corresponding to all 0-255 values for each byte. Raises: TypeError, ValueError """ # Note: For Python3 the appropriate would be: raise TypeError(new_error_message) from err # but the Python2 interpreter will indicate SyntaxError. # Thus we need to live with this warning in Python3: # 'During handling of the above exception, another exception occurred' _checkString(hexstring, description='hexstring') if len(hexstring) % 2 != 0: raise ValueError('The input hexstring must be of even length. Given: {!r}'.format(hexstring)) if sys.version_info[0] > 2: by = bytes(hexstring, 'latin1') try: return str(binascii.unhexlify(by), encoding='latin1') except binascii.Error as err: new_error_message = 'Hexdecode reported an error: {!s}. Input hexstring: {}'.format(err.args[0], hexstring) raise TypeError(new_error_message) else: try: return hexstring.decode('hex') except TypeError as err: raise TypeError('Hexdecode reported an error: {}. Input hexstring: {}'.format(err.message, hexstring))
[ "def", "_hexdecode", "(", "hexstring", ")", ":", "# Note: For Python3 the appropriate would be: raise TypeError(new_error_message) from err", "# but the Python2 interpreter will indicate SyntaxError.", "# Thus we need to live with this warning in Python3:", "# 'During handling of the above exception, another exception occurred'", "_checkString", "(", "hexstring", ",", "description", "=", "'hexstring'", ")", "if", "len", "(", "hexstring", ")", "%", "2", "!=", "0", ":", "raise", "ValueError", "(", "'The input hexstring must be of even length. Given: {!r}'", ".", "format", "(", "hexstring", ")", ")", "if", "sys", ".", "version_info", "[", "0", "]", ">", "2", ":", "by", "=", "bytes", "(", "hexstring", ",", "'latin1'", ")", "try", ":", "return", "str", "(", "binascii", ".", "unhexlify", "(", "by", ")", ",", "encoding", "=", "'latin1'", ")", "except", "binascii", ".", "Error", "as", "err", ":", "new_error_message", "=", "'Hexdecode reported an error: {!s}. Input hexstring: {}'", ".", "format", "(", "err", ".", "args", "[", "0", "]", ",", "hexstring", ")", "raise", "TypeError", "(", "new_error_message", ")", "else", ":", "try", ":", "return", "hexstring", ".", "decode", "(", "'hex'", ")", "except", "TypeError", "as", "err", ":", "raise", "TypeError", "(", "'Hexdecode reported an error: {}. Input hexstring: {}'", ".", "format", "(", "err", ".", "message", ",", "hexstring", ")", ")" ]
39.205128
29.974359
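Expected behaviour of _hexdecode() on Python 3, reproduced directly with binascii (which the function wraps); the sample values come from its docstring.

import binascii

print(str(binascii.unhexlify(b'4A'), encoding='latin1'))       # J
print(len(str(binascii.unhexlify(b'04'), encoding='latin1')))  # 1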
def check_version(dstore): """ :param dstore: a DataStore instance :returns: a message if the stored version is different from the current version """ ds_version = dstore.hdf5.attrs['engine_version'] if ds_version != __version__: return (': the datastore is at version %s, but the exporter at ' 'version %s' % (ds_version, __version__)) else: return ''
[ "def", "check_version", "(", "dstore", ")", ":", "ds_version", "=", "dstore", ".", "hdf5", ".", "attrs", "[", "'engine_version'", "]", "if", "ds_version", "!=", "__version__", ":", "return", "(", "': the datastore is at version %s, but the exporter at '", "'version %s'", "%", "(", "ds_version", ",", "__version__", ")", ")", "else", ":", "return", "''" ]
34.083333
16.75
def explain(self, index, doc_type, id, body=None, **query_params): """ The explain api computes a score explanation for a query and a specific document. This can give useful feedback whether a document matches or didn't match a specific query. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html>`_ :param index: The name of the index :param doc_type: The type of the document :param id: The document ID :param body: The query definition using the Query DSL :arg _source: True or false to return the _source field or not, or a list of fields to return :arg _source_exclude: A list of fields to exclude from the returned _source field :arg _source_include: A list of fields to extract and return from the _source field :arg analyze_wildcard: Specify whether wildcards and prefix queries in the query string query should be analyzed (default: false) :arg analyzer: The analyzer for the query string query :arg default_operator: The default operator for query string query (AND or OR), default 'OR', valid choices are: 'AND', 'OR' :arg df: The default field for query string query (default: _all) :arg fields: A comma-separated list of fields to return in the response :arg lenient: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored :arg lowercase_expanded_terms: Specify whether query terms should be lowercased :arg parent: The ID of the parent document :arg preference: Specify the node or shard the operation should be performed on (default: random) :arg q: Query in the Lucene query string syntax :arg routing: Specific routing value """ self._es_parser.is_not_empty_params(index, doc_type, id) path = self._es_parser.make_path(index, doc_type, id, EsMethods.EXPLAIN) result = yield self._perform_request(HttpMethod.GET, path, body, params=query_params) returnValue(result)
[ "def", "explain", "(", "self", ",", "index", ",", "doc_type", ",", "id", ",", "body", "=", "None", ",", "*", "*", "query_params", ")", ":", "self", ".", "_es_parser", ".", "is_not_empty_params", "(", "index", ",", "doc_type", ",", "id", ")", "path", "=", "self", ".", "_es_parser", ".", "make_path", "(", "index", ",", "doc_type", ",", "id", ",", "EsMethods", ".", "EXPLAIN", ")", "result", "=", "yield", "self", ".", "_perform_request", "(", "HttpMethod", ".", "GET", ",", "path", ",", "body", ",", "params", "=", "query_params", ")", "returnValue", "(", "result", ")" ]
53.333333
21.422222
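A hedged sketch of calling explain() from Twisted code; the inlineCallbacks wiring matches the yield/returnValue style the method itself uses, but the es client construction and the 'explanation' response key are assumptions.

from twisted.internet import defer

@defer.inlineCallbacks
def why_did_it_match(es):
    # es is assumed to be a connected client exposing explain() as above
    result = yield es.explain(
        index="articles",
        doc_type="article",
        id="42",
        body={"query": {"match": {"title": "elasticsearch"}}},
    )
    defer.returnValue(result.get("explanation"))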
def _fuzzily_matches(response, candidate):
    '''return True if response fuzzily matches candidate'''
    r_words = response.split()
    c_words = candidate.split()
    # match whole words first (iterate over a copy so removal is safe)
    for word in list(r_words):
        if word in c_words:
            r_words.remove(word)
            c_words.remove(word)
    # match partial words, fewest matches first
    match_pairs = []
    for partial in sorted(r_words, key=lambda p: len(p), reverse=True):
        matches = [w for w in c_words if partial in w]
        match_pairs.append((partial, matches))
    # if all items can be uniquely matched, the match is passed
    while len(match_pairs):
        min_pair = min(match_pairs, key=lambda x: len(x[1]))
        # this is the partial and matches with the shortest match list
        # if there are ever no matches for something, the match is failed
        if len(min_pair[1]) == 0:
            return False
        # choose the match with the fewest matches to remaining partials.
        # that way we leave more options for more partials, for the best
        # chance of a full match
        partials_left = [p[0] for p in match_pairs]
        min_option = min(min_pair[1],
                         key=lambda x: len([p for p in partials_left if x in p]))
        # remove the current pair - we've matched it now
        match_pairs.remove(min_pair)
        # remove the matched option from all pairs' options so it won't be matched again
        for pair in match_pairs:
            pair_options = pair[1]
            if min_option in pair_options:
                pair_options.remove(min_option)
    # if all the items in the response were matched, this is a match
    return True
[ "def", "_fuzzily_matches", "(", "response", ",", "candidate", ")", ":", "r_words", "=", "response", ".", "split", "(", ")", "c_words", "=", "candidate", ".", "split", "(", ")", "# match whole words first", "for", "word", "in", "r_words", ":", "if", "word", "in", "c_words", ":", "r_words", ".", "remove", "(", "word", ")", "c_words", ".", "remove", "(", "word", ")", "# match partial words, fewest matches first", "match_pairs", "=", "[", "]", "for", "partial", "in", "sorted", "(", "r_words", ",", "key", "=", "lambda", "p", ":", "len", "(", "p", ")", ",", "reverse", "=", "True", ")", ":", "matches", "=", "[", "w", "for", "w", "in", "c_words", "if", "partial", "in", "w", "]", "match_pairs", ".", "append", "(", "(", "partial", ",", "matches", ")", ")", "# if all items can be uniquly matched, the match is passed", "while", "len", "(", "match_pairs", ")", ":", "min_pair", "=", "min", "(", "match_pairs", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", "[", "1", "]", ")", ")", "# this is the partial and matches with the shortest match list", "# if there are ever no matches for something, the match is failed", "if", "len", "(", "min_pair", "[", "1", "]", ")", "==", "0", ":", "return", "False", "# choose the match with the fewest matches to remaining partials.", "# that way we leave more options for more partials, for the best", "# chance of a full match", "partials_left", "=", "[", "p", "[", "0", "]", "for", "p", "in", "match_pairs", "]", "min_option", "=", "min", "(", "min_pair", "[", "1", "]", ",", "key", "=", "lambda", "x", ":", "len", "(", "[", "p", "for", "p", "in", "partials_left", "if", "x", "in", "p", "]", ")", ")", "# remove the current pair - we've matched it now", "match_pairs", ".", "remove", "(", "min_pair", ")", "# remove the matched option from all pairs' options so it won't be matched again", "for", "pair", "in", "match_pairs", ":", "pair_options", "=", "pair", "[", "1", "]", "if", "min_option", "in", "pair_options", ":", "pair_options", ".", "remove", "(", "min_option", ")", "# if all the items in the response were matched, this is match", "return", "True" ]
46.542857
16.942857
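Two expected outcomes of _fuzzily_matches(), assuming the function above is in scope; both follow from its whole-word-then-partial-word logic.

print(_fuzzily_matches("red drag", "red dragon"))    # True: 'red' whole, 'drag' partial
print(_fuzzily_matches("blue dragon", "red dragon")) # False: 'blue' matches nothing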
def extract(self, file_path, is_drum=False): ''' Extract MIDI file. Args: file_path: File path of MIDI. is_drum: Extract drum data or not. Returns: pd.DataFrame(columns=["program", "start", "end", "pitch", "velocity", "duration"]) ''' midi_data = pretty_midi.PrettyMIDI(file_path) note_tuple_list = [] for instrument in midi_data.instruments: if (is_drum is False and instrument.is_drum is False) or (is_drum is True and instrument.is_drum is True): for note in instrument.notes: note_tuple_list.append((instrument.program, note.start, note.end, note.pitch, note.velocity)) note_df = pd.DataFrame(note_tuple_list, columns=["program", "start", "end", "pitch", "velocity"]) note_df = note_df.sort_values(by=["program", "start", "end"]) note_df["duration"] = note_df.end - note_df.start return note_df
[ "def", "extract", "(", "self", ",", "file_path", ",", "is_drum", "=", "False", ")", ":", "midi_data", "=", "pretty_midi", ".", "PrettyMIDI", "(", "file_path", ")", "note_tuple_list", "=", "[", "]", "for", "instrument", "in", "midi_data", ".", "instruments", ":", "if", "(", "is_drum", "is", "False", "and", "instrument", ".", "is_drum", "is", "False", ")", "or", "(", "is_drum", "is", "True", "and", "instrument", ".", "is_drum", "is", "True", ")", ":", "for", "note", "in", "instrument", ".", "notes", ":", "note_tuple_list", ".", "append", "(", "(", "instrument", ".", "program", ",", "note", ".", "start", ",", "note", ".", "end", ",", "note", ".", "pitch", ",", "note", ".", "velocity", ")", ")", "note_df", "=", "pd", ".", "DataFrame", "(", "note_tuple_list", ",", "columns", "=", "[", "\"program\"", ",", "\"start\"", ",", "\"end\"", ",", "\"pitch\"", ",", "\"velocity\"", "]", ")", "note_df", "=", "note_df", ".", "sort_values", "(", "by", "=", "[", "\"program\"", ",", "\"start\"", ",", "\"end\"", "]", ")", "note_df", "[", "\"duration\"", "]", "=", "note_df", ".", "end", "-", "note_df", ".", "start", "return", "note_df" ]
44.909091
27.909091
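A usage sketch for extract(); the MidiExtractor class name and the MIDI file path are illustrative assumptions about the surrounding code.

extractor = MidiExtractor()                    # assumed host class of extract()
note_df = extractor.extract("song.mid")        # melodic instruments
drum_df = extractor.extract("song.mid", is_drum=True)  # drum tracks only
print(note_df[["pitch", "start", "duration"]].head())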
def _send(self):
    """
    Send data to Influxdb. Data that cannot be sent will be kept queued.
    """
    # Check to see if we have a valid socket. If not, try to connect.
    try:
        if self.influx is None:
            self.log.debug("InfluxdbHandler: Socket is not connected. "
                           "Reconnecting.")
            self._connect()
        if self.influx is None:
            self.log.debug("InfluxdbHandler: Reconnect failed.")
        else:
            # build metrics data
            metrics = []
            for path in self.batch:
                metrics.append({
                    "points": self.batch[path],
                    "name": path,
                    "columns": ["time", "value"]})
            # Send data to influxdb
            self.log.debug("InfluxdbHandler: writing %d series of data",
                           len(metrics))
            self.influx.write_points(metrics,
                                     time_precision=self.time_precision)
            # empty batch buffer
            self.batch = {}
            self.batch_count = 0
            self.time_multiplier = 1
    except Exception:
        self._close()
        if self.time_multiplier < 5:
            self.time_multiplier += 1
        self._throttle_error(
            "InfluxdbHandler: Error sending metrics, waiting for %ds.",
            2**self.time_multiplier)
        raise
[ "def", "_send", "(", "self", ")", ":", "# Check to see if we have a valid socket. If not, try to connect.", "try", ":", "if", "self", ".", "influx", "is", "None", ":", "self", ".", "log", ".", "debug", "(", "\"InfluxdbHandler: Socket is not connected. \"", "\"Reconnecting.\"", ")", "self", ".", "_connect", "(", ")", "if", "self", ".", "influx", "is", "None", ":", "self", ".", "log", ".", "debug", "(", "\"InfluxdbHandler: Reconnect failed.\"", ")", "else", ":", "# build metrics data", "metrics", "=", "[", "]", "for", "path", "in", "self", ".", "batch", ":", "metrics", ".", "append", "(", "{", "\"points\"", ":", "self", ".", "batch", "[", "path", "]", ",", "\"name\"", ":", "path", ",", "\"columns\"", ":", "[", "\"time\"", ",", "\"value\"", "]", "}", ")", "# Send data to influxdb", "self", ".", "log", ".", "debug", "(", "\"InfluxdbHandler: writing %d series of data\"", ",", "len", "(", "metrics", ")", ")", "self", ".", "influx", ".", "write_points", "(", "metrics", ",", "time_precision", "=", "self", ".", "time_precision", ")", "# empty batch buffer", "self", ".", "batch", "=", "{", "}", "self", ".", "batch_count", "=", "0", "self", ".", "time_multiplier", "=", "1", "except", "Exception", ":", "self", ".", "_close", "(", ")", "if", "self", ".", "time_multiplier", "<", "5", ":", "self", ".", "time_multiplier", "+=", "1", "self", ".", "_throttle_error", "(", "\"InfluxdbHandler: Error sending metrics, waiting for %ds.\"", ",", "2", "**", "self", ".", "time_multiplier", ")", "raise" ]
38.846154
14.487179
def state_subpattern(self, value): """Parse subpatterns.""" num, *_, parsed = value if num in self.groups: return (yield required(self.groups[num])) yield from Traverser(parsed, groups=self.groups)
[ "def", "state_subpattern", "(", "self", ",", "value", ")", ":", "num", ",", "", "*", "_", ",", "parsed", "=", "value", "if", "num", "in", "self", ".", "groups", ":", "return", "(", "yield", "required", "(", "self", ".", "groups", "[", "num", "]", ")", ")", "yield", "from", "Traverser", "(", "parsed", ",", "groups", "=", "self", ".", "groups", ")" ]
33.714286
13.428571
def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output)
[ "def", "_tokenize_chinese_chars", "(", "self", ",", "text", ")", ":", "output", "=", "[", "]", "for", "char", "in", "text", ":", "cp", "=", "ord", "(", "char", ")", "if", "self", ".", "_is_chinese_char", "(", "cp", ")", ":", "output", ".", "append", "(", "\" \"", ")", "output", ".", "append", "(", "char", ")", "output", ".", "append", "(", "\" \"", ")", "else", ":", "output", ".", "append", "(", "char", ")", "return", "\"\"", ".", "join", "(", "output", ")" ]
32.583333
8.833333
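A self-contained illustration of the intended effect: CJK codepoints get padded with spaces while ASCII text is untouched. The range check is simplified here to the main unified-ideograph block; the real _is_chinese_char covers more ranges.

def pad_cjk(text):
    out = []
    for ch in text:
        if 0x4E00 <= ord(ch) <= 0x9FFF:  # simplified stand-in for _is_chinese_char
            out.append(" " + ch + " ")
        else:
            out.append(ch)
    return "".join(out)

print(pad_cjk("tokenize中文text"))  # 'tokenize 中  文 text'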
def download_sample_and_align(job, sample, inputs, ids):
    """
    Downloads the sample and runs BWA-kit

    :param JobFunctionWrappingJob job: Passed by Toil automatically
    :param tuple(str, list) sample: UUID and URLS for sample
    :param Namespace inputs: Contains input arguments
    :param dict ids: FileStore IDs for shared inputs
    """
    uuid, urls = sample
    r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
    job.fileStore.logToMaster('Downloaded sample: {0}. R1 {1}\nR2 {2}\nStarting BWA Run'.format(uuid, r1_url, r2_url))
    # Read fastq samples from file store
    ids['r1'] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
    if r2_url:
        ids['r2'] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
    else:
        ids['r2'] = None
    # Create config for bwakit
    inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
    inputs.uuid = uuid
    config = dict(**vars(inputs))  # Create config as a copy of inputs since it has values we want
    config.update(ids)  # Overwrite attributes with the FileStoreIDs from ids
    config = argparse.Namespace(**config)
    # Define and wire job functions
    bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
                           disk=inputs.file_size, cores=inputs.cores)
    job.addFollowOn(bam_id)
    output_name = uuid + '.bam' + str(inputs.suffix) if inputs.suffix else uuid + '.bam'
    if urlparse(inputs.output_dir).scheme == 's3':
        bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name,
                             s3_dir=inputs.output_dir, s3_key_path=inputs.ssec,
                             cores=inputs.cores, disk=inputs.file_size)
    else:
        mkdir_p(inputs.output_dir)
        bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(),
                             output_dir=inputs.output_dir, disk=inputs.file_size)
[ "def", "download_sample_and_align", "(", "job", ",", "sample", ",", "inputs", ",", "ids", ")", ":", "uuid", ",", "urls", "=", "sample", "r1_url", ",", "r2_url", "=", "urls", "if", "len", "(", "urls", ")", "==", "2", "else", "(", "urls", "[", "0", "]", ",", "None", ")", "job", ".", "fileStore", ".", "logToMaster", "(", "'Downloaded sample: {0}. R1 {1}\\nR2 {2}\\nStarting BWA Run'", ".", "format", "(", "uuid", ",", "r1_url", ",", "r2_url", ")", ")", "# Read fastq samples from file store", "ids", "[", "'r1'", "]", "=", "job", ".", "addChildJobFn", "(", "download_url_job", ",", "r1_url", ",", "s3_key_path", "=", "inputs", ".", "ssec", ",", "disk", "=", "inputs", ".", "file_size", ")", ".", "rv", "(", ")", "if", "r2_url", ":", "ids", "[", "'r2'", "]", "=", "job", ".", "addChildJobFn", "(", "download_url_job", ",", "r2_url", ",", "s3_key_path", "=", "inputs", ".", "ssec", ",", "disk", "=", "inputs", ".", "file_size", ")", ".", "rv", "(", ")", "else", ":", "ids", "[", "'r2'", "]", "=", "None", "# Create config for bwakit", "inputs", ".", "cores", "=", "min", "(", "inputs", ".", "maxCores", ",", "multiprocessing", ".", "cpu_count", "(", ")", ")", "inputs", ".", "uuid", "=", "uuid", "config", "=", "dict", "(", "*", "*", "vars", "(", "inputs", ")", ")", "# Create config as a copy of inputs since it has values we want", "config", ".", "update", "(", "ids", ")", "# Overwrite attributes with the FileStoreIDs from ids", "config", "=", "argparse", ".", "Namespace", "(", "*", "*", "config", ")", "# Define and wire job functions", "bam_id", "=", "job", ".", "wrapJobFn", "(", "run_bwakit", ",", "config", ",", "sort", "=", "inputs", ".", "sort", ",", "trim", "=", "inputs", ".", "trim", ",", "disk", "=", "inputs", ".", "file_size", ",", "cores", "=", "inputs", ".", "cores", ")", "job", ".", "addFollowOn", "(", "bam_id", ")", "output_name", "=", "uuid", "+", "'.bam'", "+", "str", "(", "inputs", ".", "suffix", ")", "if", "inputs", ".", "suffix", "else", "uuid", "+", "'.bam'", "if", "urlparse", "(", "inputs", ".", "output_dir", ")", ".", "scheme", "==", "'s3'", ":", "bam_id", ".", "addChildJobFn", "(", "s3am_upload_job", ",", "file_id", "=", "bam_id", ".", "rv", "(", ")", ",", "file_name", "=", "output_name", ",", "s3_dir", "=", "inputs", ".", "output_dir", ",", "s3_key_path", "=", "inputs", ".", "ssec", ",", "cores", "=", "inputs", ".", "cores", ",", "disk", "=", "inputs", ".", "file_size", ")", "else", ":", "mkdir_p", "(", "inputs", ".", "ouput_dir", ")", "bam_id", ".", "addChildJobFn", "(", "copy_file_job", ",", "name", "=", "output_name", ",", "file_id", "=", "bam_id", ".", "rv", "(", ")", ",", "output_dir", "=", "inputs", ".", "output_dir", ",", "disk", "=", "inputs", ".", "file_size", ")" ]
54.805556
28.527778
def default_content_filter(sender, instance, **kwargs): # pylint: disable=unused-argument """ Set default value for `EnterpriseCustomerCatalog.content_filter` if not already set. """ if kwargs['created'] and not instance.content_filter: instance.content_filter = get_default_catalog_content_filter() instance.save()
[ "def", "default_content_filter", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=unused-argument", "if", "kwargs", "[", "'created'", "]", "and", "not", "instance", ".", "content_filter", ":", "instance", ".", "content_filter", "=", "get_default_catalog_content_filter", "(", ")", "instance", ".", "save", "(", ")" ]
49.285714
23.571429
def _fill_instance_child(xmldoc, element_name, return_type): '''Converts a child of the current dom element to the specified type. ''' element = xmldoc.find(_get_serialization_name(element_name)) if element is None: return None return_obj = return_type() _ETreeXmlToObject._fill_data_to_return_object(element, return_obj) return return_obj
[ "def", "_fill_instance_child", "(", "xmldoc", ",", "element_name", ",", "return_type", ")", ":", "element", "=", "xmldoc", ".", "find", "(", "_get_serialization_name", "(", "element_name", ")", ")", "if", "element", "is", "None", ":", "return", "None", "return_obj", "=", "return_type", "(", ")", "_ETreeXmlToObject", ".", "_fill_data_to_return_object", "(", "element", ",", "return_obj", ")", "return", "return_obj" ]
36.272727
25.363636
def _append_seed(self, seed_type: str, data: Any) -> 'Seeding': """Add a seeding method and returns self. :returns: self for fluid API """ self.append({ SEED_METHOD: seed_type, SEED_DATA: data, }) return self
[ "def", "_append_seed", "(", "self", ",", "seed_type", ":", "str", ",", "data", ":", "Any", ")", "->", "'Seeding'", ":", "self", ".", "append", "(", "{", "SEED_METHOD", ":", "seed_type", ",", "SEED_DATA", ":", "data", ",", "}", ")", "return", "self" ]
27.2
15.4
def tsv(self, path, features, filtered=True, override=False):
    """Export the data of the current instance to a .tsv file

    Parameters
    ----------
    path: str
        Path to a .tsv file. The ending .tsv is added automatically.
    features: list of str
        The features in the resulting .tsv file. These are strings
        that are defined in `dclab.definitions.scalar_feature_names`,
        e.g. "area_cvx", "deform", "frame", "fl1_max", "aspect".
    filtered: bool
        If set to `True`, only the filtered data (index in ds._filter)
        are used.
    override: bool
        If set to `True`, an existing file ``path`` will be overridden.
        If set to `False`, raises `OSError` if ``path`` exists.
    """
    features = [c.lower() for c in features]
    path = pathlib.Path(path)
    ds = self.rtdc_ds
    # Make sure that path ends with .tsv
    if path.suffix != ".tsv":
        path = path.with_name(path.name + ".tsv")
    # Check if the file already exists
    if not override and path.exists():
        raise OSError("File already exists: {}\n".format(
            str(path).encode("ascii", "ignore")) +
            "Please use the `override=True` option.")
    # Check that features are in dfn.scalar_feature_names
    for c in features:
        if c not in dfn.scalar_feature_names:
            raise ValueError("Unknown feature name {}".format(c))
    # Open file
    with path.open("w") as fd:
        # write header
        header1 = "\t".join([c for c in features])
        fd.write("# "+header1+"\n")
        header2 = "\t".join([dfn.feature_name2label[c] for c in features])
        fd.write("# "+header2+"\n")
    with path.open("ab") as fd:
        # write data
        if filtered:
            data = [ds[c][ds._filter] for c in features]
        else:
            data = [ds[c] for c in features]
        np.savetxt(fd,
                   np.array(data).transpose(),
                   fmt=str("%.10e"),
                   delimiter="\t")
[ "def", "tsv", "(", "self", ",", "path", ",", "features", ",", "filtered", "=", "True", ",", "override", "=", "False", ")", ":", "features", "=", "[", "c", ".", "lower", "(", ")", "for", "c", "in", "features", "]", "path", "=", "pathlib", ".", "Path", "(", "path", ")", "ds", "=", "self", ".", "rtdc_ds", "# Make sure that path ends with .tsv", "if", "path", ".", "suffix", "!=", "\".tsv\"", ":", "path", "=", "path", ".", "with_name", "(", "path", ".", "name", "+", "\".tsv\"", ")", "# Check if file already exist", "if", "not", "override", "and", "path", ".", "exists", "(", ")", ":", "raise", "OSError", "(", "\"File already exists: {}\\n\"", ".", "format", "(", "str", "(", "path", ")", ".", "encode", "(", "\"ascii\"", ",", "\"ignore\"", ")", ")", "+", "\"Please use the `override=True` option.\"", ")", "# Check that features are in dfn.scalar_feature_names", "for", "c", "in", "features", ":", "if", "c", "not", "in", "dfn", ".", "scalar_feature_names", ":", "raise", "ValueError", "(", "\"Unknown feature name {}\"", ".", "format", "(", "c", ")", ")", "# Open file", "with", "path", ".", "open", "(", "\"w\"", ")", "as", "fd", ":", "# write header", "header1", "=", "\"\\t\"", ".", "join", "(", "[", "c", "for", "c", "in", "features", "]", ")", "fd", ".", "write", "(", "\"# \"", "+", "header1", "+", "\"\\n\"", ")", "header2", "=", "\"\\t\"", ".", "join", "(", "[", "dfn", ".", "feature_name2label", "[", "c", "]", "for", "c", "in", "features", "]", ")", "fd", ".", "write", "(", "\"# \"", "+", "header2", "+", "\"\\n\"", ")", "with", "path", ".", "open", "(", "\"ab\"", ")", "as", "fd", ":", "# write data", "if", "filtered", ":", "data", "=", "[", "ds", "[", "c", "]", "[", "ds", ".", "_filter", "]", "for", "c", "in", "features", "]", "else", ":", "data", "=", "[", "ds", "[", "c", "]", "for", "c", "in", "features", "]", "np", ".", "savetxt", "(", "fd", ",", "np", ".", "array", "(", "data", ")", ".", "transpose", "(", ")", ",", "fmt", "=", "str", "(", "\"%.10e\"", ")", ",", "delimiter", "=", "\"\\t\"", ")" ]
40.113208
17.471698
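A hedged usage sketch for tsv(); that the exporter hangs off a dataset as ds.export is an assumption about the surrounding dclab-style API.

exp = ds.export  # assumed: an export object wrapping the dataset ds
exp.tsv("measurement_out",
        features=["area_cvx", "deform"],
        filtered=True,
        override=True)  # writes measurement_out.tsv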
def destroy_domain_record(self, domain_id, record_id): """ This method deletes the specified domain record. Required parameters domain_id: Integer or Domain Name (e.g. domain.com), specifies the domain for which to destroy a record. record_id: Integer, specifies the record_id to destroy. """ json = self.request('/domains/%s/records/%s/destroy' % (domain_id, record_id), method='GET') status = json.get('status') return status
[ "def", "destroy_domain_record", "(", "self", ",", "domain_id", ",", "record_id", ")", ":", "json", "=", "self", ".", "request", "(", "'/domains/%s/records/%s/destroy'", "%", "(", "domain_id", ",", "record_id", ")", ",", "method", "=", "'GET'", ")", "status", "=", "json", ".", "get", "(", "'status'", ")", "return", "status" ]
33.529412
19.647059
def namedb_state_transition_sanity_check( opcode, op_data, history_id, cur_record, record_table ): """ Sanity checks: make sure that: * the opcode and op_data are consistent with one another. * the history_id, cur_record, and record_table are consistent with one another. Return True if so. Raise an exception if not. DO NOT CALL THIS METHOD DIRECTLY. """ namedb_op_sanity_check( opcode, op_data, cur_record ) if opcode in OPCODE_NAME_STATE_TRANSITIONS: # name state transition assert record_table == "name_records", "BUG: name state transition opcode (%s) on table %s" % (opcode, record_table) assert cur_record.has_key('name'), "BUG: name state transition with no name" assert op_data.has_key('name'), "BUG: name state transition with no name" assert op_data['name'] == history_id, 'BUG: name op data is for the wrong name ({} != {})'.format(op_data['name'], history_id) assert op_data['name'] == cur_record['name'], 'BUG: name op data is for the wrong name ({} != {})'.format(op_data['name'], cur_record['name']) assert cur_record['name'] == history_id, "BUG: history ID '%s' != '%s'" % (history_id, cur_record['name']) elif opcode in OPCODE_NAMESPACE_STATE_TRANSITIONS: # namespace state transition assert record_table == "namespaces", "BUG: namespace state transition opcode (%s) on table %s" % (opcode, record_table) assert cur_record.has_key('namespace_id'), "BUG: namespace state transition with no namespace ID" assert cur_record['namespace_id'] == history_id, "BUG: history ID '%s' != '%s'" % (history_id, cur_record['namespace_id']) assert op_data['namespace_id'] == history_id, 'BUG: name op data is for the wrong name ({} != {})'.format(op_data['namespace_id'], history_id) assert op_data['namespace_id'] == cur_record['namespace_id'], 'BUG: name op data is for the wrong name ({} != {})'.format(op_data['namespace_id'], cur_record['namespace_id']) assert cur_record['namespace_id'] == history_id, "BUG: history ID '%s' != '%s'" % (history_id, cur_record['namespace_id']) assert cur_record.has_key('block_number'), 'BUG: name state transition with no block number' if op_data.has_key('block_number'): assert op_data['block_number'] == cur_record['block_number'], 'BUG: block number mismatch ({} != {})'.format(op_data['block_number'], cur_record['block_number']) return True
[ "def", "namedb_state_transition_sanity_check", "(", "opcode", ",", "op_data", ",", "history_id", ",", "cur_record", ",", "record_table", ")", ":", "namedb_op_sanity_check", "(", "opcode", ",", "op_data", ",", "cur_record", ")", "if", "opcode", "in", "OPCODE_NAME_STATE_TRANSITIONS", ":", "# name state transition ", "assert", "record_table", "==", "\"name_records\"", ",", "\"BUG: name state transition opcode (%s) on table %s\"", "%", "(", "opcode", ",", "record_table", ")", "assert", "cur_record", ".", "has_key", "(", "'name'", ")", ",", "\"BUG: name state transition with no name\"", "assert", "op_data", ".", "has_key", "(", "'name'", ")", ",", "\"BUG: name state transition with no name\"", "assert", "op_data", "[", "'name'", "]", "==", "history_id", ",", "'BUG: name op data is for the wrong name ({} != {})'", ".", "format", "(", "op_data", "[", "'name'", "]", ",", "history_id", ")", "assert", "op_data", "[", "'name'", "]", "==", "cur_record", "[", "'name'", "]", ",", "'BUG: name op data is for the wrong name ({} != {})'", ".", "format", "(", "op_data", "[", "'name'", "]", ",", "cur_record", "[", "'name'", "]", ")", "assert", "cur_record", "[", "'name'", "]", "==", "history_id", ",", "\"BUG: history ID '%s' != '%s'\"", "%", "(", "history_id", ",", "cur_record", "[", "'name'", "]", ")", "elif", "opcode", "in", "OPCODE_NAMESPACE_STATE_TRANSITIONS", ":", "# namespace state transition ", "assert", "record_table", "==", "\"namespaces\"", ",", "\"BUG: namespace state transition opcode (%s) on table %s\"", "%", "(", "opcode", ",", "record_table", ")", "assert", "cur_record", ".", "has_key", "(", "'namespace_id'", ")", ",", "\"BUG: namespace state transition with no namespace ID\"", "assert", "cur_record", "[", "'namespace_id'", "]", "==", "history_id", ",", "\"BUG: history ID '%s' != '%s'\"", "%", "(", "history_id", ",", "cur_record", "[", "'namespace_id'", "]", ")", "assert", "op_data", "[", "'namespace_id'", "]", "==", "history_id", ",", "'BUG: name op data is for the wrong name ({} != {})'", ".", "format", "(", "op_data", "[", "'namespace_id'", "]", ",", "history_id", ")", "assert", "op_data", "[", "'namespace_id'", "]", "==", "cur_record", "[", "'namespace_id'", "]", ",", "'BUG: name op data is for the wrong name ({} != {})'", ".", "format", "(", "op_data", "[", "'namespace_id'", "]", ",", "cur_record", "[", "'namespace_id'", "]", ")", "assert", "cur_record", "[", "'namespace_id'", "]", "==", "history_id", ",", "\"BUG: history ID '%s' != '%s'\"", "%", "(", "history_id", ",", "cur_record", "[", "'namespace_id'", "]", ")", "assert", "cur_record", ".", "has_key", "(", "'block_number'", ")", ",", "'BUG: name state transition with no block number'", "if", "op_data", ".", "has_key", "(", "'block_number'", ")", ":", "assert", "op_data", "[", "'block_number'", "]", "==", "cur_record", "[", "'block_number'", "]", ",", "'BUG: block number mismatch ({} != {})'", ".", "format", "(", "op_data", "[", "'block_number'", "]", ",", "cur_record", "[", "'block_number'", "]", ")", "return", "True" ]
65.837838
46.756757
def venv_has_script(script): """ :param script: script to look for in bin folder """ def f(venv): path=os.path.join(venv, 'bin', script) if os.path.isfile(path): return True return f
[ "def", "venv_has_script", "(", "script", ")", ":", "def", "f", "(", "venv", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "venv", ",", "'bin'", ",", "script", ")", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "True", "return", "f" ]
24.666667
11.777778
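venv_has_script() returns a predicate, so usage is two-step; the path below is only an example, and the function above is assumed to be in scope.

has_pip = venv_has_script('pip')
print(has_pip('/usr/local'))  # True only if /usr/local/bin/pip is a file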
def set_attributes(self, attr_obj=None, ns_uri=None, **attr_dict): """ Add or update this element's attributes, where attributes can be specified in a number of ways. :param attr_obj: a dictionary or list of attribute name/value pairs. :type attr_obj: dict, list, tuple, or None :param ns_uri: a URI defining a namespace for the new attributes. :type ns_uri: string or None :param dict attr_dict: attribute name and values specified as keyword arguments. """ self._set_element_attributes(self.impl_node, attr_obj=attr_obj, ns_uri=ns_uri, **attr_dict)
[ "def", "set_attributes", "(", "self", ",", "attr_obj", "=", "None", ",", "ns_uri", "=", "None", ",", "*", "*", "attr_dict", ")", ":", "self", ".", "_set_element_attributes", "(", "self", ".", "impl_node", ",", "attr_obj", "=", "attr_obj", ",", "ns_uri", "=", "ns_uri", ",", "*", "*", "attr_dict", ")" ]
45.857143
19.142857
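The three equivalent calling styles the docstring describes, assuming elem is an element object exposing set_attributes().

elem.set_attributes({'id': 'n1', 'class': 'node'})            # dict
elem.set_attributes([('id', 'n1'), ('class', 'node')])        # name/value pairs
elem.set_attributes(id='n1', ns_uri='http://example.com/ns')  # keyword arguments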
def get_live_data_flat_binary(self): """ Gets the live data packet in flatbuffer binary format. You'll need to do something like GameTickPacket.GetRootAsGameTickPacket(binary, 0) to get the data out. This is a temporary method designed to keep the integration test working. It returns the raw bytes of the flatbuffer so that it can be stored in a file. We can get rid of this once we have a first-class data recorder that lives inside the core dll. """ byte_buffer = self.game.UpdateLiveDataPacketFlatbuffer() if byte_buffer.size >= 4: # GetRootAsGameTickPacket gets angry if the size is less than 4 # We're counting on this copying the data over to a new memory location so that the original # pointer can be freed safely. proto_string = ctypes.string_at(byte_buffer.ptr, byte_buffer.size) self.game.Free(byte_buffer.ptr) # Avoid a memory leak self.game_status(None, RLBotCoreStatus.Success) return proto_string
[ "def", "get_live_data_flat_binary", "(", "self", ")", ":", "byte_buffer", "=", "self", ".", "game", ".", "UpdateLiveDataPacketFlatbuffer", "(", ")", "if", "byte_buffer", ".", "size", ">=", "4", ":", "# GetRootAsGameTickPacket gets angry if the size is less than 4", "# We're counting on this copying the data over to a new memory location so that the original", "# pointer can be freed safely.", "proto_string", "=", "ctypes", ".", "string_at", "(", "byte_buffer", ".", "ptr", ",", "byte_buffer", ".", "size", ")", "self", ".", "game", ".", "Free", "(", "byte_buffer", ".", "ptr", ")", "# Avoid a memory leak", "self", ".", "game_status", "(", "None", ",", "RLBotCoreStatus", ".", "Success", ")", "return", "proto_string" ]
61.352941
31
def send(self, request, socket, context, *args): """ When an event is sent, run all relevant handlers. Relevant handlers are those without a channel pattern when the given socket is not subscribed to any particular channel, or the handlers with a channel pattern that matches any of the channels that the given socket is subscribed to. In the case of subscribe/unsubscribe, match the channel arg being sent to the channel pattern. """ for handler, pattern in self.handlers: no_channel = not pattern and not socket.channels if self.name.endswith("subscribe") and pattern: matches = [pattern.match(args[0])] else: matches = [pattern.match(c) for c in socket.channels if pattern] if no_channel or filter(None, matches): handler(request, socket, context, *args)
[ "def", "send", "(", "self", ",", "request", ",", "socket", ",", "context", ",", "*", "args", ")", ":", "for", "handler", ",", "pattern", "in", "self", ".", "handlers", ":", "no_channel", "=", "not", "pattern", "and", "not", "socket", ".", "channels", "if", "self", ".", "name", ".", "endswith", "(", "\"subscribe\"", ")", "and", "pattern", ":", "matches", "=", "[", "pattern", ".", "match", "(", "args", "[", "0", "]", ")", "]", "else", ":", "matches", "=", "[", "pattern", ".", "match", "(", "c", ")", "for", "c", "in", "socket", ".", "channels", "if", "pattern", "]", "if", "no_channel", "or", "filter", "(", "None", ",", "matches", ")", ":", "handler", "(", "request", ",", "socket", ",", "context", ",", "*", "args", ")" ]
48.210526
17.894737
def jsonarrindex(self, name, path, scalar, start=0, stop=-1): """ Returns the index of ``scalar`` in the JSON array under ``path`` at key ``name``. The search can be limited using the optional inclusive ``start`` and exclusive ``stop`` indices. """ return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
[ "def", "jsonarrindex", "(", "self", ",", "name", ",", "path", ",", "scalar", ",", "start", "=", "0", ",", "stop", "=", "-", "1", ")", ":", "return", "self", ".", "execute_command", "(", "'JSON.ARRINDEX'", ",", "name", ",", "str_path", "(", "path", ")", ",", "self", ".", "_encode", "(", "scalar", ")", ",", "start", ",", "stop", ")" ]
56
24.285714
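A sketch of jsonarrindex() against a rejson-style client; it assumes a local Redis server with the ReJSON module loaded.

from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)
rj.jsonset('doc', Path.rootPath(), {'arr': [1, 2, 3, 2]})
print(rj.jsonarrindex('doc', Path('.arr'), 2))     # 1 (first occurrence)
print(rj.jsonarrindex('doc', Path('.arr'), 2, 2))  # 3 (search starts at index 2)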
def get_character(self, position, offset=0): """Return character at *position* with the given offset.""" position = self.get_position(position) + offset cursor = self.textCursor() cursor.movePosition(QTextCursor.End) if position < cursor.position(): cursor.setPosition(position) cursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor) return to_text_string(cursor.selectedText()) else: return ''
[ "def", "get_character", "(", "self", ",", "position", ",", "offset", "=", "0", ")", ":", "position", "=", "self", ".", "get_position", "(", "position", ")", "+", "offset", "cursor", "=", "self", ".", "textCursor", "(", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "End", ")", "if", "position", "<", "cursor", ".", "position", "(", ")", ":", "cursor", ".", "setPosition", "(", "position", ")", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "Right", ",", "QTextCursor", ".", "KeepAnchor", ")", "return", "to_text_string", "(", "cursor", ".", "selectedText", "(", ")", ")", "else", ":", "return", "''" ]
44.166667
10.166667
def embed(self, name, data=None): """Attach an image file and prepare for HTML embedding. This method should only be used to embed images. :param name: Path to the image to embed if data is None, or the name of the file if the ``data`` argument is given :param data: Contents of the image to embed, or None if the data is to be read from the file pointed to by the ``name`` argument """ if data is None: with open(name, 'rb') as fp: data = fp.read() name = os.path.basename(name) elif isinstance(data, bytes): pass elif hasattr(data, 'read'): data = data.read() else: raise TypeError("Unable to read image contents") subtype = imghdr.what(None, data) self.attach(name, data, 'image', subtype, True)
[ "def", "embed", "(", "self", ",", "name", ",", "data", "=", "None", ")", ":", "if", "data", "is", "None", ":", "with", "open", "(", "name", ",", "'rb'", ")", "as", "fp", ":", "data", "=", "fp", ".", "read", "(", ")", "name", "=", "os", ".", "path", ".", "basename", "(", "name", ")", "elif", "isinstance", "(", "data", ",", "bytes", ")", ":", "pass", "elif", "hasattr", "(", "data", ",", "'read'", ")", ":", "data", "=", "data", ".", "read", "(", ")", "else", ":", "raise", "TypeError", "(", "\"Unable to read image contents\"", ")", "subtype", "=", "imghdr", ".", "what", "(", "None", ",", "data", ")", "self", ".", "attach", "(", "name", ",", "data", ",", "'image'", ",", "subtype", ",", "True", ")" ]
31.826087
18.347826
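A sketch of the two documented ways to call embed(); Message is an assumed mail-message class exposing embed() and attach() as above.

msg = Message()                     # assumed host class of embed()
msg.embed('logo.png')               # read from disk; name becomes 'logo.png'
with open('chart.png', 'rb') as fp:
    msg.embed('inline-chart.png', data=fp)  # file-like data with explicit name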
def get_transform(self): """ Get a new object to perform the scaling transformation. """ return _InterpolatedInverseTransform(transform=self._transform, smin=0, smax=self._transform._M)
[ "def", "get_transform", "(", "self", ")", ":", "return", "_InterpolatedInverseTransform", "(", "transform", "=", "self", ".", "_transform", ",", "smin", "=", "0", ",", "smax", "=", "self", ".", "_transform", ".", "_M", ")" ]
37.625
18.875
def publish_proto_metadata_update(self): """ Publish protobuf model in ipfs and update existing metadata file """ metadata = load_mpe_service_metadata(self.args.metadata_file) ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) metadata.set_simple_field("model_ipfs_hash", ipfs_hash_base58) metadata.save_pretty(self.args.metadata_file)
[ "def", "publish_proto_metadata_update", "(", "self", ")", ":", "metadata", "=", "load_mpe_service_metadata", "(", "self", ".", "args", ".", "metadata_file", ")", "ipfs_hash_base58", "=", "utils_ipfs", ".", "publish_proto_in_ipfs", "(", "self", ".", "_get_ipfs_client", "(", ")", ",", "self", ".", "args", ".", "protodir", ")", "metadata", ".", "set_simple_field", "(", "\"model_ipfs_hash\"", ",", "ipfs_hash_base58", ")", "metadata", ".", "save_pretty", "(", "self", ".", "args", ".", "metadata_file", ")" ]
69.333333
22.666667
def _remove_alone_endif(self):
    """
    Can occur on:
    if(..){
        return
    }
    else{
        return
    }
    Iterate until a fixed point to remove the ENDIF nodes created by the following pattern:
    if(){
        return
    }
    else if(){
        return
    }
    """
    prev_nodes = []
    while set(prev_nodes) != set(self.nodes):
        prev_nodes = self.nodes
        to_remove = []
        for node in self.nodes:
            if node.type == NodeType.ENDIF and not node.fathers:
                for son in node.sons:
                    son.remove_father(node)
                node.set_sons([])
                to_remove.append(node)
        self._nodes = [n for n in self.nodes if n not in to_remove]
[ "def", "_remove_alone_endif", "(", "self", ")", ":", "prev_nodes", "=", "[", "]", "while", "set", "(", "prev_nodes", ")", "!=", "set", "(", "self", ".", "nodes", ")", ":", "prev_nodes", "=", "self", ".", "nodes", "to_remove", "=", "[", "]", "for", "node", "in", "self", ".", "nodes", ":", "if", "node", ".", "type", "==", "NodeType", ".", "ENDIF", "and", "not", "node", ".", "fathers", ":", "for", "son", "in", "node", ".", "sons", ":", "son", ".", "remove_father", "(", "node", ")", "node", ".", "set_sons", "(", "[", "]", ")", "to_remove", ".", "append", "(", "node", ")", "self", ".", "_nodes", "=", "[", "n", "for", "n", "in", "self", ".", "nodes", "if", "not", "n", "in", "to_remove", "]" ]
29.066667
15.933333
def find_all(self, node_type): """Find all the nodes of a given type. If the type is a tuple, the check is performed for any of the tuple items. """ for child in self.iter_child_nodes(): if isinstance(child, node_type): yield child for result in child.find_all(node_type): yield result
[ "def", "find_all", "(", "self", ",", "node_type", ")", ":", "for", "child", "in", "self", ".", "iter_child_nodes", "(", ")", ":", "if", "isinstance", "(", "child", ",", "node_type", ")", ":", "yield", "child", "for", "result", "in", "child", ".", "find_all", "(", "node_type", ")", ":", "yield", "result" ]
40.666667
8.222222
def get_attribute_by_name_and_dimension(name, dimension_id=None, **kwargs):
    """
    Get a specific attribute by its name. dimension_id can be None,
    because dimension_id is no longer mandatory on an attribute.
    """
    try:
        attr_i = db.DBSession.query(Attr).filter(and_(Attr.name == name,
                                                      Attr.dimension_id == dimension_id)).one()
        log.debug("Attribute retrieved")
        return attr_i
    except NoResultFound:
        return None
[ "def", "get_attribute_by_name_and_dimension", "(", "name", ",", "dimension_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "attr_i", "=", "db", ".", "DBSession", ".", "query", "(", "Attr", ")", ".", "filter", "(", "and_", "(", "Attr", ".", "name", "==", "name", ",", "Attr", ".", "dimension_id", "==", "dimension_id", ")", ")", ".", "one", "(", ")", "log", ".", "debug", "(", "\"Attribute retrieved\"", ")", "return", "attr_i", "except", "NoResultFound", ":", "return", "None" ]
41.090909
22.909091
def columns_used(self): """ Columns from any table used in the model. May come from either the choosers or alternatives tables. """ return list(tz.unique(tz.concatv( self.choosers_columns_used(), self.alts_columns_used(), self.interaction_columns_used())))
[ "def", "columns_used", "(", "self", ")", ":", "return", "list", "(", "tz", ".", "unique", "(", "tz", ".", "concatv", "(", "self", ".", "choosers_columns_used", "(", ")", ",", "self", ".", "alts_columns_used", "(", ")", ",", "self", ".", "interaction_columns_used", "(", ")", ")", ")", ")" ]
32.4
10.2
def update_devices(self, devices): """Update values from response of URL_DEVICES, callback if changed.""" for qspacket in devices: try: qsid = qspacket[QS_ID] except KeyError: _LOGGER.debug("Device without ID: %s", qspacket) continue if qsid not in self: self[qsid] = QSDev(data=qspacket) dev = self[qsid] dev.data = qspacket # Decode value from QSUSB newqs = _legacy_status(qspacket[QS_VALUE]) if dev.is_dimmer: # Adjust dimmer exponentially to get a smoother effect newqs = min(round(math.pow(newqs, self.dim_adj)), 100) newin = round(newqs * _MAX / 100) if abs(dev.value - newin) > 1: # Significant change _LOGGER.debug("%s qs=%s --> %s", qsid, newqs, newin) dev.value = newin self._cb_value_changed(self, qsid, newin)
[ "def", "update_devices", "(", "self", ",", "devices", ")", ":", "for", "qspacket", "in", "devices", ":", "try", ":", "qsid", "=", "qspacket", "[", "QS_ID", "]", "except", "KeyError", ":", "_LOGGER", ".", "debug", "(", "\"Device without ID: %s\"", ",", "qspacket", ")", "continue", "if", "qsid", "not", "in", "self", ":", "self", "[", "qsid", "]", "=", "QSDev", "(", "data", "=", "qspacket", ")", "dev", "=", "self", "[", "qsid", "]", "dev", ".", "data", "=", "qspacket", "# Decode value from QSUSB", "newqs", "=", "_legacy_status", "(", "qspacket", "[", "QS_VALUE", "]", ")", "if", "dev", ".", "is_dimmer", ":", "# Adjust dimmer exponentially to get a smoother effect", "newqs", "=", "min", "(", "round", "(", "math", ".", "pow", "(", "newqs", ",", "self", ".", "dim_adj", ")", ")", ",", "100", ")", "newin", "=", "round", "(", "newqs", "*", "_MAX", "/", "100", ")", "if", "abs", "(", "dev", ".", "value", "-", "newin", ")", ">", "1", ":", "# Significant change", "_LOGGER", ".", "debug", "(", "\"%s qs=%s --> %s\"", ",", "qsid", ",", "newqs", ",", "newin", ")", "dev", ".", "value", "=", "newin", "self", ".", "_cb_value_changed", "(", "self", ",", "qsid", ",", "newin", ")" ]
40.958333
15.875
def start(self, context): """Initialize the database connection.""" self.config['alias'] = self.alias safe_config = dict(self.config) del safe_config['host'] log.info("Connecting MongoEngine database layer.", extra=dict( uri = redact_uri(self.config['host']), config = self.config, )) self.connection = connect(**self.config)
[ "def", "start", "(", "self", ",", "context", ")", ":", "self", ".", "config", "[", "'alias'", "]", "=", "self", ".", "alias", "safe_config", "=", "dict", "(", "self", ".", "config", ")", "del", "safe_config", "[", "'host'", "]", "log", ".", "info", "(", "\"Connecting MongoEngine database layer.\"", ",", "extra", "=", "dict", "(", "uri", "=", "redact_uri", "(", "self", ".", "config", "[", "'host'", "]", ")", ",", "config", "=", "self", ".", "config", ",", ")", ")", "self", ".", "connection", "=", "connect", "(", "*", "*", "self", ".", "config", ")" ]
26.538462
18
def find_suggestions(self, sentence): """ Search all possible suggestions. Suggestions returned always have at least one document matching. Arguments: sentence --- keywords (single strings) for which we want suggestions Return: An array of sets of keywords. Each set of keywords (-> one string) is a suggestion. """ if not isinstance(sentence, str): sentence = str(sentence) keywords = sentence.split(" ") query_parser = self.search_param_list['strict'][0]['query_parser'] base_search = u" ".join(keywords).strip() final_suggestions = [] corrector = self.__searcher.corrector("content") label_corrector = self.__searcher.corrector("label") for (keyword_idx, keyword) in enumerate(keywords): if (len(keyword) <= MIN_KEYWORD_LEN): continue keyword_suggestions = label_corrector.suggest( keyword, limit=2 )[:] keyword_suggestions += corrector.suggest(keyword, limit=5)[:] for keyword_suggestion in keyword_suggestions: new_suggestion = keywords[:] new_suggestion[keyword_idx] = keyword_suggestion new_suggestion = u" ".join(new_suggestion).strip() if new_suggestion == base_search: continue # make sure it would return results query = query_parser.parse(new_suggestion) results = self.__searcher.search(query, limit=1) if len(results) <= 0: continue final_suggestions.append(new_suggestion) final_suggestions.sort() return final_suggestions
[ "def", "find_suggestions", "(", "self", ",", "sentence", ")", ":", "if", "not", "isinstance", "(", "sentence", ",", "str", ")", ":", "sentence", "=", "str", "(", "sentence", ")", "keywords", "=", "sentence", ".", "split", "(", "\" \"", ")", "query_parser", "=", "self", ".", "search_param_list", "[", "'strict'", "]", "[", "0", "]", "[", "'query_parser'", "]", "base_search", "=", "u\" \"", ".", "join", "(", "keywords", ")", ".", "strip", "(", ")", "final_suggestions", "=", "[", "]", "corrector", "=", "self", ".", "__searcher", ".", "corrector", "(", "\"content\"", ")", "label_corrector", "=", "self", ".", "__searcher", ".", "corrector", "(", "\"label\"", ")", "for", "(", "keyword_idx", ",", "keyword", ")", "in", "enumerate", "(", "keywords", ")", ":", "if", "(", "len", "(", "keyword", ")", "<=", "MIN_KEYWORD_LEN", ")", ":", "continue", "keyword_suggestions", "=", "label_corrector", ".", "suggest", "(", "keyword", ",", "limit", "=", "2", ")", "[", ":", "]", "keyword_suggestions", "+=", "corrector", ".", "suggest", "(", "keyword", ",", "limit", "=", "5", ")", "[", ":", "]", "for", "keyword_suggestion", "in", "keyword_suggestions", ":", "new_suggestion", "=", "keywords", "[", ":", "]", "new_suggestion", "[", "keyword_idx", "]", "=", "keyword_suggestion", "new_suggestion", "=", "u\" \"", ".", "join", "(", "new_suggestion", ")", ".", "strip", "(", ")", "if", "new_suggestion", "==", "base_search", ":", "continue", "# make sure it would return results", "query", "=", "query_parser", ".", "parse", "(", "new_suggestion", ")", "results", "=", "self", ".", "__searcher", ".", "search", "(", "query", ",", "limit", "=", "1", ")", "if", "len", "(", "results", ")", "<=", "0", ":", "continue", "final_suggestions", ".", "append", "(", "new_suggestion", ")", "final_suggestions", ".", "sort", "(", ")", "return", "final_suggestions" ]
38.391304
18.173913
def set_bgcolor(self, color): """set color for background of plot""" self.bgcolor = color for ax in self.canvas.figure.get_axes(): if matplotlib.__version__ < '2.0': ax.set_axis_bgcolor(color) else: ax.set_facecolor(color) if callable(self.theme_color_callback): self.theme_color_callback(color, 'bg')
[ "def", "set_bgcolor", "(", "self", ",", "color", ")", ":", "self", ".", "bgcolor", "=", "color", "for", "ax", "in", "self", ".", "canvas", ".", "figure", ".", "get_axes", "(", ")", ":", "if", "matplotlib", ".", "__version__", "<", "'2.0'", ":", "ax", ".", "set_axis_bgcolor", "(", "color", ")", "else", ":", "ax", ".", "set_facecolor", "(", "color", ")", "if", "callable", "(", "self", ".", "theme_color_callback", ")", ":", "self", ".", "theme_color_callback", "(", "color", ",", "'bg'", ")" ]
39.2
8
def get_item(filename, uuid): """ Read entry from JSON file """ with open(os.fsencode(str(filename)), "r") as f: data = json.load(f) results = [i for i in data if i["uuid"] == str(uuid)] if results: return results return None
[ "def", "get_item", "(", "filename", ",", "uuid", ")", ":", "with", "open", "(", "os", ".", "fsencode", "(", "str", "(", "filename", ")", ")", ",", "\"r\"", ")", "as", "f", ":", "data", "=", "json", ".", "load", "(", "f", ")", "results", "=", "[", "i", "for", "i", "in", "data", "if", "i", "[", "\"uuid\"", "]", "==", "str", "(", "uuid", ")", "]", "if", "results", ":", "return", "results", "return", "None" ]
27.6
12.4
def fmt_tag(cur_namespace, tag, val): """ Processes a documentation reference. """ if tag == 'type': fq_val = val if '.' not in val and cur_namespace is not None: fq_val = cur_namespace.name + '.' + fq_val return fq_val elif tag == 'route': if ':' in val: val, version = val.split(':', 1) version = int(version) else: version = 1 return fmt_func(val, version) + "()" elif tag == 'link': anchor, link = val.rsplit(' ', 1) # There's no way to have links in TSDoc, so simply use JSDoc's formatting. # It's entirely possible some editors support this. return '[%s]{@link %s}' % (anchor, link) elif tag == 'val': # Value types seem to match JavaScript (true, false, null) return val elif tag == 'field': return val else: raise RuntimeError('Unknown doc ref tag %r' % tag)
[ "def", "fmt_tag", "(", "cur_namespace", ",", "tag", ",", "val", ")", ":", "if", "tag", "==", "'type'", ":", "fq_val", "=", "val", "if", "'.'", "not", "in", "val", "and", "cur_namespace", "is", "not", "None", ":", "fq_val", "=", "cur_namespace", ".", "name", "+", "'.'", "+", "fq_val", "return", "fq_val", "elif", "tag", "==", "'route'", ":", "if", "':'", "in", "val", ":", "val", ",", "version", "=", "val", ".", "split", "(", "':'", ",", "1", ")", "version", "=", "int", "(", "version", ")", "else", ":", "version", "=", "1", "return", "fmt_func", "(", "val", ",", "version", ")", "+", "\"()\"", "elif", "tag", "==", "'link'", ":", "anchor", ",", "link", "=", "val", ".", "rsplit", "(", "' '", ",", "1", ")", "# There's no way to have links in TSDoc, so simply use JSDoc's formatting.", "# It's entirely possible some editors support this.", "return", "'[%s]{@link %s}'", "%", "(", "anchor", ",", "link", ")", "elif", "tag", "==", "'val'", ":", "# Value types seem to match JavaScript (true, false, null)", "return", "val", "elif", "tag", "==", "'field'", ":", "return", "val", "else", ":", "raise", "RuntimeError", "(", "'Unknown doc ref tag %r'", "%", "tag", ")" ]
33.392857
15.107143
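Expected outputs for the simple branches of fmt_tag(), assuming it is in scope; ns stands for any namespace object with a .name attribute, and the 'route' branch is omitted because it depends on fmt_func(), defined elsewhere.

print(fmt_tag(None, 'val', 'null'))                       # null
print(fmt_tag(ns, 'type', 'FileMetadata'))                # team.FileMetadata if ns.name == 'team'
print(fmt_tag(None, 'link', 'Docs https://example.com'))  # [Docs]{@link https://example.com}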