_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q277400
SourceProcessor.parse_file
test
def parse_file( self, filename ):
    """parse a C source file, and add its blocks to the processor's list

    Resets all parser state, then feeds the file line by line through a
    small state machine: outside a comment-block format, lines go to
    process_normal_line() (which may open a block); inside one, lines are
    accumulated until the format's end/column regexes stop matching.
    """
    self.reset()

    self.filename = filename
    # fileinput keeps global state; close any previous stream so
    # filelineno() is correct for this file.
    fileinput.close()
    self.format = None
    self.lineno = 0
    self.lines = []

    for line in fileinput.input( filename ):
        # strip trailing newlines, important on Windows machines!
        if line[-1] == '\012':
            line = line[0:-1]

        if self.format == None:
            # not currently inside a comment block
            self.process_normal_line( line )
        else:
            if self.format.end.match( line ):
                # that's a normal block end, add it to 'lines' and
                # create a new block
                self.lines.append( line )
                self.add_block_lines()
            elif self.format.column.match( line ):
                # that's a normal column line, add it to 'lines'
                self.lines.append( line )
            else:
                # humm.. this is an unexpected block end,
                # create a new block, but don't process the line
                self.add_block_lines()

                # we need to process the line again
                self.process_normal_line( line )

    # record the last lines
    self.add_block_lines()
python
{ "resource": "" }
q277401
SourceProcessor.process_normal_line
test
def process_normal_line( self, line ):
    """Handle a line seen outside any comment block.

    If the line matches the start marker of one of the known block
    formats, flush the accumulated lines into a block and switch the
    processor into that format.  The line itself is always buffered.
    """
    for fmt in re_source_block_formats:
        if not fmt.start.match( line ):
            continue
        self.add_block_lines()
        self.format = fmt
        self.lineno = fileinput.filelineno()

    self.lines.append( line )
python
{ "resource": "" }
q277402
SourceProcessor.add_block_lines
test
def add_block_lines( self ):
    """Flush the accumulated lines into a new SourceBlock and reset state.

    A no-op when nothing has been accumulated.
    """
    if not self.lines:
        return
    new_block = SourceBlock( self, self.filename, self.lineno, self.lines )
    self.blocks.append( new_block )
    self.format = None
    self.lines = []
python
{ "resource": "" }
q277403
draw_string
test
def draw_string(font, text, x, y, width=None, height=None, align=Alignment.left, vertical_align=VerticalAlignment.baseline):
    '''Draw a string with the given font.

    :note: Text alignment and word-wrapping is not yet implemented.  The text
        is rendered with the left edge and baseline at ``(x, y)``.

    :param font: the :class:`Font` to render text with
    :param text: a string of text to render.
    '''
    # Wrap the text in a single glyph run and lay it out; the layout object
    # carries the alignment parameters through to the renderer.
    run = GlyphRun(Style(font), text)
    draw_glyph_layout(GlyphLayout([run], x, y, width, height, align, vertical_align))
python
{ "resource": "" }
q277404
parse_iso_8601_time_str
test
def parse_iso_8601_time_str(time_str):
    """
    Parses a standard ISO 8601 time string. The Route53 API uses these
    here and there.

    :param str time_str: An ISO 8601 time string.
    :rtype: datetime.datetime
    :returns: A timezone aware (UTC) datetime.datetime instance.
    """
    # Use a raw string for the regex (non-raw '\.' / '\d' escapes emit
    # warnings on modern Python), and accept any number of fractional
    # digits: %f handles 1-6, not just the 3 Route53 typically sends.
    if re.search(r'\.\d+Z$', time_str):
        submitted_at = datetime.datetime.strptime(
            time_str, '%Y-%m-%dT%H:%M:%S.%fZ')
    else:
        submitted_at = datetime.datetime.strptime(
            time_str, '%Y-%m-%dT%H:%M:%SZ')
    # Parse the string, and make it explicitly UTC.
    return submitted_at.replace(tzinfo=UTC_TIMEZONE)
python
{ "resource": "" }
q277405
HtmlFormatter.make_html_words
test
def make_html_words( self, words ):
    """ convert a series of simple words into some HTML text

    Each word is HTML-escaped and the results are joined with single
    spaces.  Returns an empty string for an empty word list.
    """
    # str.join replaces the original quadratic `+=` concatenation loop.
    return " ".join( html_quote( w ) for w in words )
python
{ "resource": "" }
q277406
HtmlFormatter.make_html_word
test
def make_html_word( self, word ):
    """analyze a simple word to detect cross-references and styling

    Returns an HTML fragment: a link for a known cross-reference,
    '?name?' for an unknown one (with a warning on stderr), <i>/<b>
    for italic/bold markers, or the plain HTML-quoted word.
    """
    # look for cross-references
    m = re_crossref.match( word )
    if m:
        name = m.group( 1 )
        rest = m.group( 2 )
        # Only the identifier lookup is guarded: the original bare
        # `except:` also swallowed unrelated errors raised inside
        # make_block_url(), hiding real bugs.
        try:
            block = self.identifiers[name]
        except KeyError:
            # we detected a cross-reference to an unknown item
            sys.stderr.write( \
                "WARNING: undefined cross reference '" + name + "'.\n" )
            return '?' + name + '?' + rest
        url = self.make_block_url( block )
        return '<a href="' + url + '">' + name + '</a>' + rest

    # look for italics and bolds
    m = re_italic.match( word )
    if m:
        return '<i>' + m.group( 1 ) + '</i>' + m.group( 3 )

    m = re_bold.match( word )
    if m:
        return '<b>' + m.group( 1 ) + '</b>' + m.group( 3 )

    return html_quote( word )
python
{ "resource": "" }
q277407
HtmlFormatter.make_html_para
test
def make_html_para( self, words ):
    """ convert words of a paragraph into tagged HTML text, handle xrefs """
    rendered = [self.make_html_word( w ) for w in words]
    line = " ".join( rendered )

    # convert `...' quotations into real left and right single quotes
    line = re.sub( r"(^|\W)`(.*?)'(\W|$)",
                   r'\1&lsquo;\2&rsquo;\3',
                   line )

    # convert tilde into non-breakable space
    line = string.replace( line, "~", "&nbsp;" )

    return para_header + line + para_footer
python
{ "resource": "" }
q277408
HtmlFormatter.make_html_code
test
def make_html_code( self, lines ):
    """ convert a code sequence to HTML

    Each line is HTML-escaped and newline-terminated, wrapped between
    the module-level code_header/code_footer markers.
    """
    # join replaces the original quadratic `+=` concatenation loop.
    body = "".join( html_quote( l ) + '\n' for l in lines )
    return code_header + '\n' + body + code_footer
python
{ "resource": "" }
q277409
HtmlFormatter.make_html_items
test
def make_html_items( self, items ):
    """ convert a field's content into some valid HTML """
    # an item with source lines becomes a code block; otherwise its
    # words form a paragraph
    chunks = [ self.make_html_code( it.lines ) if it.lines
               else self.make_html_para( it.words )
               for it in items ]
    return string.join( chunks, '\n' )
python
{ "resource": "" }
q277410
MP4Tags.save
test
def save(self, filename):
    """Save the metadata to the given filename.

    Renders every tag into an 'ilst' atom payload, then rewrites the
    file in place: if a moov/udta/meta/ilst path already exists it is
    replaced, otherwise a fresh metadata tree is inserted.
    """
    values = []
    # Render order matters for some atoms; __get_sort_stats defines it.
    items = sorted(self.items(), key=MP4Tags.__get_sort_stats )
    for key, value in items:
        # Unknown atom names fall back to the plain-text renderer.
        info = self.__atoms.get(key[:4], (None, type(self).__render_text))
        try:
            values.append(info[1](self, key, value, *info[2:]))
        except (TypeError, ValueError) as s:
            # re-raise as a domain error, preserving the traceback
            reraise(MP4MetadataValueError, s, sys.exc_info()[2])
    data = Atom.render(b"ilst", b"".join(values))
    # Find the old atoms.
    fileobj = open(filename, "rb+")
    try:
        atoms = Atoms(fileobj)
        try:
            path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
        except KeyError:
            # no existing metadata tree -- build one from scratch
            self.__save_new(fileobj, atoms, data)
        else:
            # replace the existing ilst payload in place
            self.__save_existing(fileobj, atoms, path, data)
    finally:
        fileobj.close()
python
{ "resource": "" }
q277411
MP4Tags.__update_parents
test
def __update_parents(self, fileobj, path, delta):
    """Update all parent atoms with the new size.

    After data has been inserted/removed, every enclosing atom's size
    field must grow/shrink by `delta` bytes.  Handles both the 32-bit
    size field and the 64-bit extended form (32-bit size == 1).
    """
    for atom in path:
        fileobj.seek(atom.offset)
        size = cdata.uint_be(fileobj.read(4))
        if size == 1:  # 64bit
            # skip name (4B) and read size (8B)
            size = cdata.ulonglong_be(fileobj.read(12)[4:])
            # 64-bit size lives right after the 4-byte name
            fileobj.seek(atom.offset + 8)
            fileobj.write(cdata.to_ulonglong_be(size + delta))
        else:  # 32bit
            fileobj.seek(atom.offset)
            fileobj.write(cdata.to_uint_be(size + delta))
python
{ "resource": "" }
q277412
run
test
def run(game):
    '''Start running the game.  The window is created and shown at this
    point, and then the main event loop is entered.  'game.on_tick' and
    other event handlers are called repeatedly until the game exits.

    If a game is already running, this function replaces the
    :class:`Game` instance that receives events.
    '''
    if bacon._current_game:
        # Event loop already running: just swap the event receiver,
        # do not re-enter lib.Run().
        bacon._current_game = game
        return

    global _tick_callback_handle
    bacon._current_game = game

    # NOTE: each ctypes callback is bound to a local variable so the
    # wrapper object stays alive while the native loop holds a pointer
    # to it (otherwise it would be garbage-collected mid-run).

    # Window handler
    window_resize_callback_handle = lib.WindowResizeEventHandler(window._window_resize_event_handler)
    lib.SetWindowResizeEventHandler(window_resize_callback_handle)

    # Key handler
    key_callback_handle = lib.KeyEventHandler(keyboard._key_event_handler)
    lib.SetKeyEventHandler(key_callback_handle)

    # Mouse handlers
    mouse_button_callback_handle = lib.MouseButtonEventHandler(mouse_input._mouse_button_event_handler)
    lib.SetMouseButtonEventHandler(mouse_button_callback_handle)
    mouse_scroll_callback_handle = lib.MouseScrollEventHandler(mouse_input._mouse_scroll_event_handler)
    lib.SetMouseScrollEventHandler(mouse_scroll_callback_handle)

    # Controller handlers
    controller_connected_handle = lib.ControllerConnectedEventHandler(controller._controller_connected_event_handler)
    lib.SetControllerConnectedEventHandler(controller_connected_handle)
    controller_button_handle = lib.ControllerButtonEventHandler(controller._controller_button_event_handler)
    lib.SetControllerButtonEventHandler(controller_button_handle)
    controller_axis_handle = lib.ControllerAxisEventHandler(controller._controller_axis_event_handler)
    lib.SetControllerAxisEventHandler(controller_axis_handle)

    # Tick handler
    _tick_callback_handle = lib.TickCallback(_first_tick_callback)
    lib.SetTickCallback(_tick_callback_handle)

    # Blocks here until the game exits.
    lib.Run()

    # Tear down: clear the current game and unhook every native callback
    # (a zero function pointer clears the handler on the native side).
    bacon._current_game = None
    _tick_callback_handle = None
    lib.SetWindowResizeEventHandler(lib.WindowResizeEventHandler(0))
    lib.SetKeyEventHandler(lib.KeyEventHandler(0))
    lib.SetMouseButtonEventHandler(lib.MouseButtonEventHandler(0))
    lib.SetMouseScrollEventHandler(lib.MouseScrollEventHandler(0))
    lib.SetControllerConnectedEventHandler(lib.ControllerConnectedEventHandler(0))
    lib.SetControllerButtonEventHandler(lib.ControllerButtonEventHandler(0))
    lib.SetControllerAxisEventHandler(lib.ControllerAxisEventHandler(0))
    lib.SetTickCallback(lib.TickCallback(0))
python
{ "resource": "" }
q277413
ControllerMapping.register
test
def register(cls, vendor_id, product_id, mapping):
    '''Register a mapping for controllers with the given vendor and
    product IDs.  The mapping will replace any existing mapping for
    these IDs for controllers not yet connected.

    :param vendor_id: the vendor ID of the controller, as reported by
        :attr:`Controller.vendor_id`
    :param product_id: the vendor ID of the controller, as reported by
        :attr:`Controller.product_id`
    :param mapping: a :class:`ControllerMapping` to apply
    '''
    key = (vendor_id, product_id)
    cls._registry[key] = mapping
python
{ "resource": "" }
q277414
ControllerMapping.get
test
def get(cls, controller):
    '''Find a mapping that can apply to the given controller.  Returns
    None if unsuccessful.

    :param controller: :class:`Controller` to look up
    :return: :class:`ControllerMapping`
    '''
    # dict.get returns None on a miss, matching the try/except form.
    key = (controller.vendor_id, controller.product_id)
    return cls._registry.get(key)
python
{ "resource": "" }
q277415
EasyMP4Tags.RegisterFreeformKey
test
def RegisterFreeformKey(cls, key, name, mean=b"com.apple.iTunes"):
    """Register a text key.

    If the key you need to register is a simple one-to-one mapping
    of MP4 freeform atom (----) and name to EasyMP4Tags key, then you
    can use this function::

        EasyMP4Tags.RegisterFreeformKey(
            "musicbrainz_artistid", b"MusicBrainz Artist Id")
    """
    atomid = b"----:" + mean + b":" + name

    def getter(tags, key):
        # stored values are raw UTF-8 bytes
        return [v.decode("utf-8", "replace") for v in tags[atomid]]

    def setter(tags, key, value):
        tags[atomid] = [utf8(v) for v in value]

    def deleter(tags, key):
        del tags[atomid]

    cls.RegisterKey(key, getter, setter, deleter)
python
{ "resource": "" }
q277416
BaseTransport._hmac_sign_string
test
def _hmac_sign_string(self, string_to_sign): """ Route53 uses AWS an HMAC-based authentication scheme, involving the signing of a date string with the user's secret access key. More details on the specifics can be found in their documentation_. .. documentation:: http://docs.amazonwebservices.com/Route53/latest/DeveloperGuide/RESTAuthentication.html This method is used to sign said time string, for use in the request headers. :param str string_to_sign: The time string to sign. :rtype: str :returns: An HMAC signed string. """ # Just use SHA256, since we're all running modern versions # of Python (right?). new_hmac = hmac.new( self.connection._aws_secret_access_key.encode('utf-8'), digestmod=hashlib.sha256 ) new_hmac.update(string_to_sign.encode('utf-8')) # The HMAC digest str is done at this point. digest = new_hmac.digest() # Now we have to Base64 encode it, and we're done. return base64.b64encode(digest).decode('utf-8')
python
{ "resource": "" }
q277417
BaseTransport.get_request_headers
test
def get_request_headers(self):
    """
    Determine the headers to send along with the request. These are
    pretty much the same for every request, with Route53.
    """
    date_header = time.asctime(time.gmtime())
    # The signed date string authenticates the request.
    signature = self._hmac_sign_string(date_header)

    # Amazon's super fun auth token.
    auth_header = (
        "AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s"
        % (self.connection._aws_access_key_id, signature)
    )

    return {
        'X-Amzn-Authorization': auth_header,
        'x-amz-date': date_header,
        'Host': 'route53.amazonaws.com',
    }
python
{ "resource": "" }
q277418
BaseTransport.send_request
test
def send_request(self, path, data, method):
    """
    All outbound requests go through this method. It defers to the
    transport's various HTTP method-specific methods.

    :param str path: The path to tack on to the endpoint URL for
        the query.
    :param data: The params to send along with the request.
    :type data: Either a dict or bytes, depending on the request type.
    :param str method: One of 'GET', 'POST', or 'DELETE'.
    :rtype: str
    :returns: The body of the response.
    """
    headers = self.get_request_headers()

    # Early-return dispatch instead of an if/elif ladder.
    if method == 'GET':
        return self._send_get_request(path, data, headers)
    if method == 'POST':
        return self._send_post_request(path, data, headers)
    if method == 'DELETE':
        return self._send_delete_request(path, headers)
    raise Route53Error("Invalid request method: %s" % method)
python
{ "resource": "" }
q277419
RequestsTransport._send_get_request
test
def _send_get_request(self, path, params, headers):
    """
    Sends the GET request to the Route53 endpoint.

    :param str path: The path to tack on to the endpoint URL for
        the query.
    :param dict params: Key/value pairs to send.
    :param dict headers: A dict of headers to send with the request.
    :rtype: str
    :returns: The body of the response.
    """
    response = requests.get(self.endpoint + path,
                            params=params, headers=headers)
    # GET responses are always parsed as success bodies, so surface
    # HTTP errors immediately.
    response.raise_for_status()
    return response.text
python
{ "resource": "" }
q277420
RequestsTransport._send_post_request
test
def _send_post_request(self, path, data, headers):
    """
    Sends the POST request to the Route53 endpoint.

    :param str path: The path to tack on to the endpoint URL for
        the query.
    :param data: Either a dict, or bytes.
    :type data: dict or bytes
    :param dict headers: A dict of headers to send with the request.
    :rtype: str
    :returns: The body of the response.
    """
    # NOTE: no raise_for_status() here -- callers parse error bodies
    # (e.g. ChangeInfo error messages) out of non-2xx responses.
    response = requests.post(self.endpoint + path,
                             data=data, headers=headers)
    return response.text
python
{ "resource": "" }
q277421
RequestsTransport._send_delete_request
test
def _send_delete_request(self, path, headers):
    """
    Sends the DELETE request to the Route53 endpoint.

    :param str path: The path to tack on to the endpoint URL for
        the query.
    :param dict headers: A dict of headers to send with the request.
    :rtype: str
    :returns: The body of the response.
    """
    # As with POST, the raw body is returned so callers may parse
    # error payloads themselves.
    response = requests.delete(self.endpoint + path, headers=headers)
    return response.text
python
{ "resource": "" }
q277422
APEValue
test
def APEValue(value, kind):
    """APEv2 tag value factory.

    Use this if you need to specify the value's type manually.  Binary
    and text data are automatically detected by APEv2.__setitem__.
    """
    if kind in (TEXT, EXTERNAL):
        if not isinstance(value, text_type):
            # stricter with py3
            if PY3:
                raise TypeError("str only for text/external values")
            value = value.encode("utf-8")

    if kind == TEXT:
        return APETextValue(value, kind)
    if kind == BINARY:
        return APEBinaryValue(value, kind)
    if kind == EXTERNAL:
        return APEExtValue(value, kind)
    raise ValueError("kind must be TEXT, BINARY, or EXTERNAL")
python
{ "resource": "" }
q277423
Route53Connection._send_request
test
def _send_request(self, path, data, method):
    """
    Uses the HTTP transport to query the Route53 API. Runs the response
    through lxml's parser, before we hand it off for further picking
    apart by our call-specific parsers.

    :param str path: The RESTful path to tack on to the
        :py:attr:`endpoint`.
    :param data: The params to send along with the request.
    :type data: Either a dict or bytes, depending on the request type.
    :param str method: One of 'GET', 'POST', or 'DELETE'.
    :rtype: lxml.etree._Element
    :returns: An lxml Element root.
    """
    xml_body = self._transport.send_request(path, data, method)
    # Hand the raw XML straight to lxml; callers dissect the tree with
    # their own call-specific parsers.
    return etree.fromstring(xml_body)
python
{ "resource": "" }
q277424
Route53Connection._do_autopaginating_api_call
test
def _do_autopaginating_api_call(self, path, params, method, parser_func,
                                next_marker_xpath, next_marker_param_name,
                                next_type_xpath=None, parser_kwargs=None):
    """
    Given an API method, the arguments passed to it, and a function to
    hand parsing off to, loop through the record sets in the API call
    until all records have been yielded.

    :param str path: The RESTful path for the API call.
    :param dict params: The kwargs from the top-level API method.
    :param str method: The HTTP method to use ('GET', 'POST', 'DELETE').
    :param callable parser_func: A callable that is used for parsing the
        output from the API call.
    :param str next_marker_xpath: The XPath to the marker tag that will
        determine whether we continue paginating.
    :param str next_marker_param_name: The parameter name to manipulate
        in the request data to bring up the next page on the next
        request loop.
    :keyword str next_type_xpath: For the
        py:meth:`list_resource_record_sets_by_zone_id` method, there's
        an additional paginator token. Specifying this XPath looks for it.
    :keyword dict parser_kwargs: Optional dict of additional kwargs to
        pass on to the parser function.
    :rtype: generator
    :returns: Returns a generator that may be returned by the top-level
        API method.
    """
    if not parser_kwargs:
        parser_kwargs = {}

    # We loop indefinitely since we have no idea how many "pages" of
    # results we're going to have to go through.
    while True:
        # An lxml Element node.
        root = self._send_request(path, params, method)

        # Individually yield HostedZone instances after
        # parsing/instantiating.
        for record in parser_func(root, connection=self, **parser_kwargs):
            yield record

        # This will determine at what offset we start the next query.
        next_marker = root.find(next_marker_xpath)
        if next_marker is None:
            # If the NextMarker tag is absent, we know we've hit the
            # last page.
            break

        # If NextMarker is present, we'll adjust our API request params
        # and query again for the next page.
        params[next_marker_param_name] = next_marker.text

        if next_type_xpath:
            # This is a _list_resource_record_sets_by_zone_id call. Look
            # for the given tag via XPath and adjust our type arg for
            # the next request. Without specifying this, we loop
            # infinitely.
            next_type = root.find(next_type_xpath)
            params['type'] = next_type.text
python
{ "resource": "" }
q277425
Route53Connection.list_hosted_zones
test
def list_hosted_zones(self, page_chunks=100):
    """
    List all hosted zones associated with this connection's account. Since
    this method returns a generator, you can pull as many or as few entries
    as you'd like, without having to query and receive every hosted zone
    you may have.

    :keyword int page_chunks: This API call is "paginated" behind-the-scenes
        in order to break up large result sets. This number determines
        the maximum number of
        :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
        instances to retrieve per request. The default is fine for almost
        everyone.
    :rtype: generator
    :returns: A generator of :py:class:`HostedZone
        <route53.hosted_zone.HostedZone>` instances.
    """
    # Pure pass-through to the shared paginator; NextMarker/marker are
    # the pagination token names for this endpoint.
    return self._do_autopaginating_api_call(
        path='hostedzone',
        params={'maxitems': page_chunks},
        method='GET',
        parser_func=xml_parsers.list_hosted_zones_parser,
        next_marker_xpath="./{*}NextMarker",
        next_marker_param_name="marker",
    )
python
{ "resource": "" }
q277426
Route53Connection.create_hosted_zone
test
def create_hosted_zone(self, name, caller_reference=None, comment=None):
    """
    Creates and returns a new hosted zone. Once a hosted zone is created,
    its details can't be changed.

    :param str name: The name of the hosted zone to create.
    :keyword str caller_reference: A unique string that identifies the
        request and that allows failed create_hosted_zone requests to be
        retried without the risk of executing the operation twice. If no
        value is given, we'll generate a Type 4 UUID for you.
    :keyword str comment: An optional comment to attach to the zone.
    :rtype: tuple
    :returns: A tuple in the form of ``(hosted_zone, change_info)``, where
        ``hosted_zone`` is a
        :py:class:`HostedZone <route53.hosted_zone.HostedZone>` instance
        matching the newly created zone and ``change_info`` is a dict with
        some details about the API request.
    """
    request_body = xml_generators.create_hosted_zone_writer(
        connection=self,
        name=name,
        caller_reference=caller_reference,
        comment=comment,
    )
    response_root = self._send_request(
        path='hostedzone',
        data=request_body,
        method='POST',
    )
    return xml_parsers.created_hosted_zone_parser(
        root=response_root,
        connection=self,
    )
python
{ "resource": "" }
q277427
Route53Connection._list_resource_record_sets_by_zone_id
test
def _list_resource_record_sets_by_zone_id(self, id, rrset_type=None, identifier=None, name=None, page_chunks=100):
    """
    Lists a hosted zone's resource record sets by Zone ID, if you
    already know it.

    .. tip:: For most cases, we recommend going through a
        :py:class:`HostedZone <route53.hosted_zone.HostedZone>` instance's
        :py:meth:`HostedZone.record_sets
        <route53.hosted_zone.HostedZone.record_sets>` property, but this
        saves an HTTP request if you already know the zone's ID.

    :param str id: The ID of the zone whose record sets we're listing.
    :keyword str rrset_type: The type of resource record set to begin the
        record listing from.
    :keyword str identifier: Weighted and latency resource record sets
        only: If results were truncated for a given DNS name and type, the
        value of SetIdentifier for the next resource record set that has
        the current DNS name and type.
    :keyword str name: The DNS name to begin the record listing from.
    :keyword int page_chunks: This API call is paginated
        behind-the-scenes by this many ResourceRecordSet instances. The
        default should be fine for just about everybody, aside from those
        with tons of RRS.
    :rtype: generator
    :returns: A generator of ResourceRecordSet instances.
    """
    query = {
        'name': name,
        'type': rrset_type,
        'identifier': identifier,
        'maxitems': page_chunks,
    }
    # This endpoint has a two-part pagination token: NextRecordName
    # feeds 'name', and NextRecordType feeds 'type'.
    return self._do_autopaginating_api_call(
        path='hostedzone/%s/rrset' % id,
        params=query,
        method='GET',
        parser_func=xml_parsers.list_resource_record_sets_by_zone_id_parser,
        parser_kwargs={'zone_id': id},
        next_marker_xpath="./{*}NextRecordName",
        next_marker_param_name="name",
        next_type_xpath="./{*}NextRecordType",
    )
python
{ "resource": "" }
q277428
Route53Connection._change_resource_record_sets
test
def _change_resource_record_sets(self, change_set, comment=None):
    """
    Given a ChangeSet, POST it to the Route53 API.

    .. note:: You probably shouldn't be using this method directly,
        as there are convenience methods on the ResourceRecordSet
        sub-classes.

    :param change_set.ChangeSet change_set: The ChangeSet object to create
        the XML doc from.
    :keyword str comment: An optional comment to go along with the request.
    :rtype: dict
    :returns: A dict of change info, which contains some details about
        the request.
    :raises Route53Error: When the response carries an Error element
        instead of a ChangeInfo element.
    """
    body = xml_generators.change_resource_record_set_writer(
        connection=self,
        change_set=change_set,
        comment=comment
    )

    root = self._send_request(
        path='hostedzone/%s/rrset' % change_set.hosted_zone_id,
        data=body,
        method='POST',
    )

    #print(prettyprint_xml(root))

    e_change_info = root.find('./{*}ChangeInfo')
    if e_change_info is None:
        # No ChangeInfo means the API rejected the change; surface the
        # error message from the response body.
        error = root.find('./{*}Error').find('./{*}Message').text
        raise Route53Error(error)

    return parse_change_info(e_change_info)
python
{ "resource": "" }
q277429
draw_image
test
def draw_image(image, x1, y1, x2 = None, y2 = None):
    '''Draw an image.

    The image's top-left corner is drawn at ``(x1, y1)``, and its
    bottom-right at ``(x2, y2)``.  If ``x2`` and ``y2`` are omitted, they
    default to ``(x1 + width, y1 + height)``, rendering the image at its
    native resolution.

    Note that images can be flipped and scaled by providing alternative
    values for ``x2`` and ``y2``.

    :param image: an :class:`Image` to draw
    '''
    x2 = x1 + image.width if x2 is None else x2
    y2 = y1 + image.height if y2 is None else y2
    lib.DrawImage(image._handle, x1, y1, x2, y2)
python
{ "resource": "" }
q277430
draw_image_region
test
def draw_image_region(image, x1, y1, x2, y2, ix1, iy1, ix2, iy2):
    '''Draw a rectangular region of an image.

    The part of the image contained by the rectangle in texel-space by the
    coordinates ``(ix1, iy1)`` to ``(ix2, iy2)`` is drawn at coordinates
    ``(x1, y1)`` to ``(x2, y2)``.  All coordinates have the origin
    ``(0, 0)`` at the upper-left corner.

    For example, to draw the left half of a ``100x100`` image at
    coordinates ``(x, y)``::

        bacon.draw_image_region(image, x, y, x + 50, y + 100,
                                0, 0, 50, 100)

    :param image: an :class:`Image` to draw
    '''
    # Thin pass-through to the native renderer.
    lib.DrawImageRegion(image._handle, x1, y1, x2, y2, ix1, iy1, ix2, iy2)
python
{ "resource": "" }
q277431
OggPage.size
test
def size(self):
    """Total frame size.

    27 fixed header bytes, plus one lacing byte per started 255-byte
    chunk of each packet, plus the packet data itself.  An unterminated
    final packet that is an exact multiple of 255 bytes stores no
    trailing 0 lacing value.
    """
    total = 27  # Initial header size in bytes.
    for datum in self.packets:
        quot, rem = divmod(len(datum), 255)
        total += quot + 1
        if not self.complete and rem == 0:
            # Packet contains a multiple of 255 bytes and is not
            # terminated, so we don't have a \x00 at the end.
            total -= 1
    return total + sum(len(p) for p in self.packets)
python
{ "resource": "" }
q277432
OggPage.replace
test
def replace(cls, fileobj, old_pages, new_pages): """Replace old_pages with new_pages within fileobj. old_pages must have come from reading fileobj originally. new_pages are assumed to have the 'same' data as old_pages, and so the serial and sequence numbers will be copied, as will the flags for the first and last pages. fileobj will be resized and pages renumbered as necessary. As such, it must be opened r+b or w+b. """ # Number the new pages starting from the first old page. first = old_pages[0].sequence for page, seq in zip(new_pages, range(first, first + len(new_pages))): page.sequence = seq page.serial = old_pages[0].serial new_pages[0].first = old_pages[0].first new_pages[0].last = old_pages[0].last new_pages[0].continued = old_pages[0].continued new_pages[-1].first = old_pages[-1].first new_pages[-1].last = old_pages[-1].last new_pages[-1].complete = old_pages[-1].complete if not new_pages[-1].complete and len(new_pages[-1].packets) == 1: new_pages[-1].position = -1 new_data = b''.join(cls.write(p) for p in new_pages) # Make room in the file for the new data. delta = len(new_data) fileobj.seek(old_pages[0].offset, 0) insert_bytes(fileobj, delta, old_pages[0].offset) fileobj.seek(old_pages[0].offset, 0) fileobj.write(new_data) new_data_end = old_pages[0].offset + delta # Go through the old pages and delete them. Since we shifted # the data down the file, we need to adjust their offsets. We # also need to go backwards, so we don't adjust the deltas of # the other pages. old_pages.reverse() for old_page in old_pages: adj_offset = old_page.offset + delta delete_bytes(fileobj, old_page.size, adj_offset) # Finally, if there's any discrepency in length, we need to # renumber the pages for the logical stream. if len(old_pages) != len(new_pages): fileobj.seek(new_data_end, 0) serial = new_pages[-1].serial sequence = new_pages[-1].sequence + 1 cls.renumber(fileobj, serial, sequence)
python
{ "resource": "" }
q277433
OggPage.find_last
test
def find_last(fileobj, serial): """Find the last page of the stream 'serial'. If the file is not multiplexed this function is fast. If it is, it must read the whole the stream. This finds the last page in the actual file object, or the last page in the stream (with eos set), whichever comes first. """ # For non-muxed streams, look at the last page. try: fileobj.seek(-256*256, 2) except IOError: # The file is less than 64k in length. fileobj.seek(0) data = fileobj.read() try: index = data.rindex(b"OggS") except ValueError: raise error("unable to find final Ogg header") bytesobj = cBytesIO(data[index:]) best_page = None try: page = OggPage(bytesobj) except error: pass else: if page.serial == serial: if page.last: return page else: best_page = page else: best_page = None # The stream is muxed, so use the slow way. fileobj.seek(0) try: page = OggPage(fileobj) while not page.last: page = OggPage(fileobj) while page.serial != serial: page = OggPage(fileobj) best_page = page return page except error: return best_page except EOFError: return best_page
python
{ "resource": "" }
q277434
ContentProcessor.set_section
test
def set_section( self, section_name ):
    """set current section during parsing

    Creates the DocSection on first use; subsequent calls with the
    same name just switch back to it.
    """
    # dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent and works on both Python 2 and 3.
    if section_name not in self.sections:
        section = DocSection( section_name )
        self.sections[section_name] = section
        self.section = section
    else:
        self.section = self.sections[section_name]
python
{ "resource": "" }
q277435
ContentProcessor.add_markup
test
def add_markup( self ):
    """add a new markup section

    Flushes the accumulated markup lines into a DocMarkup (dropping a
    trailing blank line) and resets the accumulator.  No-op when there
    is no pending markup.
    """
    if self.markup and self.markup_lines:
        # get rid of last line of markup if it's empty;
        # str.strip() replaces the Python-2-only string.strip(), and
        # the redundant len() > 0 check is folded into the truth test.
        marks = self.markup_lines
        if marks and not marks[-1].strip():
            self.markup_lines = marks[:-1]

        m = DocMarkup( self.markup, self.markup_lines )
        self.markups.append( m )

        self.markup = None
        self.markup_lines = []
python
{ "resource": "" }
q277436
ContentProcessor.process_content
test
def process_content( self, content ):
    """process a block content and return a list of DocMarkup objects
       corresponding to it"""
    # NOTE(review): the locals 'markup' and 'markup_lines' below are
    # never read -- the accumulation actually happens on self.markup /
    # self.markup_lines via add_markup().
    markup = None
    markup_lines = []
    first = 1

    for line in content:
        found = None
        for t in re_markup_tags:
            m = t.match( line )
            if m:
                found = string.lower( m.group( 1 ) )
                prefix = len( m.group( 0 ) )
                # remove markup from line, preserving the column layout
                line = " " * prefix + line[prefix:]
                break

        # is it the start of a new markup section ?
        if found:
            first = 0
            self.add_markup()  # add current markup content
            self.markup = found
            if len( string.strip( line ) ) > 0:
                self.markup_lines.append( line )
        elif first == 0:
            # body line of the current markup section; lines before the
            # first tag ('first' still 1) are ignored
            self.markup_lines.append( line )

    self.add_markup()

    return self.markups
python
{ "resource": "" }
q277437
DocBlock.get_markup
test
def get_markup( self, tag_name ):
    """return the DocMarkup corresponding to a given tag in a block

    The comparison is case-insensitive; returns None when no markup
    carries the tag.
    """
    # str.lower() replaces the Python-2-only string.lower(), and the
    # lowering is hoisted out of the loop.
    tag = tag_name.lower()
    for m in self.markups:
        if m.tag == tag:
            return m
    return None
python
{ "resource": "" }
q277438
create_hosted_zone_writer
test
def create_hosted_zone_writer(connection, name, caller_reference, comment):
    """
    Forms an XML string that we'll send to Route53 in order to create
    a new hosted zone.

    :param Route53Connection connection: The connection instance used to
        query the API.
    :param str name: The name of the hosted zone to create.
    """
    # Generate a Type 4 UUID when the caller didn't supply a reference.
    caller_reference = caller_reference or str(uuid.uuid4())

    root = etree.Element(
        "CreateHostedZoneRequest",
        xmlns=connection._xml_namespace
    )
    etree.SubElement(root, "Name").text = name
    etree.SubElement(root, "CallerReference").text = caller_reference

    if comment:
        config = etree.SubElement(root, "HostedZoneConfig")
        etree.SubElement(config, "Comment").text = comment

    buf = BytesIO()
    # This writes bytes; decode back to str for the transport layer.
    etree.ElementTree(element=root).write(
        buf, xml_declaration=True, encoding='utf-8', method="xml")
    return buf.getvalue().decode('utf-8')
python
{ "resource": "" }
q277439
lock
test
def lock(fileobj):
    """Lock a file object 'safely'.

    That means a failure to lock because the platform doesn't
    support fcntl or filesystem locks is not considered a
    failure. This call does block.

    Returns whether or not the lock was successful, or
    raises an exception in more extreme circumstances (full
    lock table, invalid file).
    """
    try:
        import fcntl
    except ImportError:
        # No fcntl on this platform (e.g. Windows): soft failure.
        return False
    try:
        fcntl.lockf(fileobj, fcntl.LOCK_EX)
    except IOError:
        # FIXME: There's possibly a lot of complicated
        # logic that needs to go here in case the IOError
        # is EACCES or EAGAIN.
        return False
    return True
python
{ "resource": "" }
q277440
insert_bytes
test
def insert_bytes(fobj, size, offset, BUFFER_SIZE=2**16):
    """Insert size bytes of empty space starting at offset.

    fobj must be an open file object, open rb+ or
    equivalent. Mutagen tries to use mmap to resize the file, but
    falls back to a significantly slower method if mmap fails.
    """
    assert 0 < size
    assert 0 <= offset
    locked = False
    fobj.seek(0, 2)           # seek to EOF to learn the current size
    filesize = fobj.tell()
    movesize = filesize - offset   # bytes that must shift right by `size`
    fobj.write(b'\x00' * size)     # grow the file by `size` zero bytes
    fobj.flush()
    try:
        try:
            import mmap
            # Fast path: shift the whole tail with a single mmap move.
            file_map = mmap.mmap(fobj.fileno(), filesize + size)
            try:
                file_map.move(offset + size, offset, movesize)
            finally:
                file_map.close()
        except (ValueError, EnvironmentError, ImportError):
            # handle broken mmap scenarios
            # Slow path: lock the file and move the tail in chunks,
            # working backwards from the end so no byte is overwritten
            # before it has been copied.
            locked = lock(fobj)
            fobj.truncate(filesize)

            fobj.seek(0, 2)
            padsize = size
            # Don't generate an enormous string if we need to pad
            # the file out several megs.
            while padsize:
                addsize = min(BUFFER_SIZE, padsize)
                fobj.write(b"\x00" * addsize)
                padsize -= addsize

            fobj.seek(filesize, 0)
            while movesize:
                # At the start of this loop, fobj is pointing at the end
                # of the data we need to move, which is of movesize length.
                thismove = min(BUFFER_SIZE, movesize)
                # Seek back however much we're going to read this frame.
                fobj.seek(-thismove, 1)
                nextpos = fobj.tell()
                # Read it, so we're back at the end.
                data = fobj.read(thismove)
                # Seek back to where we need to write it.
                fobj.seek(-thismove + size, 1)
                # Write it.
                fobj.write(data)
                # And seek back to the end of the unmoved data.
                fobj.seek(nextpos)
                movesize -= thismove

            fobj.flush()
    finally:
        if locked:
            unlock(fobj)
python
{ "resource": "" }
q277441
delete_bytes
test
def delete_bytes(fobj, size, offset, BUFFER_SIZE=2**16):
    """Delete size bytes of empty space starting at offset.

    fobj must be an open file object, open rb+ or
    equivalent. Mutagen tries to use mmap to resize the file, but
    falls back to a significantly slower method if mmap fails.
    """
    locked = False
    assert 0 < size
    assert 0 <= offset
    fobj.seek(0, 2)           # seek to EOF to learn the current size
    filesize = fobj.tell()
    movesize = filesize - offset - size   # bytes after the deleted span
    assert 0 <= movesize
    try:
        if movesize > 0:
            fobj.flush()
            try:
                import mmap
                # Fast path: shift the tail left with one mmap move.
                file_map = mmap.mmap(fobj.fileno(), filesize)
                try:
                    file_map.move(offset, offset + size, movesize)
                finally:
                    file_map.close()
            except (ValueError, EnvironmentError, ImportError):
                # handle broken mmap scenarios
                # Slow path: lock the file and copy the tail left in
                # BUFFER_SIZE chunks, front to back.
                locked = lock(fobj)
                fobj.seek(offset + size)
                buf = fobj.read(BUFFER_SIZE)
                while buf:
                    fobj.seek(offset)
                    fobj.write(buf)
                    offset += len(buf)
                    fobj.seek(offset + size)
                    buf = fobj.read(BUFFER_SIZE)
        # Chop off the now-duplicated tail.
        fobj.truncate(filesize - size)
        fobj.flush()
    finally:
        if locked:
            unlock(fobj)
python
{ "resource": "" }
q277442
utf8
test
def utf8(data):
    """Convert a basestring to a valid UTF-8 str."""
    if isinstance(data, bytes):
        # Round-trip through a lenient decode so invalid byte sequences
        # are replaced rather than raising.
        return data.decode("utf-8", "replace").encode("utf-8")
    if isinstance(data, text_type):
        return data.encode("utf-8")
    raise TypeError("only unicode/bytes types can be converted to UTF-8")
python
{ "resource": "" }
q277443
ChangeSet.add_change
test
def add_change(self, action, record_set):
    """
    Adds a change to this change set.

    :param str action: Must be one of either 'CREATE' or 'DELETE'.
    :param resource_record_set.ResourceRecordSet record_set: The
        ResourceRecordSet object that was created or deleted.
    """
    normalized = action.upper()
    if normalized not in ('CREATE', 'DELETE'):
        raise Route53Error("action must be one of 'CREATE' or 'DELETE'")

    # Creations and deletions live in separate lists, both holding
    # (action, record_set) pairs.
    bucket = self.creations if normalized == 'CREATE' else self.deletions
    bucket.append((normalized, record_set))
python
{ "resource": "" }
q277444
parse_change_info
test
def parse_change_info(e_change_info):
    """
    Parses a ChangeInfo tag. Seen in CreateHostedZone, DeleteHostedZone,
    and ChangeResourceRecordSetsRequest.

    :param lxml.etree._Element e_change_info: A ChangeInfo element.
    :rtype: dict
    :returns: A dict representation of the change info, with keys
        ``request_id``, ``request_status`` and ``request_submitted_at``.
    """
    if e_change_info is None:
        return e_change_info

    # Bug fix: the original returned the Python *builtin* ``id``
    # function as 'request_id' -- the Id element was never read.
    # NOTE(review): Route53 returns Id as e.g. '/change/C123...'; if
    # callers expect the bare ID, strip the '/change/' prefix here.
    e_id = e_change_info.find('./{*}Id')
    request_id = e_id.text if e_id is not None else None

    status = e_change_info.find('./{*}Status').text
    submitted_at = e_change_info.find('./{*}SubmittedAt').text
    submitted_at = parse_iso_8601_time_str(submitted_at)

    return {
        'request_id': request_id,
        'request_status': status,
        'request_submitted_at': submitted_at
    }
python
{ "resource": "" }
q277445
Font.measure_string
test
def measure_string(self, str):
    '''Calculates the width of the given string in this font.

    :param str: the string to measure
    :return float: width of the string, in pixels
    '''
    # Lay the text out as a single glyph run using this font's style,
    # then report the resulting content width.
    run = bacon.text.GlyphRun(bacon.text.Style(self), str)
    layout = bacon.text.GlyphLayout([run], 0, 0)
    return layout.content_width
python
{ "resource": "" }
q277446
ResourceRecordSet.is_modified
test
def is_modified(self):
    """
    Determines whether this record set has been modified since the last
    retrieval or save.

    :rtype: bool
    :returns: ``True`` if the record set has been modified, and
        ``False`` if not.
    """
    # Compare every snapshotted attribute against its current value;
    # any mismatch means this object has been touched.
    return any(
        getattr(self, key) != val
        for key, val in self._initial_vals.items()
    )
python
{ "resource": "" }
q277447
ResourceRecordSet.delete
test
def delete(self):
    """
    Deletes this record set.
    """
    # Route53 deletions go through a change set containing a single
    # DELETE entry for this record set.
    change_set = ChangeSet(connection=self.connection,
                           hosted_zone_id=self.zone_id)
    change_set.add_change('DELETE', self)
    return self.connection._change_resource_record_sets(change_set)
python
{ "resource": "" }
q277448
ResourceRecordSet.save
test
def save(self):
    """
    Saves any changes to this record set.
    """
    change_set = ChangeSet(connection=self.connection,
                           hosted_zone_id=self.zone_id)
    # Route53 has no in-place modification: replace the record set by
    # deleting the old version and creating the new one within a single
    # change set, which makes the swap appear atomic.
    change_set.add_change('DELETE', self)
    change_set.add_change('CREATE', self)
    retval = self.connection._change_resource_record_sets(change_set)

    # Re-snapshot the current attribute values so modification tracking
    # starts fresh from this saved state.
    for key in self._initial_vals:
        self._initial_vals[key] = getattr(self, key)

    return retval
python
{ "resource": "" }
q277449
ParseID3v1
test
def ParseID3v1(data): """Parse an ID3v1 tag, returning a list of ID3v2.4 frames.""" try: data = data[data.index(b'TAG'):] except ValueError: return None if 128 < len(data) or len(data) < 124: return None # Issue #69 - Previous versions of Mutagen, when encountering # out-of-spec TDRC and TYER frames of less than four characters, # wrote only the characters available - e.g. "1" or "" - into the # year field. To parse those, reduce the size of the year field. # Amazingly, "0s" works as a struct format string. unpack_fmt = "3s30s30s30s%ds29sBB" % (len(data) - 124) try: tag, title, artist, album, year, comment, track, genre = unpack( unpack_fmt, data) except StructError: return None if tag != b"TAG": return None def fix(data): return data.split(b'\x00')[0].strip().decode('latin1') title, artist, album, year, comment = map( fix, [title, artist, album, year, comment]) frames = {} if title: frames['TIT2'] = TIT2(encoding=0, text=title) if artist: frames['TPE1'] = TPE1(encoding=0, text=[artist]) if album: frames['TALB'] = TALB(encoding=0, text=album) if year: frames['TDRC'] = TDRC(encoding=0, text=year) if comment: frames['COMM'] = COMM(encoding=0, lang='eng', desc="ID3v1 Comment", text=comment) # Don't read a track number if it looks like the comment was # padded with spaces instead of nulls (thanks, WinAmp). if track and ((track != 32) or (data[-3] == b'\x00'[0])): frames['TRCK'] = TRCK(encoding=0, text=str(track)) if genre != 255: frames['TCON'] = TCON(encoding=0, text=str(genre)) return frames
python
{ "resource": "" }
q277450
MakeID3v1
test
def MakeID3v1(id3): """Return an ID3v1.1 tag string from a dict of ID3v2.4 frames.""" v1 = {} for v2id, name in {"TIT2": "title", "TPE1": "artist", "TALB": "album"}.items(): if v2id in id3: text = id3[v2id].text[0].encode('latin1', 'replace')[:30] else: text = b'' v1[name] = text + (b'\x00' * (30 - len(text))) if "COMM" in id3: cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28] else: cmnt = b'' v1['comment'] = cmnt + (b'\x00' * (29 - len(cmnt))) if "TRCK" in id3: try: v1["track"] = chr_(+id3["TRCK"]) except ValueError: v1["track"] = b'\x00' else: v1["track"] = b'\x00' if "TCON" in id3: try: genre = id3["TCON"].genres[0] except IndexError: pass else: if genre in TCON.GENRES: v1["genre"] = chr_(TCON.GENRES.index(genre)) if "genre" not in v1: v1["genre"] = b"\xff" if "TDRC" in id3: year = text_type(id3["TDRC"]).encode('latin1', 'replace') elif "TYER" in id3: year = text_type(id3["TYER"]).encode('latin1', 'replace') else: year = b'' v1['year'] = (year + b'\x00\x00\x00\x00')[:4] return (b'TAG' + v1['title'] + v1['artist'] + v1['album'] + v1['year'] + v1['comment'] + v1['track'] + v1['genre'])
python
{ "resource": "" }
q277451
ID3.__fullread
test
def __fullread(self, size):
    """ Read a certain number of bytes from the source file.

    Raises ValueError for a negative size and EOFError when the request
    extends past the known file size or the read returns fewer bytes
    than asked for.
    """
    try:
        if size < 0:
            raise ValueError('Requested bytes (%s) less than zero' % size)
        if size > self.__filesize:
            raise EOFError('Requested %#x of %#x (%s)' % (
                int(size), int(self.__filesize), self.filename))
    except AttributeError:
        # __filesize/filename may not be set yet; skip the sanity checks
        # (only AttributeError is swallowed -- the raises above still
        # propagate).
        pass
    data = self._fileobj.read(size)
    if len(data) != size:
        raise EOFError
    self.__readbytes += size   # running total of bytes consumed
    return data
python
{ "resource": "" }
q277452
ID3.delall
test
def delall(self, key):
    """Delete all tags of a given kind; see getall.

    If ``key`` names a frame exactly (e.g. ``'TIT2'``) that single
    frame is removed; otherwise every frame whose hash key starts with
    ``key + ':'`` (e.g. all ``'COMM:...'`` variants) is removed.
    """
    if key in self:
        del self[key]
    else:
        key = key + ":"
        # Snapshot the matching keys first: deleting entries while
        # iterating a live keys() view raises RuntimeError on Python 3.
        for k in [k for k in self.keys() if k.startswith(key)]:
            del self[k]
python
{ "resource": "" }
q277453
ID3.loaded_frame
test
def loaded_frame(self, tag):
    """Deprecated; use the add method."""
    # ID3v2.2 frame classes have three-letter names; promote such a
    # frame to its 2.3/2.4 base class before storing it.
    cls = type(tag)
    if len(cls.__name__) == 3:
        tag = cls.__base__(tag)
    self[tag.HashKey] = tag
python
{ "resource": "" }
q277454
ID3.__update_common
test
def __update_common(self):
    """Updates done by both v23 and v24 update"""
    if "TCON" in self:
        # Get rid of "(xx)Foobr" format.
        # NOTE(review): the self-assignment presumably round-trips the
        # value through the TCON genres property, normalizing it --
        # confirm against the TCON implementation.
        self["TCON"].genres = self["TCON"].genres

    if self.version < self._V23:
        # ID3v2.2 PIC frames are slightly different.
        pics = self.getall("APIC")
        # Map the 2.2 three-letter image formats to proper MIME types;
        # unknown formats pass through unchanged.
        mimes = {"PNG": "image/png", "JPG": "image/jpeg"}
        self.delall("APIC")
        for pic in pics:
            newpic = APIC(
                encoding=pic.encoding, mime=mimes.get(pic.mime, pic.mime),
                type=pic.type, desc=pic.desc, data=pic.data)
            self.add(newpic)

        # ID3v2.2 LNK frames are just way too different to upgrade.
        self.delall("LINK")
python
{ "resource": "" }
q277455
ID3.update_to_v24
test
def update_to_v24(self): """Convert older tags into an ID3v2.4 tag. This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to TDRC). If you intend to save tags, you must call this function at some point; it is called by default when loading the tag. """ self.__update_common() if self.__unknown_version == self._V23: # convert unknown 2.3 frames (flags/size) to 2.4 converted = [] for frame in self.unknown_frames: try: name, size, flags = unpack('>4sLH', frame[:10]) frame = BinaryFrame.fromData(self, flags, frame[10:]) except (struct.error, error): continue name = name.decode('ascii') converted.append(self.__save_frame(frame, name=name)) self.unknown_frames[:] = converted self.__unknown_version = self._V24 # TDAT, TYER, and TIME have been turned into TDRC. try: date = text_type(self.get("TYER", "")) if date.strip(u"\x00"): self.pop("TYER") dat = text_type(self.get("TDAT", "")) if dat.strip("\x00"): self.pop("TDAT") date = "%s-%s-%s" % (date, dat[2:], dat[:2]) time = text_type(self.get("TIME", "")) if time.strip("\x00"): self.pop("TIME") date += "T%s:%s:00" % (time[:2], time[2:]) if "TDRC" not in self: self.add(TDRC(encoding=0, text=date)) except UnicodeDecodeError: # Old ID3 tags have *lots* of Unicode problems, so if TYER # is bad, just chuck the frames. pass # TORY can be the first part of a TDOR. if "TORY" in self: f = self.pop("TORY") if "TDOR" not in self: try: self.add(TDOR(encoding=0, text=str(f))) except UnicodeDecodeError: pass # IPLS is now TIPL. if "IPLS" in self: f = self.pop("IPLS") if "TIPL" not in self: self.add(TIPL(encoding=f.encoding, people=f.people)) # These can't be trivially translated to any ID3v2.4 tags, or # should have been removed already. for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME", "CRM"]: if key in self: del(self[key])
python
{ "resource": "" }
q277456
Sound.unload
test
def unload(self):
    '''Release all resources associated with the sound.'''
    if self._handle == -1:
        return  # already unloaded; nothing to release
    lib.UnloadSound(self._handle)
    self._handle = -1  # mark as unloaded so a second call is a no-op
python
{ "resource": "" }
q277457
Sound.play
test
def play(self, gain=None, pan=None, pitch=None):
    '''Play the sound as a `one-shot`.

    The sound will be played to completion.  If the sound is played more
    than once at a time, it will mix with all previous instances of
    itself.  If you need more control over the playback of sounds, see
    :class:`Voice`.

    :param gain: optional volume level to play the sound back at,
        between 0.0 and 1.0 (defaults to 1.0)
    :param pan: optional stereo pan, between -1.0 (left) and 1.0 (right)
    :param pitch: optional sampling rate modification, between 0.4 and
        16.0, where 1.0 represents the original pitch
    '''
    if gain is None and pan is None and pitch is None:
        # Fast path: fire-and-forget with default parameters.
        lib.PlaySound(self._handle)
        return

    # Any customization requires routing playback through a Voice.
    voice = Voice(self)
    if gain is not None:
        voice.gain = gain
    if pan is not None:
        voice.pan = pan
    if pitch is not None:
        voice.pitch = pitch
    voice.play()
python
{ "resource": "" }
q277458
Voice.set_loop_points
test
def set_loop_points(self, start_sample=-1, end_sample=0):
    '''Set the loop points within the sound.

    The sound must have been created with ``loop=True``.  The default
    parameters cause the loop points to be set to the entire sound
    duration.

    :note: There is currently no API for converting sample numbers to
        times.

    :param start_sample: sample number to loop back to
    :param end_sample: sample number to loop at
    '''
    # Delegate straight to the native library with this voice's handle.
    lib.SetVoiceLoopPoints(self._handle, start_sample, end_sample)
python
{ "resource": "" }
q277459
adobe_glyph_values
test
def adobe_glyph_values():
    """return the list of glyph names and their unicode values

    Parses the ``name;code`` lines of the module-level
    `adobe_glyph_list` into two parallel lists.  Entries whose value
    field contains more than one codepoint are skipped.
    """
    glyphs = []
    values = []

    # str methods instead of the Python-2-only string-module functions;
    # identical behavior on Python 2 and also correct on Python 3.
    for line in adobe_glyph_list.split('\n'):
        if not line:
            continue
        fields = line.split(';')
        # print fields[1] + ' - ' + fields[0]
        subfields = fields[1].split(' ')
        if len(subfields) == 1:
            glyphs.append(fields[0])
            values.append(fields[1])

    return glyphs, values
python
{ "resource": "" }
q277460
filter_glyph_names
test
def filter_glyph_names( alist, filter ):
    """filter `alist' by taking _out_ all glyph names that are in `filter'

    :param alist: list of glyph names to filter.
    :param filter: collection of glyph names to exclude.
    :return: the names from `alist` that are not present in `filter`.
    """
    # A plain membership test replaces the old .index()/bare-except
    # dance (which used exceptions for control flow and ignored the
    # index it computed); an unused counter variable is dropped too.
    return [name for name in alist if name not in filter]
python
{ "resource": "" }
q277461
dump_encoding
test
def dump_encoding( file, encoding_name, encoding_list ):
    """dump a given encoding as a C array of SID name-table indices"""
    write = file.write
    write( " /* the following are indices into the SID name table */\n" )
    write( " static const unsigned short " + encoding_name +
           "[" + repr( len( encoding_list ) ) + "] =\n" )
    write( " {\n" )

    # Accumulate the body in chunks, 16 values per output row.
    chunks = [" "]
    sep = ""
    col = 0
    for value in encoding_list:
        chunks.append( sep )
        chunks.append( "%3d" % value )
        sep = ","
        col += 1
        if col == 16:
            col = 0
            sep = ",\n "
    write( "".join( chunks ) + "\n };\n\n\n" )
python
{ "resource": "" }
q277462
dump_array
test
def dump_array( the_array, write, array_name ):
    """dumps a given encoding as a C unsigned-char array"""
    write( " static const unsigned char " + array_name +
           "[" + repr( len( the_array ) ) + "L] =\n" )
    write( " {\n" )

    line = ""
    sep = " "
    col = 0
    for value in the_array:
        line = line + sep + "%3d" % ord( value )
        sep = ","
        col += 1
        if col == 16:
            col = 0
            sep = ",\n "
        # Flush periodically so the pending chunk stays small.
        if len( line ) > 1024:
            write( line )
            line = ""
    write( line + "\n };\n\n\n" )
python
{ "resource": "" }
q277463
main
test
def main(): """main program body""" if len( sys.argv ) != 2: print __doc__ % sys.argv[0] sys.exit( 1 ) file = open( sys.argv[1], "w\n" ) write = file.write count_sid = len( sid_standard_names ) # `mac_extras' contains the list of glyph names in the Macintosh standard # encoding which are not in the SID Standard Names. # mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names ) # `base_list' contains the names of our final glyph names table. # It consists of the `mac_extras' glyph names, followed by the SID # standard names. # mac_extras_count = len( mac_extras ) base_list = mac_extras + sid_standard_names write( "/***************************************************************************/\n" ) write( "/* */\n" ) write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) ) write( "/* */\n" ) write( "/* PostScript glyph names. */\n" ) write( "/* */\n" ) write( "/* Copyright 2005, 2008, 2011 by */\n" ) write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" ) write( "/* */\n" ) write( "/* This file is part of the FreeType project, and may only be used, */\n" ) write( "/* modified, and distributed under the terms of the FreeType project */\n" ) write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" ) write( "/* this file you indicate that you have read the license and */\n" ) write( "/* understand and accept it fully. */\n" ) write( "/* */\n" ) write( "/***************************************************************************/\n" ) write( "\n" ) write( "\n" ) write( " /* This file has been generated automatically -- do not edit! 
*/\n" ) write( "\n" ) write( "\n" ) # dump final glyph list (mac extras + sid standard names) # st = StringTable( base_list, "ft_standard_glyph_names" ) st.dump( file ) st.dump_sublist( file, "ft_mac_names", "FT_NUM_MAC_NAMES", mac_standard_names ) st.dump_sublist( file, "ft_sid_names", "FT_NUM_SID_NAMES", sid_standard_names ) dump_encoding( file, "t1_standard_encoding", t1_standard_encoding ) dump_encoding( file, "t1_expert_encoding", t1_expert_encoding ) # dump the AGL in its compressed form # agl_glyphs, agl_values = adobe_glyph_values() dict = StringNode( "", 0 ) for g in range( len( agl_glyphs ) ): dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) ) dict = dict.optimize() dict_len = dict.locate( 0 ) dict_array = dict.store( "" ) write( """\ /* * This table is a compressed version of the Adobe Glyph List (AGL), * optimized for efficient searching. It has been generated by the * `glnames.py' python script located in the `src/tools' directory. * * The lookup function to get the Unicode value for a given string * is defined below the table. */ #ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST """ ) dump_array( dict_array, write, "ft_adobe_glyph_list" ) # write the lookup routine now # write( """\ /* * This function searches the compressed table efficiently. 
*/ static unsigned long ft_get_adobe_glyph_index( const char* name, const char* limit ) { int c = 0; int count, min, max; const unsigned char* p = ft_adobe_glyph_list; if ( name == 0 || name >= limit ) goto NotFound; c = *name++; count = p[1]; p += 2; min = 0; max = count; while ( min < max ) { int mid = ( min + max ) >> 1; const unsigned char* q = p + mid * 2; int c2; q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] ); c2 = q[0] & 127; if ( c2 == c ) { p = q; goto Found; } if ( c2 < c ) min = mid + 1; else max = mid; } goto NotFound; Found: for (;;) { /* assert (*p & 127) == c */ if ( name >= limit ) { if ( (p[0] & 128) == 0 && (p[1] & 128) != 0 ) return (unsigned long)( ( (int)p[2] << 8 ) | p[3] ); goto NotFound; } c = *name++; if ( p[0] & 128 ) { p++; if ( c != (p[0] & 127) ) goto NotFound; continue; } p++; count = p[0] & 127; if ( p[0] & 128 ) p += 2; p++; for ( ; count > 0; count--, p += 2 ) { int offset = ( (int)p[0] << 8 ) | p[1]; const unsigned char* q = ft_adobe_glyph_list + offset; if ( c == ( q[0] & 127 ) ) { p = q; goto NextIter; } } goto NotFound; NextIter: ; } NotFound: return 0; } #endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */ """ ) if 0: # generate unit test, or don't # # now write the unit test to check that everything works OK # write( "#ifdef TEST\n\n" ) write( "static const char* const the_names[] = {\n" ) for name in agl_glyphs: write( ' "' + name + '",\n' ) write( " 0\n};\n" ) write( "static const unsigned long the_values[] = {\n" ) for val in agl_values: write( ' 0x' + val + ',\n' ) write( " 0\n};\n" ) write( """ #include <stdlib.h> #include <stdio.h> int main( void ) { int result = 0; const char* const* names = the_names; const unsigned long* values = the_values; for ( ; *names; names++, values++ ) { const char* name = *names; unsigned long reference = *values; unsigned long value; value = ft_get_adobe_glyph_index( name, name + strlen( name ) ); if ( value != reference ) { result = 1; fprintf( stderr, "name '%s' => %04x instead of 
%04x\\n", name, value, reference ); } } return result; } """ ) write( "#endif /* TEST */\n" ) write("\n/* END */\n")
python
{ "resource": "" }
q277464
file_exists
test
def file_exists( pathname ):
    """checks that a given file exists"""
    # Probe by opening for read; any failure (missing file, bad
    # permissions, ...) is reported on stderr and yields None.
    try:
        with open( pathname, "r" ):
            pass
    except:
        sys.stderr.write( pathname + " couldn't be accessed\n" )
        return None
    return 1
python
{ "resource": "" }
q277465
make_file_list
test
def make_file_list( args = None ):
    """builds a list of input files from command-line arguments

    :param args: optional list of path patterns; defaults to the
        command-line arguments (``sys.argv[1:]``).
    :return: a list of existing file paths, or None when nothing
        matched.
    """
    file_list = []

    if not args:
        args = sys.argv[1:]

    for pathname in args:
        # '*' in pathname replaces the Python-2-only string.find();
        # same test, works on Python 3 as well.
        if '*' in pathname:
            newpath = glob.glob( pathname )
            newpath.sort()  # sort files -- this is important because
                            # of the order of files
        else:
            newpath = [pathname]

        file_list.extend( newpath )

    if len( file_list ) == 0:
        file_list = None
    else:
        # now filter the file list to remove non-existing ones;
        # materialize as a list so Python 3 callers get a list too
        file_list = list( filter( file_exists, file_list ) )

    return file_list
python
{ "resource": "" }
q277466
parse_hosted_zone
test
def parse_hosted_zone(e_zone, connection): """ This a common parser that allows the passing of any valid HostedZone tag. It will spit out the appropriate HostedZone object for the tag. :param lxml.etree._Element e_zone: The root node of the etree parsed response from the API. :param Route53Connection connection: The connection instance used to query the API. :rtype: HostedZone :returns: An instantiated HostedZone object. """ # This dict will be used to instantiate a HostedZone instance to yield. kwargs = {} # Within HostedZone tags are a number of sub-tags that include info # about the instance. for e_field in e_zone: # Cheesy way to strip off the namespace. tag_name = e_field.tag.split('}')[1] field_text = e_field.text if tag_name == 'Config': # Config has the Comment tag beneath it, needing # special handling. e_comment = e_field.find('./{*}Comment') kwargs['comment'] = e_comment.text if e_comment is not None else None continue elif tag_name == 'Id': # This comes back with a path prepended. Yank that sillyness. field_text = field_text.strip('/hostedzone/') # Map the XML tag name to a kwarg name. kw_name = HOSTED_ZONE_TAG_TO_KWARG_MAP[tag_name] # This will be the key/val pair used to instantiate the # HostedZone instance. kwargs[kw_name] = field_text return HostedZone(connection, **kwargs)
python
{ "resource": "" }
q277467
parse_delegation_set
test
def parse_delegation_set(zone, e_delegation_set):
    """
    Parses a DelegationSet tag. These often accompany HostedZone tags in
    responses like CreateHostedZone and GetHostedZone.

    :param HostedZone zone: An existing HostedZone instance to populate.
    :param lxml.etree._Element e_delegation_set: A DelegationSet element.
    """
    # Collect the text of every child of the NameServers container and
    # stash the list on the zone's private attribute.
    e_nameservers = e_delegation_set.find('./{*}NameServers')
    zone._nameservers = [e_ns.text for e_ns in e_nameservers]
python
{ "resource": "" }
q277468
MetadataBlock.writeblocks
test
def writeblocks(blocks):
    """Render metadata block as a byte string."""
    rendered = []
    # Pair each block's code with its serialized payload; the final
    # block's code gets the high bit set to mark end-of-metadata.
    codes = [[block.code, block.write()] for block in blocks]
    codes[-1][0] |= 128
    for code, datum in codes:
        byte = chr_(code)
        # Block lengths are stored as 24-bit values, so anything larger
        # cannot be represented.
        if len(datum) > 2**24:
            raise error("block is too long to write")
        # Big-endian 24-bit length: low three bytes of a packed 32-bit.
        length = struct.pack(">I", len(datum))[-3:]
        rendered.append(byte + length + datum)
    return b"".join(rendered)
python
{ "resource": "" }
q277469
MetadataBlock.group_padding
test
def group_padding(blocks):
    """Consolidate FLAC padding metadata blocks.

    The overall size of the rendered blocks does not change, so
    this adds several bytes of padding for each merged block."""
    # Pull every Padding block out of the list, then re-append a single
    # merged one at the end.
    paddings = [b for b in blocks if isinstance(b, Padding)]
    for p in paddings:
        blocks.remove(p)
    # total padding size is the sum of padding sizes plus 4 bytes
    # per removed header.
    size = sum(padding.length for padding in paddings)
    padding = Padding()
    # NOTE(review): with zero Padding blocks this appends a Padding of
    # length -4 -- presumably callers always append a Padding before
    # calling (FLAC.save() does); confirm before reusing elsewhere.
    padding.length = size + 4 * (len(paddings) - 1)
    blocks.append(padding)
python
{ "resource": "" }
q277470
FLAC.delete
test
def delete(self, filename=None):
    """Remove Vorbis comments from a file.

    If no filename is given, the one most recently loaded is used.

    :param filename: optional path of the file to rewrite; defaults to
        the most recently loaded file.
    """
    if filename is None:
        filename = self.filename

    for block in list(self.metadata_blocks):
        if isinstance(block, VCFLACDict):
            self.metadata_blocks.remove(block)
            self.tags = None
            # Bug fix: pass the requested filename through to save();
            # previously it was computed above but ignored, so deletes
            # always rewrote the most recently loaded file.
            self.save(filename)
            break
python
{ "resource": "" }
q277471
FLAC.save
test
def save(self, filename=None, deleteid3=False): """Save metadata blocks to a file. If no filename is given, the one most recently loaded is used. """ if filename is None: filename = self.filename f = open(filename, 'rb+') try: # Ensure we've got padding at the end, and only at the end. # If adding makes it too large, we'll scale it down later. self.metadata_blocks.append(Padding(b'\x00' * 1020)) MetadataBlock.group_padding(self.metadata_blocks) header = self.__check_header(f) # "fLaC" and maybe ID3 available = self.__find_audio_offset(f) - header data = MetadataBlock.writeblocks(self.metadata_blocks) # Delete ID3v2 if deleteid3 and header > 4: available += header - 4 header = 4 if len(data) > available: # If we have too much data, see if we can reduce padding. padding = self.metadata_blocks[-1] newlength = padding.length - (len(data) - available) if newlength > 0: padding.length = newlength data = MetadataBlock.writeblocks(self.metadata_blocks) assert len(data) == available elif len(data) < available: # If we have too little data, increase padding. self.metadata_blocks[-1].length += (available - len(data)) data = MetadataBlock.writeblocks(self.metadata_blocks) assert len(data) == available if len(data) != available: # We couldn't reduce the padding enough. diff = (len(data) - available) insert_bytes(f, diff, header) f.seek(header - 4) f.write(b"fLaC" + data) # Delete ID3v1 if deleteid3: try: f.seek(-128, 2) except IOError: pass else: if f.read(3) == b"TAG": f.seek(-128, 2) f.truncate() finally: f.close()
python
{ "resource": "" }
q277472
parse_rrset_alias
test
def parse_rrset_alias(e_alias):
    """
    Parses an Alias tag beneath a ResourceRecordSet, spitting out the
    two values found within. This is specific to A records that are set
    to Alias.

    :param lxml.etree._Element e_alias: An Alias tag beneath a
        ResourceRecordSet.
    :rtype: tuple
    :returns: A tuple in the form of
        ``(alias_hosted_zone_id, alias_dns_name)``.
    """
    # Pull the two known child elements out in a fixed order.
    return tuple(
        e_alias.find('./{*}%s' % tag).text
        for tag in ('HostedZoneId', 'DNSName')
    )
python
{ "resource": "" }
q277473
parse_rrset_record_values
test
def parse_rrset_record_values(e_resource_records):
    """
    Used to parse the various Values from the ResourceRecords tags on
    most rrset types.

    :param lxml.etree._Element e_resource_records: A ResourceRecords tag
        beneath a ResourceRecordSet.
    :rtype: list
    :returns: A list of resource record strings.
    """
    # Each ResourceRecord child wraps one or more Value elements;
    # flatten them all into a single list of their text contents.
    return [
        e_value.text
        for e_record in e_resource_records
        for e_value in e_record
    ]
python
{ "resource": "" }
q277474
parse_rrset
test
def parse_rrset(e_rrset, connection, zone_id): """ This a parser that allows the passing of any valid ResourceRecordSet tag. It will spit out the appropriate ResourceRecordSet object for the tag. :param lxml.etree._Element e_rrset: The root node of the etree parsed response from the API. :param Route53Connection connection: The connection instance used to query the API. :param str zone_id: The zone ID of the HostedZone these rrsets belong to. :rtype: ResourceRecordSet :returns: An instantiated ResourceRecordSet object. """ # This dict will be used to instantiate a ResourceRecordSet instance to yield. kwargs = { 'connection': connection, 'zone_id': zone_id, } rrset_type = None for e_field in e_rrset: # Cheesy way to strip off the namespace. tag_name = e_field.tag.split('}')[1] field_text = e_field.text if tag_name == 'Type': # Need to store this to determine which ResourceRecordSet # subclass to instantiate. rrset_type = field_text continue elif tag_name == 'AliasTarget': # A records have some special field values we need. alias_hosted_zone_id, alias_dns_name = parse_rrset_alias(e_field) kwargs['alias_hosted_zone_id'] = alias_hosted_zone_id kwargs['alias_dns_name'] = alias_dns_name # Alias A entries have no TTL. kwargs['ttl'] = None continue elif tag_name == 'ResourceRecords': kwargs['records'] = parse_rrset_record_values(e_field) continue # Map the XML tag name to a kwarg name. kw_name = RRSET_TAG_TO_KWARG_MAP[tag_name] # This will be the key/val pair used to instantiate the # ResourceRecordSet instance. kwargs[kw_name] = field_text if not rrset_type: raise Route53Error("No Type tag found in ListResourceRecordSetsResponse.") if 'records' not in kwargs: # Not all rrsets have records. kwargs['records'] = [] RRSetSubclass = RRSET_TYPE_TO_RSET_SUBCLASS_MAP[rrset_type] return RRSetSubclass(**kwargs)
python
{ "resource": "" }
q277475
HostedZone.delete
test
def delete(self, force=False): """ Deletes this hosted zone. After this method is ran, you won't be able to add records, or do anything else with the zone. You'd need to re-create it, as zones are read-only after creation. :keyword bool force: If ``True``, delete the :py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it means nuking all associated record sets. If ``False``, an exception is raised if this :py:class:`HostedZone <route53.hosted_zone.HostedZone>` has record sets. :rtype: dict :returns: A dict of change info, which contains some details about the request. """ self._halt_if_already_deleted() if force: # Forcing deletion by cleaning up all record sets first. We'll # do it all in one change set. cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id) for rrset in self.record_sets: # You can delete a HostedZone if there are only SOA and NS # entries left. So delete everything but SOA/NS entries. if rrset.rrset_type not in ['SOA', 'NS']: cset.add_change('DELETE', rrset) if cset.deletions or cset.creations: # Bombs away. self.connection._change_resource_record_sets(cset) # Now delete the HostedZone. retval = self.connection.delete_hosted_zone_by_id(self.id) # Used to protect against modifying a deleted HostedZone. self._is_deleted = True return retval
python
{ "resource": "" }
q277476
HostedZone._add_record
test
def _add_record(self, record_set_class, name, values, ttl=60, weight=None, region=None,set_identifier=None, alias_hosted_zone_id=None, alias_dns_name=None): """ Convenience method for creating ResourceRecordSets. Most of the calls are basically the same, this saves on repetition. :rtype: tuple :returns: A tuple in the form of ``(rrset, change_info)``, where ``rrset`` is the newly created ResourceRecordSet sub-class instance. """ self._halt_if_already_deleted() rrset_kwargs = dict( connection=self.connection, zone_id=self.id, name=name, ttl=ttl, records=values, weight=weight, region=region, set_identifier=set_identifier, ) if alias_hosted_zone_id or alias_dns_name: rrset_kwargs.update(dict( alias_hosted_zone_id=alias_hosted_zone_id, alias_dns_name=alias_dns_name )) rrset = record_set_class(**rrset_kwargs) cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id) cset.add_change('CREATE', rrset) change_info = self.connection._change_resource_record_sets(cset) return rrset, change_info
python
{ "resource": "" }
q277477
HostedZone.create_a_record
test
def create_a_record(self, name, values, ttl=60, weight=None, region=None,
                    set_identifier=None, alias_hosted_zone_id=None,
                    alias_dns_name=None):
    """
    Creates and returns an A record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :keyword int weight: *Weighted record sets only.* Portion of traffic
        (0-255) routed to this record among sets sharing name and type.
    :keyword str region: *Latency-based record sets.* The Amazon EC2 region
        of the resource this record set points at.
    :keyword str set_identifier: *Weighted and latency record sets only.*
        Differentiates record sets with the same name and type (1-128 chars).
    :keyword str alias_hosted_zone_id: Alias A records only: the hosted
        zone ID for the ELB the alias points at.
    :keyword str alias_dns_name: Alias A records only: the DNS name for the
        ELB that the alias points to.
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created :py:class:`AResourceRecordSet
        <route53.resource_record_set.AResourceRecordSet>` instance.
    """
    self._halt_if_already_deleted()
    # Forward every record parameter explicitly instead of via locals().
    return self._add_record(
        AResourceRecordSet,
        name=name,
        values=values,
        ttl=ttl,
        weight=weight,
        region=region,
        set_identifier=set_identifier,
        alias_hosted_zone_id=alias_hosted_zone_id,
        alias_dns_name=alias_dns_name,
    )
python
{ "resource": "" }
q277478
HostedZone.create_aaaa_record
test
def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None,
                       set_identifier=None):
    """
    Creates an AAAA record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :keyword int weight: *Weighted record sets only.* Portion of traffic
        (0-255) routed to this record among sets sharing name and type.
    :keyword str region: *Latency-based record sets.* The Amazon EC2 region
        of the resource this record set points at.
    :keyword str set_identifier: *Weighted and latency record sets only.*
        Differentiates record sets with the same name and type (1-128 chars).
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created AAAAResourceRecordSet instance.
    """
    self._halt_if_already_deleted()
    # Forward every record parameter explicitly instead of via locals().
    return self._add_record(
        AAAAResourceRecordSet,
        name=name,
        values=values,
        ttl=ttl,
        weight=weight,
        region=region,
        set_identifier=set_identifier,
    )
python
{ "resource": "" }
q277479
HostedZone.create_cname_record
test
def create_cname_record(self, name, values, ttl=60, weight=None, region=None,
                        set_identifier=None):
    """
    Creates a CNAME record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :keyword int weight: *Weighted record sets only.* Portion of traffic
        (0-255) routed to this record among sets sharing name and type.
    :keyword str region: *Latency-based record sets.* The Amazon EC2 region
        of the resource this record set points at.
    :keyword str set_identifier: *Weighted and latency record sets only.*
        Differentiates record sets with the same name and type (1-128 chars).
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created CNAMEResourceRecordSet instance.
    """
    self._halt_if_already_deleted()
    # Forward every record parameter explicitly instead of via locals().
    return self._add_record(
        CNAMEResourceRecordSet,
        name=name,
        values=values,
        ttl=ttl,
        weight=weight,
        region=region,
        set_identifier=set_identifier,
    )
python
{ "resource": "" }
q277480
HostedZone.create_mx_record
test
def create_mx_record(self, name, values, ttl=60):
    """
    Creates a MX record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created MXResourceRecordSet instance.
    """
    self._halt_if_already_deleted()
    # Forward the record parameters explicitly instead of via locals().
    return self._add_record(MXResourceRecordSet, name=name, values=values,
                            ttl=ttl)
python
{ "resource": "" }
q277481
HostedZone.create_ns_record
test
def create_ns_record(self, name, values, ttl=60):
    """
    Creates a NS record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created NSResourceRecordSet instance.
    """
    self._halt_if_already_deleted()
    # Forward the record parameters explicitly instead of via locals().
    return self._add_record(NSResourceRecordSet, name=name, values=values,
                            ttl=ttl)
python
{ "resource": "" }
q277482
HostedZone.create_ptr_record
test
def create_ptr_record(self, name, values, ttl=60):
    """
    Creates a PTR record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created PTRResourceRecordSet instance.
    """
    self._halt_if_already_deleted()
    # Forward the record parameters explicitly instead of via locals().
    return self._add_record(PTRResourceRecordSet, name=name, values=values,
                            ttl=ttl)
python
{ "resource": "" }
q277483
HostedZone.create_spf_record
test
def create_spf_record(self, name, values, ttl=60):
    """
    Creates a SPF record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created SPFResourceRecordSet instance.
    """
    self._halt_if_already_deleted()
    # Forward the record parameters explicitly instead of via locals().
    return self._add_record(SPFResourceRecordSet, name=name, values=values,
                            ttl=ttl)
python
{ "resource": "" }
q277484
HostedZone.create_srv_record
test
def create_srv_record(self, name, values, ttl=60):
    """
    Creates a SRV record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created SRVResourceRecordSet instance.
    """
    self._halt_if_already_deleted()
    # Forward the record parameters explicitly instead of via locals().
    return self._add_record(SRVResourceRecordSet, name=name, values=values,
                            ttl=ttl)
python
{ "resource": "" }
q277485
HostedZone.create_txt_record
test
def create_txt_record(self, name, values, ttl=60, weight=None, region=None,
                      set_identifier=None):
    """
    Creates a TXT record attached to this hosted zone.

    :param str name: The fully qualified name of the record to add.
    :param list values: A list of value strings for the record.
    :keyword int ttl: The time-to-live of the record (in seconds).
    :keyword int weight: *Weighted record sets only.* Portion of traffic
        (0-255) routed to this record among sets sharing name and type.
    :keyword str region: *Latency-based record sets.* The Amazon EC2 region
        of the resource this record set points at.
    :keyword str set_identifier: *Weighted and latency record sets only.*
        Differentiates record sets with the same name and type (1-128 chars).
    :rtype: tuple
    :returns: ``(rrset, change_info)``, where ``rrset`` is the newly
        created TXTResourceRecordSet instance.
    """
    self._halt_if_already_deleted()
    # Forward every record parameter explicitly instead of via locals().
    return self._add_record(
        TXTResourceRecordSet,
        name=name,
        values=values,
        ttl=ttl,
        weight=weight,
        region=region,
        set_identifier=set_identifier,
    )
python
{ "resource": "" }
q277486
EasyID3.RegisterTXXXKey
test
def RegisterTXXXKey(cls, key, desc):
    """Register a user-defined text frame key.

    Some ID3 tags are stored in TXXX frames, which allow a
    freeform 'description' which acts as a subkey,
    e.g. TXXX:BARCODE.::

        EasyID3.RegisterTXXXKey('barcode', 'BARCODE').

    :param key: the easy-key name to expose (e.g. ``'barcode'``).
    :param desc: the TXXX description used as the frame subkey.
    """
    frameid = "TXXX:" + desc

    def getter(id3, key):
        # Return a copy of the frame's text list.
        return list(id3[frameid])

    def setter(id3, key, value):
        try:
            frame = id3[frameid]
        except KeyError:
            # No frame yet: pick an encoding, then add a new TXXX frame.
            enc = 0
            # Store 8859-1 if we can, per MusicBrainz spec.
            try:
                for v in value:
                    v.encode('latin_1')
            except UnicodeError:
                # Any non-Latin-1 value forces UTF-8 (encoding 3).
                enc = 3
            id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc))
        else:
            # Frame already present: just replace its text (the existing
            # frame's encoding is kept as-is).
            frame.text = value

    def deleter(id3, key):
        del(id3[frameid])

    cls.RegisterKey(key, getter, setter, deleter)
python
{ "resource": "" }
q277487
get_change_values
test
def get_change_values(change):
    """
    In the case of deletions, we pull the change values for the XML request
    from the ResourceRecordSet._initial_vals dict, since we want the original
    values. For creations, we pull from the attributes on ResourceRecordSet.

    Since we're dealing with attributes vs. dict key/vals, we'll abstract
    this part away here and just always pass a dict to write_change.

    :param tuple change: ``(action, rrset)`` tuple from a ChangeSet.
    :rtype: dict
    :returns: A dict of change data, used by :py:func:`write_change` to
        write the change request XML.
    """
    action, rrset = change

    if action == 'CREATE':
        # For creations, we want the *current* attribute values, since they
        # don't need to match an existing record set. _initial_vals supplies
        # the key names only; the values are read off the instance.
        return {key: getattr(rrset, key) for key in rrset._initial_vals}

    # For deletions we must match the values currently in Route53, which is
    # exactly the snapshot taken when the record set was loaded.
    return rrset._initial_vals
python
{ "resource": "" }
q277488
write_change
test
def write_change(change):
    """
    Creates an XML element for the change.

    :param tuple change: A change tuple from a ChangeSet. Comes in the form
        of ``(action, rrset)``.
    :rtype: lxml.etree._Element
    :returns: A fully baked Change tag.
    """
    action, rrset = change
    # Current values for CREATE, original snapshot for DELETE.
    change_vals = get_change_values(change)

    e_change = etree.Element("Change")

    e_action = etree.SubElement(e_change, "Action")
    e_action.text = action

    e_rrset = etree.SubElement(e_change, "ResourceRecordSet")

    e_name = etree.SubElement(e_rrset, "Name")
    e_name.text = change_vals['name']

    e_type = etree.SubElement(e_rrset, "Type")
    e_type.text = rrset.rrset_type

    if change_vals.get('set_identifier'):
        e_set_id = etree.SubElement(e_rrset, "SetIdentifier")
        e_set_id.text = change_vals['set_identifier']

    if change_vals.get('weight'):
        e_weight = etree.SubElement(e_rrset, "Weight")
        # Weight is documented as an int (0-255), and element text must be
        # a string — coerce it just like TTL below.
        e_weight.text = str(change_vals['weight'])

    if change_vals.get('alias_hosted_zone_id') or change_vals.get('alias_dns_name'):
        # Alias mode: point at another hosted zone/DNS name (e.g. an ELB).
        e_alias_target = etree.SubElement(e_rrset, "AliasTarget")
        e_hosted_zone_id = etree.SubElement(e_alias_target, "HostedZoneId")
        e_hosted_zone_id.text = change_vals['alias_hosted_zone_id']
        e_dns_name = etree.SubElement(e_alias_target, "DNSName")
        e_dns_name.text = change_vals['alias_dns_name']

    if change_vals.get('region'):
        # Dedicated variable; the original confusingly re-used e_weight here.
        e_region = etree.SubElement(e_rrset, "Region")
        e_region.text = change_vals['region']

    e_ttl = etree.SubElement(e_rrset, "TTL")
    e_ttl.text = str(change_vals['ttl'])

    if rrset.is_alias_record_set():
        # A record sets in Alias mode don't have any resource records.
        return e_change

    e_resource_records = etree.SubElement(e_rrset, "ResourceRecords")

    for value in change_vals['records']:
        e_resource_record = etree.SubElement(e_resource_records, "ResourceRecord")

        e_value = etree.SubElement(e_resource_record, "Value")
        e_value.text = value

    return e_change
python
{ "resource": "" }
q277489
change_resource_record_set_writer
test
def change_resource_record_set_writer(connection, change_set, comment=None):
    """
    Forms an XML string that we'll send to Route53 in order to change
    record sets.

    :param Route53Connection connection: The connection instance used to
        query the API.
    :param change_set.ChangeSet change_set: The ChangeSet object to create the
        XML doc from.
    :keyword str comment: An optional comment to go along with the request.
    :rtype: str
    :returns: The serialized ChangeResourceRecordSetsRequest XML document.
    """
    e_root = etree.Element(
        "ChangeResourceRecordSetsRequest",
        xmlns=connection._xml_namespace
    )

    e_change_batch = etree.SubElement(e_root, "ChangeBatch")

    if comment:
        e_comment = etree.SubElement(e_change_batch, "Comment")
        e_comment.text = comment

    e_changes = etree.SubElement(e_change_batch, "Changes")

    # Deletions need to come first in the change sets.
    for change in change_set.deletions + change_set.creations:
        e_changes.append(write_change(change))

    e_tree = etree.ElementTree(element=e_root)

    fobj = BytesIO()
    # This writes bytes; decode back to str for the caller below.
    e_tree.write(fobj, xml_declaration=True, encoding='utf-8', method="xml")

    return fobj.getvalue().decode('utf-8')
python
{ "resource": "" }
q277490
init_logs
test
def init_logs():
    """Initiate the log file and configure the ``logging`` module.

    The log file is created in the user's home directory and named after
    the start time (``nanoGUI_YYYYmmdd_HHMM.log``).

    :returns: the path of the created log file.
    """
    start_time = dt.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M')
    # Join the directory and file name properly instead of string-
    # concatenating a "/" (the original passed one pre-built string to
    # os.path.join, which was a no-op and mixed separators on Windows).
    logname = os.path.join(os.path.expanduser("~"),
                           "nanoGUI_" + start_time + ".log")
    handlers = [logging.FileHandler(logname)]
    logging.basicConfig(
        format='%(asctime)s %(message)s',
        handlers=handlers,
        level=logging.INFO)
    logging.info('NanoGUI {} started with NanoPlot {}'.format(__version__, nanoplot.__version__))
    logging.info('Python version is: {}'.format(sys.version.replace('\n', ' ')))
    return logname
python
{ "resource": "" }
q277491
NavigationBar.alias_item
test
def alias_item(self, alias):
    """Return the item registered under the given alias.

    The alias is first resolved to the item's identity, which is then
    looked up in the bar's item registry.
    """
    identity = self.alias[alias]
    return self.items[identity]
python
{ "resource": "" }
q277492
freeze_dict
test
def freeze_dict(dict_):
    """Freezes ``dict`` into ``tuple``.

    A typical usage is packing ``dict`` into hashable.

    e.g.::

        >>> freeze_dict({'a': 1, 'b': 2})
        (('a', 1), ('b', 2))
    """
    # Sort on the key alone: dict keys are unique, so the values are never
    # compared (they may not even be orderable).
    return tuple(sorted(dict_.items(), key=lambda pair: pair[0]))
python
{ "resource": "" }
q277493
join_html_attrs
test
def join_html_attrs(attrs):
    """Joins the map structure into HTML attributes.

    The return value is a 2-tuple ``(template, ordered_values)``. It should
    be passed into :class:`markupsafe.Markup` to prevent XSS attacked.

    e.g.::

        >>> join_html_attrs({'href': '/', 'data-active': 'true'})
        ('data-active="{0}" href="{1}"', ['true', '/'])
    """
    # Key-sorted pairs give a deterministic attribute order (inlined
    # equivalent of freeze_dict).
    ordered = collections.OrderedDict(
        sorted((attrs or {}).items(), key=operator.itemgetter(0)))
    template = ' '.join(
        '%s="{%d}"' % (key, index) for index, key in enumerate(ordered))
    return template, list(ordered.values())
python
{ "resource": "" }
q277494
Navigation.init_app
test
def init_app(self, app): """Initializes an app to work with this extension. The app-context signals will be subscribed and the template context will be initialized. :param app: the :class:`flask.Flask` app instance. """ # connects app-level signals appcontext_pushed.connect(self.initialize_bars, app) # integrate with jinja template app.add_template_global(self, 'nav')
python
{ "resource": "" }
q277495
Navigation.initialize_bars
test
def initialize_bars(self, sender=None, **kwargs):
    """Calls the initializers of all bound navigation bars.

    Each initializer receives this extension instance as its argument.
    """
    for navigation_bar in self.bars.values():
        for init_fn in navigation_bar.initializers:
            init_fn(self)
python
{ "resource": "" }
q277496
Navigation.bind_bar
test
def bind_bar(self, sender=None, **kwargs):
    """Binds a navigation bar into this extension instance.

    The bar is passed via the ``bar`` keyword argument and registered
    under its own name.
    """
    navigation_bar = kwargs.pop('bar')
    self.bars[navigation_bar.name] = navigation_bar
python
{ "resource": "" }
q277497
Item.args
test
def args(self):
    """The arguments which will be passed to ``url_for``.

    :type: :class:`dict`
    """
    raw = self._args
    if raw is None:
        return {}
    # A callable produces the arguments lazily; otherwise copy the mapping.
    return dict(raw() if callable(raw) else raw)
python
{ "resource": "" }
q277498
Item.url
test
def url(self):
    """The final url of this navigation item.

    Internal items build their URL from :attr:`endpoint` and :attr:`args`
    via :func:`flask.url_for`, which requires an app context unless the
    :const:`SERVER_NAME` config value is provided. External items return
    the URL they were created with verbatim.

    :type: :class:`str`
    """
    if not self.is_internal:
        return self._url
    return url_for(self.endpoint, **self.args)
python
{ "resource": "" }
q277499
Item.is_current
test
def is_current(self):
    """``True`` if current request has same endpoint with the item.

    The property should be used in a bound request context, or the
    :class:`RuntimeError` may be raised.
    """
    # External URLs can never match an internal endpoint.
    if not self.is_internal:
        return False
    return (request.endpoint == self.endpoint
            and request.view_args == self.args)
python
{ "resource": "" }