_id (stringlengths 2-7) | title (stringlengths 1-88) | partition (stringclasses, 3 values) | text (stringlengths 31-13.1k) | language (stringclasses, 1 value) | meta_information (dict)
---|---|---|---|---|---
q277400
|
SourceProcessor.parse_file
|
test
|
def parse_file( self, filename ):
"""parse a C source file, and add its blocks to the processor's list"""
self.reset()
self.filename = filename
fileinput.close()
self.format = None
self.lineno = 0
self.lines = []
for line in fileinput.input( filename ):
# strip trailing newlines, important on Windows machines!
if line[-1] == '\012':
line = line[0:-1]
if self.format == None:
self.process_normal_line( line )
else:
if self.format.end.match( line ):
# that's a normal block end, add it to 'lines' and
# create a new block
self.lines.append( line )
self.add_block_lines()
|
python
|
{
"resource": ""
}
|
q277401
|
SourceProcessor.process_normal_line
|
test
|
def process_normal_line( self, line ):
"""process a normal line and check whether it is the start of a new block"""
for f in re_source_block_formats:
if f.start.match( line ):
self.add_block_lines()
|
python
|
{
"resource": ""
}
|
q277402
|
SourceProcessor.add_block_lines
|
test
|
def add_block_lines( self ):
"""add the current accumulated lines and create a new block"""
if self.lines != []:
block = SourceBlock( self, self.filename,
|
python
|
{
"resource": ""
}
|
q277403
|
draw_string
|
test
|
def draw_string(font, text, x, y, width=None, height=None, align=Alignment.left, vertical_align=VerticalAlignment.baseline):
'''Draw a string with the given font.
:note: Text alignment and word-wrapping are not yet implemented. The text is rendered with the left edge and
baseline at ``(x, y)``.
:param font: the :class:`Font` to render text with
|
python
|
{
"resource": ""
}
|
q277404
|
parse_iso_8601_time_str
|
test
|
def parse_iso_8601_time_str(time_str):
"""
Parses a standard ISO 8601 time string. The Route53 API uses these here
and there.
:param str time_str: An ISO 8601 time string.
:rtype: datetime.datetime
:returns: A timezone aware (UTC) datetime.datetime instance.
"""
if re.search('\.\d{3}Z$', time_str):
submitted_at = datetime.datetime.strptime(time_str, \
|
python
|
{
"resource": ""
}
|
q277405
|
HtmlFormatter.make_html_words
|
test
|
def make_html_words( self, words ):
""" convert a series of simple words into some HTML text """
line = ""
if words:
line = html_quote( words[0] )
|
python
|
{
"resource": ""
}
|
q277406
|
HtmlFormatter.make_html_word
|
test
|
def make_html_word( self, word ):
"""analyze a simple word to detect cross-references and styling"""
# look for cross-references
m = re_crossref.match( word )
if m:
try:
name = m.group( 1 )
rest = m.group( 2 )
block = self.identifiers[name]
url = self.make_block_url( block )
return '<a href="' + url + '">' + name + '</a>' + rest
|
python
|
{
"resource": ""
}
|
q277407
|
HtmlFormatter.make_html_para
|
test
|
def make_html_para( self, words ):
""" convert words of a paragraph into tagged HTML text, handle xrefs """
line = ""
if words:
line = self.make_html_word( words[0] )
for word in words[1:]:
line = line + " " + self.make_html_word( word )
|
python
|
{
"resource": ""
}
|
q277408
|
HtmlFormatter.make_html_code
|
test
|
def make_html_code( self, lines ):
""" convert a code sequence to HTML """
line = code_header + '\n'
for l in lines:
|
python
|
{
"resource": ""
}
|
q277409
|
HtmlFormatter.make_html_items
|
test
|
def make_html_items( self, items ):
""" convert a field's content into some valid HTML """
lines = []
for item in items:
if item.lines:
lines.append( self.make_html_code( item.lines ) )
|
python
|
{
"resource": ""
}
|
q277410
|
MP4Tags.save
|
test
|
def save(self, filename):
"""Save the metadata to the given filename."""
values = []
items = sorted(self.items(), key=MP4Tags.__get_sort_stats )
for key, value in items:
info = self.__atoms.get(key[:4], (None, type(self).__render_text))
try:
values.append(info[1](self, key, value, *info[2:]))
except (TypeError, ValueError) as s:
reraise(MP4MetadataValueError, s, sys.exc_info()[2])
data = Atom.render(b"ilst", b"".join(values))
# Find the old atoms.
fileobj = open(filename, "rb+")
try:
|
python
|
{
"resource": ""
}
|
q277411
|
MP4Tags.__update_parents
|
test
|
def __update_parents(self, fileobj, path, delta):
"""Update all parent atoms with the new size."""
for atom in path:
fileobj.seek(atom.offset)
size = cdata.uint_be(fileobj.read(4))
if size == 1: # 64bit
# skip name (4B) and read size (8B)
size = cdata.ulonglong_be(fileobj.read(12)[4:])
fileobj.seek(atom.offset + 8)
|
python
|
{
"resource": ""
}
|
q277412
|
run
|
test
|
def run(game):
'''Start running the game. The window is created and shown at this point, and then
the main event loop is entered. 'game.on_tick' and other event handlers are called
repeatedly until the game exits.
If a game is already running, this function replaces the :class:`Game` instance that
receives events.
'''
if bacon._current_game:
bacon._current_game = game
return
global _tick_callback_handle
bacon._current_game = game
# Window handler
window_resize_callback_handle = lib.WindowResizeEventHandler(window._window_resize_event_handler)
lib.SetWindowResizeEventHandler(window_resize_callback_handle)
# Key handler
key_callback_handle = lib.KeyEventHandler(keyboard._key_event_handler)
lib.SetKeyEventHandler(key_callback_handle)
# Mouse handlers
mouse_button_callback_handle = lib.MouseButtonEventHandler(mouse_input._mouse_button_event_handler)
lib.SetMouseButtonEventHandler(mouse_button_callback_handle)
mouse_scroll_callback_handle = lib.MouseScrollEventHandler(mouse_input._mouse_scroll_event_handler)
lib.SetMouseScrollEventHandler(mouse_scroll_callback_handle)
# Controller handlers
controller_connected_handle = lib.ControllerConnectedEventHandler(controller._controller_connected_event_handler)
lib.SetControllerConnectedEventHandler(controller_connected_handle)
|
python
|
{
"resource": ""
}
|
q277413
|
ControllerMapping.register
|
test
|
def register(cls, vendor_id, product_id, mapping):
'''Register a mapping for controllers with the given vendor and product IDs. The mapping will
replace any existing mapping for these IDs for controllers not yet connected.
:param vendor_id: the vendor ID of the controller, as reported by :attr:`Controller.vendor_id`
:param product_id: the product ID of the
|
python
|
{
"resource": ""
}
|
q277414
|
ControllerMapping.get
|
test
|
def get(cls, controller):
'''Find a mapping that can apply to the given controller. Returns None if unsuccessful.
:param controller: :class:`Controller` to look up
:return: :class:`ControllerMapping`
'''
try:
|
python
|
{
"resource": ""
}
|
q277415
|
EasyMP4Tags.RegisterFreeformKey
|
test
|
def RegisterFreeformKey(cls, key, name, mean=b"com.apple.iTunes"):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of MP4 freeform atom (----) and name to EasyMP4Tags key, then
you can use this function::
EasyMP4Tags.RegisterFreeformKey(
"musicbrainz_artistid", b"MusicBrainz Artist Id")
"""
atomid = b"----:" + mean + b":" + name
def getter(tags, key):
return [s.decode("utf-8", "replace")
|
python
|
{
"resource": ""
}
|
q277416
|
BaseTransport._hmac_sign_string
|
test
|
def _hmac_sign_string(self, string_to_sign):
"""
Route53 uses an AWS HMAC-based authentication scheme, involving the
signing of a date string with the user's secret access key. More details
on the specifics can be found in their documentation_.
.. _documentation: http://docs.amazonwebservices.com/Route53/latest/DeveloperGuide/RESTAuthentication.html
This method is used to sign said time string, for use in the request
headers.
:param str string_to_sign: The time string to sign.
:rtype: str
:returns: An HMAC signed string.
"""
#
|
python
|
{
"resource": ""
}
|
q277417
|
BaseTransport.get_request_headers
|
test
|
def get_request_headers(self):
"""
Determine the headers to send along with the request. These are
pretty much the same for every request, with Route53.
"""
date_header = time.asctime(time.gmtime())
# We sign the time string above with the user's AWS secret access key
# in order to authenticate our request.
signing_key = self._hmac_sign_string(date_header)
# Amazon's super fun
|
python
|
{
"resource": ""
}
|
q277418
|
BaseTransport.send_request
|
test
|
def send_request(self, path, data, method):
"""
All outbound requests go through this method. It defers to the
transport's various HTTP method-specific methods.
:param str path: The path to tack on to the endpoint URL for
the query.
:param data: The params to send along with the request.
:type data: Either a dict or bytes, depending on the request type.
:param str method: One of 'GET', 'POST', or 'DELETE'.
:rtype: str
:returns: The body of the response.
"""
headers = self.get_request_headers()
|
python
|
{
"resource": ""
}
|
q277419
|
RequestsTransport._send_get_request
|
test
|
def _send_get_request(self, path, params, headers):
"""
Sends the GET request to the Route53 endpoint.
:param str path: The path to tack on to the endpoint URL for
the query.
:param dict params: Key/value pairs to send.
:param dict headers: A dict of headers to send with the request.
|
python
|
{
"resource": ""
}
|
q277420
|
RequestsTransport._send_post_request
|
test
|
def _send_post_request(self, path, data, headers):
"""
Sends the POST request to the Route53 endpoint.
:param str path: The path to tack on to the endpoint URL for
the query.
:param data: Either a dict, or bytes.
:type data: dict or bytes
:param dict headers: A
|
python
|
{
"resource": ""
}
|
q277421
|
RequestsTransport._send_delete_request
|
test
|
def _send_delete_request(self, path, headers):
"""
Sends the DELETE request to the Route53 endpoint.
:param str path: The path to tack on to the endpoint URL for
the query.
:param dict headers: A dict of headers to send with the request.
|
python
|
{
"resource": ""
}
|
q277422
|
APEValue
|
test
|
def APEValue(value, kind):
"""APEv2 tag value factory.
Use this if you need to specify the value's type manually. Binary
and text data are automatically detected by APEv2.__setitem__.
"""
if kind in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
# stricter with py3
if PY3:
raise TypeError("str only for text/external values")
else:
value = value.encode("utf-8")
if kind
|
python
|
{
"resource": ""
}
|
q277423
|
Route53Connection._send_request
|
test
|
def _send_request(self, path, data, method):
"""
Uses the HTTP transport to query the Route53 API. Runs the response
through lxml's parser, before we hand it off for further picking
apart by our call-specific parsers.
:param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
|
python
|
{
"resource": ""
}
|
q277424
|
Route53Connection._do_autopaginating_api_call
|
test
|
def _do_autopaginating_api_call(self, path, params, method, parser_func,
next_marker_xpath, next_marker_param_name,
next_type_xpath=None, parser_kwargs=None):
"""
Given an API method, the arguments passed to it, and a function to
hand parsing off to, loop through the record sets in the API call
until all records have been yielded.
:param str method: The API method on the endpoint.
:param dict params: The kwargs from the top-level API method.
:param callable parser_func: A callable that is used for parsing the
output from the API call.
:param str next_marker_xpath: The XPath to the marker tag that
will determine whether we continue paginating.
:param str next_marker_param_name: The parameter name to manipulate
in the request data to bring up the next page on the next
request loop.
:keyword str next_type_xpath: For the
py:meth:`list_resource_record_sets_by_zone_id` method, there's
an additional paginator token. Specifying this XPath looks for it.
:keyword dict parser_kwargs:
|
python
|
{
"resource": ""
}
|
q277425
|
Route53Connection.list_hosted_zones
|
test
|
def list_hosted_zones(self, page_chunks=100):
"""
List all hosted zones associated with this connection's account. Since
this method returns a generator, you can pull as many or as few
entries as you'd like, without having to query and receive every
hosted zone you may have.
:keyword int page_chunks: This API call is "paginated" behind-the-scenes
in order to break up large result sets. This number determines
the maximum number of
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances to retrieve per request. The default is fine for almost
everyone.
|
python
|
{
"resource": ""
}
|
q277426
|
Route53Connection.create_hosted_zone
|
test
|
def create_hosted_zone(self, name, caller_reference=None, comment=None):
"""
Creates and returns a new hosted zone. Once a hosted zone is created,
its details can't be changed.
:param str name: The name of the hosted zone to create.
:keyword str caller_reference: A unique string that identifies the
request and that allows failed create_hosted_zone requests to be
retried without the risk of executing the operation twice. If no
value is given, we'll generate a Type 4 UUID for you.
:keyword str comment: An optional comment to attach to
|
python
|
{
"resource": ""
}
|
q277427
|
Route53Connection._list_resource_record_sets_by_zone_id
|
test
|
def _list_resource_record_sets_by_zone_id(self, id, rrset_type=None,
identifier=None, name=None,
page_chunks=100):
"""
Lists a hosted zone's resource record sets by Zone ID, if you
already know it.
.. tip:: For most cases, we recommend going through a
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instance's
:py:meth:`HostedZone.record_sets <route53.hosted_zone.HostedZone.record_sets>`
property, but this saves an HTTP request if you already know the
zone's ID.
:param str id: The ID of the zone whose record sets we're listing.
:keyword str rrset_type: The type of resource record set to begin the
record listing from.
:keyword str identifier: Weighted and latency resource record sets
only: If results were truncated for a given DNS name and type,
the value of SetIdentifier for the next resource record set
that has the current DNS name and type.
:keyword str name: Not really sure what this does.
:keyword int page_chunks: This API call is paginated behind-the-scenes
by this many ResourceRecordSet instances. The default should be
|
python
|
{
"resource": ""
}
|
q277428
|
Route53Connection._change_resource_record_sets
|
test
|
def _change_resource_record_sets(self, change_set, comment=None):
"""
Given a ChangeSet, POST it to the Route53 API.
.. note:: You probably shouldn't be using this method directly,
as there are convenience methods on the ResourceRecordSet
sub-classes.
:param change_set.ChangeSet change_set: The ChangeSet object to create
the XML doc from.
:keyword str comment: An optional comment to go along with the request.
:rtype: dict
|
python
|
{
"resource": ""
}
|
q277429
|
draw_image
|
test
|
def draw_image(image, x1, y1, x2 = None, y2 = None):
'''Draw an image.
The image's top-left corner is drawn at ``(x1, y1)``, and its lower-right at ``(x2, y2)``. If ``x2`` and ``y2`` are omitted, they
are calculated to render the image at its native resolution.
Note that images can be flipped and scaled by providing alternative values for ``x2`` and ``y2``.
:param image: an
|
python
|
{
"resource": ""
}
|
q277430
|
draw_image_region
|
test
|
def draw_image_region(image, x1, y1, x2, y2,
ix1, iy1, ix2, iy2):
'''Draw a rectangular region of an image.
The part of the image contained by the rectangle in texel-space by the coordinates ``(ix1, iy1)`` to ``(ix2, iy2)`` is
drawn at coordinates ``(x1, y1)`` to ``(x2, y2)``. All coordinates have the origin ``(0, 0)`` at the upper-left corner.
For example, to draw the left half of a ``100x100`` image at coordinates ``(x, y)``::
|
python
|
{
"resource": ""
}
|
q277431
|
OggPage.size
|
test
|
def size(self):
"""Total frame size."""
header_size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
header_size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple
|
python
|
{
"resource": ""
}
|
q277432
|
OggPage.replace
|
test
|
def replace(cls, fileobj, old_pages, new_pages):
"""Replace old_pages with new_pages within fileobj.
old_pages must have come from reading fileobj originally.
new_pages are assumed to have the 'same' data as old_pages,
and so the serial and sequence numbers will be copied, as will
the flags for the first and last pages.
fileobj will be resized and pages renumbered as necessary. As
such, it must be opened r+b or w+b.
"""
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in zip(new_pages, range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
new_pages[0].first = old_pages[0].first
new_pages[0].last = old_pages[0].last
new_pages[0].continued = old_pages[0].continued
new_pages[-1].first = old_pages[-1].first
new_pages[-1].last = old_pages[-1].last
new_pages[-1].complete = old_pages[-1].complete
if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
new_pages[-1].position = -1
new_data = b''.join(cls.write(p) for p in new_pages)
# Make room in the file for the new data.
delta = len(new_data)
fileobj.seek(old_pages[0].offset, 0)
insert_bytes(fileobj, delta, old_pages[0].offset)
fileobj.seek(old_pages[0].offset, 0)
fileobj.write(new_data)
|
python
|
{
"resource": ""
}
|
q277433
|
OggPage.find_last
|
test
|
def find_last(fileobj, serial):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try:
fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try:
index = data.rindex(b"OggS")
except ValueError:
raise error("unable to find final Ogg header")
bytesobj = cBytesIO(data[index:])
best_page = None
try:
page = OggPage(bytesobj)
except error:
pass
else:
if page.serial == serial:
if page.last:
|
python
|
{
"resource": ""
}
|
q277434
|
ContentProcessor.set_section
|
test
|
def set_section( self, section_name ):
"""set current section during parsing"""
if not self.sections.has_key( section_name ):
section = DocSection( section_name )
self.sections[section_name] = section
|
python
|
{
"resource": ""
}
|
q277435
|
ContentProcessor.add_markup
|
test
|
def add_markup( self ):
"""add a new markup section"""
if self.markup and self.markup_lines:
# get rid of last line of markup if it's empty
marks = self.markup_lines
if len( marks ) > 0 and not string.strip( marks[-1] ):
|
python
|
{
"resource": ""
}
|
q277436
|
ContentProcessor.process_content
|
test
|
def process_content( self, content ):
"""process a block content and return a list of DocMarkup objects
corresponding to it"""
markup = None
markup_lines = []
first = 1
for line in content:
found = None
for t in re_markup_tags:
m = t.match( line )
if m:
found = string.lower( m.group( 1 ) )
prefix = len( m.group( 0 ) )
line = " " * prefix + line[prefix:] # remove markup from line
break
# is it the start of a new markup section ?
if found:
first = 0
|
python
|
{
"resource": ""
}
|
q277437
|
DocBlock.get_markup
|
test
|
def get_markup( self, tag_name ):
"""return the DocMarkup corresponding to a given tag in a block"""
for m in self.markups:
|
python
|
{
"resource": ""
}
|
q277438
|
create_hosted_zone_writer
|
test
|
def create_hosted_zone_writer(connection, name, caller_reference, comment):
"""
Forms an XML string that we'll send to Route53 in order to create
a new hosted zone.
:param Route53Connection connection: The connection instance used to
query the API.
:param str name: The name of the hosted zone to create.
"""
if not caller_reference:
caller_reference = str(uuid.uuid4())
e_root = etree.Element(
|
python
|
{
"resource": ""
}
|
q277439
|
lock
|
test
|
def lock(fileobj):
"""Lock a file object 'safely'.
That means a failure to lock because the platform doesn't
support fcntl or filesystem locks is not considered a
failure. This call does block.
Returns whether or not the lock was successful, or
raises an exception in more extreme circumstances (full
lock table, invalid file).
"""
try:
import fcntl
except ImportError:
return False
else:
try:
|
python
|
{
"resource": ""
}
|
q277440
|
insert_bytes
|
test
|
def insert_bytes(fobj, size, offset, BUFFER_SIZE=2**16):
"""Insert size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent. Mutagen tries to use mmap to resize the file, but
falls back to a significantly slower method if mmap fails.
"""
assert 0 < size
assert 0 <= offset
locked = False
fobj.seek(0, 2)
filesize = fobj.tell()
movesize = filesize - offset
fobj.write(b'\x00' * size)
fobj.flush()
try:
try:
import mmap
file_map = mmap.mmap(fobj.fileno(), filesize + size)
try:
file_map.move(offset + size, offset, movesize)
finally:
file_map.close()
except (ValueError, EnvironmentError, ImportError):
# handle broken mmap scenarios
locked = lock(fobj)
|
python
|
{
"resource": ""
}
|
q277441
|
delete_bytes
|
test
|
def delete_bytes(fobj, size, offset, BUFFER_SIZE=2**16):
"""Delete size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent. Mutagen tries to use mmap to resize the file, but
falls back to a significantly slower method if mmap fails.
"""
locked = False
assert 0 < size
assert 0 <= offset
fobj.seek(0, 2)
filesize = fobj.tell()
movesize = filesize - offset - size
assert 0 <= movesize
try:
if movesize > 0:
fobj.flush()
try:
import mmap
file_map = mmap.mmap(fobj.fileno(), filesize)
try:
file_map.move(offset, offset + size, movesize)
finally:
file_map.close()
except (ValueError, EnvironmentError, ImportError):
|
python
|
{
"resource": ""
}
|
q277442
|
utf8
|
test
|
def utf8(data):
"""Convert a basestring to a valid UTF-8 str."""
if isinstance(data, bytes):
return data.decode("utf-8", "replace").encode("utf-8")
|
python
|
{
"resource": ""
}
|
q277443
|
ChangeSet.add_change
|
test
|
def add_change(self, action, record_set):
"""
Adds a change to this change set.
:param str action: Must be one of either 'CREATE' or 'DELETE'.
:param resource_record_set.ResourceRecordSet record_set: The
ResourceRecordSet object that was created or deleted.
"""
action = action.upper()
|
python
|
{
"resource": ""
}
|
q277444
|
parse_change_info
|
test
|
def parse_change_info(e_change_info):
"""
Parses a ChangeInfo tag. Seen in CreateHostedZone, DeleteHostedZone,
and ChangeResourceRecordSetsRequest.
:param lxml.etree._Element e_change_info: A ChangeInfo element.
:rtype: dict
:returns: A dict representation of the change info.
"""
if e_change_info is None:
return e_change_info
status =
|
python
|
{
"resource": ""
}
|
q277445
|
Font.measure_string
|
test
|
def measure_string(self, str):
'''Calculates the width of the given string in this font.
:param str: the string to measure
:return float: width of the string, in pixels
'''
style = bacon.text.Style(self)
run =
|
python
|
{
"resource": ""
}
|
q277446
|
ResourceRecordSet.is_modified
|
test
|
def is_modified(self):
"""
Determines whether this record set has been modified since the
last retrieval or save.
:rtype: bool
:returns: ``True`` if the record set has been modified,
|
python
|
{
"resource": ""
}
|
q277447
|
ResourceRecordSet.delete
|
test
|
def delete(self):
"""
Deletes this record set.
"""
cset = ChangeSet(connection=self.connection, hosted_zone_id=self.zone_id)
|
python
|
{
"resource": ""
}
|
q277448
|
ResourceRecordSet.save
|
test
|
def save(self):
"""
Saves any changes to this record set.
"""
cset = ChangeSet(connection=self.connection, hosted_zone_id=self.zone_id)
# Record sets can't actually be modified. You have to delete the
# existing one and create a new one. Since this happens within a single
# change set, it appears that the values
|
python
|
{
"resource": ""
}
|
q277449
|
ParseID3v1
|
test
|
def ParseID3v1(data):
"""Parse an ID3v1 tag, returning a list of ID3v2.4 frames."""
try:
data = data[data.index(b'TAG'):]
except ValueError:
return None
if 128 < len(data) or len(data) < 124:
return None
# Issue #69 - Previous versions of Mutagen, when encountering
# out-of-spec TDRC and TYER frames of less than four characters,
# wrote only the characters available - e.g. "1" or "" - into the
# year field. To parse those, reduce the size of the year field.
# Amazingly, "0s" works as a struct format string.
unpack_fmt = "3s30s30s30s%ds29sBB" % (len(data) - 124)
try:
tag, title, artist, album, year, comment, track, genre = unpack(
unpack_fmt, data)
except StructError:
return None
if tag != b"TAG":
return None
def fix(data):
return data.split(b'\x00')[0].strip().decode('latin1')
title, artist, album, year, comment = map(
fix, [title, artist, album, year, comment])
frames = {}
if title:
frames['TIT2'] = TIT2(encoding=0, text=title)
if artist:
frames['TPE1'] = TPE1(encoding=0, text=[artist])
if album:
|
python
|
{
"resource": ""
}
|
q277450
|
MakeID3v1
|
test
|
def MakeID3v1(id3):
"""Return an ID3v1.1 tag string from a dict of ID3v2.4 frames."""
v1 = {}
for v2id, name in {"TIT2": "title", "TPE1": "artist",
"TALB": "album"}.items():
if v2id in id3:
text = id3[v2id].text[0].encode('latin1', 'replace')[:30]
else:
text = b''
v1[name] = text + (b'\x00' * (30 - len(text)))
if "COMM" in id3:
cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28]
else:
cmnt = b''
v1['comment'] = cmnt + (b'\x00' * (29 - len(cmnt)))
if "TRCK" in id3:
try:
v1["track"] = chr_(+id3["TRCK"])
except ValueError:
v1["track"] = b'\x00'
else:
v1["track"] = b'\x00'
if "TCON" in id3:
try:
genre = id3["TCON"].genres[0]
except IndexError:
pass
else:
if genre in TCON.GENRES:
|
python
|
{
"resource": ""
}
|
q277451
|
ID3.__fullread
|
test
|
def __fullread(self, size):
""" Read a certain number of bytes from the source file. """
try:
if size < 0:
raise ValueError('Requested bytes (%s) less than zero' % size)
if size > self.__filesize:
raise EOFError('Requested %#x of %#x (%s)' % (
|
python
|
{
"resource": ""
}
|
q277452
|
ID3.delall
|
test
|
def delall(self, key):
"""Delete all tags of a given kind; see getall."""
if key in self:
del(self[key])
else:
key = key + ":"
|
python
|
{
"resource": ""
}
|
q277453
|
ID3.loaded_frame
|
test
|
def loaded_frame(self, tag):
"""Deprecated; use the add method."""
# turn 2.2 into 2.3/2.4 tags
if len(type(tag).__name__) == 3:
|
python
|
{
"resource": ""
}
|
q277454
|
ID3.__update_common
|
test
|
def __update_common(self):
"""Updates done by both v23 and v24 update"""
if "TCON" in self:
# Get rid of "(xx)Foobr" format.
self["TCON"].genres = self["TCON"].genres
if self.version < self._V23:
# ID3v2.2 PIC frames are slightly different.
pics = self.getall("APIC")
mimes = {"PNG": "image/png", "JPG": "image/jpeg"}
self.delall("APIC")
for pic in pics:
newpic = APIC(
|
python
|
{
"resource": ""
}
|
q277455
|
ID3.update_to_v24
|
test
|
def update_to_v24(self):
"""Convert older tags into an ID3v2.4 tag.
This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to
TDRC). If you intend to save tags, you must call this function
at some point; it is called by default when loading the tag.
"""
self.__update_common()
if self.__unknown_version == self._V23:
# convert unknown 2.3 frames (flags/size) to 2.4
converted = []
for frame in self.unknown_frames:
try:
name, size, flags = unpack('>4sLH', frame[:10])
frame = BinaryFrame.fromData(self, flags, frame[10:])
except (struct.error, error):
continue
name = name.decode('ascii')
converted.append(self.__save_frame(frame, name=name))
self.unknown_frames[:] = converted
self.__unknown_version = self._V24
# TDAT, TYER, and TIME have been turned into TDRC.
try:
date = text_type(self.get("TYER", ""))
if date.strip(u"\x00"):
self.pop("TYER")
dat = text_type(self.get("TDAT", ""))
if dat.strip("\x00"):
self.pop("TDAT")
date = "%s-%s-%s" % (date, dat[2:], dat[:2])
time = text_type(self.get("TIME", ""))
if time.strip("\x00"):
self.pop("TIME")
date += "T%s:%s:00" % (time[:2], time[2:])
if "TDRC" not in self:
self.add(TDRC(encoding=0, text=date))
except UnicodeDecodeError:
|
python
|
{
"resource": ""
}
|
q277456
|
Sound.unload
|
test
|
def unload(self):
'''Release all resources associated with the sound.'''
if self._handle != -1:
|
python
|
{
"resource": ""
}
|
q277457
|
Sound.play
|
test
|
def play(self, gain=None, pan=None, pitch=None):
'''Play the sound as a `one-shot`.
The sound will be played to completion. If the sound is played more than once at a time, it will mix
with all previous instances of itself. If you need more control over the playback of sounds, see
:class:`Voice`.
:param gain: optional volume level to play the sound back at, between 0.0 and 1.0 (defaults to 1.0)
:param pan: optional stereo pan, between -1.0 (left) and 1.0 (right)
:param pitch: optional sampling rate modification, between 0.4 and 16.0, where 1.0 represents the original pitch
'''
if gain is None and pan is
|
python
|
{
"resource": ""
}
|
q277458
|
Voice.set_loop_points
|
test
|
def set_loop_points(self, start_sample=-1, end_sample=0):
'''Set the loop points within the sound.
The sound must have been created with ``loop=True``. The default parameters cause the loop points to be set to
the entire sound duration.
:note: There is currently no API for converting sample numbers to times.
|
python
|
{
"resource": ""
}
|
q277459
|
adobe_glyph_values
|
test
|
def adobe_glyph_values():
"""return the list of glyph names and their unicode values"""
lines = string.split( adobe_glyph_list, '\n' )
glyphs = []
values = []
for line in lines:
if line:
fields = string.split( line, ';' )
# print fields[1] + ' - ' + fields[0]
|
python
|
{
"resource": ""
}
|
q277460
|
filter_glyph_names
|
test
|
def filter_glyph_names( alist, filter ):
"""filter `alist' by taking _out_ all glyph names that are in `filter'"""
count = 0
extras = []
for name in
|
python
|
{
"resource": ""
}
|
q277461
|
dump_encoding
|
test
|
def dump_encoding( file, encoding_name, encoding_list ):
"""dump a given encoding"""
write = file.write
write( " /* the following are indices into the SID name table */\n" )
write( " static const unsigned short " + encoding_name +
"[" + repr( len( encoding_list ) ) + "] =\n" )
write( " {\n" )
line
|
python
|
{
"resource": ""
}
|
q277462
|
dump_array
|
test
|
def dump_array( the_array, write, array_name ):
"""dumps a given encoding"""
write( " static const unsigned char " + array_name +
"[" + repr( len( the_array ) ) + "L] =\n" )
write( " {\n" )
|
python
|
{
"resource": ""
}
|
q277463
|
main
|
test
|
def main():
"""main program body"""
if len( sys.argv ) != 2:
print __doc__ % sys.argv[0]
sys.exit( 1 )
file = open( sys.argv[1], "w\n" )
write = file.write
count_sid = len( sid_standard_names )
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
base_list = mac_extras + sid_standard_names
write( "/***************************************************************************/\n" )
write( "/* */\n" )
write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) )
write( "/* */\n" )
write( "/* PostScript glyph names. */\n" )
write( "/* */\n" )
write( "/* Copyright 2005, 2008, 2011 by */\n" )
write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" )
write( "/* */\n" )
write( "/* This file is part of the FreeType project, and may only be used, */\n" )
write( "/* modified, and distributed under the terms of the FreeType project */\n" )
write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" )
write( "/* this file you indicate that you have read the license and */\n" )
write( "/* understand and accept it fully. */\n" )
write( "/* */\n" )
write( "/***************************************************************************/\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
dict = StringNode( "", 0 )
for g in range( len( agl_glyphs ) ):
dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
dict = dict.optimize()
dict_len = dict.locate( 0 )
dict_array = dict.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
dump_array( dict_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
|
python
|
{
"resource": ""
}
|
q277464
|
file_exists
|
test
|
def file_exists( pathname ):
"""checks that a given file exists"""
result = 1
try:
file = open(
|
python
|
{
"resource": ""
}
|
q277465
|
make_file_list
|
test
|
def make_file_list( args = None ):
"""builds a list of input files from command-line arguments"""
file_list = []
# sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
if not args:
args = sys.argv[1 :]
for pathname in args:
if string.find( pathname, '*' ) >= 0:
newpath = glob.glob( pathname )
newpath.sort() # sort files -- this is important because
# of the order of files
|
python
|
{
"resource": ""
}
|
q277466
|
parse_hosted_zone
|
test
|
def parse_hosted_zone(e_zone, connection):
"""
This is a common parser that allows the passing of any valid HostedZone
tag. It will spit out the appropriate HostedZone object for the tag.
:param lxml.etree._Element e_zone: The root node of the etree parsed
response from the API.
:param Route53Connection connection: The connection instance used to
query the API.
:rtype: HostedZone
:returns: An instantiated HostedZone object.
"""
# This dict will be used to instantiate a HostedZone instance to yield.
kwargs = {}
# Within HostedZone tags are a number of sub-tags that include info
#
|
python
|
{
"resource": ""
}
|
q277467
|
parse_delegation_set
|
test
|
def parse_delegation_set(zone, e_delegation_set):
"""
Parses a DelegationSet tag. These often accompany HostedZone tags in
responses like CreateHostedZone and GetHostedZone.
:param HostedZone zone: An existing HostedZone instance to populate.
:param lxml.etree._Element e_delegation_set: A DelegationSet element.
|
python
|
{
"resource": ""
}
|
q277468
|
MetadataBlock.writeblocks
|
test
|
def writeblocks(blocks):
"""Render metadata block as a byte string."""
data = []
codes = [[block.code, block.write()] for block in blocks]
codes[-1][0] |= 128
|
python
|
{
"resource": ""
}
|
q277469
|
MetadataBlock.group_padding
|
test
|
def group_padding(blocks):
"""Consolidate FLAC padding metadata blocks.
The overall size of the rendered blocks does not change, so
this adds several bytes of padding for each merged block.
"""
paddings = [b for b in blocks
|
python
|
{
"resource": ""
}
|
q277470
|
FLAC.delete
|
test
|
def delete(self, filename=None):
"""Remove Vorbis comments from a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
for s in list(self.metadata_blocks):
|
python
|
{
"resource": ""
}
|
q277471
|
FLAC.save
|
test
|
def save(self, filename=None, deleteid3=False):
"""Save metadata blocks to a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
f = open(filename, 'rb+')
try:
# Ensure we've got padding at the end, and only at the end.
# If adding makes it too large, we'll scale it down later.
self.metadata_blocks.append(Padding(b'\x00' * 1020))
MetadataBlock.group_padding(self.metadata_blocks)
header = self.__check_header(f)
# "fLaC" and maybe ID3
available = self.__find_audio_offset(f) - header
data = MetadataBlock.writeblocks(self.metadata_blocks)
# Delete ID3v2
if deleteid3 and header > 4:
available += header - 4
header = 4
if
|
python
|
{
"resource": ""
}
|
q277472
|
parse_rrset_alias
|
test
|
def parse_rrset_alias(e_alias):
"""
Parses an Alias tag beneath a ResourceRecordSet, spitting out the two values
found within. This is specific to A records that are set to Alias.
:param lxml.etree._Element e_alias: An
|
python
|
{
"resource": ""
}
|
q277473
|
parse_rrset_record_values
|
test
|
def parse_rrset_record_values(e_resource_records):
"""
Used to parse the various Values from the ResourceRecords tags on
most rrset types.
:param lxml.etree._Element e_resource_records: A ResourceRecords tag
beneath a ResourceRecordSet.
:rtype: list
:returns: A list of resource record strings.
"""
|
python
|
{
"resource": ""
}
|
q277474
|
parse_rrset
|
test
|
def parse_rrset(e_rrset, connection, zone_id):
"""
This is a parser that allows the passing of any valid ResourceRecordSet
tag. It will spit out the appropriate ResourceRecordSet object for the tag.
:param lxml.etree._Element e_rrset: The root node of the etree parsed
response from the API.
:param Route53Connection connection: The connection instance used to
query the API.
:param str zone_id: The zone ID of the HostedZone these rrsets belong to.
:rtype: ResourceRecordSet
:returns: An instantiated ResourceRecordSet object.
"""
# This dict will be used to instantiate a ResourceRecordSet instance to yield.
kwargs = {
'connection': connection,
'zone_id': zone_id,
}
rrset_type = None
for e_field in e_rrset:
# Cheesy way to strip off the namespace.
tag_name = e_field.tag.split('}')[1]
field_text = e_field.text
if tag_name == 'Type':
# Need to store this to determine which ResourceRecordSet
# subclass to instantiate.
rrset_type = field_text
continue
elif tag_name == 'AliasTarget':
# A records have some special field values we need.
alias_hosted_zone_id, alias_dns_name = parse_rrset_alias(e_field)
|
python
|
{
"resource": ""
}
|
q277475
|
HostedZone.delete
|
test
|
def delete(self, force=False):
"""
Deletes this hosted zone. After this method is run, you won't be able
to add records, or do anything else with the zone. You'd need to
re-create it, as zones are read-only after creation.
:keyword bool force: If ``True``, delete the
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it
means nuking all associated record sets. If ``False``, an
exception is raised if this
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
has record sets.
:rtype: dict
:returns: A dict of change info, which contains some details about
the request.
"""
self._halt_if_already_deleted()
if force:
# Forcing deletion by cleaning up all record sets first. We'll
# do it all in one change set.
cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id)
for rrset in self.record_sets:
# You can delete a HostedZone if
|
python
|
{
"resource": ""
}
|
q277476
|
HostedZone._add_record
|
test
|
def _add_record(self, record_set_class, name, values, ttl=60, weight=None,
region=None,set_identifier=None, alias_hosted_zone_id=None,
alias_dns_name=None):
"""
Convenience method for creating ResourceRecordSets. Most of the calls
are basically the same, this saves on repetition.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created ResourceRecordSet sub-class
instance.
"""
self._halt_if_already_deleted()
rrset_kwargs = dict(
connection=self.connection,
zone_id=self.id,
name=name,
ttl=ttl,
records=values,
weight=weight,
region=region,
set_identifier=set_identifier,
)
|
python
|
{
"resource": ""
}
|
q277477
|
HostedZone.create_a_record
|
test
|
def create_a_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None, alias_hosted_zone_id=None,
alias_dns_name=None):
"""
Creates and returns an A record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:keyword str alias_hosted_zone_id: Alias A records have this specified.
|
python
|
{
"resource": ""
}
|
q277478
|
HostedZone.create_aaaa_record
|
test
|
def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates an AAAA record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
|
python
|
{
"resource": ""
}
|
q277479
|
HostedZone.create_cname_record
|
test
|
def create_cname_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates a CNAME record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
|
python
|
{
"resource": ""
}
|
q277480
|
HostedZone.create_mx_record
|
test
|
def create_mx_record(self, name, values, ttl=60):
"""
Creates a MX record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
|
python
|
{
"resource": ""
}
|
q277481
|
HostedZone.create_ns_record
|
test
|
def create_ns_record(self, name, values, ttl=60):
"""
Creates a NS record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
|
python
|
{
"resource": ""
}
|
q277482
|
HostedZone.create_ptr_record
|
test
|
def create_ptr_record(self, name, values, ttl=60):
"""
Creates a PTR record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
|
python
|
{
"resource": ""
}
|
q277483
|
HostedZone.create_spf_record
|
test
|
def create_spf_record(self, name, values, ttl=60):
"""
Creates a SPF record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
|
python
|
{
"resource": ""
}
|
q277484
|
HostedZone.create_srv_record
|
test
|
def create_srv_record(self, name, values, ttl=60):
"""
Creates a SRV record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
|
python
|
{
"resource": ""
}
|
q277485
|
HostedZone.create_txt_record
|
test
|
def create_txt_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates a TXT record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
|
python
|
{
"resource": ""
}
|
q277486
|
EasyID3.RegisterTXXXKey
|
test
|
def RegisterTXXXKey(cls, key, desc):
"""Register a user-defined text frame key.
Some ID3 tags are stored in TXXX frames, which allow a
freeform 'description' which acts as a subkey,
e.g. TXXX:BARCODE.::
EasyID3.RegisterTXXXKey('barcode', 'BARCODE').
"""
frameid = "TXXX:" + desc
def getter(id3, key):
return list(id3[frameid])
def setter(id3, key, value):
try:
frame = id3[frameid]
except KeyError:
enc = 0
# Store 8859-1 if we can, per MusicBrainz spec.
try:
for v in value:
|
python
|
{
"resource": ""
}
|
q277487
|
get_change_values
|
test
|
def get_change_values(change):
"""
In the case of deletions, we pull the change values for the XML request
from the ResourceRecordSet._initial_vals dict, since we want the original
values. For creations, we pull from the attributes on ResourceRecordSet.
Since we're dealing with attributes vs. dict key/vals, we'll abstract
this part away here and just always pass a dict to write_change.
:rtype: dict
:returns: A dict of change data, used by :py:func:`write_change` to
write the change request XML.
"""
action, rrset = change
if action == 'CREATE':
# For creations, we want the current values, since they don't need to
# match an
|
python
|
{
"resource": ""
}
|
q277488
|
write_change
|
test
|
def write_change(change):
"""
Creates an XML element for the change.
:param tuple change: A change tuple from a ChangeSet. Comes in the form
of ``(action, rrset)``.
:rtype: lxml.etree._Element
:returns: A fully baked Change tag.
"""
action, rrset = change
change_vals = get_change_values(change)
e_change = etree.Element("Change")
e_action = etree.SubElement(e_change, "Action")
e_action.text = action
e_rrset = etree.SubElement(e_change, "ResourceRecordSet")
e_name = etree.SubElement(e_rrset, "Name")
e_name.text = change_vals['name']
e_type = etree.SubElement(e_rrset, "Type")
e_type.text = rrset.rrset_type
if change_vals.get('set_identifier'):
e_set_id = etree.SubElement(e_rrset, "SetIdentifier")
e_set_id.text = change_vals['set_identifier']
if change_vals.get('weight'):
e_weight = etree.SubElement(e_rrset, "Weight")
e_weight.text = change_vals['weight']
if change_vals.get('alias_hosted_zone_id') or change_vals.get('alias_dns_name'):
e_alias_target = etree.SubElement(e_rrset, "AliasTarget")
e_hosted_zone_id = etree.SubElement(e_alias_target, "HostedZoneId")
e_hosted_zone_id.text
|
python
|
{
"resource": ""
}
|
q277489
|
change_resource_record_set_writer
|
test
|
def change_resource_record_set_writer(connection, change_set, comment=None):
"""
Forms an XML string that we'll send to Route53 in order to change
record sets.
:param Route53Connection connection: The connection instance used to
query the API.
:param change_set.ChangeSet change_set: The ChangeSet object to create the
XML doc from.
:keyword str comment: An optional comment to go along with the request.
"""
e_root = etree.Element(
"ChangeResourceRecordSetsRequest",
xmlns=connection._xml_namespace
)
e_change_batch = etree.SubElement(e_root, "ChangeBatch")
if comment:
e_comment = etree.SubElement(e_change_batch, "Comment")
e_comment.text = comment
e_changes = etree.SubElement(e_change_batch, "Changes")
# Deletions need to come
|
python
|
{
"resource": ""
}
|
q277490
|
init_logs
|
test
|
def init_logs():
"""Initiate log file."""
start_time = dt.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M')
logname = os.path.join(os.path.expanduser("~") + "/nanoGUI_" + start_time + ".log")
handlers = [logging.FileHandler(logname)]
logging.basicConfig(
format='%(asctime)s %(message)s',
handlers=handlers,
level=logging.INFO)
|
python
|
{
"resource": ""
}
|
q277491
|
NavigationBar.alias_item
|
test
|
def alias_item(self, alias):
"""Gets an item by its alias."""
|
python
|
{
"resource": ""
}
|
q277492
|
freeze_dict
|
test
|
def freeze_dict(dict_):
"""Freezes ``dict`` into ``tuple``.
A typical usage is packing ``dict`` into hashable.
e.g.::
>>> freeze_dict({'a': 1, 'b': 2})
(('a', 1), ('b', 2))
"""
pairs
|
python
|
{
"resource": ""
}
|
q277493
|
join_html_attrs
|
test
|
def join_html_attrs(attrs):
"""Joins the map structure into HTML attributes.
The return value is a 2-tuple ``(template, ordered_values)``. It should be
passed into :class:`markupsafe.Markup` to prevent XSS attacks.
e.g.::
>>> join_html_attrs({'href': '/', 'data-active': 'true'})
('data-active="{0}" href="{1}"', ['true', '/'])
|
python
|
{
"resource": ""
}
|
q277494
|
Navigation.init_app
|
test
|
def init_app(self, app):
"""Initializes an app to work with this extension.
The app-context signals will be subscribed and the template context
will be initialized.
:param app: the :class:`flask.Flask` app instance.
"""
# connects app-level signals
|
python
|
{
"resource": ""
}
|
q277495
|
Navigation.initialize_bars
|
test
|
def initialize_bars(self, sender=None, **kwargs):
"""Calls the initializers of all bound navigation bars."""
for bar in self.bars.values():
|
python
|
{
"resource": ""
}
|
q277496
|
Navigation.bind_bar
|
test
|
def bind_bar(self, sender=None, **kwargs):
"""Binds a navigation bar into this extension instance."""
|
python
|
{
"resource": ""
}
|
q277497
|
Item.args
|
test
|
def args(self):
"""The arguments which will be passed to ``url_for``.
:type: :class:`dict`
"""
if self._args is None:
return {}
|
python
|
{
"resource": ""
}
|
q277498
|
Item.url
|
test
|
def url(self):
"""The final url of this navigation item.
By default, the value is generated by the :attr:`self.endpoint` and
:attr:`self.args`.
.. note::
The :attr:`url` property require the app context without a provided
|
python
|
{
"resource": ""
}
|
q277499
|
Item.is_current
|
test
|
def is_current(self):
"""``True`` if current request has same endpoint with the item.
The property should be used in a bound request context, or the
:class:`RuntimeError` may be raised.
"""
if not self.is_internal:
return
|
python
|
{
"resource": ""
}
|
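
Each row above pairs an identifier and a qualified function name with a (possibly truncated) code snippet, its language, and a `meta_information` dict. Below is a minimal sketch of iterating records with this schema, assuming the table was exported from a Hugging Face `datasets` dataset; the dataset name `example/code-snippets` is a placeholder, not the real identifier.

```python
# A minimal sketch, assuming the rows above come from a Hugging Face dataset;
# "example/code-snippets" is a hypothetical name, not the real identifier.
from datasets import load_dataset

ds = load_dataset("example/code-snippets", split="test")
for row in ds.select(range(3)):
    # Each record carries: _id, title, partition, text, language, meta_information.
    print(row["_id"], row["title"], row["language"])
    print(row["text"][:80])
```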