text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Convert RowsEvent to a dict
<END_TASK>
<USER_TASK:>
Description:
def _rows_event_to_dict(e, stream):
    """Convert a RowsEvent to a (rows, meta) pair.

    Args:
        e (pymysqlreplication.row_event.RowsEvent): the event
        stream (pymysqlreplication.BinLogStreamReader):
            the stream that yields event

    Returns:
        tuple: ``(rows, meta)`` where ``rows`` is a list of row dicts,
            each augmented with a ``'keys'`` dict of primary-key values,
            and ``meta`` describes the event (time, log position, schema,
            table, action).

    Raises:
        TypeError: if ``e`` is not an insert/update/delete rows event.
    """
    # primary_key may be a single column name or a list/tuple of names;
    # normalise to a tuple so it can always be iterated below.
    pk_cols = e.primary_key if isinstance(e.primary_key, (list, tuple)) \
        else (e.primary_key, )
    # NOTE: the old code also bound the matching blinker signal here but
    # never used it (on_binlog re-derives it from meta['action']); the
    # unused `sig` assignments have been removed.
    if isinstance(e, row_event.UpdateRowsEvent):
        action = 'update'
        row_converter = _convert_update_row
    elif isinstance(e, row_event.WriteRowsEvent):
        action = 'insert'
        row_converter = _convert_write_row
    elif isinstance(e, row_event.DeleteRowsEvent):
        action = 'delete'
        # delete events carry plain row values, same layout as writes
        row_converter = _convert_write_row
    else:
        # raise instead of `assert` so the check survives `python -O`
        raise TypeError('Invalid binlog event: %r' % type(e))
    meta = {
        'time': e.timestamp,
        'log_pos': stream.log_pos,
        'log_file': stream.log_file,
        'schema': e.schema,
        'table': e.table,
        'action': action,
    }
    rows = [row_converter(row) for row in e.rows]
    for row in rows:
        row['keys'] = {k: row['values'][k] for k in pk_cols}
    return rows, meta
<SYSTEM_TASK:>
Process on a binlog event
<END_TASK>
<USER_TASK:>
Description:
def on_binlog(event, stream):
    """ Process on a binlog event

    1. Convert event instance into a dict
    2. Send corresponding schema/table/signals

    Args:
        event (pymysqlreplication.row_event.RowsEvent): the event
    """
    rows, meta = _rows_event_to_dict(event, stream)
    table_name = '%s.%s' % (meta['schema'], meta['table'])
    # map the recorded action onto the matching blinker signal
    dispatch = {
        'insert': signals.rows_inserted,
        'update': signals.rows_updated,
        'delete': signals.rows_deleted,
    }
    try:
        sig = dispatch[meta['action']]
    except KeyError:
        raise RuntimeError('Invalid action "%s"' % meta['action'])
    sig.send(table_name, rows=rows, meta=meta)
<SYSTEM_TASK:>
Decorate a function, method or class to mark as deprecated.
<END_TASK>
<USER_TASK:>
Description:
def deprecated(msg, dep_version):
    """Decorate a function, method or class to mark as deprecated.

    Raise DeprecationWarning and add a deprecation notice to the docstring.

    Args:
        msg (str): explanation / replacement hint shown in the warning
            and appended to the docstring.
        dep_version (str): version in which the object became deprecated.

    Returns:
        callable: a decorator wrapping the target object.
    """
    def wrapper(func):
        docstring = func.__doc__ or ''
        docstring_msg = '.. deprecated:: {version} {msg}'.format(
            version=dep_version,
            msg=msg,
        )
        if docstring:
            # We don't know how far to indent this message
            # so instead we just dedent everything.
            string_list = docstring.splitlines()
            first_line = string_list[0]
            # BUG FIX: join with '\n' (not ''), otherwise the docstring
            # body collapses to a single line and textwrap.dedent can
            # never find a common indentation margin to strip.
            remaining = textwrap.dedent('\n'.join(string_list[1:]))
            docstring = '\n'.join([
                first_line,
                remaining,
                '',
                docstring_msg,
            ])
        else:
            docstring = docstring_msg
        func.__doc__ = docstring

        @wraps(func)
        def inner(*args, **kwargs):
            # stacklevel=2 points the warning at the caller, not here
            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return inner
    return wrapper
<SYSTEM_TASK:>
Generate MappedRanges for all mapped ranges.
<END_TASK>
<USER_TASK:>
Description:
def ranges(self, start=None, stop=None):
    """Generate MappedRanges for all mapped ranges.

    Args:
        start: lower bound of the window to report (None = unbounded left).
        stop: upper bound of the window (None = unbounded right).

    Yields:
        MappedRange: (start, stop, value) for every contiguous mapped
        sub-range intersecting the requested window; unmapped gaps
        (value is NOT_SET) are skipped.
    """
    _check_start_stop(start, stop)
    # index of the first stored key strictly inside the window
    start_loc = self._bisect_right(start)
    if stop is None:
        stop_loc = len(self._keys)
    else:
        stop_loc = self._bisect_left(stop)
    # value in effect at `start` (i.e. of the range `start` falls into)
    start_val = self._values[start_loc - 1]
    # candidate boundaries: the window edges plus every stored key between
    candidate_keys = [start] + self._keys[start_loc:stop_loc] + [stop]
    candidate_values = [start_val] + self._values[start_loc:stop_loc]
    for i, value in enumerate(candidate_values):
        # NOT_SET marks an unmapped gap — emit nothing for it
        if value is not NOT_SET:
            start_key = candidate_keys[i]
            stop_key = candidate_keys[i + 1]
            yield MappedRange(start_key, stop_key, value)
<SYSTEM_TASK:>
Set the range from start to stop to value.
<END_TASK>
<USER_TASK:>
Description:
def set(self, value, start=None, stop=None):
    """Set the range from start to stop to value.

    Args:
        value: value to map over the range (NOT_SET clears it).
        start: inclusive lower bound (None = unbounded left).
        stop: exclusive upper bound (None = unbounded right).
    """
    _check_start_stop(start, stop)
    # start_index, stop_index will denote the sections we are replacing
    start_index = self._bisect_left(start)
    if start is not None:  # when start is None, start_index is 0 and
                           # there is no preceding range to merge with
        prev_value = self._values[start_index - 1]
        if prev_value == value:
            # We're setting a range where the left range has the same
            # value, so create one big range
            start_index -= 1
            start = self._keys[start_index]
    if stop is None:
        # open-ended on the right: everything after start is replaced
        new_keys = [start]
        new_values = [value]
        stop_index = len(self._keys)
    else:
        stop_index = self._bisect_right(stop)
        # value/key in effect just before `stop`; that range must resume
        # after the section we replace
        stop_value = self._values[stop_index - 1]
        stop_key = self._keys[stop_index - 1]
        if stop_key == stop and stop_value == value:
            # the following range already holds `value`: merge with it
            new_keys = [start]
            new_values = [value]
        else:
            new_keys = [start, stop]
            new_values = [value, stop_value]
    self._keys[start_index:stop_index] = new_keys
    self._values[start_index:stop_index] = new_values
<SYSTEM_TASK:>
Delete the range from start to stop from self.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, start=None, stop=None):
    """Delete the range from start to stop from self.

    Raises:
        KeyError: If part of the passed range isn't mapped.
    """
    _check_start_stop(start, stop)
    # locate the slice of stored sections covered by [start, stop)
    first = self._bisect_right(start) - 1
    last = len(self._keys) if stop is None else self._bisect_left(stop)
    # every covered section must currently be mapped, otherwise error out
    if any(section is NOT_SET for section in self._values[first:last]):
        raise KeyError((start, stop))
    # this is inefficient, we've already found the sub ranges
    self.set(NOT_SET, start=start, stop=stop)
<SYSTEM_TASK:>
Empty the range from start to stop.
<END_TASK>
<USER_TASK:>
Description:
def empty(self, start=None, stop=None):
    """Empty the range from start to stop.

    Like delete, but no Error is raised if the entire range isn't mapped.
    """
    # NOT_SET is the sentinel for "unmapped", so setting it clears the range
    self.set(NOT_SET, start=start, stop=stop)
<SYSTEM_TASK:>
Get the start key of the first range.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    """Get the start key of the first range.

    None if RangeMap is empty or unbounded to the left.
    """
    if self._values[0] is not NOT_SET:
        # unbounded to the left: the map starts at the first stored key
        return self._keys[0]
    try:
        # leading section is unmapped, so the first range starts at key[1]
        return self._keys[1]
    except IndexError:
        # empty map (or everything mapped to a single unbounded value)
        return None
<SYSTEM_TASK:>
Filter all known plugins by a whitelist specified. If the whitelist is
<END_TASK>
<USER_TASK:>
Description:
def get_tools(whitelist, known_plugins):
"""
Filter all known plugins by a whitelist specified. If the whitelist is
empty, default to all plugins.
""" |
def getpath(c):
return "%s:%s" % (c.__module__, c.__class__.__name__)
tools = [x for x in known_plugins if getpath(x) in whitelist]
if not tools:
if whitelist:
raise UnknownTools(map(getpath, known_plugins))
tools = known_plugins
return tools |
<SYSTEM_TASK:>
Init the config fle.
<END_TASK>
<USER_TASK:>
Description:
def init(directory):
    """Init the config file.

    Interactively prompts for a username, password and log directory,
    validates the log directory exists, then writes the settings to
    ``v2ex_config.json`` inside ``directory``.
    """
    username = click.prompt("Input your username")
    # password is typed twice (confirmation) and never echoed
    password = click.prompt("Input your password", hide_input=True,
                            confirmation_prompt=True)
    log_directory = click.prompt("Input your log directory")
    if not path.exists(log_directory):
        sys.exit("Invalid log directory, please have a check.")
    config_file_path = path.join(directory, 'v2ex_config.json')
    config = {
        "username": username,
        "password": password,
        "log_directory": path.abspath(log_directory)
    }
    with open(config_file_path, 'w') as f:
        json.dump(config, f)
    click.echo("Init the config file at: {0}".format(config_file_path))
<SYSTEM_TASK:>
How long you have kept signing in.
<END_TASK>
<USER_TASK:>
Description:
def last(conf):
    """How long you have kept signing in.

    Logs in with the configured credentials and prints the streak; any
    config or credential problem is reported instead of raised.
    """
    try:
        client = V2ex(conf.config)
        client.login()
        click.echo(client.get_last())
    except KeyError:
        # config dict is missing a required entry
        click.echo('Keyerror, please check your config file.')
    except IndexError:
        # login page scraping failed, usually wrong credentials
        click.echo('Please check your username and password.')
<SYSTEM_TASK:>
Return a generator yielding uploads referenced in the given text.
<END_TASK>
<USER_TASK:>
Description:
def get_uploads(text):
    """
    Return a generator yielding uploads referenced in the given text.

    Args:
        text (str): text to scan for ``UPLOAD_RE`` references.

    Yields:
        FileUpload: each upload whose slug appears in the text; slugs
        with no matching FileUpload row are silently skipped.
    """
    # (removed a dead `uploads = []` local that was never used)
    for match in UPLOAD_RE.finditer(text):
        try:
            upload = FileUpload.objects.get(slug=match.group(1))
        except FileUpload.DoesNotExist:
            # unknown slug: ignore the reference rather than fail
            continue
        yield upload
<SYSTEM_TASK:>
Accept an re match object resulting from an ``UPLOAD_RE`` match
<END_TASK>
<USER_TASK:>
Description:
def parse_match(match):
    """
    Accept an re match object resulting from an ``UPLOAD_RE`` match
    and return a two-tuple where the first element is the
    corresponding ``FileUpload`` and the second is a dictionary of the
    key=value options.

    If there is no ``FileUpload`` object corresponding to the match,
    the first element of the returned tuple is None.
    """
    slug = match.group(1)
    raw_options = match.group(2)
    try:
        upload = FileUpload.objects.get(slug=slug)
    except FileUpload.DoesNotExist:
        # keep going with None so callers can decide how to handle it
        upload = None
    return (upload, parse_options(raw_options))
<SYSTEM_TASK:>
Builds a standard 52 card French deck of Card instances.
<END_TASK>
<USER_TASK:>
Description:
def build(self, jokers=False, num_jokers=0):
    """
    Builds a standard 52 card French deck of Card instances.

    :arg bool jokers:
        Whether or not to include jokers in the deck.
    :arg int num_jokers:
        The number of jokers to include.
    """
    # fall back to the deck's configured joker settings when the
    # arguments are falsy
    if not jokers:
        jokers = self.jokers
    if not num_jokers:
        num_jokers = self.num_jokers
    self.decks_used += 1
    self.cards += build_cards(jokers, num_jokers)
<SYSTEM_TASK:>
Returns a list of cards, which are removed from the deck.
<END_TASK>
<USER_TASK:>
Description:
def deal(self, num=1, rebuild=False, shuffle=False, end=TOP):
    """
    Returns a list of cards, which are removed from the deck.

    :arg int num:
        The number of cards to deal.
    :arg bool rebuild:
        Whether or not to rebuild the deck when cards run out.
    :arg bool shuffle:
        Whether or not to shuffle on rebuild.
    :arg str end:
        The end of the ``Stack`` to add the cards to. Can be ``TOP`` ("top")
        or ``BOTTOM`` ("bottom").
    :returns:
        A given number of cards from the deck.
    """
    _num = num
    rebuild = rebuild or self.rebuild
    re_shuffle = shuffle or self.re_shuffle
    self_size = self.size
    # when rebuilding we can always satisfy `num`; otherwise we can deal
    # at most the cards currently in the deck
    if rebuild or num <= self_size:
        dealt_cards = [None] * num
    else:
        dealt_cards = [None] * self_size
    # resolve the pop function once instead of rebuilding the dict every
    # iteration; an invalid `end` now fails fast with a KeyError.
    # (self.build() extends self.cards in place, so the bound method
    # stays valid after a rebuild.)
    deal_from = {TOP: self.cards.pop, BOTTOM: self.cards.popleft}[end]
    while num > 0:
        n = _num - num
        try:
            dealt_cards[n] = deal_from()
            num -= 1
        except IndexError:
            # deck ran out of cards mid-deal.
            # BUG FIX: the bare `except:` is narrowed to IndexError, and
            # the re-shuffle is nested under the rebuild — previously
            # `else: break` bound to `if re_shuffle`, so an exhausted
            # deck with shuffle=True and rebuild=False looped forever.
            if self.size == 0:
                if rebuild:
                    self.build()
                    if re_shuffle:
                        self.shuffle()
                else:
                    break
    return Stack(cards=dealt_cards)
<SYSTEM_TASK:>
A helper method to read of bytes from a socket to a maximum length
<END_TASK>
<USER_TASK:>
Description:
def socket_recvall(socket, length, bufsize=4096):
    """A helper method to read bytes from a socket up to a maximum length.

    :param socket: the socket (any object with ``recv``) to read from
    :param int length: the number of bytes to wait for
    :param int bufsize: maximum chunk size passed to each ``recv`` call
    :returns: the bytes received; may be shorter than ``length`` if the
        peer closes the connection first
    """
    data = b""
    while len(data) < length:
        chunk = socket.recv(bufsize)
        if not chunk:
            # BUG FIX: recv() returning b"" means the peer closed the
            # connection; the old code would spin forever in that case.
            break
        data += chunk
    return data
<SYSTEM_TASK:>
Sends a message, but does not return a response
<END_TASK>
<USER_TASK:>
Description:
def send(self, message):
    """Sends a message, but does not return a response

    :returns: None - can't receive a response over UDP
    """
    # fire-and-forget datagram; UDP gives us no reply channel
    payload = message.SerializeToString()
    self.socket.sendto(payload, self.address)
    return None
<SYSTEM_TASK:>
Connects to the given host
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
    """Connects to the given host.

    Opens a TCP connection to ``self.address`` honouring ``self.timeout``
    and stores the resulting socket on the instance.
    """
    self.socket = socket.create_connection(self.address, self.timeout)
<SYSTEM_TASK:>
Sends a message to a Riemann server and returns it's response
<END_TASK>
<USER_TASK:>
Description:
def send(self, message):
    """Sends a message to a Riemann server and returns it's response

    :param message: The message to send to the Riemann server
    :returns: The response message from Riemann
    :raises RiemannError: if the server returns an error
    """
    # wire format: 4-byte big-endian length prefix, then the payload
    payload = message.SerializeToString()
    self.socket.sendall(struct.pack('!I', len(payload)) + payload)
    # the response is framed the same way: read its length, then body
    (length,) = struct.unpack('!I', self.socket.recv(4))
    response = riemann_client.riemann_pb2.Msg()
    response.ParseFromString(socket_recvall(self.socket, length))
    if not response.ok:
        raise RiemannError(response.error)
    return response
<SYSTEM_TASK:>
Adds a message to the list, returning a fake 'ok' response
<END_TASK>
<USER_TASK:>
Description:
def send(self, message):
    """Adds a message to the list, returning a fake 'ok' response

    :returns: A response message with ``ok = True``
    """
    # record every event carried by the message for later inspection
    self.events.extend(message.events)
    reply = riemann_client.riemann_pb2.Msg()
    reply.ok = True
    return reply
<SYSTEM_TASK:>
Returns True if the key exists
<END_TASK>
<USER_TASK:>
Description:
def exists(self, callback=None):
    """
    Returns True if the key exists

    :rtype: bool
    :return: Whether the key exists on S3

    Asynchronous: the boolean is delivered via ``callback``, not
    returned directly.
    """
    def existence_tested(response):
        # bucket.lookup yields the Key object (truthy) or None (missing)
        if callable(callback):
            callback(bool(response))
    self.bucket.lookup(self.name, callback=existence_tested)
<SYSTEM_TASK:>
Retrieve an object from S3 using the name of the Key object as the
<END_TASK>
<USER_TASK:>
Description:
def get_contents_to_filename(self, filename, headers=None,
                             cb=None, num_cb=10,
                             torrent=False,
                             version_id=None,
                             res_download_handler=None,
                             response_headers=None, callback=None):
    """
    Retrieve an object from S3 using the name of the Key object as the
    key in S3. Store contents of the object to a file named by 'filename'.
    See get_contents_to_file method for details about the
    parameters.

    :type filename: string
    :param filename: The filename of where to put the file contents

    :type headers: dict
    :param headers: Any additional headers to send in the request

    :type cb: function
    :param cb: a callback function that will be called to report
               progress on the upload. The callback should accept
               two integer parameters, the first representing the
               number of bytes that have been successfully
               transmitted to S3 and the second representing the
               size of the to be transmitted object.

    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with
                   the cb parameter this parameter determines the
                   granularity of the callback by defining
                   the maximum number of times the callback will
                   be called during the file transfer.

    :type torrent: bool
    :param torrent: If True, returns the contents of a torrent file
                    as a string.

    :type res_download_handler: ResumableDownloadHandler
    :param res_download_handler: If provided, this handler will
                                 perform the download.

    :type response_headers: dict
    :param response_headers: A dictionary containing HTTP headers/values
                             that will override any headers associated with
                             the stored object in the response.
                             See http://goo.gl/EWOPb for details.
    """
    fp = open(filename, 'wb')

    def got_contents_to_filename(response):
        fp.close()
        # if last_modified date was sent from s3, try to set file's timestamp
        if self.last_modified is not None:  # idiom fix: was `!= None`
            try:
                modified_tuple = rfc822.parsedate_tz(self.last_modified)
                modified_stamp = int(rfc822.mktime_tz(modified_tuple))
                os.utime(fp.name, (modified_stamp, modified_stamp))
            except Exception:
                # best effort: a malformed date must not fail the download
                pass
        if callable(callback):
            callback(response)

    self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
                              version_id=version_id,
                              res_download_handler=res_download_handler,
                              response_headers=response_headers,
                              callback=got_contents_to_filename)
<SYSTEM_TASK:>
Delete a message from a queue.
<END_TASK>
<USER_TASK:>
Description:
def delete_message(self, queue, message):
    """
    Delete a message from a queue.

    :type queue: A :class:`boto.sqs.queue.Queue` object
    :param queue: The Queue from which messages are read.

    :type message: A :class:`boto.sqs.message.Message` object
    :param message: The Message to be deleted

    :rtype: bool
    :return: True if successful, False otherwise.
    """
    # SQS addresses deletions by receipt handle, not by message id
    return self.get_status('DeleteMessage',
                           {'ReceiptHandle': message.receipt_handle},
                           queue.id)
<SYSTEM_TASK:>
Delete a message from a queue, given a receipt handle.
<END_TASK>
<USER_TASK:>
Description:
def delete_message_from_handle(self, queue, receipt_handle):
    """
    Delete a message from a queue, given a receipt handle.

    :type queue: A :class:`boto.sqs.queue.Queue` object
    :param queue: The Queue from which messages are read.

    :type receipt_handle: str
    :param receipt_handle: The receipt handle for the message

    :rtype: bool
    :return: True if successful, False otherwise.
    """
    # same call as delete_message, but with a caller-supplied handle
    return self.get_status('DeleteMessage',
                           {'ReceiptHandle': receipt_handle},
                           queue.id)
<SYSTEM_TASK:>
Delivers up to 10 messages to a queue in a single request.
<END_TASK>
<USER_TASK:>
Description:
def send_message_batch(self, queue, messages):
    """
    Delivers up to 10 messages to a queue in a single request.

    :type queue: A :class:`boto.sqs.queue.Queue` object.
    :param queue: The Queue to which the messages will be written.

    :type messages: List of lists.
    :param messages: A list of lists or tuples. Each inner
                     tuple represents a single message to be written
                     and consists of and ID (string) that must be unique
                     within the list of messages, the message body itself
                     which can be a maximum of 64K in length, and an
                     integer which represents the delay time (in seconds)
                     for the message (0-900) before the message will
                     be delivered to the queue.
    """
    params = {}
    # SQS batch entries are 1-indexed: SendMessageBatchRequestEntry.N.*
    for i, msg in enumerate(messages, start=1):
        prefix = 'SendMessageBatchRequestEntry.%i.' % i
        params[prefix + 'Id'] = msg[0]
        params[prefix + 'MessageBody'] = msg[1]
        params[prefix + 'DelaySeconds'] = msg[2]
    return self.get_object('SendMessageBatch', params, BatchResults,
                           queue.id, verb='POST')
<SYSTEM_TASK:>
Retrieves the queue with the given name, or ``None`` if no match
<END_TASK>
<USER_TASK:>
Description:
def get_queue(self, queue_name):
    """
    Retrieves the queue with the given name, or ``None`` if no match
    was found.

    :param str queue_name: The name of the queue to retrieve.
    :rtype: :py:class:`boto.sqs.queue.Queue` or ``None``
    :returns: The requested queue, or ``None`` if no match was found.
    """
    # the listing is prefix-filtered server-side; pick the queue whose
    # URL actually ends with the requested name
    matches = (q for q in self.get_all_queues(queue_name)
               if q.url.endswith(queue_name))
    return next(matches, None)
<SYSTEM_TASK:>
Match template and find exactly one match in the Image using specified features.
<END_TASK>
<USER_TASK:>
Description:
def match_one(template, image, options=None):
    """
    Match template and find exactly one match in the Image using specified features.

    :param template: Template Image
    :param image: Search Image
    :param options: Options include
        - features: List of options for each feature
    :return: (Box, Score) Bounding box of the matched object, Heatmap value
    """
    heatmap, scale = multi_feat_match(template, image, options)
    # the heatmap is a distance map, so the best match is at its minimum
    min_val, _, min_loc, _ = cv.minMaxLoc(heatmap)
    # minMaxLoc works in feature space; rescale back to image pixels
    x, y = (scale * coord for coord in min_loc)
    height, width = template.shape[:2]
    return Box(x, y, width, height), min_val
<SYSTEM_TASK:>
Match template and image by extracting specified feature
<END_TASK>
<USER_TASK:>
Description:
def feature_match(template, image, options=None):
    """
    Match template and image by extracting specified feature

    :param template: Template image
    :param image: Search image
    :param options: Options include
        - feature: Feature extractor to use. Default is 'rgb'. Available options are:
            'hog', 'lab', 'rgb', 'gray'
    :return: Heatmap
    """
    op = _DEF_TM_OPT.copy()
    if options:
        op.update(options)
    extractor = fe.factory(op['feature'])
    tmpl_feat = extractor(template, op)
    img_feat = extractor(image, op)
    # features may be spatially downsampled; remember the scale so the
    # caller can map heatmap coordinates back to image pixels
    scale = image.shape[0] / img_feat.shape[0]
    return match_template(tmpl_feat, img_feat, op), scale
<SYSTEM_TASK:>
Multi channel template matching using simple correlation distance
<END_TASK>
<USER_TASK:>
Description:
def match_template(template, image, options=None):
    """
    Multi channel template matching using simple correlation distance

    :param template: Template image
    :param image: Search image
    :param options: Other options:
        - distance: Distance measure to use. Default: 'correlation'
        - normalize: Heatmap values will be in the range of 0 to 1. Default: True
        - retain_size: Whether to retain the same size as input image. Default: True
    :return: Heatmap
    """
    # If the input has at most 3 channels, use the faster OpenCV matching.
    # BUG FIX: a 2-D (single channel) image used to raise IndexError on
    # image.shape[2]; treat it as the <=3 channel case as well.
    if image.ndim == 2 or (image.ndim == 3 and image.shape[2] <= 3):
        return match_template_opencv(template, image, options)
    op = _DEF_TM_OPT.copy()
    if options is not None:
        op.update(options)
    template = img_utils.gray3(template)
    image = img_utils.gray3(image)
    h, w, d = template.shape
    im_h, im_w = image.shape[:2]
    template_v = template.flatten()
    # NOTE(review): the scan stops at im_h - h / im_w - w, so the very
    # last alignment position is never evaluated — looks like an
    # off-by-one (OpenCV's output is one row/col larger); confirm before
    # changing, as downstream code may rely on this shape.
    heatmap = np.zeros((im_h - h, im_w - w))
    for col in range(0, im_w - w):
        for row in range(0, im_h - h):
            cropped_im = image[row:row + h, col:col + w, :]
            cropped_v = cropped_im.flatten()
            if op['distance'] == 'euclidean':
                heatmap[row, col] = scipy.spatial.distance.euclidean(template_v, cropped_v)
            elif op['distance'] == 'correlation':
                heatmap[row, col] = scipy.spatial.distance.correlation(template_v, cropped_v)
    # normalize heatmap values into [0, 1]
    if op['normalize']:
        heatmap /= heatmap.max()
    # pad to the input image's size, filling with the worst (max) value
    if op['retain_size']:
        hmap = np.ones(image.shape[:2]) * heatmap.max()
        h, w = heatmap.shape
        hmap[:h, :w] = heatmap
        heatmap = hmap
    return heatmap
<SYSTEM_TASK:>
Match template using OpenCV template matching implementation.
<END_TASK>
<USER_TASK:>
Description:
def match_template_opencv(template, image, options):
    """
    Match template using OpenCV template matching implementation.
    Limited by number of channels as maximum of 3.
    Suitable for direct RGB or Gray-scale matching

    :param options: Other options:
        - distance: Distance measure to use. (euclidean | correlation | ccoeff).
            Default: 'correlation'
        - normalize: Heatmap values will be in the range of 0 to 1. Default: True
        - retain_size: Whether to retain the same size as input image. Default: True
    :return: Heatmap
    """
    # if image has more than 3 channels, use own implementation
    if len(image.shape) > 3:
        return match_template(template, image, options)
    op = _DEF_TM_OPT.copy()
    if options is not None:
        op.update(options)
    # pick the OpenCV method from (distance, normalize); anything not
    # listed falls back to normalized cross-correlation
    method_table = {
        ('euclidean', True): cv.TM_SQDIFF_NORMED,
        ('euclidean', False): cv.TM_SQDIFF,
        ('ccoeff', True): cv.TM_CCOEFF_NORMED,
        ('ccoeff', False): cv.TM_CCOEFF,
        ('correlation', False): cv.TM_CCORR,
    }
    method = method_table.get((op['distance'], bool(op['normalize'])),
                              cv.TM_CCORR_NORMED)
    heatmap = cv.matchTemplate(image, template, method)
    # convert to a minimum-peak heatmap; SQDIFF variants already are
    if method not in (cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED):
        heatmap = heatmap.max() - heatmap
    if op['normalize']:
        heatmap /= heatmap.max()
    # pad to the input image's size, filling with the worst (max) value
    if op['retain_size']:
        padded = np.ones(image.shape[:2]) * heatmap.max()
        hm_h, hm_w = heatmap.shape
        padded[:hm_h, :hm_w] = heatmap
        heatmap = padded
    return heatmap
<SYSTEM_TASK:>
Get JSON from response if success, raise requests.HTTPError otherwise.
<END_TASK>
<USER_TASK:>
Description:
def resp_json(resp):
    """
    Get JSON from response if success, raise requests.HTTPError otherwise.

    Args:
        resp: requests.Response or flask.Response
    Returns:
        JSON value
    """
    if isinstance(resp, flask.Response):
        if 400 <= resp.status_code < 600:
            # build the most informative message we can from the body
            msg = resp.status
            try:
                body = loads(resp.data.decode("utf-8"))
                if isinstance(body, str):
                    msg = "%s, %s" % (resp.status, body)
                else:
                    msg = "%s %s, %s" % (
                        resp.status_code, body["error"], body["message"])
            except Exception:
                pass
            raise requests.HTTPError(msg, response=resp)
        return loads(resp.data.decode("utf-8"))
    try:
        resp.raise_for_status()
    except requests.HTTPError as ex:
        # the response may contains {"error": "", "message": ""}
        # append error and message to exception if possible
        try:
            body = resp.json()
            ex.args += (body["error"], body["message"])
        except (ValueError, KeyError):
            pass
        raise
    return resp.json()
<SYSTEM_TASK:>
Translates a dictionary of event attributes to an Event object
<END_TASK>
<USER_TASK:>
Description:
def create_event(data):
    """Translates a dictionary of event attributes to an Event object

    Note: ``'tags'`` and ``'attributes'`` are popped from ``data``.

    :param dict data: The attributes to be set on the event
    :returns: A protocol buffer ``Event`` object
    """
    event = riemann_client.riemann_pb2.Event()
    event.host = socket.gethostname()
    # tags and attributes need structured handling; remove them from data
    event.tags.extend(data.pop('tags', []))
    attributes = data.pop('attributes', {})
    for key, value in attributes.items():
        entry = event.attributes.add()
        entry.key = key
        entry.value = value
    # everything remaining maps directly onto Event fields, skipping Nones
    for name, value in data.items():
        if value is not None:
            setattr(event, name, value)
    return event
<SYSTEM_TASK:>
Sends multiple events to Riemann in a single message
<END_TASK>
<USER_TASK:>
Description:
def send_events(self, events):
    """Sends multiple events to Riemann in a single message

    :param events: A list or iterable of ``Event`` objects
    :returns: The response message from Riemann
    """
    # batch every event into one Msg before handing it to the transport
    message = riemann_client.riemann_pb2.Msg()
    for item in events:
        message.events.add().MergeFrom(item)
    return self.transport.send(message)
<SYSTEM_TASK:>
Sends multiple events in a single message
<END_TASK>
<USER_TASK:>
Description:
def events(self, *events):
    """Sends multiple events in a single message

    >>> client.events({'service': 'riemann-client', 'state': 'awesome'})

    :param \*events: event dictionaries for :py:func:`create_event`
    :returns: The response message from Riemann
    """
    # build Event objects lazily and batch them into one message
    return self.send_events(self.create_event(e) for e in events)
<SYSTEM_TASK:>
Translates an Event object to a dictionary of event attributes
<END_TASK>
<USER_TASK:>
Description:
def create_dict(event):
    """Translates an Event object to a dictionary of event attributes

    All attributes are included, so ``create_dict(create_event(input))``
    may return more attributes than were present in the input.

    :param event: A protocol buffer ``Event`` object
    :returns: A dictionary of event attributes
    """
    data = {}
    for descriptor, value in event.ListFields():
        name = descriptor.name
        if name == 'tags':
            # repeated scalar container -> plain list
            value = list(value)
        elif name == 'attributes':
            # repeated Attribute messages -> {key: value} dict
            value = {attr.key: attr.value for attr in value}
        data[name] = value
    return data
<SYSTEM_TASK:>
Adds multiple events to the queued message
<END_TASK>
<USER_TASK:>
Description:
def send_events(self, events):
    """Adds multiple events to the queued message

    :param events: an iterable of protocol buffer ``Event`` objects
    :returns: None - nothing has been sent to the Riemann server yet
    """
    # copy each event into the queued Msg; it is flushed elsewhere
    for event in events:
        self.queue.events.add().MergeFrom(event)
    return None
<SYSTEM_TASK:>
Returns a list of valid host globs for an SSL certificate.
<END_TASK>
<USER_TASK:>
Description:
def GetValidHostsForCert(cert):
    """Returns a list of valid host globs for an SSL certificate.

    Args:
      cert: A dictionary representing an SSL certificate.
    Returns:
      list: A list of valid host globs.
    """
    # prefer subjectAltName DNS entries; only fall back to the subject's
    # commonName fields when no subjectAltName is present at all
    if 'subjectAltName' in cert:
        return [entry[1] for entry in cert['subjectAltName']
                if entry[0].lower() == 'dns']
    return [field[0][1] for field in cert['subject']
            if field[0][0].lower() == 'commonname']
<SYSTEM_TASK:>
Validates that a given hostname is valid for an SSL certificate.
<END_TASK>
<USER_TASK:>
Description:
def ValidateCertificateHostname(cert, hostname):
    """Validates that a given hostname is valid for an SSL certificate.

    Args:
      cert: A dictionary representing an SSL certificate.
      hostname: The hostname to test.
    Returns:
      bool: Whether or not the hostname is valid for this certificate.
    """
    hosts = GetValidHostsForCert(cert)
    boto.log.debug(
        "validating server certificate: hostname=%s, certificate hosts=%s",
        hostname, hosts)
    for host in hosts:
        # Translate the certificate glob into a regex: dots become
        # literal, '*' matches a single label (no dots).
        # FIX: raw strings — the old non-raw '\.' relied on Python
        # keeping unknown escapes and now triggers a SyntaxWarning.
        host_re = host.replace('.', r'\.').replace('*', r'[^.]*')
        if re.search(r'^%s$' % (host_re,), hostname, re.I):
            return True
    return False
<SYSTEM_TASK:>
Adds the given list of ``Card`` instances to the top of the stack.
<END_TASK>
<USER_TASK:>
Description:
def add(self, cards, end=TOP):
    """
    Adds the given list of ``Card`` instances to the top of the stack.

    :arg cards:
        The cards to add to the ``Stack``. Can be a single ``Card``
        instance, or a ``list`` of cards.
    :arg str end:
        The end of the ``Stack`` to add the cards to. Can be ``TOP`` ("top")
        or ``BOTTOM`` ("bottom").
    """
    # BUG FIX: the bare `except:` clauses are narrowed to TypeError,
    # which is what adding/extending with a single non-iterable Card
    # raises; the old code also swallowed KeyboardInterrupt and real bugs.
    if end is TOP:
        try:
            self.cards += cards
        except TypeError:
            # `cards` is a single Card, not an iterable of them
            self.cards += [cards]
    elif end is BOTTOM:
        # NOTE(review): deque.extendleft inserts in reverse order —
        # confirm that reversal is intended for multi-card adds.
        try:
            self.cards.extendleft(cards)
        except TypeError:
            self.cards.extendleft([cards])
<SYSTEM_TASK:>
Returns a list of cards, which are removed from the Stack.
<END_TASK>
<USER_TASK:>
Description:
def deal(self, num=1, end=TOP):
    """
    Returns a list of cards, which are removed from the Stack.

    :arg int num:
        The number of cards to deal.
    :arg str end:
        Which end to deal from. Can be ``0`` (top) or ``1`` (bottom).
    :returns:
        The given number of cards from the stack.
    """
    self_size = self.size
    if not self_size:
        return Stack()
    # deal at most the number of cards currently in the stack
    if num > self_size:
        num = self_size
    dealt_cards = [None] * num
    # resolve the pop function once; an invalid `end` fails fast here
    deal_from = {TOP: self.cards.pop, BOTTOM: self.cards.popleft}[end]
    for n in xrange(num):
        try:
            dealt_cards[n] = deal_from()
        except IndexError:
            # BUG FIX: narrowed from a bare `except:`; only an empty
            # deque should end the deal early (defensive — num was
            # already clamped above).
            break
    return Stack(cards=dealt_cards)
<SYSTEM_TASK:>
Empties the stack, removing all cards from it, and returns them.
<END_TASK>
<USER_TASK:>
Description:
def empty(self, return_cards=False):
    """
    Empties the stack, removing all cards from it, and returns them.

    :arg bool return_cards:
        Whether or not to return the cards.
    :returns:
        If ``return_cards=True``, a list containing the cards removed
        from the Stack.
    """
    # snapshot before clearing so the removed cards can be handed back
    removed = list(self.cards)
    self.cards = []
    return removed if return_cards else None
<SYSTEM_TASK:>
Searches the stack for cards with a value, suit, name, or
<END_TASK>
<USER_TASK:>
Description:
def find(self, term, limit=0, sort=False, ranks=None):
    """
    Searches the stack for cards with a value, suit, name, or
    abbreviation matching the given argument, 'term'.

    :arg str term:
        The search term. Can be a card full name, value, suit,
        or abbreviation.
    :arg int limit:
        The number of items to retrieve for each term. ``0`` equals
        no limit.
    :arg bool sort:
        Whether or not to sort the results.
    :arg dict ranks:
        The rank dict to reference for sorting. If ``None``, it will
        default to ``DEFAULT_RANKS``.
    :returns:
        A list of stack indices for the cards matching the given terms,
        if found.
    """
    ranks = ranks or self.ranks
    if limit:
        found_indices = []
        for i, card in enumerate(self.cards):
            # stop as soon as we have gathered `limit` matches
            if len(found_indices) >= limit:
                break
            if check_term(card, term):
                found_indices.append(i)
    else:
        # no limit: collect every matching position
        found_indices = [i for i, card in enumerate(self.cards)
                         if check_term(card, term)]
    if sort:
        found_indices = sort_card_indices(self, found_indices, ranks)
    return found_indices
<SYSTEM_TASK:>
Get the specified cards from the stack.
<END_TASK>
<USER_TASK:>
Description:
def get_list(self, terms, limit=0, sort=False, ranks=None):
    """
    Get the specified cards from the stack.

    :arg terms:
        The search terms. Can be card full names, values, suits,
        abbreviations, or stack indices.
    :arg int limit:
        The number of items to retrieve for each term.
    :arg bool sort:
        Whether or not to sort the results, by poker ranks.
    :arg dict ranks:
        The rank dict to reference for sorting. If ``None``, it will
        default to ``DEFAULT_RANKS``.
    :returns:
        A list of the specified cards, if found. Matched cards are
        removed from the stack.
    """
    ranks = ranks or self.ranks
    got_cards = []
    try:
        # fast path: treat every term as a name/value/suit search
        indices = self.find_list(terms, limit=limit)
        got_cards = [self.cards[i] for i in indices if self.cards[i]
                     not in got_cards]
        self.cards = [v for i, v in enumerate(self.cards) if
                      i not in indices]
    except:
        # fallback: terms may mix stack indices with search terms.
        # NOTE(review): the bare excepts deliberately route any lookup
        # failure into the fallback, but they will also hide real bugs.
        indices = []
        for item in terms:
            try:
                # try the term as a direct stack indice first
                card = self.cards[item]
                if card not in got_cards:
                    got_cards.append(card)
                    indices.append(item)
            except:
                # not an indice: fall back to a term search
                indices += self.find(item, limit=limit)
                got_cards += [self.cards[i] for i in indices if
                              self.cards[i] not in got_cards]
        # remove every collected card from the stack
        self.cards = [v for i, v in enumerate(self.cards) if
                      i not in indices]
    if sort:
        got_cards = sort_cards(got_cards, ranks)
    return got_cards
<SYSTEM_TASK:>
Insert a given card into the stack at a given indice.
<END_TASK>
<USER_TASK:>
Description:
def insert(self, card, indice=-1):
    """
    Insert a given card into the stack at a given indice.

    :arg Card card:
        The card to insert into the stack.
    :arg int indice:
        Where to insert the given card. ``-1`` appends to the top (end);
        ``0`` prepends to the bottom (front).
    """
    self_size = len(self.cards)
    if indice in [0, -1]:
        if indice == -1:
            # top of the stack: append to the end of the deque
            self.cards.append(card)
        else:
            # bottom of the stack: push onto the front
            self.cards.appendleft(card)
    elif indice != self_size:
        # general case: split at the indice and rebuild around the card
        half_x, half_y = self.split(indice)
        self.cards = list(half_x.cards) + [card] + list(half_y.cards)
    # NOTE(review): when indice == len(self.cards) no branch runs and the
    # card is silently dropped — confirm whether that is intended.
<SYSTEM_TASK:>
Insert a list of given cards into the stack at a given indice.
<END_TASK>
<USER_TASK:>
Description:
def insert_list(self, cards, indice=-1):
    """
    Insert a list of given cards into the stack at a given indice.

    :arg list cards:
        The list of cards to insert into the stack.
    :arg int indice:
        Where to insert the given cards. ``-1`` appends to the top (end);
        ``0`` prepends to the bottom (front).
    """
    self_size = len(self.cards)
    if indice in [0, -1]:
        if indice == -1:
            self.cards += cards
        else:
            # NOTE(review): deque.extendleft inserts in reverse order —
            # confirm the reversal is intended when inserting at 0.
            self.cards.extendleft(cards)
    elif indice != self_size:
        # general case: split at the indice and rebuild around the cards
        half_x, half_y = self.split(indice)
        self.cards = list(half_x.cards) + list(cards) + list(half_y.cards)
    # NOTE(review): when indice == len(self.cards) no branch runs and the
    # cards are silently dropped — confirm whether that is intended.
<SYSTEM_TASK:>
Checks whether the stack is sorted.
<END_TASK>
<USER_TASK:>
Description:
def is_sorted(self, ranks=None):
    """
    Check whether the stack is currently sorted.

    :arg dict ranks:
        The rank dict to reference for checking. If ``None`` (or any
        falsy value), the stack's own ranks are used.
    :returns:
        Whether or not the cards are sorted.
    """
    if not ranks:
        ranks = self.ranks
    return check_sorted(self, ranks)
<SYSTEM_TASK:>
Shuffles the Stack.
<END_TASK>
<USER_TASK:>
Description:
def shuffle(self, times=1):
    """
    Shuffles the Stack.
    .. note::
        Shuffling large numbers of cards (100,000+) may take a while.
    :arg int times:
        The number of times to shuffle.
    """ |
    # In-place shuffle of the underlying card sequence, repeated
    # ``times`` times. ``xrange`` implies Python 2 (or a py2/py3
    # compatibility alias defined at module level -- confirm).
    for _ in xrange(times):
        random.shuffle(self.cards)
<SYSTEM_TASK:>
Sorts the stack, either by poker ranks, or big two ranks.
<END_TASK>
<USER_TASK:>
Description:
def sort(self, ranks=None):
    """
    Sort the stack in place, either by poker ranks or big two ranks.

    :arg dict ranks:
        The rank dict to reference for sorting. If ``None`` (or any
        falsy value), the stack's own ranks are used.
    :returns:
        The sorted cards.
    """
    if not ranks:
        ranks = self.ranks
    self.cards = sort_cards(self.cards, ranks)
<SYSTEM_TASK:>
Splits the Stack, either in half, or at the given indice, into two
<END_TASK>
<USER_TASK:>
Description:
def split(self, indice=None):
    """
    Split the Stack into two separate Stacks, either in half or at the
    given indice.

    :arg int indice:
        Optional. The indice to split the Stack at. Defaults to the
        middle of the ``Stack``.
    :returns:
        The two parts of the Stack, as separate Stack instances.
    """
    self_size = self.size
    if self_size <= 1:
        # Nothing to split: first stack holds everything, second is empty.
        return Stack(cards=self.cards), Stack()
    # A falsy indice (None or 0) means "cut in the middle".
    cut = indice if indice else self_size // 2
    return Stack(cards=self[0:cut]), Stack(cards=self[cut::])
<SYSTEM_TASK:>
Abort with suitable error response
<END_TASK>
<USER_TASK:>
Description:
def abort(code, error=None, message=None):
    """
    Abort with a suitable error response.

    Args:
        code (int): status code
        error (str): error symbol or flask.Response
        message (str): error message
    """
    if error is None:
        flask_abort(code)
    elif isinstance(error, Response):
        # Caller supplied a ready-made response: just force the code.
        error.status_code = code
        flask_abort(code, response=error)
    else:
        payload = {
            "status": code,
            "error": error,
            "message": message,
        }
        flask_abort(code, response=export(payload, code))
<SYSTEM_TASK:>
Create a suitable response
<END_TASK>
<USER_TASK:>
Description:
def export(rv, code=None, headers=None):
    """
    Create a suitable response for an action's return value.

    Args:
        rv: return value of action
        code: status code
        headers: response headers
    Returns:
        flask.Response
    """
    if isinstance(rv, ResponseBase):
        # Already a response object: let Flask merge code and headers.
        return make_response(rv, code, headers)
    status = 200 if code is None else code
    # Content negotiation picks the exporter; JSON is the fallback.
    mediatype = request.accept_mimetypes.best_match(
        exporters.keys(), default='application/json')
    return exporters[mediatype](rv, status, headers)
<SYSTEM_TASK:>
Get request data based on request.method
<END_TASK>
<USER_TASK:>
Description:
def get_request_data():
    """
    Get request data based on request.method.

    If method is GET or DELETE, get data from request.args.
    If method is POST, PATCH or PUT, get data from request.form or
    request.json. Any other method yields None.
    """
    method = request.method.lower()
    if method in ["get", "delete"]:
        return request.args
    elif method in ["post", "put", "patch"]:
        if request.mimetype == 'application/json':
            try:
                return request.get_json()
            except Exception:
                # Narrowed from a bare ``except``: only swallow parsing
                # failures, never SystemExit/KeyboardInterrupt.
                abort(400, "InvalidData", "invalid json content")
        else:
            return request.form
    else:
        return None
<SYSTEM_TASK:>
Get title of desc
<END_TASK>
<USER_TASK:>
Description:
def get_title(desc, default=None):
    """Return the title of *desc*: its first line, with leading ``#``
    and space characters and trailing spaces removed.

    A falsy *desc* (``None`` or empty string) yields *default*.
    """
    if not desc:
        return default
    # str.split always returns at least one element, so the first line
    # always exists (the old ``if not lines`` guard was dead code).
    first_line = desc.strip('\n').split('\n')[0]
    return first_line.lstrip('# ').rstrip(' ')
<SYSTEM_TASK:>
Make resource's method an action
<END_TASK>
<USER_TASK:>
Description:
def make_action(self, fn, schema_parser, meta):
    """
    Make resource's method an action
    Validate input, output by schema in meta.
    If no input schema, call fn without params.
    If no output schema, will not validate return value.
    Args:
        fn: resource's method
        schema_parser: for parsing schema in meta
        meta: meta data of the action
    """ |
    # Compile the validators once up front; the returned ``action``
    # closure reuses them for every request.
    validate_input = validate_output = None
    if "$input" in meta:
        with MarkKey("$input"):
            validate_input = schema_parser.parse(meta["$input"])
    if "$output" in meta:
        with MarkKey("$output"):
            validate_output = schema_parser.parse(meta["$output"])
    def action(data):
        # Invalid client data -> 400; invalid server output -> 500.
        if validate_input:
            try:
                data = validate_input(data)
            except Invalid as ex:
                return abort(400, "InvalidData", str(ex))
            if isinstance(data, dict):
                # dict input is spread as keyword arguments.
                rv = fn(**data)
            else:
                rv = fn(data)
        else:
            # No input schema: call the method without parameters.
            rv = fn()
        rv, status, headers = unpack(rv)
        if validate_output:
            try:
                rv = validate_output(rv)
            except Invalid as ex:
                return abort(500, "ServerError", str(ex))
        return rv, status, headers
    return action
<SYSTEM_TASK:>
Create a view function
<END_TASK>
<USER_TASK:>
Description:
def make_view(self, action_group):
    """
    Create a view function.

    The view checks permission via the before-request hook and
    dispatches the request to the action selected by request.method.
    """
    def view(*args, **kwargs):
        try:
            http_method = request.method.lower()
            if http_method not in action_group:
                abort(405)
            resp = self._before_request()
            if resp is None:
                # Hook did not short-circuit: run the matching action.
                resp = action_group[http_method](get_request_data())
        except Exception as ex:
            resp = self._handle_error(ex)
            if resp is None:
                raise
        resp = self._after_request(*unpack(resp))
        return export(*resp)
    return view
<SYSTEM_TASK:>
Get all available regions for the SNS service.
<END_TASK>
<USER_TASK:>
Description:
def regions():
    """
    Get all available regions for the SNS service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo` instances
    """
    # Every endpoint follows the ``sns.<region>.amazonaws.com`` pattern,
    # so the table only needs the region names.
    region_names = ['us-east-1',
                    'eu-west-1',
                    'us-west-1',
                    'sa-east-1',
                    'us-west-2',
                    'ap-northeast-1',
                    'ap-southeast-1']
    return [RegionInfo(name=name,
                       endpoint='sns.%s.amazonaws.com' % name,
                       connection_cls=SNSConnection)
            for name in region_names]
<SYSTEM_TASK:>
Add a tag to this object. Tags are stored by AWS and can be used
<END_TASK>
<USER_TASK:>
Description:
def add_tag(self, key, value=''):
    """
    Add a tag to this object. Tags are stored by AWS and can be used
    to organize and filter resources. Adding a tag involves a round-trip
    to the EC2 service.

    :type key: str
    :param key: The key or name of the tag being stored.

    :type value: str
    :param value: An optional value that can be stored with the tag.
                  If you want only the tag name and no value, the
                  value should be the empty string.
    """
    # Create the tag remotely first, then mirror it locally. The return
    # value of create_tags is intentionally ignored (the old unused
    # ``status`` local has been removed).
    self.connection.create_tags([self.id], {key: value})
    if self.tags is None:
        self.tags = TagSet()
    self.tags[key] = value
<SYSTEM_TASK:>
Remove a tag from this object. Removing a tag involves a round-trip
<END_TASK>
<USER_TASK:>
Description:
def remove_tag(self, key, value=None):
    """
    Remove a tag from this object. Removing a tag involves a round-trip
    to the EC2 service.

    :type key: str
    :param key: The key or name of the tag being stored.

    :type value: str
    :param value: An optional value that can be stored with the tag.
                  If a value is provided, it must match the value
                  currently stored in EC2. If not, the tag will not
                  be removed. If a value of None is provided, all
                  tags with the specified name will be deleted.
                  NOTE: There is an important distinction between
                  a value of '' and a value of None.
    """
    if value:
        # Delete only the exact key/value pair.
        tags = {key: value}
    else:
        # None (or '') deletes the key regardless of its stored value.
        tags = [key]
    # Return value intentionally ignored (old unused ``status`` local
    # removed).
    self.connection.delete_tags([self.id], tags)
    # Mirror the deletion locally; guard against self.tags being unset
    # (previously raised TypeError when self.tags was None).
    if self.tags and key in self.tags:
        del self.tags[key]
<SYSTEM_TASK:>
Get a variable number of messages.
<END_TASK>
<USER_TASK:>
Description:
def get_messages(self, num_messages=1, visibility_timeout=None,
                     attributes=None, callback=None):
    """
    Get a variable number of messages.
    :type num_messages: int
    :param num_messages: The maximum number of messages to read from the queue.
    :type visibility_timeout: int
    :param visibility_timeout: The VisibilityTimeout for the messages read.
    :type attributes: str
    :param attributes: The name of additional attribute to return with response
                       or All if you want all attributes. The default is to
                       return no additional attributes. Valid values:
                       All
                       SenderId
                       SentTimestamp
                       ApproximateReceiveCount
                       ApproximateFirstReceiveTimestamp
    :rtype: list
    :return: A list of :class:`boto.sqs.message.Message` objects.
    """ |
    # Thin delegation; note the connection API spells the count
    # parameter ``number_messages``.
    return self.connection.receive_message(self, number_messages=num_messages,
                                           visibility_timeout=visibility_timeout,
                                           attributes=attributes, callback=callback)
<SYSTEM_TASK:>
Delete a message from the queue.
<END_TASK>
<USER_TASK:>
Description:
def delete_message(self, message, callback=None):
    """
    Delete a message from the queue.
    :type message: :class:`boto.sqs.message.Message`
    :param message: The :class:`boto.sqs.message.Message` object to delete.
    :rtype: bool
    :return: True if successful, False otherwise
    """ |
    # Thin delegation to the connection, passing this queue as context.
    return self.connection.delete_message(self, message, callback=callback)
<SYSTEM_TASK:>
Method returns prepared request query for Yahoo YQL API.
<END_TASK>
<USER_TASK:>
Description:
def prepare_query(self, symbol, start_date, end_date):
    """Return the YQL query string requesting historical quotes for
    *symbol* between *start_date* and *end_date*."""
    template = ('select * from yahoo.finance.historicaldata '
                'where symbol = "%s" and startDate = "%s" and endDate = "%s"')
    return template % (symbol, start_date, end_date)
<SYSTEM_TASK:>
Make a request on this connection
<END_TASK>
<USER_TASK:>
Description:
def _make_request(self, method, url, post_data=None, body=None):
"""
Make a request on this connection
""" |
if not self.connection:
self._connect()
try:
self.connection.close()
except:
pass
self.connection.connect()
headers = {}
if self.auth_header:
headers["Authorization"] = self.auth_header
self.connection.request(method, url, body, headers)
resp = self.connection.getresponse()
return resp |
<SYSTEM_TASK:>
Marshal the object and do a PUT
<END_TASK>
<USER_TASK:>
Description:
def save_object(self, obj, expected_value=None):
    """
    Marshal the object and do a PUT
    Note: ``expected_value`` is currently unused.
    """ |
    doc = self.marshal_object(obj)
    # Existing objects PUT to /<db>/<id>; new objects PUT to /<db> so
    # the server assigns an id.
    if obj.id:
        url = "/%s/%s" % (self.db_name, obj.id)
    else:
        url = "/%s" % (self.db_name)
    resp = self._make_request("PUT", url, body=doc.toxml())
    # Parse the server's echo of the object to pick up the assigned id
    # and any server-computed property values.
    new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp))
    obj.id = new_obj.id
    for prop in obj.properties():
        try:
            propname = prop.name
        except AttributeError:
            propname = None
        if propname:
            value = getattr(new_obj, prop.name)
            if value:
                # Only truthy values returned by the server overwrite
                # the local attributes.
                setattr(obj, prop.name, value)
    return obj
<SYSTEM_TASK:>
Update the DB instance's status information by making a call to fetch
<END_TASK>
<USER_TASK:>
Description:
def update(self, validate=False):
    """
    Refresh this DB instance's status information by fetching the
    current instance attributes from the service.

    :type validate: bool
    :param validate: By default, if the service returns no data about
        the instance the update method returns quietly. If the
        validate param is True, however, a ValueError is raised when
        no data is returned.
    """
    matches = self.connection.get_all_dbinstances(self.id)
    if matches:
        for candidate in matches:
            if candidate.id == self.id:
                # Copy every refreshed attribute onto this instance.
                self.__dict__.update(candidate.__dict__)
    elif validate:
        raise ValueError('%s is not a valid Instance ID' % self.id)
    return self.status
<SYSTEM_TASK:>
Delete this DBInstance.
<END_TASK>
<USER_TASK:>
Description:
def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
    """
    Delete this DBInstance.
    Note: despite the name, this performs a delete via the connection.
    :type skip_final_snapshot: bool
    :param skip_final_snapshot: This parameter determines whether a final
                                db snapshot is created before the instance
                                is deleted. If True, no snapshot is created.
                                If False, a snapshot is created before
                                deleting the instance.
    :type final_snapshot_id: str
    :param final_snapshot_id: If a final snapshot is requested, this
                              is the identifier used for that snapshot.
    :rtype: :class:`boto.rds.dbinstance.DBInstance`
    :return: The deleted db instance.
    """ |
    return self.connection.delete_dbinstance(self.id,
                                             skip_final_snapshot,
                                             final_snapshot_id)
<SYSTEM_TASK:>
Modify this DBInstance.
<END_TASK>
<USER_TASK:>
Description:
def modify(self, param_group=None, security_groups=None,
           preferred_maintenance_window=None,
           master_password=None, allocated_storage=None,
           instance_class=None,
           backup_retention_period=None,
           preferred_backup_window=None,
           multi_az=False,
           apply_immediately=False):
    """
    Modify this DBInstance.
    :type param_group: str
    :param param_group: The DB parameter group (name) to associate with
                        this DBInstance.
    :type security_groups: list of str or list of DBSecurityGroup objects
    :param security_groups: List of names of DBSecurityGroup to authorize on
                            this DBInstance.
    :type preferred_maintenance_window: str
    :param preferred_maintenance_window: The weekly time range (in UTC)
                                         during which maintenance can
                                         occur.
                                         Default is Sun:05:00-Sun:09:00
    :type master_password: str
    :param master_password: Password of master user for the DBInstance.
                            Must be 4-15 alphanumeric characters.
    :type allocated_storage: int
    :param allocated_storage: The new allocated storage size, in GBs.
                              Valid values are [5-1024]
    :type instance_class: str
    :param instance_class: The compute and memory capacity of the
                           DBInstance. Changes will be applied at
                           next maintenance window unless
                           apply_immediately is True.
                           Valid values are:
                           * db.m1.small
                           * db.m1.large
                           * db.m1.xlarge
                           * db.m2.xlarge
                           * db.m2.2xlarge
                           * db.m2.4xlarge
    :type apply_immediately: bool
    :param apply_immediately: If true, the modifications will be applied
                              as soon as possible rather than waiting for
                              the next preferred maintenance window.
    :type backup_retention_period: int
    :param backup_retention_period: The number of days for which automated
                                    backups are retained. Setting this to
                                    zero disables automated backups.
    :type preferred_backup_window: str
    :param preferred_backup_window: The daily time range during which
                                    automated backups are created (if
                                    enabled). Must be in h24:mi-hh24:mi
                                    format (UTC).
    :type multi_az: bool
    :param multi_az: If True, specifies the DB Instance will be
                     deployed in multiple availability zones.
    :rtype: :class:`boto.rds.dbinstance.DBInstance`
    :return: The modified db instance.
    """ |
    # All arguments are forwarded positionally, in the exact order the
    # connection API expects.
    return self.connection.modify_dbinstance(self.id,
                                             param_group,
                                             security_groups,
                                             preferred_maintenance_window,
                                             master_password,
                                             allocated_storage,
                                             instance_class,
                                             backup_retention_period,
                                             preferred_backup_window,
                                             multi_az,
                                             apply_immediately)
<SYSTEM_TASK:>
Not currently fully supported, but we can use this
<END_TASK>
<USER_TASK:>
Description:
def fetch(self, limit, offset=0):
    """Not currently fully supported, but we can use this
    to allow them to set a limit in a chainable method""" |
    # Store the pagination bounds on the query object and return self
    # so the call can be chained.
    self.limit = limit
    self.offset = offset
    return self
<SYSTEM_TASK:>
Create and return a new Session Token based on the contents
<END_TASK>
<USER_TASK:>
Description:
def from_json(cls, json_doc):
    """
    Create and return a new Session Token based on the contents
    of a JSON document.

    :type json_doc: str
    :param json_doc: A string containing a JSON document with a
        previously saved Credentials object.
    """
    # Deserialize straight into the new instance's attribute dict.
    token = cls()
    token.__dict__.update(json.loads(json_doc))
    return token
<SYSTEM_TASK:>
Create and return a new Session Token based on the contents
<END_TASK>
<USER_TASK:>
Description:
def load(cls, file_path):
    """
    Create and return a new Session Token based on the contents
    of a previously saved JSON-format file.

    :type file_path: str
    :param file_path: The fully qualified path to the JSON-format
        file containing the previously saved Session Token information.
    """
    # Context manager guarantees the file is closed, matching the
    # original read-then-close behavior.
    with open(file_path) as fp:
        return cls.from_json(fp.read())
<SYSTEM_TASK:>
Return a Python dict containing the important information
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
    """
    Return a Python dict containing the important information
    about this Session Token.
    """
    fields = ('access_key', 'secret_key', 'session_token',
              'expiration', 'request_id')
    return dict((name, getattr(self, name)) for name in fields)
<SYSTEM_TASK:>
Persist a Session Token to a file in JSON format.
<END_TASK>
<USER_TASK:>
Description:
def save(self, file_path):
    """
    Persist a Session Token to a file in JSON format.

    :type file_path: str
    :param file_path: The fully qualified path to the file where the
        Session Token data should be written. Any previous data in the
        file will be overwritten. To help protect the credentials
        contained in the file, the permissions of the file will be
        set to readable/writable by owner only.
    """
    # Text mode ('w', not 'wb'): json.dump writes str, which fails on a
    # binary file under Python 3. The context manager also guarantees
    # the file is closed even if serialization raises.
    with open(file_path, 'w') as fp:
        json.dump(self.to_dict(), fp)
    # 0o600 = owner read/write only. (The legacy ``0600`` literal is a
    # syntax error on Python 3; 0o-notation works on 2.6+ as well.)
    os.chmod(file_path, 0o600)
<SYSTEM_TASK:>
Checks to see if the Session Token is expired or not. By default
<END_TASK>
<USER_TASK:>
Description:
def is_expired(self, time_offset_seconds=0):
    """
    Checks to see if the Session Token is expired or not. By default
    it will check to see if the Session Token is expired as of the
    moment the method is called. However, you can supply an
    optional parameter which is the number of seconds of offset
    into the future for the check. For example, if you supply
    a value of 5, this method will return a True if the Session
    Token will be expired 5 seconds from this moment.

    :type time_offset_seconds: int
    :param time_offset_seconds: The number of seconds into the future
        to test the Session Token for expiration.
    """ |
    now = datetime.datetime.utcnow()
    if time_offset_seconds:
        now = now + datetime.timedelta(seconds=time_offset_seconds)
    # parse_ts presumably converts the stored expiration string into a
    # datetime -- confirm the expected timestamp format.
    ts = boto.utils.parse_ts(self.expiration)
    delta = ts - now
    # Non-positive remaining time means the token has expired.
    return delta.total_seconds() <= 0
<SYSTEM_TASK:>
Add an entry to the system crontab.
<END_TASK>
<USER_TASK:>
Description:
def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None):
    """
    Add an entry to the system crontab.
    """ |
    # Abstract hook: concrete platform-specific subclasses must override.
    raise NotImplementedError
<SYSTEM_TASK:>
Returns a dictionary with location data or False on failure.
<END_TASK>
<USER_TASK:>
Description:
def get_location(self, ip, detailed=False):
    """Return a dictionary with location data for *ip*, or False when
    the address cannot be resolved.

    The amount of information about the IP contained in the dictionary
    depends upon the *detailed* flag.
    """
    position = self._get_pos(ip)
    if position <= 0:
        return False
    return self._parse_location(position, detailed=detailed)
<SYSTEM_TASK:>
Returns a list of dictionaries with location data or False
<END_TASK>
<USER_TASK:>
Description:
def get_locations(self, ip, detailed=False):
    """Return a list of location dictionaries (or False entries for
    unresolvable addresses). *ip* may be an iterable of addresses or a
    single string.

    The amount of information per entry depends on the *detailed* flag.
    """
    if isinstance(ip, str):
        # Promote a single address to a one-element batch.
        ip = [ip]
    results = []
    for position in map(self._get_pos, ip):
        if position > 0:
            results.append(self._parse_location(position, detailed=detailed))
        else:
            results.append(False)
    return results
<SYSTEM_TASK:>
Finds an AuthHandler that is ready to authenticate.
<END_TASK>
<USER_TASK:>
Description:
def get_auth_handler(host, config, provider, requested_capability=None):
    """Finds an AuthHandler that is ready to authenticate.

    Lists through all the registered AuthHandlers to find one that is
    willing to handle for the requested capabilities, config and provider.

    :type host: string
    :param host: The name of the host

    :param config: configuration handed to each candidate handler.
    :param provider: credential provider handed to each candidate handler.
    :param requested_capability: optional capability filter passed to
        the plugin registry.

    Returns:
        An implementation of AuthHandler.

    Raises:
        boto.exception.NoAuthHandlerFound:
        boto.exception.TooManyAuthHandlerReadyToAuthenticate:
    """
    # (Unused locals ``total_handlers`` and the ``checked_handlers``
    # alias from the original implementation were removed.)
    ready_handlers = []
    auth_handlers = boto.plugin.get_plugin(AuthHandler, requested_capability)
    for handler in auth_handlers:
        try:
            ready_handlers.append(handler(host, config, provider))
        except boto.auth_handler.NotReadyToAuthenticate:
            # Candidate declined (e.g. missing credentials); keep looking.
            pass

    if not ready_handlers:
        names = [handler.__name__ for handler in auth_handlers]
        raise boto.exception.NoAuthHandlerFound(
            'No handler was ready to authenticate. %d handlers were checked.'
            ' %s '
            'Check your credentials' % (len(names), str(names)))

    if len(ready_handlers) > 1:
        # NOTE: Even though it would be nice to accept more than one handler
        # by using one of the many ready handlers, we are never sure that each
        # of them are referring to the same storage account. Since we cannot
        # easily guarantee that, it is always safe to fail, rather than operate
        # on the wrong account.
        names = [handler.__class__.__name__ for handler in ready_handlers]
        raise boto.exception.TooManyAuthHandlerReadyToAuthenticate(
            '%d AuthHandlers %s ready to authenticate for requested_capability '
            '%s, only 1 expected. This happens if you import multiple '
            'pluging.Plugin implementations that declare support for the '
            'requested_capability.' % (len(names), str(names),
                                       requested_capability))

    return ready_handlers[0]
<SYSTEM_TASK:>
Select the headers from the request that need to be included
<END_TASK>
<USER_TASK:>
Description:
def headers_to_sign(self, http_request):
    """
    Select the headers from the request that need to be included
    in the StringToSign.

    Always includes ``Host``; adds every request header whose name
    starts with ``x-amz`` (case-insensitive), preserving the header's
    original casing in the result.
    """
    # The original code assigned ``{}`` and immediately overwrote it;
    # that dead assignment has been removed.
    headers_to_sign = {'Host': self.host}
    for name, value in http_request.headers.items():
        if name.lower().startswith('x-amz'):
            headers_to_sign[name] = value
    return headers_to_sign
<SYSTEM_TASK:>
Return the headers that need to be included in the StringToSign
<END_TASK>
<USER_TASK:>
Description:
def canonical_headers(self, headers_to_sign):
    """
    Return the headers that need to be included in the StringToSign
    in their canonical form: each key lower-cased and stripped, each
    value stripped, lines sorted alphabetically and newline-joined.
    """
    pairs = []
    for name in headers_to_sign:
        pairs.append('%s:%s' % (name.lower().strip(),
                                headers_to_sign[name].strip()))
    return '\n'.join(sorted(pairs))
<SYSTEM_TASK:>
Return the canonical StringToSign as well as a dict
<END_TASK>
<USER_TASK:>
Description:
def string_to_sign(self, http_request):
    """
    Return the canonical StringToSign as well as a dict
    containing the original version of all headers that
    were included in the StringToSign.
    """ |
    headers_to_sign = self.headers_to_sign(http_request)
    canonical_headers = self.canonical_headers(headers_to_sign)
    # Layout: method, path, empty line, canonical headers, empty line,
    # body -- all newline-joined.
    string_to_sign = '\n'.join([http_request.method,
                                http_request.path,
                                '',
                                canonical_headers,
                                '',
                                http_request.body])
    return string_to_sign, headers_to_sign
<SYSTEM_TASK:>
Add AWS3 authentication to a request.
<END_TASK>
<USER_TASK:>
Description:
def add_auth(self, req, **kwargs):
    """
    Add AWS3 authentication to a request.

    :type req: :class`boto.connection.HTTPRequest`
    :param req: The HTTPRequest object. Mutated in place: date,
        security-token and authorization headers are set on it.
    """ |
    # This could be a retry. Make sure the previous
    # authorization header is removed first.
    if 'X-Amzn-Authorization' in req.headers:
        del req.headers['X-Amzn-Authorization']
    req.headers['X-Amz-Date'] = formatdate(usegmt=True)
    req.headers['X-Amz-Security-Token'] = self._provider.security_token
    string_to_sign, headers_to_sign = self.string_to_sign(req)
    boto.log.debug('StringToSign:\n%s' % string_to_sign)
    # Sign the SHA-256 digest of the StringToSign.
    # NOTE(review): sha256() on a str only works on Python 2; Python 3
    # would require bytes -- confirm the target interpreter.
    hash_value = sha256(string_to_sign).digest()
    b64_hmac = self.sign_string(hash_value)
    # Assemble the AWS3 authorization header value piecewise.
    s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key
    s += "Algorithm=%s," % self.algorithm()
    s += "SignedHeaders=%s," % ';'.join(headers_to_sign)
    s += "Signature=%s" % b64_hmac
    req.headers['X-Amzn-Authorization'] = s
<SYSTEM_TASK:>
Returns true if the requested capability is supported by this plugin
<END_TASK>
<USER_TASK:>
Description:
def is_capable(cls, requested_capability):
    """Return True if every requested capability is supported by this
    plugin class, False otherwise."""
    return all(c in cls.capability for c in requested_capability)
<SYSTEM_TASK:>
Returns the next connection in this pool that is ready to be
<END_TASK>
<USER_TASK:>
Description:
def get(self):
    """
    Returns the next connection in this pool that is ready to be
    reused. Returns None if there aren't any.
    """ |
    # Discard ready connections that are too old.
    self.clean()
    # Return the first connection that is ready, and remove it
    # from the queue. Connections that aren't ready are returned
    # to the end of the queue with an updated time, on the
    # assumption that somebody is actively reading the response.
    # (pop(0) on a list is O(n); acceptable for small per-host pools.)
    for _ in range(len(self.queue)):
        (conn, _) = self.queue.pop(0)
        if self._conn_ready(conn):
            return conn
        else:
            self.put(conn)
    return None
<SYSTEM_TASK:>
Get rid of stale connections.
<END_TASK>
<USER_TASK:>
Description:
def clean(self):
    """
    Get rid of stale connections.
    """ |
    # Note that we do not close the connection here -- somebody
    # may still be reading from it.
    # Stale entries are popped from the head of the queue only;
    # _pair_stale decides what counts as stale.
    while len(self.queue) > 0 and self._pair_stale(self.queue[0]):
        self.queue.pop(0)
<SYSTEM_TASK:>
Adds a connection to the pool of connections that can be
<END_TASK>
<USER_TASK:>
Description:
def put_http_connection(self, host, is_secure, conn):
    """
    Adds a connection to the pool of connections that can be
    reused for the named host.
    """ |
    with self.mutex:
        # Pools are keyed by (host, is_secure) so plain and TLS
        # connections are never mixed.
        key = (host, is_secure)
        if key not in self.host_to_pool:
            self.host_to_pool[key] = HostConnectionPool()
        self.host_to_pool[key].put(conn)
<SYSTEM_TASK:>
Clean up the stale connections in all of the pools, and then
<END_TASK>
<USER_TASK:>
Description:
def clean(self):
    """
    Clean up the stale connections in all of the pools, and then
    get rid of empty pools. Pools clean themselves every time a
    connection is fetched; this cleaning takes care of pools that
    aren't being used any more, so nothing is being gotten from
    them.
    """ |
    with self.mutex:
        now = time.time()
        # Throttle: do a full sweep at most once per CLEAN_INTERVAL.
        if self.last_clean_time + self.CLEAN_INTERVAL < now:
            to_remove = []
            for (host, pool) in self.host_to_pool.items():
                pool.clean()
                if pool.size() == 0:
                    to_remove.append(host)
            # Deletion is deferred so the dict is not mutated while
            # iterating over it.
            for host in to_remove:
                del self.host_to_pool[host]
            self.last_clean_time = now
<SYSTEM_TASK:>
Makes a request to the server, with stock multiple-retry logic.
<END_TASK>
<USER_TASK:>
Description:
def make_request(self, method, path, headers=None, data='', host=None,
                 auth_path=None, sender=None, override_num_retries=None):
    """Makes a request to the server, with stock multiple-retry logic.""" |
    # Build the request (empty dict is the params argument) and hand it
    # to _mexe, which implements the retry policy.
    http_request = self.build_base_http_request(method, path, auth_path,
                                                {}, headers, data, host)
    return self._mexe(http_request, sender, override_num_retries)
<SYSTEM_TASK:>
Create radio button window for option selection
<END_TASK>
<USER_TASK:>
Description:
def GetRadioButtonSelect(selectList, title="Select", msg=""):
    """
    Create radio button window for option selection
    title: Window name
    msg: Label of the radio buttons
    return (selectedItem, selectedIndex)
    """ |
    root = tkinter.Tk()
    root.title(title)
    val = tkinter.IntVar()
    val.set(0)  # default to the first option
    if msg != "":
        tkinter.Label(root, text=msg).pack()
    index = 0
    for item in selectList:
        tkinter.Radiobutton(root, text=item, variable=val,
                            value=index).pack(anchor=tkinter.W)
        index += 1
    tkinter.Button(root, text="OK", fg="black", command=root.quit).pack()
    root.mainloop()  # blocks until the user presses OK
    root.destroy()
    print(selectList[val.get()] + " is selected")
    return (selectList[val.get()], val.get())
<SYSTEM_TASK:>
Create list with selectList,
<END_TASK>
<USER_TASK:>
Description:
def GetListSelect(selectList, title="Select", msg=""):
    """
    Create list with selectList,
    and then return selected string and index
    title: Window name
    msg: Label of the list
    return (selectedItem, selectedIndex)
    """ |
    root = tkinter.Tk()
    root.title(title)
    label = tkinter.Label(root, text=msg)
    label.pack()
    listbox = tkinter.Listbox(root)
    for i in selectList:
        listbox.insert(tkinter.END, i)
    listbox.pack()
    tkinter.Button(root, text="OK", fg="black", command=root.quit).pack()
    root.mainloop()  # blocks until the user presses OK
    # Read the selection BEFORE destroying the widget tree.
    selected = listbox.get(listbox.curselection())
    print(selected + " is selected")
    root.destroy()
    return (selected, selectList.index(selected))
<SYSTEM_TASK:>
Get selected check button options
<END_TASK>
<USER_TASK:>
Description:
def GetCheckButtonSelect(selectList, title="Select", msg=""):
    """
    Get selected check button options
    title: Window name
    msg: Label of the check buttons
    return selected dictionary mapping each option to a bool, e.g.
    {'sample b': False, 'sample c': False, 'sample a': False}
    """ |
    root = tkinter.Tk()
    root.title(title)
    label = tkinter.Label(root, text=msg)
    label.pack()
    optList = []
    for item in selectList:
        # One BooleanVar per checkbox, unchecked by default.
        opt = tkinter.BooleanVar()
        opt.set(False)
        tkinter.Checkbutton(root, text=item, variable=opt).pack()
        optList.append(opt)
    tkinter.Button(root, text="OK", fg="black", command=root.quit).pack()
    root.mainloop()  # blocks until the user presses OK
    root.destroy()
    result = {}
    for (opt, select) in zip(optList, selectList):
        result[select] = opt.get()
    print(result)
    return result
<SYSTEM_TASK:>
Get entries of the list
<END_TASK>
<USER_TASK:>
Description:
def GetEntries(dataList, title="Select", msg=""):
    """
    Get entries of the list
    title: Window name
    msg: Label of the entry fields
    return data dictionary mapping field name -> entered string, like:
    {'y': '5.0', 'x': '100', 'z': 'save'}
    """ |
    root = tkinter.Tk()
    root.title(title)
    label = tkinter.Label(root, text=msg)
    label.pack()
    entries = []
    for item in dataList:
        # One labeled Entry widget per requested field.
        tkinter.Label(root, text=item).pack()
        entry = tkinter.Entry(root)
        entry.pack()
        entries.append(entry)
    # print entries
    tkinter.Button(root, text="OK", fg="black", command=root.quit).pack()
    root.mainloop()  # blocks until the user presses OK
    # Read entry values before destroying the widget tree.
    result = {}
    for (entry, data) in zip(entries, dataList):
        result[data] = entry.get()
    root.destroy()
    print(result)
    return result
<SYSTEM_TASK:>
Add a rule to the SecurityGroup object. Note that this method
<END_TASK>
<USER_TASK:>
Description:
def add_rule(self, ip_protocol, from_port, to_port,
             src_group_name, src_group_owner_id, cidr_ip):
    """
    Add a rule to the SecurityGroup object. Note that this method
    only changes the local version of the object. No information
    is sent to EC2.
    """ |
    rule = IPPermissions(self)
    rule.ip_protocol = ip_protocol
    rule.from_port = from_port
    rule.to_port = to_port
    self.rules.append(rule)
    # Attach the grant (source group and/or CIDR) to the new rule.
    rule.add_grant(src_group_name, src_group_owner_id, cidr_ip)
<SYSTEM_TASK:>
Remove a rule from the SecurityGroup object. Note that this method
<END_TASK>
<USER_TASK:>
Description:
def remove_rule(self, ip_protocol, from_port, to_port,
                src_group_name, src_group_owner_id, cidr_ip):
    """
    Remove a rule from the SecurityGroup object. Note that this method
    only changes the local version of the object. No information
    is sent to EC2.
    """
    for rule in self.rules:
        rule_matches = (rule.ip_protocol == ip_protocol and
                        rule.from_port == from_port and
                        rule.to_port == to_port)
        if not rule_matches:
            continue
        # Look for the grant matching all three identifying fields.
        doomed_grant = None
        for grant in rule.grants:
            if (grant.name == src_group_name and
                    grant.owner_id == src_group_owner_id and
                    grant.cidr_ip == cidr_ip):
                doomed_grant = grant
        if doomed_grant:
            rule.grants.remove(doomed_grant)
            if not rule.grants:
                # Last grant gone: drop the now-empty rule as well.
                self.rules.remove(rule)
<SYSTEM_TASK:>
Create a copy of this security group in another region.
<END_TASK>
<USER_TASK:>
Description:
def copy_to_region(self, region, name=None):
    """
    Create a copy of this security group in another region.
    Note that the new security group will be a separate entity
    and will not stay in sync automatically after the copy
    operation.

    :type region: :class:`boto.ec2.regioninfo.RegionInfo`
    :param region: The region to which this security group will be copied.

    :type name: string
    :param name: The name of the copy. If not supplied, the copy
                 will have the same name as this security group.

    :rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
    :return: The new security group.
    """ |
    # NOTE(review): compares region.name against self.region -- this
    # assumes self.region holds the region name string; confirm.
    if region.name == self.region:
        raise BotoClientError('Unable to copy to the same Region')
    conn_params = self.connection.get_params()
    rconn = region.connect(**conn_params)
    sg = rconn.create_security_group(name or self.name, self.description)
    source_groups = []
    for rule in self.rules:
        for grant in rule.grants:
            if grant.name:
                if grant.name not in source_groups:
                    source_groups.append(grant.name)
                # Group-based grant: authorize by source group only.
                sg.authorize(None, None, None, None, grant)
            else:
                # CIDR-based grant: authorize by protocol/ports/CIDR.
                sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,
                             grant.cidr_ip)
    return sg
<SYSTEM_TASK:>
Find all of the current instances that are running within this
<END_TASK>
<USER_TASK:>
Description:
def instances(self):
    """
    Find all of the current instances that are running within this
    security group.

    :rtype: list of :class:`boto.ec2.instance.Instance`
    :return: A list of Instance objects
    """
    # Server-side filters would be cheaper, but not every service that
    # implements the EC2 API supports them, so filter locally instead.
    matching = []
    for reservation in self.connection.get_all_instances():
        if any(group.name == self.name for group in reservation.groups):
            matching.extend(reservation.instances)
    return matching
<SYSTEM_TASK:>
Taken from the AWS book Python examples and modified for use with boto
<END_TASK>
<USER_TASK:>
Description:
def build_post_policy(self, expiration_time, conditions):
    """
    Build the JSON policy document for an S3 POST upload.

    Taken from the AWS book Python examples and modified for use with boto.

    :type expiration_time: time.struct_time
    :param expiration_time: Expiration time (UTC) for the policy.

    :type conditions: list of str
    :param conditions: Pre-rendered JSON condition statements.

    :rtype: str
    :return: The policy document as a JSON string.
    """
    # isinstance (not `type(...) ==`) also accepts struct_time subclasses;
    # the AssertionError contract of the original check is preserved.
    assert isinstance(expiration_time, time.struct_time), \
        'Policy document must include a valid expiration Time object'
    # Convert conditions object mappings to condition statements
    return '{"expiration": "%s",\n"conditions": [%s]}' % \
        (time.strftime(boto.utils.ISO8601, expiration_time),
         ",".join(conditions))
<SYSTEM_TASK:>
Creates a new located bucket. By default it's in the USA. You can pass
<END_TASK>
<USER_TASK:>
Description:
def create_bucket(self, bucket_name, headers=None,
                  location=Location.DEFAULT, policy=None):
    """
    Creates a new located bucket. By default it's in the USA. You can pass
    Location.EU to create an European bucket.

    :type bucket_name: string
    :param bucket_name: The name of the new bucket

    :type headers: dict
    :param headers: Additional headers to pass along with the request to AWS.

    :type location: :class:`boto.s3.connection.Location`
    :param location: The location of the new bucket

    :type policy: :class:`boto.s3.acl.CannedACLStrings`
    :param policy: A canned ACL policy that will be applied to the
        new key in S3.
    """
    check_lowercase_bucketname(bucket_name)

    if policy:
        # Fold the canned ACL into the request headers.
        headers = headers or {}
        headers[self.provider.acl_header] = policy

    if location == Location.DEFAULT:
        payload = ''
    else:
        payload = ('<CreateBucketConstraint><LocationConstraint>' +
                   location +
                   '</LocationConstraint></CreateBucketConstraint>')

    response = self.make_request('PUT', bucket_name, headers=headers,
                                 data=payload)
    body = response.read()
    if response.status == 409:
        raise self.provider.storage_create_error(
            response.status, response.reason, body)
    if response.status != 200:
        raise self.provider.storage_response_error(
            response.status, response.reason, body)
    return self.bucket_class(self, bucket_name)
<SYSTEM_TASK:>
Encodes a nonnegative integer into syncsafe format
<END_TASK>
<USER_TASK:>
Description:
def encode(i, *, width=-1):
    """Encode a nonnegative integer as syncsafe bytes.

    Each output byte carries 7 payload bits with the high bit clear.

    When width > 0, then len(result) == width
    When width < 0, then len(result) >= abs(width)
    """
    if i < 0:
        raise ValueError("value is negative")
    assert width != 0
    out = bytearray()
    remaining = i
    # Emit the value little-endian, 7 bits per byte.
    while remaining:
        out.append(remaining & 0x7F)
        remaining >>= 7
    if 0 < width < len(out):
        raise ValueError("Integer too large")
    pad = abs(width) - len(out)
    if pad > 0:
        out.extend(bytes(pad))
    # Flip to big-endian for the final result.
    out.reverse()
    return out
<SYSTEM_TASK:>
Create new message of appropriate class.
<END_TASK>
<USER_TASK:>
Description:
def new_message(self, body=''):
    """
    Create a new message using this queue's registered message class.

    :type body: message body
    :param body: The body of the newly created message (optional).

    :rtype: :class:`boto.sqs.message.Message`
    :return: A new Message object
    """
    message = self.message_class(self, body)
    # Tag the message with its owning queue.
    message.queue = self
    return message
<SYSTEM_TASK:>
Utility function to remove all messages from a queue
<END_TASK>
<USER_TASK:>
Description:
def clear(self, page_size=10, vtimeout=10):
    """Remove every message from the queue; return how many were deleted."""
    deleted = 0
    batch = self.get_messages(page_size, vtimeout)
    while batch:
        for message in batch:
            self.delete_message(message)
            deleted += 1
        batch = self.get_messages(page_size, vtimeout)
    return deleted
<SYSTEM_TASK:>
Read all messages from the queue and persist them to file-like object.
<END_TASK>
<USER_TASK:>
Description:
def save_to_file(self, fp, sep='\n'):
    """
    Read all messages from the queue and persist them to file-like object.

    Each message body is written to ``fp`` with the ``sep`` string written
    in between messages; every message is deleted from the queue after
    being written.

    Returns the number of messages saved.
    """
    count = 0
    message = self.read()
    while message:
        count += 1
        fp.write(message.get_body())
        if sep:
            fp.write(sep)
        self.delete_message(message)
        message = self.read()
    return count
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.