<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def shutdown(self):
'Close the hub connection'
log.info("shutting down")
self._peer.go_down(reconnect=False, expected=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _headers(self, others={}):
"""Return the default headers and others as necessary""" |
headers = {
'Content-Type': 'application/json'
}
for p in others.keys():
headers[p] = others[p]
return headers |
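A quick usage sketch of the header merge. The enclosing client class and the extra header are assumptions for illustration; any key passed in `others` is layered on top of (and can override) the JSON default.

# hypothetical call on an instance that defines _headers as above
merged = client._headers({"Authorization": "Bearer <token>"})
# -> {'Content-Type': 'application/json', 'Authorization': 'Bearer <token>'}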
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def read(config_file, configspec, server_mode=False, default_section='default_settings', list_values=True):
'''
Read the config file with spec validation
'''
# configspec = ConfigObj(path.join(path.abspath(path.dirname(__file__)), configspec),
# encoding='UTF8',
# interpolation='Template',
# list_values=False,
# _inspec=True)
config = ConfigObj(config_file,
configspec=path.join(path.abspath(path.dirname(__file__)),
configspec),
list_values=list_values)
validation = config.validate(validate.Validator(), preserve_errors=True)
if validation == True:
config = dict(config)
for section in config:
if section != default_section:
if server_mode: # When it's a servers config file, retrieve the correct fqdn
config[section]['availability'] = True
if config[section]['custom_fqdn'] == None:
config[section]['custom_fqdn'] = socket.getfqdn()
for option in config[section]: # retrieve default configuration for missing values
if config[section][option] == None:
config[section][option] = config[default_section][option]
del(config[default_section])
return config
else:
raise ConfiguratorException(config_file, validation) |
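A minimal usage sketch, assuming the surrounding module exposes read() and that the spec file lives next to the module (both file names below are made up); ConfiguratorException is raised if validation fails.

servers = read("servers.ini", "servers.spec", server_mode=True)
for name, options in servers.items():
    # in server_mode each section gets 'availability' and a resolved 'custom_fqdn'
    print(name, options["custom_fqdn"], options["availability"])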
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def elapsed_time_string(start_time, stop_time):
r""" Return a formatted string with the elapsed time between two time points. The string includes years (365 days), months (30 days), days (24 hours), hours (60 minutes), minutes (60 seconds) and seconds. If both arguments are equal, the string returned is :code:`'None'`; otherwise, the string returned is [YY year[s], [MM month[s], [DD day[s], [HH hour[s], [MM minute[s] [and SS second[s\]\]\]\]\]\]. Any part (year[s], month[s], etc.) is omitted if the value of that part is null/zero :param start_time: Starting time point :type start_time: `datetime <https://docs.python.org/3/library/ datetime.html#datetime-objects>`_ :param stop_time: Ending time point :type stop_time: `datetime` :rtype: string :raises: RuntimeError (Invalid time delta specification) For example: '1 year, 2 days and 2 seconds' """ |
if start_time > stop_time:
raise RuntimeError("Invalid time delta specification")
delta_time = stop_time - start_time
# Python 2.6 datetime objects do not have total_seconds() method
tot_seconds = int(
(
delta_time.microseconds
+ (delta_time.seconds + delta_time.days * 24 * 3600) * 10 ** 6
)
/ 10 ** 6
)
years, remainder = divmod(tot_seconds, 365 * 24 * 60 * 60)
months, remainder = divmod(remainder, 30 * 24 * 60 * 60)
days, remainder = divmod(remainder, 24 * 60 * 60)
hours, remainder = divmod(remainder, 60 * 60)
minutes, seconds = divmod(remainder, 60)
token_iter = zip(
[years, months, days, hours, minutes, seconds],
["year", "month", "day", "hour", "minute", "second"],
)
ret_list = [
"{token} {token_name}{plural}".format(
token=num, token_name=desc, plural="s" if num > 1 else ""
)
for num, desc in token_iter
if num > 0
]
if not ret_list:
return "None"
if len(ret_list) == 1:
return ret_list[0]
if len(ret_list) == 2:
return ret_list[0] + " and " + ret_list[1]
return (", ".join(ret_list[0:-1])) + " and " + ret_list[-1] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pcolor(text, color, indent=0):
r""" Return a string that once printed is colorized. :param text: Text to colorize :type text: string :param color: Color to use, one of :code:`'black'`, :code:`'red'`, :code:`'green'`, :code:`'yellow'`, :code:`'blue'`, :code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or :code:`'none'` (case insensitive) :type color: string :param indent: Number of spaces to prefix the output with :type indent: integer :rtype: string :raises: * RuntimeError (Argument \`color\` is not valid) * RuntimeError (Argument \`indent\` is not valid) * RuntimeError (Argument \`text\` is not valid) * ValueError (Unknown color *[color]*) """ |
esc_dict = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
"none": -1,
}
if not isinstance(text, str):
raise RuntimeError("Argument `text` is not valid")
if not isinstance(color, str):
raise RuntimeError("Argument `color` is not valid")
if not isinstance(indent, int):
raise RuntimeError("Argument `indent` is not valid")
color = color.lower()
if color not in esc_dict:
raise ValueError("Unknown color {color}".format(color=color))
if esc_dict[color] != -1:
return "\033[{color_code}m{indent}{text}\033[0m".format(
color_code=esc_dict[color], indent=" " * indent, text=text
)
return "{indent}{text}".format(indent=" " * indent, text=text) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quote_str(obj):
r""" Add extra quotes to a string. If the argument is not a string it is returned unmodified. :param obj: Object :type obj: any :rtype: Same as argument For example: 5 '"Hello!"' '\'He said "hello!"\'' """ |
if not isinstance(obj, str):
return obj
return "'{obj}'".format(obj=obj) if '"' in obj else '"{obj}"'.format(obj=obj) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strframe(obj, extended=False):
""" Return a string with a frame record pretty-formatted. The record is typically an item in a list generated by `inspect.stack() <https://docs.python.org/3/library/inspect.html#inspect.stack>`_). :param obj: Frame record :type obj: tuple :param extended: Flag that indicates whether contents of the frame object are printed (True) or not (False) :type extended: boolean :rtype: string """ |
# Stack frame -> (frame object [0], filename [1], line number of current
# line [2], function name [3], list of lines of context from source
# code [4], index of current line within list [5])
fname = normalize_windows_fname(obj[1])
ret = list()
ret.append(pcolor("Frame object ID: {0}".format(hex(id(obj[0]))), "yellow"))
ret.append("File name......: {0}".format(fname))
ret.append("Line number....: {0}".format(obj[2]))
ret.append("Function name..: {0}".format(obj[3]))
ret.append("Context........: {0}".format(obj[4]))
ret.append("Index..........: {0}".format(obj[5]))
if extended:
ret.append("f_back ID......: {0}".format(hex(id(obj[0].f_back))))
ret.append("f_builtins.....: {0}".format(obj[0].f_builtins))
ret.append("f_code.........: {0}".format(obj[0].f_code))
ret.append("f_globals......: {0}".format(obj[0].f_globals))
ret.append("f_lasti........: {0}".format(obj[0].f_lasti))
ret.append("f_lineno.......: {0}".format(obj[0].f_lineno))
ret.append("f_locals.......: {0}".format(obj[0].f_locals))
if hasattr(obj[0], "f_restricted"): # pragma: no cover
ret.append("f_restricted...: {0}".format(obj[0].f_restricted))
ret.append("f_trace........: {0}".format(obj[0].f_trace))
return "\n".join(ret) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, x):
""" Set variable values via a dictionary mapping name to value. """ |
for name, value in iter(x.items()):
if hasattr(value, "ndim"):
if self[name].value.ndim < value.ndim:
self[name].value.itemset(value.squeeze())
else:
self[name].value = value
else:
self[name].value.itemset(value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select(self, fixed):
""" Return a subset of variables according to ``fixed``. """ |
names = [n for n in self.names() if self[n].isfixed == fixed]
return Variables({n: self[n] for n in names}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def validate(self, tracking_number):
"Return True if this is a valid USPS tracking number."
tracking_num = tracking_number[:-1].replace(' ', '')
odd_total = 0
even_total = 0
for ii, digit in enumerate(tracking_num):
if ii % 2:
odd_total += int(digit)
else:
even_total += int(digit)
total = odd_total + even_total * 3
check = ((total - (total % 10) + 10) - total) % 10
return (check == int(tracking_number[-1:])) |
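A worked example of the check-digit arithmetic with a made-up payload (not a real tracking number); "validator" stands in for an instance of the class that defines validate().

# Payload digits "12345678901234567890": even positions sum to 50 and odd
# positions sum to 40, so total = 40 + 50 * 3 = 190 and the check digit is
# (10 - 190 % 10) % 10 = 0. Appending that digit yields a number the
# validator accepts.
assert validator.validate("123456789012345678900")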
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def validate(self, tracking_number):
"Return True if this is a valid UPS tracking number."
tracking_num = tracking_number[2:-1]
odd_total = 0
even_total = 0
for ii, digit in enumerate(tracking_num.upper()):
try:
value = int(digit)
except ValueError:
value = int((ord(digit) - 63) % 10)
if (ii + 1) % 2:
odd_total += value
else:
even_total += value
total = odd_total + even_total * 2
check = ((total - (total % 10) + 10) - total) % 10
return (check == int(tracking_number[-1:])) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def track(self, tracking_number):
"Track a UPS package by number. Returns just a delivery date."
resp = self.send_request(tracking_number)
return self.parse_response(resp) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(pathtovector, wordlist=(), num_to_load=None, truncate_embeddings=None, unk_word=None, sep=" "):
r""" Read a file in word2vec .txt format. The load function will raise a ValueError when trying to load items which do not conform to line lengths. Parameters pathtovector : string The path to the vector file. header : bool Whether the vector file has a header of the type (NUMBER OF ITEMS, SIZE OF VECTOR). wordlist : iterable, optional, default () A list of words you want loaded from the vector file. If this is None (default), all words will be loaded. num_to_load : int, optional, default None The number of items to load from the file. Because loading can take some time, it is sometimes useful to onlyl load the first n items from a vector file for quick inspection. truncate_embeddings : int, optional, default None If this value is not None, the vectors in the vector space will be truncated to the number of dimensions indicated by this value. unk_word : object The object to treat as UNK in your vector space. If this is not in your items dictionary after loading, we add it with a zero vector. Returns ------- r : Reach An initialized Reach instance. """ |
vectors, items = Reach._load(pathtovector,
wordlist,
num_to_load,
truncate_embeddings,
sep)
if unk_word is not None:
if unk_word not in set(items):
unk_vec = np.zeros((1, vectors.shape[1]))
vectors = np.concatenate([unk_vec, vectors], 0)
items = [unk_word] + items
unk_index = 0
else:
unk_index = items.index(unk_word)
else:
unk_index = None
return Reach(vectors,
items,
name=os.path.split(pathtovector)[-1],
unk_index=unk_index) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load(pathtovector, wordlist, num_to_load=None, truncate_embeddings=None, sep=" "):
"""Load a matrix and wordlist from a .vec file.""" |
vectors = []
addedwords = set()
words = []
try:
wordlist = set(wordlist)
except ValueError:
wordlist = set()
logger.info("Loading {0}".format(pathtovector))
firstline = open(pathtovector).readline().strip()
try:
num, size = firstline.split(sep)
num, size = int(num), int(size)
logger.info("Vector space: {} by {}".format(num, size))
header = True
except ValueError:
size = len(firstline.split(sep)) - 1
logger.info("Vector space: {} dim, # items unknown".format(size))
word, rest = firstline.split(sep, 1)
# The first line is not a header, so treat it as a regular vector line.
header = False
if truncate_embeddings is None or truncate_embeddings == 0:
truncate_embeddings = size
for idx, line in enumerate(open(pathtovector, encoding='utf-8')):
if header and idx == 0:
continue
word, rest = line.rstrip(" \n").split(sep, 1)
if wordlist and word not in wordlist:
continue
if word in addedwords:
raise ValueError("Duplicate: {} on line {} was in the "
"vector space twice".format(word, idx))
if len(rest.split(sep)) != size:
raise ValueError("Incorrect input at index {}, size "
"is {}, expected "
"{}".format(idx+1,
len(rest.split(sep)), size))
words.append(word)
addedwords.add(word)
vectors.append(np.fromstring(rest, sep=sep)[:truncate_embeddings])
if num_to_load is not None and len(addedwords) >= num_to_load:
break
vectors = np.array(vectors).astype(np.float32)
logger.info("Loading finished")
if wordlist:
diff = wordlist - addedwords
if diff:
logger.info("Not all items from your wordlist were in your "
"vector space: {}.".format(diff))
return vectors, words |
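A sketch of the plain-text format _load expects (assuming the enclosing Reach class is importable); the header line is optional and is auto-detected from the first line.

with open("toy.vec", "w", encoding="utf-8") as f:
    f.write("2 3\n")              # optional header: item count, vector size
    f.write("cat 0.1 0.2 0.3\n")
    f.write("dog 0.4 0.5 0.6\n")

vectors, words = Reach._load("toy.vec", wordlist=())
# vectors.shape == (2, 3); words == ['cat', 'dog']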
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vectorize(self, tokens, remove_oov=False, norm=False):
""" Vectorize a sentence by replacing all items with their vectors. Parameters tokens : object or list of objects The tokens to vectorize. remove_oov : bool, optional, default False Whether to remove OOV items. If False, OOV items are replaced by the UNK glyph. If this is True, the returned sequence might have a different length than the original sequence. norm : bool, optional, default False Whether to return the unit vectors, or the regular vectors. Returns ------- s : numpy array An M * N matrix, where every item has been replaced by its vector. OOV items are either removed, or replaced by the value of the UNK glyph. """ |
if not tokens:
raise ValueError("You supplied an empty list.")
index = list(self.bow(tokens, remove_oov=remove_oov))
if not index:
raise ValueError("You supplied a list with only OOV tokens: {}, "
"which then got removed. Set remove_oov to False,"
" or filter your sentences to remove any in which"
" all items are OOV.")
if norm:
return np.stack([self.norm_vectors[x] for x in index])
else:
return np.stack([self.vectors[x] for x in index]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bow(self, tokens, remove_oov=False):
""" Create a bow representation of a list of tokens. Parameters tokens : list. The list of items to change into a bag of words representation. remove_oov : bool. Whether to remove OOV items from the input. If this is True, the length of the returned BOW representation might not be the length of the original representation. Returns ------- bow : generator A BOW representation of the list of items. """ |
if remove_oov:
tokens = [x for x in tokens if x in self.items]
for t in tokens:
try:
yield self.items[t]
except KeyError:
if self.unk_index is None:
raise ValueError("You supplied OOV items but didn't "
"provide the index of the replacement "
"glyph. Either set remove_oov to True, "
"or set unk_index to the index of the "
"item which replaces any OOV items.")
yield self.unk_index |
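Usage sketch, assuming r is a loaded Reach instance whose items dict maps tokens to row indices.

indices = list(r.bow(["cat", "dog"]))            # e.g. [0, 1]
# With remove_oov=True, unknown tokens are silently dropped instead of
# being mapped to unk_index (or raising when no unk_index is set).
indices = list(r.bow(["cat", "not-in-vocab"], remove_oov=True))   # e.g. [0]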
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(self, corpus, remove_oov=False, norm=False):
""" Transform a corpus by repeated calls to vectorize, defined above. Parameters corpus : A list of strings, list of list of strings. Represents a corpus as a list of sentences, where sentences can either be strings or lists of tokens. remove_oov : bool, optional, default False If True, removes OOV items from the input before vectorization. Returns ------- c : list A list of numpy arrays, where each array represents the transformed sentence in the original list. The list is guaranteed to be the same length as the input list, but the arrays in the list may be of different lengths, depending on whether remove_oov is True. """ |
return [self.vectorize(s, remove_oov=remove_oov, norm=norm)
for s in corpus] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def most_similar(self, items, num=10, batch_size=100, show_progressbar=False, return_names=True):
""" Return the num most similar items to a given list of items. Parameters items : list of objects or a single object. The items to get the most similar items to. num : int, optional, default 10 The number of most similar items to retrieve. batch_size : int, optional, default 100. The batch size to use. 100 is a good default option. Increasing the batch size may increase the speed. show_progressbar : bool, optional, default False Whether to show a progressbar. return_names : bool, optional, default True Whether to return the item names, or just the distances. Returns ------- sim : array For each items in the input the num most similar items are returned in the form of (NAME, DISTANCE) tuples. If return_names is false, the returned list just contains distances. """ |
# This line allows users to input single items.
# We used to rely on string identities, but we now also allow
# anything hashable as keys.
# Might fail if a list of passed items is also in the vocabulary.
# but I can't think of cases when this would happen, and what
# user expectations are.
try:
if items in self.items:
items = [items]
except TypeError:
pass
x = np.stack([self.norm_vectors[self.items[x]] for x in items])
result = self._batch(x,
batch_size,
num+1,
show_progressbar,
return_names)
# list call consumes the generator.
return [x[1:] for x in result] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def threshold(self, items, threshold=.5, batch_size=100, show_progressbar=False, return_names=True):
""" Return all items whose similarity is higher than threshold. Parameters items : list of objects or a single object. The items to get the most similar items to. threshold : float, optional, default .5 The radius within which to retrieve items. batch_size : int, optional, default 100. The batch size to use. 100 is a good default option. Increasing the batch size may increase the speed. show_progressbar : bool, optional, default False Whether to show a progressbar. return_names : bool, optional, default True Whether to return the item names, or just the distances. Returns ------- sim : array For each items in the input the num most similar items are returned in the form of (NAME, DISTANCE) tuples. If return_names is false, the returned list just contains distances. """ |
# This line allows users to input single items.
# We used to rely on string identities, but we now also allow
# anything hashable as keys.
# Might fail if a list of passed items is also in the vocabulary.
# but I can't think of cases when this would happen, and what
# user expectations are.
try:
if items in self.items:
items = [items]
except TypeError:
pass
x = np.stack([self.norm_vectors[self.items[x]] for x in items])
result = self._threshold_batch(x,
batch_size,
threshold,
show_progressbar,
return_names)
# list call consumes the generator.
return [x[1:] for x in result] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize(vectors):
""" Normalize a matrix of row vectors to unit length. Contains a shortcut if there are no zero vectors in the matrix. If there are zero vectors, we do some indexing tricks to avoid dividing by 0. Parameters vectors : np.array The vectors to normalize. Returns ------- vectors : np.array The input vectors, normalized to unit length. """ |
if np.ndim(vectors) == 1:
norm = np.linalg.norm(vectors)
if norm == 0:
return np.zeros_like(vectors)
return vectors / norm
norm = np.linalg.norm(vectors, axis=1)
if np.any(norm == 0):
nonzero = norm > 0
result = np.zeros_like(vectors)
n = norm[nonzero]
p = vectors[nonzero]
result[nonzero] = p / n[:, None]
return result
else:
return vectors / norm[:, None] |
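A usage sketch, assuming normalize is exposed as a staticmethod on the Reach class, as the surrounding methods suggest.

import numpy as np

m = np.array([[3.0, 4.0],      # norm 5 -> scaled to [0.6, 0.8]
              [0.0, 0.0]])     # zero row is left as zeros instead of dividing by 0
print(Reach.normalize(m))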
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vector_similarity(self, vector, items):
"""Compute the similarity between a vector and a set of items.""" |
vector = self.normalize(vector)
items_vec = np.stack([self.norm_vectors[self.items[x]] for x in items])
return vector.dot(items_vec.T) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def similarity(self, i1, i2):
""" Compute the similarity between two sets of items. Parameters i1 : object The first set of items. i2 : object The second set of item. Returns ------- sim : array of floats An array of similarity scores between 1 and 0. """ |
try:
if i1 in self.items:
i1 = [i1]
except TypeError:
pass
try:
if i2 in self.items:
i2 = [i2]
except TypeError:
pass
i1_vec = np.stack([self.norm_vectors[self.items[x]] for x in i1])
i2_vec = np.stack([self.norm_vectors[self.items[x]] for x in i2])
return i1_vec.dot(i2_vec.T) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prune(self, wordlist):
""" Prune the current reach instance by removing items. Parameters wordlist : list of str A list of words to keep. Note that this wordlist need not include all words in the Reach instance. Any words which are in the wordlist, but not in the reach instance are ignored. """ |
# Remove duplicates
wordlist = set(wordlist).intersection(set(self.items.keys()))
indices = [self.items[w] for w in wordlist if w in self.items]
if self.unk_index is not None and self.unk_index not in indices:
raise ValueError("Your unknown item is not in your list of items. "
"Set it to None before pruning, or pass your "
"unknown item.")
self.vectors = self.vectors[indices]
self.norm_vectors = self.norm_vectors[indices]
self.items = {w: idx for idx, w in enumerate(wordlist)}
self.indices = {v: k for k, v in self.items.items()}
if self.unk_index is not None:
self.unk_index = self.items[wordlist[self.unk_index]] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, path, write_header=True):
""" Save the current vector space in word2vec format. Parameters path : str The path to save the vector file to. write_header : bool, optional, default True Whether to write a word2vec-style header as the first line of the file """ |
with open(path, 'w') as f:
if write_header:
f.write(u"{0} {1}\n".format(str(self.vectors.shape[0]),
str(self.vectors.shape[1])))
for i in range(len(self.items)):
w = self.indices[i]
vec = self.vectors[i]
f.write(u"{0} {1}\n".format(w,
" ".join([str(x) for x in vec]))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_fast_format(self, filename):
""" Save a reach instance in a fast format. The reach fast format stores the words and vectors of a Reach instance separately in a JSON and numpy format, respectively. Parameters filename : str The prefix to add to the saved filename. Note that this is not the real filename under which these items are stored. The words and unk_index are stored under "{filename}_words.json", and the numpy matrix is saved under "{filename}_vectors.npy". """ |
items, _ = zip(*sorted(self.items.items(), key=lambda x: x[1]))
items = {"items": items,
"unk_index": self.unk_index,
"name": self.name}
json.dump(items, open("{}_items.json".format(filename), 'w'))
np.save(open("{}_vectors.npy".format(filename), 'wb'), self.vectors) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_fast_format(filename):
""" Load a reach instance in fast format. As described above, the fast format stores the words and vectors of the Reach instance separately, and is drastically faster than loading from .txt files. Parameters filename : str The filename prefix from which to load. Note that this is not a real filepath as such, but a shared prefix for both files. In order for this to work, both {filename}_words.json and {filename}_vectors.npy should be present. """ |
words, unk_index, name, vectors = Reach._load_fast(filename)
return Reach(vectors, words, unk_index=unk_index, name=name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_fast(filename):
"""Sub for fast loader.""" |
it = json.load(open("{}_items.json".format(filename)))
words, unk_index, name = it["items"], it["unk_index"], it["name"]
vectors = np.load(open("{}_vectors.npy".format(filename), 'rb'))
return words, unk_index, name, vectors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def api_walk(uri, per_page=100, key="login"):
""" For a GitHub URI, walk all the pages until there's no more content """ |
page = 1
result = []
while True:
response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page))
if len(response) == 0:
break
else:
page += 1
for r in response:
if key == USER_LOGIN:
result.append(user_login(r))
else:
result.append(r[key])
return list(set(result)) |
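A hedged usage sketch; the URI is illustrative, and get_json, USER_LOGIN and user_login come from the surrounding module.

contributors = api_walk(
    "https://api.github.com/repos/octocat/Hello-World/contributors",
    per_page=100,
    key="login",
)
print(len(contributors), "unique contributor logins")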
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def api_get(uri, key=None):
""" Simple API endpoint get, return only the keys we care about """ |
response = get_json(uri)
if response:
if type(response) == list:
r = response[0]
elif type(response) == dict:
r = response
if type(r) == dict:
# Special nested value we care about
if key == USER_LOGIN:
return user_login(r)
if key in r:
return r[key] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reducejson(j):
""" """ |
authors = []
for key in j["data"]["repository"]["commitComments"]["edges"]:
authors.append(key["node"]["author"])
for key in j["data"]["repository"]["issues"]["nodes"]:
authors.append(key["author"])
for c in key["comments"]["nodes"]:
authors.append(c["author"])
for key in j["data"]["repository"]["pullRequests"]["edges"]:
authors.append(key["node"]["author"])
for c in key["node"]["comments"]["nodes"]:
authors.append(c["author"])
unique = list({v['login']:v for v in authors if v is not None}.values())
return unique |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def run(self):
'''Execute the expression and return a Result, which includes the exit
status and any captured output. Raise an exception if the status is
non-zero.'''
with spawn_output_reader() as (stdout_capture, stdout_thread):
with spawn_output_reader() as (stderr_capture, stderr_thread):
context = starter_iocontext(stdout_capture, stderr_capture)
status = self._exec(context)
stdout_bytes = stdout_thread.join()
stderr_bytes = stderr_thread.join()
result = Result(status.code, stdout_bytes, stderr_bytes)
if is_checked_error(status):
raise StatusError(result, self)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def start(self):
'''Equivalent to `run`, but instead of blocking the current thread,
return a WaitHandle that doesn't block until `wait` is called. This is
currently implemented with a simple background thread, though in theory
it could avoid using threads in most cases.'''
thread = ThreadWithReturn(self.run)
thread.start()
return WaitHandle(thread) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _exec(self, cmd, url, json_data=None):
""" execute a command at the device using the RESTful API :param str cmd: one of the REST commands, e.g. GET or POST :param str url: URL of the REST API the command should be applied to :param dict json_data: json data that should be attached to the command """ |
assert(cmd in ("GET", "POST", "PUT", "DELETE"))
assert(self.dev is not None)
if json_data is None:
json_data = {}
# add device address to the URL
url = url.format(self.dev["ipv4_internal"])
# set basic authentication
auth = HTTPBasicAuth("dev", self.dev["api_key"])
# execute HTTP request
res = None
if cmd == "GET":
res = self._local_session.session.get(
url, auth=auth, verify=False
)
elif cmd == "POST":
res = self._local_session.session.post(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "PUT":
res = self._local_session.session.put(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "DELETE":
res = self._local_session.session.delete(
url, auth=auth, verify=False
)
if res is not None:
# raise an exception on error
res.raise_for_status()
return res.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_widget_id(self, package_name):
""" returns widget_id for given package_name does not care about multiple widget ids at the moment, just picks the first :param str package_name: package to check for :return: id of first widget which belongs to the given package_name :rtype: str """ |
widget_id = ""
for app in self.get_apps_list():
if app.package == package_name:
widget_id = list(app.widgets.keys())[0]
return widget_id |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_user(self):
""" get the user details via the cloud """ |
log.debug("getting user information from LaMetric cloud...")
_, url = CLOUD_URLS["get_user"]
res = self._cloud_session.session.get(url)
if res is not None:
# raise an exception on error
res.raise_for_status()
return res.json() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_devices(self, force_reload=False, save_devices=True):
""" get all devices that are linked to the user, if the local device file is not existing the devices will be obtained from the LaMetric cloud, otherwise the local device file will be read. :param bool force_reload: When True, devices are read again from cloud :param bool save_devices: When True, devices obtained from the LaMetric cloud are stored locally """ |
if (
(not os.path.exists(self._devices_filename)) or
(force_reload is True)
):
# -- load devices from LaMetric cloud --
log.debug("getting devices from LaMetric cloud...")
_, url = CLOUD_URLS["get_devices"]
res = self._cloud_session.session.get(url)
if res is not None:
# raise an exception on error
res.raise_for_status()
# store obtained devices internally
self._devices = res.json()
if save_devices is True:
# save obtained devices to the local file
self.save_devices()
return self._devices
else:
# -- load devices from local file --
log.debug(
"getting devices from '{}'...".format(self._devices_filename)
)
return self.load_devices() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_devices(self):
""" save devices that have been obtained from LaMetric cloud to a local file """ |
log.debug("saving devices to ''...".format(self._devices_filename))
if self._devices != []:
with codecs.open(self._devices_filename, "wb", "utf-8") as f:
json.dump(self._devices, f) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_endpoint_map(self):
""" returns API version and endpoint map """ |
log.debug("getting end points...")
cmd, url = DEVICE_URLS["get_endpoint_map"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_devices(self):
""" load stored devices from the local file """ |
self._devices = []
if os.path.exists(self._devices_filename):
log.debug(
"loading devices from '{}'...".format(self._devices_filename)
)
with codecs.open(self._devices_filename, "rb", "utf-8") as f:
self._devices = json.load(f)
return self._devices |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_device_state(self):
""" returns the full device state """ |
log.debug("getting device state...")
cmd, url = DEVICE_URLS["get_device_state"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_notification( self, model, priority="warning", icon_type=None, lifetime=None ):
""" sends new notification to the device :param Model model: an instance of the Model class that should be used :param str priority: the priority of the notification [info, warning or critical] (default: warning) :param str icon_type: the icon type of the notification [none, info or alert] (default: None) :param int lifetime: the lifetime of the notification in ms (default: 2 min) """ |
assert(priority in ("info", "warning", "critical"))
assert(icon_type in (None, "none", "info", "alert"))
assert((lifetime is None) or (lifetime > 0))
log.debug("sending notification...")
cmd, url = DEVICE_URLS["send_notification"]
json_data = {"model": model.json(), "priority": priority}
if icon_type is not None:
json_data["icon_type"] = icon_type
if lifetime is not None:
json_data["lifetime"] = lifetime
return self._exec(cmd, url, json_data=json_data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_notifications(self):
""" returns the list of all notifications in queue """ |
log.debug("getting notifications in queue...")
cmd, url = DEVICE_URLS["get_notifications_queue"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_notification(self, notification_id):
""" returns a specific notification by given id :param str notification_id: the ID of the notification """ |
log.debug("getting notification '{}'...".format(notification_id))
cmd, url = DEVICE_URLS["get_notification"]
return self._exec(cmd, url.replace(":id", notification_id)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_display(self):
""" returns information about the display, including brightness, screensaver etc. """ |
log.debug("getting display information...")
cmd, url = DEVICE_URLS["get_display"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_screensaver( self, mode, is_mode_enabled, start_time=None, end_time=None, is_screensaver_enabled=True ):
""" set the display's screensaver mode :param str mode: mode of the screensaver [when_dark, time_based] :param bool is_mode_enabled: specifies if mode is enabled or disabled :param str start_time: start time, only used in time_based mode (format: %H:%M:%S) :param str end_time: end time, only used in time_based mode (format: %H:%M:%S) :param bool is_screensaver_enabled: is overall screensaver turned on overrules mode specific settings """ |
assert(mode in ("when_dark", "time_based"))
log.debug("setting screensaver to '{}'...".format(mode))
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"screensaver": {
"enabled": is_screensaver_enabled,
"mode": mode,
"mode_params": {
"enabled": is_mode_enabled
},
}
}
if mode == "time_based":
# TODO: add time checks
assert((start_time is not None) and (end_time is not None))
json_data["screensaver"]["mode_params"]["start_time"] = start_time
json_data["screensaver"]["mode_params"]["end_time"] = end_time
return self._exec(cmd, url, json_data=json_data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_volume(self):
""" returns the current volume """ |
log.debug("getting volumne...")
cmd, url = DEVICE_URLS["get_volume"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_volume(self, volume=50):
""" allows to change the volume :param int volume: volume to be set for the current device [0..100] (default: 50) """ |
assert(volume in range(101))
log.debug("setting volume...")
cmd, url = DEVICE_URLS["set_volume"]
json_data = {
"volume": volume,
}
return self._exec(cmd, url, json_data=json_data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bluetooth_state(self):
""" returns the bluetooth state """ |
log.debug("getting bluetooth state...")
cmd, url = DEVICE_URLS["get_bluetooth_state"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_wifi_state(self):
""" returns the current Wi-Fi state the device is connected to """ |
log.debug("getting wifi state...")
cmd, url = DEVICE_URLS["get_wifi_state"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_apps_list(self):
""" gets installed apps and puts them into the available_apps list """ |
log.debug("getting apps and setting them in the internal app list...")
cmd, url = DEVICE_URLS["get_apps_list"]
result = self._exec(cmd, url)
self.available_apps = [
AppModel(result[app])
for app in result
] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def switch_to_app(self, package):
""" activates an app that is specified by package. Selects the first app it finds in the app list :param package: name of package/app :type package: str :return: None :rtype: None """ |
log.debug("switching to app '{}'...".format(package))
cmd, url = DEVICE_URLS["switch_to_app"]
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
self.result = self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def switch_to_next_app(self):
""" switches to the next app """ |
log.debug("switching to next app...")
cmd, url = DEVICE_URLS["switch_to_next_app"]
self.result = self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def activate_widget(self, package):
""" activate the widget of the given package :param str package: name of the package """ |
cmd, url = DEVICE_URLS["activate_widget"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
self.result = self._exec(cmd, url) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _app_exec(self, package, action, params=None):
""" meta method for all interactions with apps :param package: name of package/app :type package: str :param action: the action to be executed :type action: str :param params: optional parameters for this action :type params: dict :return: None :rtype: None """ |
# get list of possible commands from app.actions
allowed_commands = []
for app in self.get_apps_list():
if app.package == package:
allowed_commands = list(app.actions.keys())
break
# check if action is in this list
assert(action in allowed_commands)
cmd, url = DEVICE_URLS["do_action"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
json_data = {"id": action}
if params is not None:
json_data["params"] = params
self.result = self._exec(cmd, url, json_data=json_data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def alarm_set(self, time, wake_with_radio=False):
""" set the alarm clock :param str time: time of the alarm (format: %H:%M:%S) :param bool wake_with_radio: if True, radio will be used for the alarm instead of beep sound """ |
# TODO: check for correct time format
log.debug("alarm => set...")
params = {
"enabled": True,
"time": time,
"wake_with_radio": wake_with_radio
}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def alarm_disable(self):
""" disable the alarm """ |
log.debug("alarm => disable...")
params = {"enabled": False}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def countdown_set(self, duration, start_now):
""" set the countdown :param str duration: :param str start_now: """ |
log.debug("countdown => set...")
params = {'duration': duration, 'start_now': start_now}
self._app_exec(
"com.lametric.countdown", "countdown.configure", params
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def action(self, includes: dict, variables: dict) -> tuple:
"""
Call external script.

:param includes: testcase's includes
:param variables: variables
:return: script's output
""" |
json_args = fill_template_str(json.dumps(self.data), variables)
p = subprocess.Popen([self.module, json_args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if p.wait() == 0:
out = p.stdout.read().decode()
debug(out)
return variables, json.loads(out)
else:
out = p.stdout.read().decode()
warning(out)
raise Exception('Execution failed.') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_credentials(self, client_id=None, client_secret=None):
""" set given credentials and reset the session """ |
self._client_id = client_id
self._client_secret = client_secret
# make sure to reset session due to credential change
self._session = None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_session(self, get_token=True):
""" init a new oauth2 session that is required to access the cloud :param bool get_token: if True, a token will be obtained, after the session has been created """ |
if (self._client_id is None) or (self._client_secret is None):
sys.exit(
"Please make sure to set the client id and client secret "
"via the constructor, the environment variables or the config "
"file; otherwise, the LaMetric cloud cannot be accessed. "
"Abort!"
)
self._session = OAuth2Session(
client=BackendApplicationClient(client_id=self._client_id)
)
if get_token is True:
# get oauth token
self.get_token() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_token(self):
""" get current oauth token """ |
self.token = self._session.fetch_token(
token_url=CLOUD_URLS["get_token"][1],
client_id=self._client_id,
client_secret=self._client_secret
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simple_input(self, variables):
""" Use this method to get simple input as python object, with all templates filled in :param variables: :return: python object """ |
json_args = fill_template_str(json.dumps(self.data), variables)
return try_get_objects(json_args) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(self):
""" creates an empty configuration file """ |
if not self.exists():
# create new empyt config file based on template
self.config.add_section("lametric")
self.config.set("lametric", "client_id", "")
self.config.set("lametric", "client_secret", "")
# save new config
self.save()
# stop here, so user can set his config
sys.exit(
"An empty config file '{}' has been created. Please set "
"the corresponding LaMetric API credentials.".format(
self._filename
)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self):
""" save current config to the file """ |
with open(self._filename, "w") as f:
self.config.write(f) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rate_limit_wait(self):
""" Sleep if rate limiting is required based on current time and last query. """ |
if self._rate_limit_dt and self._last_query is not None:
dt = time.time() - self._last_query
wait = self._rate_limit_dt - dt
if wait > 0:
time.sleep(wait) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def route(self, arg, destination=None, waypoints=None, raw=False, **kwargs):
""" Query a route. route(locations):
points can be - a sequence of locations - a Shapely LineString route(origin, destination, waypoints=None) - origin and destination are a single destination - waypoints are the points to be inserted between the origin and destination If waypoints is specified, destination must also be specified Each location can be: - string (will be geocoded by the routing provider. Not all providers accept this as input) - (longitude, latitude) sequence (tuple, list, numpy array, etc.) - Shapely Point with x as longitude, y as latitude Additional parameters raw : bool, default False Return the raw json dict response from the service Returns ------- list of Route objects If raw is True, returns the json dict instead of converting to Route objects Examples -------- mq = directions.Mapquest(key) routes = mq.route('1 magazine st. cambridge, ma', 'south station boston, ma') routes = mq.route('1 magazine st. cambridge, ma', 'south station boston, ma', waypoints=['700 commonwealth ave. boston, ma']) # Uses each point in the line as a waypoint. There is a limit to the # number of waypoints for each service. Consult the docs. routes = mq.route(line) # Feel free to mix different location types routes = mq.route(line.coords[0], 'south station boston, ma', waypoints=[(-71.103972, 42.349324)]) """ |
points = _parse_points(arg, destination, waypoints)
if len(points) < 2:
raise ValueError('You must specify at least 2 points')
self.rate_limit_wait()
data = self.raw_query(points, **kwargs)
self._last_query = time.time()
if raw:
return data
return self.format_output(data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def discover_upnp_devices( self, st="upnp:rootdevice", timeout=2, mx=1, retries=1 ):
""" sends an SSDP discovery packet to the network and collects the devices that replies to it. A dictionary is returned using the devices unique usn as key """ |
# prepare UDP socket to transfer the SSDP packets
s = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
s.settimeout(timeout)
# prepare SSDP discover message
msg = SSDPDiscoveryMessage(mx=mx, st=st)
# try to get devices with multiple retries in case of failure
devices = {}
for _ in range(retries):
# send SSDP discovery message
s.sendto(msg.bytes, SSDP_MULTICAST_ADDR)
devices = {}
try:
while True:
# parse response and store it in dict
r = SSDPResponse(s.recvfrom(65507))
devices[r.usn] = r
except socket.timeout:
break
return devices |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filtered_devices( self, model_name, device_types="upnp:rootdevice", timeout=2 ):
""" returns a dict of devices that contain the given model name """ |
# get list of all UPNP devices in the network
upnp_devices = self.discover_upnp_devices(st=device_types)
# go through all UPNP devices and filter wanted devices
filtered_devices = collections.defaultdict(dict)
for dev in upnp_devices.values():
try:
# download XML file with information about the device
# from the device's location
r = requests.get(dev.location, timeout=timeout)
if r.status_code == requests.codes.ok:
# parse returned XML
root = ET.fromstring(r.text)
# add shortcut for XML namespace to access sub nodes
ns = {"upnp": "urn:schemas-upnp-org:device-1-0"}
# get device element
device = root.find("upnp:device", ns)
if model_name in device.find(
"upnp:modelName", ns
).text:
# model name is wanted => add to list
# get unique UDN of the device that is used as key
udn = device.find("upnp:UDN", ns).text
# add url base
url_base = root.find("upnp:URLBase", ns)
if url_base is not None:
filtered_devices[udn][
"URLBase"
] = url_base.text
# add interesting device attributes and
# use unique UDN as key
for attr in (
"deviceType", "friendlyName", "manufacturer",
"manufacturerURL", "modelDescription",
"modelName", "modelNumber"
):
el = device.find("upnp:%s" % attr, ns)
if el is not None:
filtered_devices[udn][
attr
] = el.text.strip()
except ET.ParseError:
# just skip devices that are invalid xml
pass
except requests.exceptions.ConnectTimeout:
# just skip devices that are not replying in time
print("Timeout for '%s'. Skipping." % dev.location)
return filtered_devices |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lazy_map(data_processor, data_generator, n_cpus=1, stepsize=None):
"""A variant of multiprocessing.Pool.map that supports lazy evaluation As with the regular multiprocessing.Pool.map, the processes are spawned off asynchronously while the results are returned in order. In contrast to multiprocessing.Pool.map, the iterator (here: data_generator) is not consumed at once but evaluated lazily which is useful if the iterator (for example, a generator) contains objects with a large memory footprint. Parameters ========== data_processor : func A processing function that is applied to objects in `data_generator` data_generator : iterator or generator A python iterator or generator that yields objects to be fed into the `data_processor` function for processing. n_cpus=1 : int (default: 1) Number of processes to run in parallel. - If `n_cpus` > 0, the specified number of CPUs will be used. - If `n_cpus=0`, all available CPUs will be used. - If `n_cpus` < 0, all available CPUs - `n_cpus` will be used. stepsize : int or None (default: None) The number of items to fetch from the iterator to pass on to the workers at a time. If `stepsize=None` (default), the stepsize size will be set equal to `n_cpus`. Returns ========= list : A Python list containing the results returned by the `data_processor` function when called on all elements in yielded by the `data_generator` in sorted order. Note that the last list may contain fewer items if the number of elements in `data_generator` is not evenly divisible by `stepsize`. """ |
if not n_cpus:
n_cpus = mp.cpu_count()
elif n_cpus < 0:
n_cpus = mp.cpu_count() - n_cpus
if stepsize is None:
stepsize = n_cpus
results = []
with mp.Pool(processes=n_cpus) as p:
while True:
r = p.map(data_processor, islice(data_generator, stepsize))
if r:
results.extend(r)
else:
break
return results |
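A small usage sketch (assuming lazy_map is importable from this module). The worker function must live at module level so it can be pickled, and on platforms that use the "spawn" start method the call belongs under a main guard.

def square(x):
    return x * x

if __name__ == "__main__":
    gen = (i for i in range(10))              # lazily evaluated input
    print(lazy_map(square, gen, n_cpus=2))    # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]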
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lazy_imap(data_processor, data_generator, n_cpus=1, stepsize=None):
"""A variant of multiprocessing.Pool.imap that supports lazy evaluation As with the regular multiprocessing.Pool.imap, the processes are spawned off asynchronously while the results are returned in order. In contrast to multiprocessing.Pool.imap, the iterator (here: data_generator) is not consumed at once but evaluated lazily which is useful if the iterator (for example, a generator) contains objects with a large memory footprint. Parameters ========== data_processor : func A processing function that is applied to objects in `data_generator` data_generator : iterator or generator A python iterator or generator that yields objects to be fed into the `data_processor` function for processing. n_cpus=1 : int (default: 1) Number of processes to run in parallel. - If `n_cpus` > 0, the specified number of CPUs will be used. - If `n_cpus=0`, all available CPUs will be used. - If `n_cpus` < 0, all available CPUs - `n_cpus` will be used. stepsize : int or None (default: None) The number of items to fetch from the iterator to pass on to the workers at a time. If `stepsize=None` (default), the stepsize size will be set equal to `n_cpus`. Returns ========= list : A Python list containing the *n* results returned by the `data_processor` function when called on elements by the `data_generator` in sorted order; *n* is equal to the size of `stepsize`. If `stepsize` is None, *n* is equal to `n_cpus`. """ |
if not n_cpus:
n_cpus = mp.cpu_count()
elif n_cpus < 0:
n_cpus = mp.cpu_count() - n_cpus
if stepsize is None:
stepsize = n_cpus
with mp.Pool(processes=n_cpus) as p:
while True:
r = p.map(data_processor, islice(data_generator, stepsize))
if r:
yield r
else:
break |
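The generator variant yields chunks of at most stepsize results as they become ready (same square helper and main guard as in the previous sketch).

if __name__ == "__main__":
    for chunk in lazy_imap(square, (i for i in range(10)), n_cpus=2, stepsize=4):
        print(chunk)   # [0, 1, 4, 9], then [16, 25, 36, 49], then [64, 81]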
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_variables(func):
""" Use this decorator on Step.action implementation. Your action method should always return variables, or both variables and output. This decorator will update variables with output. """ |
@wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if isinstance(result, tuple):
return self.process_register(result[0], result[1])
else:
return self.process_register(result)
return wrapper |
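A hedged sketch of how the decorator is meant to be applied; Step and its process_register method are assumed from the surrounding framework, and EchoStep is a made-up example class.

class EchoStep(Step):
    @update_variables
    def action(self, includes, variables):
        output = {"echoed": True}
        # returning (variables, output) lets the decorator merge output
        # into variables via process_register
        return variables, output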
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_properties(self, data):
""" set the properties of the app model by the given data dict """ |
for property in data.keys():
if property in vars(self):
setattr(self, property, data[property]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_long_description():
""" get long description from README.rst file """ |
with codecs.open(os.path.join(here, "README.rst"), "r", "utf-8") as f:
return f.read() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def roll_call_handler(service, action_type, payload, props, **kwds):
""" This action handler responds to the "roll call" emitted by the api gateway when it is brought up with the normal summary produced by the service. """ |
# if the action type corresponds to a roll call
if action_type == roll_call_type():
# then announce the service
await service.announce() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def flexible_api_handler(service, action_type, payload, props, **kwds):
""" This query handler builds the dynamic picture of availible services. """ |
# if the action represents a new service
if action_type == intialize_service_action():
# treat the payload like json if it's a string
model = json.loads(payload) if isinstance(payload, str) else payload
# the list of known models
models = service._external_service_data['models']
# the list of known connections
connections = service._external_service_data['connections']
# the list of known mutations
mutations = service._external_service_data['mutations']
# if the model is a connection
if 'connection' in model:
# if we haven't seen the connection before
if not [conn for conn in connections if conn['name'] == model['name']]:
# add it to the list
connections.append(model)
# or if there are registered fields
elif 'fields' in model and not [mod for mod in models if mod['name'] == model['name']]:
# add it to the model list
models.append(model)
# the service could provide mutations as well as affect the topology
if 'mutations' in model:
# go over each announced mutation
for mutation in model['mutations']:
# if there isn't a mutation by the same name in the local cache
if not [mut for mut in mutations if mut['name'] == mutation['name']]:
# add it to the local cache
mutations.append(mutation)
# if there are models
if models:
# create a new schema corresponding to the models and connections
service.schema = generate_api_schema(
models=models,
connections=connections,
mutations=mutations,
) |
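For orientation, a hedged sketch of the payload shapes this handler distinguishes; the keys beyond 'name', 'fields', 'connection', and 'mutations' (and all field values) are illustrative assumptions:
# a model announcement: 'fields' describe the model, 'mutations' are optional
model_announcement = {
    'name': 'recipe',
    'fields': [
        {'name': 'id', 'type': 'ID'},
        {'name': 'title', 'type': 'String'},
    ],
    'mutations': [],
}
# a connection announcement carries a 'connection' key describing the services
# it joins, and is tracked in the separate connections list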
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_order_by(model, order_by):
""" This function figures out the list of orderings for the given model and argument. Args: model (nautilus.BaseModel):
The model to compute ordering against order_by (list of str):
the list of fields to order_by. If the field starts with a `+` then the order is ascending, if `-` descending, if no character precedes the field, the ordering is assumed to be ascending. Returns: (list of filters):
the model filters to apply to the query """ |
# the list of filters for the models
out = []
# for each attribute we have to order by
for key in order_by:
# remove any whitespace
key = key.strip()
# if the key starts with a plus
if key.startswith("+"):
# add the ascending filter to the list
out.append(getattr(model, key[1:]))
# otherwise if the key starts with a minus
elif key.startswith("-"):
# add the descending filter to the list
out.append(getattr(model, key[1:]).desc())
# otherwise the key needs the default filter
else:
# add the default filter to the list
out.append(getattr(model, key))
# return the list of filters
return out |
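A hedged usage sketch against a hypothetical Recipe model; the .desc(), .select(), and .order_by() calls follow the peewee-style API implied by the code above:
# ascending on title, descending on created_at
orderings = _parse_order_by(Recipe, ['+title', '-created_at'])
query = Recipe.select().order_by(*orderings)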
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def model(model_names):
""" Creates the example directory structure necessary for a model service. """ |
# for each model name we need to create
for model_name in model_names:
# the template context
context = {
'name': model_name,
}
# render the model template
render_template(template='common', context=context)
render_template(template='model', context=context) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connection(model_connections):
""" Creates the example directory structure necessary for a connection service. """ |
# for each connection group
for connection_str in model_connections:
# the services to connect
services = connection_str.split(':')
services.sort()
service_name = ''.join([service.title() for service in services])
# the template context
context = {
# make sure the first letter is lowercase
'name': service_name[0].lower() + service_name[1:],
'services': services,
}
render_template(template='common', context=context)
render_template(template='connection', context=context) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_model_string(model):
""" This function returns the conventional action designator for a given model. """ |
name = model if isinstance(model, str) else model.__name__
return normalize_string(name) |
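A quick illustration; the exact output depends on normalize_string, which is assumed here to lower-case the leading character of the class name:
class RecipeBook:
    pass

# both calls presumably yield the same normalized designator, e.g. 'recipeBook'
get_model_string(RecipeBook)
get_model_string('RecipeBook')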
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_native_type_dictionary(fields, respect_required=False, wrap_field=True, name=''):
""" This function takes a list of type summaries and builds a dictionary with native representations of each entry. Useful for dynamically building native class records from summaries. """ |
# a place to start when building the input field attributes
input_fields = {}
# go over every input in the summary
for field in fields:
field_name = name + field['name']
field_type = field['type']
# if the type field is a string
if isinstance(field_type, str):
# compute the native api type for the field
field_type = convert_typestring_to_api_native(field_type)(
# required=respect_required and field['required']
)
# add an entry in the attributes
input_fields[field['name']] = field_type
# we could also be looking at a dictionary
elif isinstance(field_type, dict):
object_fields = field_type['fields']
# add the dictionary to the parent as a graphql object type
input_fields[field['name']] = graphql_type_from_summary(
summary={
'name': field_name+"ArgType",
'fields': object_fields
}
)
# if we are supposed to wrap the object in a field
if wrap_field:
# then wrap the value we just added
input_fields[field['name']] = graphene.Field(input_fields[field['name']])
# we're done
return input_fields |
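A hedged sketch of the summary shape this function consumes, based on the branches above; the field names and types are made up for illustration:
fields_summary = [
    {'name': 'id', 'type': 'ID'},
    {'name': 'title', 'type': 'String'},
    # a nested dictionary describes an object-valued field
    {'name': 'author', 'type': {'fields': [
        {'name': 'name', 'type': 'String'},
    ]}},
]
attributes = build_native_type_dictionary(fields_summary, name='Recipe')
# 'id' and 'title' map to native scalar types; 'author' maps to an object type
# summarized under the name 'RecipeauthorArgType', wrapped in a graphene Field
# because wrap_field defaults to True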
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def summarize_crud_mutation(method, model, isAsync=False):
""" This function provides the standard form for crud mutations. """ |
# create the appropriate action type
action_type = get_crud_action(method=method, model=model)
# the name of the mutation
name = crud_mutation_name(model=model, action=method)
# a mapping of methods to input factories
input_map = {
'create': create_mutation_inputs,
'update': update_mutation_inputs,
'delete': delete_mutation_inputs,
}
# a mapping of methods to output factories
output_map = {
'create': create_mutation_outputs,
'update': update_mutation_outputs,
'delete': delete_mutation_outputs,
}
# the inputs for the mutation
inputs = input_map[method](model)
# the mutation outputs
outputs = output_map[method](model)
# return the appropriate summary
return summarize_mutation(
mutation_name=name,
event=action_type,
isAsync=isAsync,
inputs=inputs,
outputs=outputs
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
""" This function starts the brokers interaction with the kafka stream """ |
self.loop.run_until_complete(self._consumer.start())
self.loop.run_until_complete(self._producer.start())
self._consumer_task = self.loop.create_task(self._consume_event_callback()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
""" This method stops the brokers interaction with the kafka stream """ |
self.loop.run_until_complete(self._consumer.stop())
self.loop.run_until_complete(self._producer.stop())
# attempt
try:
# to cancel the service
self._consumer_task.cancel()
# if there was no service
except AttributeError:
# keep going
pass |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def send(self, payload='', action_type='', channel=None, **kwds):
""" This method sends a message over the kafka stream. """ |
# use a custom channel if one was provided
channel = channel or self.producer_channel
# serialize the action into the conventional message format
message = serialize_action(action_type=action_type, payload=payload, **kwds)
# send the message
return await self._producer.send(channel, message.encode()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_action(action_type, payload, **extra_fields):
""" This function returns the conventional form of the actions. """ |
action_dict = dict(
action_type=action_type,
payload=payload,
**extra_fields
)
# return a serializable version
return json.dumps(action_dict) |
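A quick usage sketch; the action type, payload, and extra field values are made up for illustration:
message = serialize_action(
    action_type='recipe.create.success',
    payload={'id': 1, 'title': 'Pancakes'},
    correlation_id='abc-123',
)
# message is a JSON string ready to be encoded and sent over the broker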
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fields_for_model(model):
""" This function returns the fields for a schema that matches the provided nautilus model. Args: model (nautilus.model.BaseModel):
The model to base the field list on Returns: (dict<field_name: str, graphqlType>):
A mapping of field names to graphql types """ |
# the attribute arguments (no filters)
args = {field.name.lower() : convert_peewee_field(field) \
for field in model.fields()}
# use the field arguments, without the segments
return args |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_connection_model(service):
""" Create an SQL Alchemy table that connects the provides services """ |
# the services connected
services = service._services
# the mixins / base for the model
bases = (BaseModel,)
# the fields of the derived
attributes = {model_service_name(service): fields.CharField() for service in services}
# create an instance of base model with the right attributes
return type(BaseModel)(connection_service_name(service), bases, attributes) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_handler(Model, name=None, **kwds):
""" This factory returns an action handler that creates a new instance of the specified model when a create action is recieved, assuming the action follows nautilus convetions. Args: Model (nautilus.BaseModel):
The model to create when the action is received. Returns: function(action_type, payload):
The action handler for this model """ |
async def action_handler(service, action_type, payload, props, notify=True, **kwds):
# if the payload represents a new instance of `Model`
if action_type == get_crud_action('create', name or Model):
# print('handling create for ' + name or Model)
try:
# the props of the message
message_props = {}
# if there was a correlation id in the request
if 'correlation_id' in props:
# make sure it ends up in the reply
message_props['correlation_id'] = props['correlation_id']
# for each required field
for requirement in Model.required_fields():
# save the name of the field
field_name = requirement.name
# ensure the value is in the payload
# TODO: check all required fields rather than failing on the first
if not field_name in payload and field_name != 'id':
# yell loudly
raise ValueError(
"Required field not found in payload: %s" %field_name
)
# create a new model
new_model = Model(**payload)
# save the new model instance
new_model.save()
# if we need to tell someone about what happened
if notify:
# publish the success event
await service.event_broker.send(
payload=ModelSerializer().serialize(new_model),
action_type=change_action_status(action_type, success_status()),
**message_props
)
# if something goes wrong
except Exception as err:
# if we need to tell someone about what happened
if notify:
# publish the error as an event
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
# otherwise we aren't supposed to notify
else:
# raise the exception normally
raise err
# return the handler
return action_handler |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def _has_id(self, *args, **kwds):
""" Equality checks are overwitten to perform the actual check in a semantic way. """ |
# if there is only one positional argument
if len(args) == 1:
# parse the appropriate query
result = await parse_string(
self._query,
self.service.object_resolver,
self.service.connection_resolver,
self.service.mutation_resolver,
obey_auth=False
)
# go to the bottom of the result for the list of matching ids
return self._find_id(result['data'], args[0])
# otherwise
else:
# treat the attribute like a normal filter
return await self._has_id(**kwds)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_id(self, result, uid):
""" This method performs a depth-first search for the given uid in the dictionary of results. """ |
# if the result is a list
if isinstance(result, list):
# if the list has a valid entry
if any([self._find_id(value, uid) for value in result]):
# then we're done
return True
# otherwise results could be dictionaries
if isinstance(result, dict):
# the children of the result that are lists
list_children = [value for value in result.values() if isinstance(value, list)]
# go to every value that is a list
for value in list_children:
# if the value is a match
if self._find_id(value, uid):
# we're done
return True
# the children of the result that are dicts
dict_children = [value for value in result.values() if isinstance(value, dict)]
# perform the check on every child that is a dict
for value in dict_children:
# if the child is a match
if self._find_id(value, uid):
# we're done
return True
# if there are no values that are lists and there is an id key
if not list_children and not dict_children and 'id' in result:
# the value of the remote id field
result_id = result['id']
# we've found a match if the id field matches (cast to match type)
return result_id == type(result_id)(uid)
# we didn't find the result
return False |
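A hedged illustration of the traversal on a GraphQL-style result dictionary; the field names are made up for illustration:
# a typical nested result the search walks through
result = {
    'allRecipes': [
        {'id': '1', 'title': 'Pancakes'},
        {'id': '2', 'title': 'Waffles'},
    ]
}
# self._find_id(result, 2) returns True: the leaf {'id': '2', ...} has no list
# or dict children, and '2' == str(2) once the uid is cast to the id's type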
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_before(self):
"""Returns a builder inserting a new block before the current block""" |
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_after(self):
"""Returns a builder inserting a new block after the current block""" |
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx+1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def comment(self, text, comment_prefix='#'):
"""Creates a comment block Args: text (str):
content of comment without # comment_prefix (str):
character indicating start of comment Returns: self for chaining """ |
comment = Comment(self._container)
if not text.startswith(comment_prefix):
text = "{} {}".format(comment_prefix, text)
if not text.endswith('\n'):
text = "{}{}".format(text, '\n')
comment.add_line(text)
self._container.structure.insert(self._idx, comment)
self._idx += 1
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def section(self, section):
"""Creates a section block Args: section (str or :class:`Section`):
name of section or object Returns: self for chaining """ |
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
# create a new section
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def space(self, newlines=1):
"""Creates a vertical space of newlines Args: newlines (int):
number of empty lines Returns: self for chaining """ |
space = Space()
for line in range(newlines):
space.add_line('\n')
self._container.structure.insert(self._idx, space)
self._idx += 1
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def option(self, key, value=None, **kwargs):
"""Creates a new option inside a section Args: key (str):
key of the option value (str or None):
value of the option **kwargs: are passed to the constructor of :class:`Option` Returns: self for chaining """ |
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_comment(self, line):
"""Add a Comment object to the section Used during initial parsing mainly Args: line (str):
one line in the comment """ |
if not isinstance(self.last_item, Comment):
comment = Comment(self._structure)
self._structure.append(comment)
self.last_item.add_line(line)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_space(self, line):
"""Add a Space object to the section Used during initial parsing mainly Args: line (str):
one line that defines the space, possibly whitespace """
if not isinstance(self.last_item, Space):
space = Space(self._structure)
self._structure.append(space)
self.last_item.add_line(line)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, option, value=None):
"""Set an option for chaining. Args: option (str):
option name value (str):
value, default None """ |
option = self._container.optionxform(option)
if option in self.options():
self.__getitem__(option).value = value
else:
self.__setitem__(option, value)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, filename, encoding=None):
"""Read and parse a filename. Args: filename (str):
path to file encoding (str):
encoding of file, default None """ |
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
self._filename = os.path.abspath(filename) |
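A short end-to-end sketch combining read with the builder methods above, assuming a setup.cfg-style file with a [metadata] section exists; add_after is used as a property and update_file() writes the changes back, as in the configupdater library this code mirrors:
updater = ConfigUpdater()
updater.read('setup.cfg')
# change an existing option in place
updater['metadata'].set('version', '1.2.3')
# insert a new section after metadata, separated by a blank line
updater['metadata'].add_after.space().section('options')
updater['options'].set('packages', 'find:')
updater.update_file()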