text_prompt (string, 100–17.7k chars, nullable) | code_prompt (string, 7–9.86k chars, nullable)
<SYSTEM_TASK:>
Dumps the entire index into a json file.
<END_TASK>
<USER_TASK:>
Description:
def dump(self, path: str, file_name: str = "", **kwargs: dict):
"""
Dumps the entire index into a json file.
:param path: The path to the directory where the dump should be stored.
:param file_name: Name of the file the dump should be stored in. If empty, the index name is used.
:param kwargs: Keyword arguments for the json converter (e.g. indent=4, ensure_ascii=False).
""" |
export = list()
for results in self.scroll():
export.extend(results)
if not path.endswith('/'):
path += '/'
if file_name == '':
file_name = self.index
if not file_name.endswith('.json'):
file_name += '.json'
store = path + file_name
with open(store, 'w') as fp:
json.dump(export, fp, **kwargs)
logging.info("Extracted %s records from the index %s and stored them in %s/%s.", len(export), self.index, path, file_name) |
<SYSTEM_TASK:>
parse command line options and either launch some configuration dialog or start an instance of _MainLoop as a daemon
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
parse command line options and either launch some configuration dialog or start an instance of _MainLoop as a daemon
""" |
(options, _) = _parse_args()
if options.change_password:
c.keyring_set_password(c["username"])
sys.exit(0)
if options.select:
courses = client.get_courses()
c.selection_dialog(courses)
c.save()
sys.exit(0)
if options.stop:
os.system("kill -2 `cat ~/.studdp/studdp.pid`")
sys.exit(0)
task = _MainLoop(options.daemonize, options.update_courses)
if options.daemonize:
log.info("daemonizing...")
with daemon.DaemonContext(working_directory=".", pidfile=PIDLockFile(PID_FILE)):
# we have to create a new logger in the daemon context
handler = logging.FileHandler(LOG_PATH)
handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
log.addHandler(handler)
task()
else:
task() |
<SYSTEM_TASK:>
Takes data and returns a signature
<END_TASK>
<USER_TASK:>
Description:
def generate(self, signature_data):
"""Takes data and returns a signature
:arg dict signature_data: data to use to generate a signature
:returns: ``Result`` instance
""" |
result = Result()
for rule in self.pipeline:
rule_name = rule.__class__.__name__
try:
if rule.predicate(signature_data, result):
rule.action(signature_data, result)
except Exception as exc:
if self.error_handler:
self.error_handler(
signature_data,
exc_info=sys.exc_info(),
extra={'rule': rule_name}
)
result.info(rule_name, 'Rule failed: %s', exc)
return result |
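A minimal sketch of a rule object matching the predicate/action protocol the loop above calls; the class name and the `signature` attribute set on the result are illustrative assumptions, not the library's real rules:

class CopySignature:
    def predicate(self, signature_data, result):
        # run only when the input already carries a signature
        return 'signature' in signature_data

    def action(self, signature_data, result):
        # illustrative: stash the value on the result object
        result.signature = signature_data['signature']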
<SYSTEM_TASK:>
Helper function to lift str -> bool maps used by aiger
<END_TASK>
<USER_TASK:>
Description:
def _unblast(name2vals, name_map):
"""Helper function to lift str -> bool maps used by aiger
to the word level. Dual of the `_blast` function.""" |
def _collect(names):
return tuple(name2vals[n] for n in names)
return {bvname: _collect(names) for bvname, names in name_map} |
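A self-contained trace of what _unblast computes, with hand-built inputs (aiger itself would supply these; note that name_map is an iterable of (word, bit-names) pairs):

name2vals = {'x[0]': True, 'x[1]': False, 'y[0]': True}
name_map = [('x', ('x[0]', 'x[1]')), ('y', ('y[0]',))]
print({bv: tuple(name2vals[n] for n in names) for bv, names in name_map})
# {'x': (True, False), 'y': (True,)}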
<SYSTEM_TASK:>
Generates a logger instance from the singleton
<END_TASK>
<USER_TASK:>
Description:
def create_logger(self):
"""Generates a logger instance from the singleton""" |
name = "bors"
if hasattr(self, "name"):
name = self.name
self.log = logging.getLogger(name)
try:
lvl = self.conf.get_log_level()
except AttributeError:
# default to "INFO" so getattr() below never receives None
lvl = self.context.get("log_level", "INFO")
self.log.setLevel(getattr(logging, lvl, logging.INFO)) |
<SYSTEM_TASK:>
gets class from name and data, sets base level attrs
<END_TASK>
<USER_TASK:>
Description:
def get_cls(project_name, project_data):
"""
gets class from name and data, sets base level attrs
defaults to facsimile.base.Facsimile
""" |
if project_name:
cls = getattr(facsimile.base, project_data.get('class', 'Facsimile'))
cls.name = project_name
else:
cls = facsimile.base.Facsimile
return cls |
<SYSTEM_TASK:>
Check that the entered recaptcha data is correct
<END_TASK>
<USER_TASK:>
Description:
def check_recaptcha(view_func):
"""Chech that the entered recaptcha data is correct""" |
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
request.recaptcha_is_valid = None
if request.method == 'POST':
recaptcha_response = request.POST.get('g-recaptcha-response')
data = {
'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
'response': recaptcha_response
}
r = requests.post(
'https://www.google.com/recaptcha/api/siteverify',
data=data
)
result = r.json()
if result['success']:
request.recaptcha_is_valid = True
else:
request.recaptcha_is_valid = False
error_message = 'Invalid reCAPTCHA. Please try again. '
error_message += str(result['error-codes'])
print(error_message)
return view_func(request, *args, **kwargs)
return _wrapped_view |
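A hedged usage sketch for the decorator, assuming a Django project with GOOGLE_RECAPTCHA_SECRET_KEY in settings; the view name, message, and template are illustrative:

from django.contrib import messages
from django.shortcuts import render

@check_recaptcha
def comment(request):
    # the decorator has already set request.recaptcha_is_valid
    if request.method == 'POST' and request.recaptcha_is_valid:
        messages.success(request, 'Comment posted.')
    return render(request, 'comment.html')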
<SYSTEM_TASK:>
Execute the strategies on the given context
<END_TASK>
<USER_TASK:>
Description:
def execute(self, context):
"""Execute the strategies on the given context""" |
for ware in self.middleware:
ware.premessage(context)
context = ware.bind(context)
ware.postmessage(context)
return context |
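A sketch of an object conforming to the middleware protocol the loop above calls (premessage/bind/postmessage); the print bodies are illustrative:

class LoggingWare:
    def premessage(self, context):
        print('before:', context)

    def bind(self, context):
        return context  # must hand back the (possibly replaced) context

    def postmessage(self, context):
        print('after:', context)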
<SYSTEM_TASK:>
Perform cleanup! We're goin' down!!!
<END_TASK>
<USER_TASK:>
Description:
def shutdown(self):
"""Perform cleanup! We're goin' down!!!""" |
for ware in self.middleware:
ware.preshutdown()
self._shutdown()
ware.postshutdown() |
<SYSTEM_TASK:>
Generates documentation for signature generation pipeline
<END_TASK>
<USER_TASK:>
Description:
def main(argv=None):
"""Generates documentation for signature generation pipeline""" |
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
'pipeline',
help='Python dotted path to rules pipeline to document'
)
parser.add_argument('output', help='output file')
if argv is None:
args = parser.parse_args()
else:
args = parser.parse_args(argv)
print('Generating documentation for %s in %s...' % (args.pipeline, args.output))
rules = import_rules(args.pipeline)
with open(args.output, 'w') as fp:
fp.write('.. THIS IS AUTOGENERATED USING:\n')
fp.write(' \n')
fp.write(' %s\n' % (' '.join(sys.argv)))
fp.write(' \n')
fp.write('Signature generation rules pipeline\n')
fp.write('===================================\n')
fp.write('\n')
fp.write('\n')
fp.write(
'This is the signature generation pipeline defined at ``%s``:\n' %
args.pipeline
)
fp.write('\n')
for i, rule in enumerate(rules):
li = '%s. ' % (i + 1)
fp.write('%s%s\n' % (
li,
indent(get_doc(rule), ' ' * len(li))
))
fp.write('\n') |
<SYSTEM_TASK:>
This function is called by the Django API to specify how this object
<END_TASK>
<USER_TASK:>
Description:
def handle(self, *args, **options):
"""This function is called by the Django API to specify how this object
will be saved to the database.
""" |
taxonomy_id = options['taxonomy_id']
# Remove leading and trailing blank characters in "common_name"
# and "scientific_name
common_name = options['common_name'].strip()
scientific_name = options['scientific_name'].strip()
if common_name and scientific_name:
# A 'slug' is a label for an object in django, which only contains
# letters, numbers, underscores, and hyphens, thus making it URL-
# usable. The slugify method in django takes any string and
# converts it to this format. For more information, see:
# http://stackoverflow.com/questions/427102/what-is-a-slug-in-django
slug = slugify(scientific_name)
logger.info("Slug generated: %s", slug)
# If organism exists, update with passed parameters
try:
org = Organism.objects.get(taxonomy_id=taxonomy_id)
org.common_name = common_name
org.scientific_name = scientific_name
org.slug = slug
# If organism doesn't exist, construct an organism object
# (see organisms/models.py).
except Organism.DoesNotExist:
org = Organism(taxonomy_id=taxonomy_id,
common_name=common_name,
scientific_name=scientific_name,
slug=slug
)
org.save() # Save to the database.
else:
# Report an error if the user did not fill out all fields.
logger.error(
"Failed to add or update organism. "
"Please check that all fields are filled correctly."
) |
<SYSTEM_TASK:>
Creates a window of given resolution.
<END_TASK>
<USER_TASK:>
Description:
def init(resolution, pygame_flags=0, display_pos=(0, 0), interactive_mode=False):
"""Creates a window of given resolution.
:param resolution: the resolution of the windows as (width, height) in
pixels
:type resolution: tuple
:param pygame_flags: modify the creation of the window.
For further information see :ref:`creating_a_window`
:type pygame_flags: int
:param display_pos: determines the position on the desktop where the
window is created. In a multi monitor system this can be used to position
the window on a different monitor. E.g. the monitor to the right of the
main-monitor would be at position (1920, 0) if the main monitor has the
width 1920.
:type display_pos: tuple
:param interactive_mode: Will install a thread that empties the
event-queue every 100ms. This is necessary to be able to use the
display() function in an interactive console on windows systems.
If interactive_mode is set, init() will return a reference to the
background thread. This thread has a stop() method which can be used to
cancel it. If you use ctrl+d or exit() within ipython, while the thread
is still running, ipython will become unusable, but not close.
:type interactive_mode: bool
:return: a reference to the display screen, or a reference to the background
thread if interactive_mode was set to true. In the second scenario you
can obtain a reference to the display surface via
pygame.display.get_surface()
:rtype: pygame.Surface
""" |
os.environ['SDL_VIDEO_WINDOW_POS'] = "{}, {}".format(*display_pos)
pygame.init()
pygame.font.init()
disp = pygame.display.set_mode(resolution, pygame_flags)
return _PumpThread() if interactive_mode else disp |
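A minimal usage sketch, assuming pygame is installed and the init() helper above is importable; resolution and colors are illustrative:

screen = init((800, 600))   # 800x600 window at the default desktop position
screen.fill((30, 30, 30))   # dark grey background
pygame.display.flip()       # push the fill to the screen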
<SYSTEM_TASK:>
Returns an empty surface filled with fill_color.
<END_TASK>
<USER_TASK:>
Description:
def empty_surface(fill_color, size=None):
"""Returns an empty surface filled with fill_color.
:param fill_color: color to fill the surface with
:type fill_color: pygame.Color
:param size: the size of the new surface, if None it's created
to be the same size as the screen
:type size: int-2-tuple
""" |
sr = pygame.display.get_surface().get_rect()
surf = pygame.Surface(size or (sr.w, sr.h))
surf.fill(fill_color)
return surf |
<SYSTEM_TASK:>
Called by internal API subsystem to initialize websockets connections
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""
Called by internal API subsystem to initialize websockets connections
in the API interface
""" |
self.api = self.context.get("cls")(self.context)
self.context["inst"].append(self) # Adapters used by strategies
def on_ws_connect(*args, **kwargs):
"""Callback on connect hook to set is_connected_ws"""
self.is_connected_ws = True
self.api.on_ws_connect(*args, **kwargs)
# Initialize websocket in a thread with channels
if hasattr(self.api, "on_ws_connect"):
self.thread = Process(target=self.api.connect_ws, args=(
on_ws_connect, [
SockChannel(channel, res_type, self._generate_result)
for channel, res_type in
self
.context
.get("conf")
.get("subscriptions")
.items()
]))
self.thread.start() |
<SYSTEM_TASK:>
Take a mapping of channels to response types and extend the websock listener
<END_TASK>
<USER_TASK:>
Description:
def add_channels(self, channels):
"""
Take a mapping of channels to response types, wrap them in SockChannel objects, and extend the websock listener
""" |
chans = [
SockChannel(chan, res, self._generate_result)
for chan, res in channels.items()
]
self.api.channels.extend(chans)
self.api.connect_channels(chans) |
<SYSTEM_TASK:>
Generate the result object
<END_TASK>
<USER_TASK:>
Description:
def _generate_result(self, res_type, channel, result):
"""Generate the result object""" |
schema = self.api.ws_result_schema()
schema.context['channel'] = channel
schema.context['response_type'] = res_type
self.callback(schema.load(result), self.context) |
<SYSTEM_TASK:>
returns a random element from seq n times. If n is None, it continues indefinitely
<END_TASK>
<USER_TASK:>
Description:
def rand_elem(seq, n=None):
"""returns a random element from seq n times. If n is None, it continues indefinitly""" |
return map(random.choice, repeat(seq, n) if n is not None else repeat(seq)) |
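A runnable usage sketch, assuming rand_elem and its imports (random, itertools.repeat) are in scope; islice bounds the otherwise endless stream:

from itertools import islice

print(list(rand_elem('abc', 3)))             # e.g. ['b', 'a', 'b']
print(list(islice(rand_elem(range(5)), 4)))  # four picks from an endless stream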
<SYSTEM_TASK:>
Print the first paragraph of the docstring of the decorated function.
<END_TASK>
<USER_TASK:>
Description:
def print_doc1(*args, **kwargs):
'''Print the first paragraph of the docstring of the decorated function.
The paragraph will be printed as a one-liner.
May be invoked as a simple, argument-less decorator (i.e. ``@print_doc1``)
or with named arguments ``color``, ``bold``, ``prefix`` or ``tail``
(e.g. ``@print_doc1(color=utils.red, bold=True, prefix=' ')``).
Examples:
# >>> @print_doc1
# ... def foo():
# ... """First line of docstring.
# ...
# ... another line.
# ... """ |
# ... pass
# ...
# >>> foo()
# \033[34mFirst line of docstring\033[0m
# >>> @print_doc1
# ... def foo():
# ... """First paragraph of docstring which contains more than one
# ... line.
# ...
# ... Another paragraph.
# ... """
# ... pass
# ...
# >>> foo()
# \033[34mFirst paragraph of docstring which contains more than one line\033[0m
'''
# output settings from kwargs or take defaults
color = kwargs.get('color', blue)
bold = kwargs.get('bold', False)
prefix = kwargs.get('prefix', '')
tail = kwargs.get('tail', '\n')
def real_decorator(func):
'''real decorator function'''
@wraps(func)
def wrapper(*args, **kwargs):
'''the wrapper function'''
try:
prgf = first_paragraph(func.__doc__)
print(color(prefix + prgf + tail, bold))
except AttributeError as exc:
name = func.__name__
print(red(flo('{name}() has no docstring')))
raise(exc)
return func(*args, **kwargs)
return wrapper
invoked = bool(not args or kwargs)
if not invoked:
# invoke decorator function which returns the wrapper function
return real_decorator(func=args[0])
return real_decorator |
<SYSTEM_TASK:>
Read all lines from file.
<END_TASK>
<USER_TASK:>
Description:
def _readlines(fname, fpointer1=open, fpointer2=open): # pragma: no cover
"""Read all lines from file.""" |
# fpointer1, fpointer2 arguments to ease testing
try:
with fpointer1(fname, "r") as fobj:
return fobj.readlines()
except UnicodeDecodeError: # pragma: no cover
with fpointer2(fname, "r", encoding="utf-8") as fobj:
return fobj.readlines() |
<SYSTEM_TASK:>
for a given path and regexp pattern, return the files that match
<END_TASK>
<USER_TASK:>
Description:
def match(Class, path, pattern, flags=re.I, sortkey=None, ext=None):
"""for a given path and regexp pattern, return the files that match""" |
return sorted(
[
Class(fn=fn)
for fn in rglob(path, f"*{ext or ''}")
if re.search(pattern, os.path.basename(fn), flags=flags) is not None
and os.path.basename(fn)[0] != '~' # omit temp files
],
key=sortkey,
) |
<SYSTEM_TASK:>
copy the file to the new_fn, preserving atime and mtime
<END_TASK>
<USER_TASK:>
Description:
def copy(self, new_fn):
"""copy the file to the new_fn, preserving atime and mtime""" |
new_file = self.__class__(fn=str(new_fn))
new_file.write(data=self.read())
new_file.utime(self.atime, self.mtime)
return new_file |
<SYSTEM_TASK:>
make a filesystem-compliant basename for this file
<END_TASK>
<USER_TASK:>
Description:
def make_basename(self, fn=None, ext=None):
"""make a filesystem-compliant basename for this file""" |
fb, oldext = os.path.splitext(os.path.basename(fn or self.fn))
ext = ext or oldext.lower()
fb = String(fb).hyphenify(ascii=True)
return ''.join([fb, ext]) |
<SYSTEM_TASK:>
delete the file from the filesystem.
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
"""delete the file from the filesystem.""" |
if self.isfile:
os.remove(self.fn)
elif self.isdir:
shutil.rmtree(self.fn) |
<SYSTEM_TASK:>
given a number of bytes, return the file size in readable units
<END_TASK>
<USER_TASK:>
Description:
def readable_size(C, bytes, suffix='B', decimals=1, sep='\u00a0'):
"""given a number of bytes, return the file size in readable units""" |
if bytes is None:
return
size = float(bytes)
for unit in C.SIZE_UNITS:
if abs(size) < 1024 or unit == C.SIZE_UNITS[-1]:
return "{size:.{decimals}f}{sep}{unit}{suffix}".format(
size=size,
unit=unit,
suffix=suffix,
sep=sep,
decimals=C.SIZE_UNITS.index(unit) > 0 and decimals or 0, # B with no decimal
)
size /= 1024 |
<SYSTEM_TASK:>
Makes a HTTP call, formats response and does error handling.
<END_TASK>
<USER_TASK:>
Description:
def request(self, url, method, data=None, headers=None):
"""Makes a HTTP call, formats response and does error handling.
""" |
http_headers = merge_dict(self.default_headers, headers or {})
request_data = merge_dict({'api_key': self.apikey}, data or {})
logger.info('HTTP %s REQUEST TO %s' % (method, url))
start = datetime.datetime.now()
try:
response = requests.request(method=method, url=url, data=json.dumps(request_data),
headers=http_headers)
except exceptions.BadRequestError as e:
return {'errors': e.content}
duration = datetime.datetime.now() - start
logger.info('RESPONSE %s DURATION %s.%s' % (response.encoding, duration.seconds,
duration.microseconds))
return json.loads(response.content) if response.content else {} |
<SYSTEM_TASK:>
Calculates a good piece size for a size
<END_TASK>
<USER_TASK:>
Description:
def calc_piece_size(size, min_piece_size=20, max_piece_size=29, max_piece_count=1000):
"""
Calculates a good piece size for a size
""" |
logger.debug('Calculating piece size for %i' % size)
for i in range(min_piece_size, max_piece_size): # 20 = 1MB
if size / (2**i) < max_piece_count:
break
return 2**i |
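A quick sanity check of the search above (assumes the function and its module-level logger are in scope): a 1 GiB payload at 2**20-byte pieces would give 1024 pieces, which is not under max_piece_count, so the loop settles on 2**21 (2 MiB), i.e. 512 pieces:

assert calc_piece_size(2**30) == 2**21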
<SYSTEM_TASK:>
Prepare a list of all pieces grouped together
<END_TASK>
<USER_TASK:>
Description:
def split_pieces(piece_list, segments, num):
"""
Prepare a list of all pieces grouped together
""" |
piece_groups = []
pieces = list(piece_list)
while pieces:
for i in range(segments):
p = pieces[i::segments][:num]
if not p:
break
piece_groups.append(p)
pieces = pieces[num * segments:]
return piece_groups |
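A runnable trace of the interleaved grouping, assuming split_pieces is in scope:

print(split_pieces(list(range(10)), segments=2, num=2))
# [[0, 2], [1, 3], [4, 6], [5, 7], [8], [9]]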
<SYSTEM_TASK:>
recursive glob, gets all files that match the pattern within the directory tree
<END_TASK>
<USER_TASK:>
Description:
def rglob(dirname, pattern, dirs=False, sort=True):
"""recursive glob, gets all files that match the pattern within the directory tree""" |
fns = []
path = str(dirname)
if os.path.isdir(path):
fns = glob(os.path.join(escape(path), pattern))
dns = [fn for fn
in [os.path.join(path, fn)
for fn in os.listdir(path)]
if os.path.isdir(fn)]
if dirs:
fns += dns
for d in dns:
fns += rglob(d, pattern)
if sort:
fns.sort()
else:
log.warning("not a directory: %r", path)
return fns |
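A usage sketch, assuming rglob and its dependencies (glob.glob, glob.escape, and a module-level log) are importable, and a hypothetical src/ tree:

for fn in rglob('src', '*.py'):          # every .py file under src/, sorted
    print(fn)

dirs_too = rglob('src', '*', dirs=True)  # include directories in the result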
<SYSTEM_TASK:>
transforms any os path into unix style
<END_TASK>
<USER_TASK:>
Description:
def os_path_transform(self, s, sep=os.path.sep):
""" transforms any os path into unix style """ |
if sep == '/':
return s
else:
return s.replace(sep, '/') |
<SYSTEM_TASK:>
finds the destination based on source
<END_TASK>
<USER_TASK:>
Description:
def resolve_dst(self, dst_dir, src):
"""
finds the destination based on source
if source is an absolute path and there's no pattern, it copies the file to the base dst_dir
""" |
if os.path.isabs(src):
return os.path.join(dst_dir, os.path.basename(src))
return os.path.join(dst_dir, src) |
<SYSTEM_TASK:>
copy the zip file from its filename to the given filename.
<END_TASK>
<USER_TASK:>
Description:
def write(self, fn=None):
"""copy the zip file from its filename to the given filename.""" |
fn = fn or self.fn
if not os.path.exists(os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn))
with open(self.fn, 'rb') as f:
    b = f.read()
with open(fn, 'wb') as f:
    f.write(b) |
<SYSTEM_TASK:>
Normalizes a single rust frame with a function
<END_TASK>
<USER_TASK:>
Description:
def normalize_rust_function(self, function, line):
"""Normalizes a single rust frame with a function""" |
# Drop the prefix and return type if there is any
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=(' as ',)
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
# Remove rust-generated uniqueness hashes
function = self.fixup_hash.sub('', function)
return function |
<SYSTEM_TASK:>
Normalizes a single cpp frame with a function
<END_TASK>
<USER_TASK:>
Description:
def normalize_cpp_function(self, function, line):
"""Normalizes a single cpp frame with a function""" |
# Drop member function cv/ref qualifiers like const, const&, &, and &&
for ref in ('const', 'const&', '&&', '&'):
if function.endswith(ref):
function = function[:-len(ref)].strip()
# Drop the prefix and return type if there is any if it's not operator
# overloading--operator overloading syntax doesn't have the things
# we're dropping here and can look curious, so don't try
if '::operator' not in function:
function = drop_prefix_and_return_type(function)
# Collapse types
function = collapse(
function,
open_string='<',
close_string='>',
replacement='<T>',
exceptions=('name omitted', 'IPC::ParamTraits')
)
# Collapse arguments
if self.collapse_arguments:
function = collapse(
function,
open_string='(',
close_string=')',
replacement='',
exceptions=('anonymous namespace', 'operator')
)
# Remove PGO cold block labels like "[clone .cold.222]". bug #1397926
if 'clone .cold' in function:
function = collapse(
function,
open_string='[',
close_string=']',
replacement=''
)
if self.signatures_with_line_numbers_re.match(function):
function = '{}:{}'.format(function, line)
# Remove spaces before all stars, ampersands, and commas
function = self.fixup_space.sub('', function)
# Ensure a space after commas
function = self.fixup_comma.sub(', ', function)
return function |
<SYSTEM_TASK:>
Normalizes a single frame
<END_TASK>
<USER_TASK:>
Description:
def normalize_frame(
self,
module=None,
function=None,
file=None,
line=None,
module_offset=None,
offset=None,
normalized=None,
**kwargs # eat any extra kwargs passed in
):
"""Normalizes a single frame
Returns a structured conglomeration of the input parameters to serve as
a signature. The parameter names of this function reflect the exact
names of the fields from the jsonMDSW frame output. This allows this
function to be invoked by passing a frame as ``**a_frame``.
Sometimes, a frame may already have a normalized version cached. If
that exists, return it instead.
""" |
# If there's a cached normalized value, use that so we don't spend time
# figuring it out again
if normalized is not None:
return normalized
if function:
# If there's a filename and it ends in .rs, then normalize using
# Rust rules
if file and (parse_source_file(file) or '').endswith('.rs'):
return self.normalize_rust_function(
function=function,
line=line
)
# Otherwise normalize it with C/C++ rules
return self.normalize_cpp_function(
function=function,
line=line
)
# If there's a file and line number, use that
if file and line:
filename = file.rstrip('/\\')
if '\\' in filename:
file = filename.rsplit('\\')[-1]
else:
file = filename.rsplit('/')[-1]
return '{}#{}'.format(file, line)
# If there's an offset and no module/module_offset, use that
if not module and not module_offset and offset:
return '@{}'.format(offset)
# Return module/module_offset
return '{}@{}'.format(module or '', module_offset) |
<SYSTEM_TASK:>
Fix up some garbage errors.
<END_TASK>
<USER_TASK:>
Description:
def _clean_tag(t):
"""Fix up some garbage errors.""" |
# TODO: when score present, include info.
t = _scored_patt.sub(string=t, repl='')
if t == '_country_' or t.startswith('_country:'):
t = 'nnp_country'
elif t == 'vpb':
t = 'vb' # "carjack" is listed with vpb tag.
elif t == 'nnd':
t = 'nns' # "abbes" is listed with nnd tag.
elif t == 'nns_root:':
t = 'nns' # 'micros' is listed as nns_root.
elif t == 'root:zygote':
t = 'nn' # 'root:zygote' for zygote. :-/
elif t.startswith('root:'):
t = 'uh' # Don't know why, but these are all UH tokens.
elif t in ('abbr_united_states_marine_corps', 'abbr_orange_juice'):
t = "abbreviation"
elif t == '+abbreviation':
t = 'abbreviation'
elif t.startswith('fw_misspelling:'):
t = 'fw'
return t |
<SYSTEM_TASK:>
return the parent URL, with params, query, and fragment in place
<END_TASK>
<USER_TASK:>
Description:
def parent(self):
"""return the parent URL, with params, query, and fragment in place""" |
path = '/'.join(self.path.split('/')[:-1])
s = path.strip('/').split(':')
if len(s)==2 and s[1]=='':
return None
else:
return self.__class__(self, path=path) |
<SYSTEM_TASK:>
join a list of url elements, and include any keyword arguments, as a new URL
<END_TASK>
<USER_TASK:>
Description:
def join(C, *args, **kwargs):
"""join a list of url elements, and include any keyword arguments, as a new URL""" |
u = C('/'.join([str(arg).strip('/') for arg in args]), **kwargs)
return u |
<SYSTEM_TASK:>
initialize process timing for the current stack
<END_TASK>
<USER_TASK:>
Description:
def start(self, key=None, **params):
"""initialize process timing for the current stack""" |
self.params.update(**params)
key = key or self.stack_key
if key is not None:
self.current_times[key] = time() |
<SYSTEM_TASK:>
record the current stack process as finished
<END_TASK>
<USER_TASK:>
Description:
def finish(self):
"""record the current stack process as finished""" |
self.report(fraction=1.0)
key = self.stack_key
if key is not None:
if self.data.get(key) is None:
self.data[key] = []
start_time = self.current_times.get(key) or time()
self.data[key].append(Dict(runtime=time()-start_time, **self.params)) |
<SYSTEM_TASK:>
Add the directive header and options to the generated content.
<END_TASK>
<USER_TASK:>
Description:
def add_directive_header(self, sig):
"""Add the directive header and options to the generated content.""" |
domain = getattr(self, 'domain', 'py')
directive = getattr(self, 'directivetype', "module")
name = self.format_name()
self.add_line(u'.. %s:%s:: %s%s' % (domain, directive, name, sig),
'<autodoc>')
if self.options.noindex:
self.add_line(u' :noindex:', '<autodoc>')
if self.objpath:
# Be explicit about the module, this is necessary since .. class::
# etc. don't support a prepended module name
self.add_line(u' :module: %s' % self.modname, '<autodoc>') |
<SYSTEM_TASK:>
Return the default headers and others as necessary
<END_TASK>
<USER_TASK:>
Description:
def _headers(self, others={}):
"""Return the default headers and others as necessary""" |
headers = {
'Content-Type': 'application/json'
}
headers.update(others)
return headers |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def elapsed_time_string(start_time, stop_time):
r"""
Return a formatted string with the elapsed time between two time points.
The string includes years (365 days), months (30 days), days (24 hours),
hours (60 minutes), minutes (60 seconds) and seconds. If both arguments
are equal, the string returned is :code:`'None'`; otherwise, the string
returned is [YY year[s], [MM month[s], [DD day[s], [HH hour[s],
[MM minute[s] [and SS second[s\]\]\]\]\]\]. Any part (year[s], month[s],
etc.) is omitted if the value of that part is null/zero
:param start_time: Starting time point
:type start_time: `datetime <https://docs.python.org/3/library/
datetime.html#datetime-objects>`_
:param stop_time: Ending time point
:type stop_time: `datetime`
:rtype: string
:raises: RuntimeError (Invalid time delta specification)
For example:
>>> import datetime, pmisc
>>> start_time = datetime.datetime(2014, 1, 1, 1, 10, 1)
>>> stop_time = datetime.datetime(2015, 1, 3, 1, 10, 3)
>>> pmisc.elapsed_time_string(start_time, stop_time)
'1 year, 2 days and 2 seconds'
""" |
if start_time > stop_time:
raise RuntimeError("Invalid time delta specification")
delta_time = stop_time - start_time
# Python 2.6 datetime objects do not have total_seconds() method
tot_seconds = int(
(
delta_time.microseconds
+ (delta_time.seconds + delta_time.days * 24 * 3600) * 10 ** 6
)
/ 10 ** 6
)
years, remainder = divmod(tot_seconds, 365 * 24 * 60 * 60)
months, remainder = divmod(remainder, 30 * 24 * 60 * 60)
days, remainder = divmod(remainder, 24 * 60 * 60)
hours, remainder = divmod(remainder, 60 * 60)
minutes, seconds = divmod(remainder, 60)
token_iter = zip(
[years, months, days, hours, minutes, seconds],
["year", "month", "day", "hour", "minute", "second"],
)
ret_list = [
"{token} {token_name}{plural}".format(
token=num, token_name=desc, plural="s" if num > 1 else ""
)
for num, desc in token_iter
if num > 0
]
if not ret_list:
return "None"
if len(ret_list) == 1:
return ret_list[0]
if len(ret_list) == 2:
return ret_list[0] + " and " + ret_list[1]
return (", ".join(ret_list[0:-1])) + " and " + ret_list[-1] |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def pcolor(text, color, indent=0):
r"""
Return a string that once printed is colorized.
:param text: Text to colorize
:type text: string
:param color: Color to use, one of :code:`'black'`, :code:`'red'`,
:code:`'green'`, :code:`'yellow'`, :code:`'blue'`,
:code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or
:code:`'none'` (case insensitive)
:type color: string
:param indent: Number of spaces to prefix the output with
:type indent: integer
:rtype: string
:raises:
* RuntimeError (Argument \`color\` is not valid)
* RuntimeError (Argument \`indent\` is not valid)
* RuntimeError (Argument \`text\` is not valid)
* ValueError (Unknown color *[color]*)
""" |
esc_dict = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
"none": -1,
}
if not isinstance(text, str):
raise RuntimeError("Argument `text` is not valid")
if not isinstance(color, str):
raise RuntimeError("Argument `color` is not valid")
if not isinstance(indent, int):
raise RuntimeError("Argument `indent` is not valid")
color = color.lower()
if color not in esc_dict:
raise ValueError("Unknown color {color}".format(color=color))
if esc_dict[color] != -1:
return "\033[{color_code}m{indent}{text}\033[0m".format(
color_code=esc_dict[color], indent=" " * indent, text=text
)
return "{indent}{text}".format(indent=" " * indent, text=text) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def quote_str(obj):
r"""
Add extra quotes to a string.
If the argument is not a string it is returned unmodified.
:param obj: Object
:type obj: any
:rtype: Same as argument
For example:
>>> import pmisc
>>> pmisc.quote_str(5)
5
>>> pmisc.quote_str('Hello!')
'"Hello!"'
>>> pmisc.quote_str('He said "hello!"')
'\'He said "hello!"\''
""" |
if not isinstance(obj, str):
return obj
return "'{obj}'".format(obj=obj) if '"' in obj else '"{obj}"'.format(obj=obj) |
<SYSTEM_TASK:>
Return a string with a frame record pretty-formatted.
<END_TASK>
<USER_TASK:>
Description:
def strframe(obj, extended=False):
"""
Return a string with a frame record pretty-formatted.
The record is typically an item in a list generated by `inspect.stack()
<https://docs.python.org/3/library/inspect.html#inspect.stack>`_).
:param obj: Frame record
:type obj: tuple
:param extended: Flag that indicates whether contents of the frame object
are printed (True) or not (False)
:type extended: boolean
:rtype: string
""" |
# Stack frame -> (frame object [0], filename [1], line number of current
# line [2], function name [3], list of lines of context from source
# code [4], index of current line within list [5])
fname = normalize_windows_fname(obj[1])
ret = list()
ret.append(pcolor("Frame object ID: {0}".format(hex(id(obj[0]))), "yellow"))
ret.append("File name......: {0}".format(fname))
ret.append("Line number....: {0}".format(obj[2]))
ret.append("Function name..: {0}".format(obj[3]))
ret.append("Context........: {0}".format(obj[4]))
ret.append("Index..........: {0}".format(obj[5]))
if extended:
ret.append("f_back ID......: {0}".format(hex(id(obj[0].f_back))))
ret.append("f_builtins.....: {0}".format(obj[0].f_builtins))
ret.append("f_code.........: {0}".format(obj[0].f_code))
ret.append("f_globals......: {0}".format(obj[0].f_globals))
ret.append("f_lasti........: {0}".format(obj[0].f_lasti))
ret.append("f_lineno.......: {0}".format(obj[0].f_lineno))
ret.append("f_locals.......: {0}".format(obj[0].f_locals))
if hasattr(obj[0], "f_restricted"): # pragma: no cover
ret.append("f_restricted...: {0}".format(obj[0].f_restricted))
ret.append("f_trace........: {0}".format(obj[0].f_trace))
return "\n".join(ret) |
<SYSTEM_TASK:>
Set variable values via a dictionary mapping name to value.
<END_TASK>
<USER_TASK:>
Description:
def set(self, x):
"""
Set variable values via a dictionary mapping name to value.
""" |
for name, value in iter(x.items()):
if hasattr(value, "ndim"):
if self[name].value.ndim < value.ndim:
self[name].value.itemset(value.squeeze())
else:
self[name].value = value
else:
self[name].value.itemset(value) |
<SYSTEM_TASK:>
Return a subset of variables according to ``fixed``.
<END_TASK>
<USER_TASK:>
Description:
def select(self, fixed):
"""
Return a subset of variables according to ``fixed``.
""" |
names = [n for n in self.names() if self[n].isfixed == fixed]
return Variables({n: self[n] for n in names}) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def load(pathtovector,
wordlist=(),
num_to_load=None,
truncate_embeddings=None,
unk_word=None,
sep=" "):
r"""
Read a file in word2vec .txt format.
The load function will raise a ValueError when trying to load items
which do not conform to line lengths.
Parameters
----------
pathtovector : string
The path to the vector file.
wordlist : iterable, optional, default ()
A list of words you want loaded from the vector file. If this is
empty (default), all words will be loaded.
num_to_load : int, optional, default None
The number of items to load from the file. Because loading can take
some time, it is sometimes useful to only load the first n items
from a vector file for quick inspection.
truncate_embeddings : int, optional, default None
If this value is not None, the vectors in the vector space will
be truncated to the number of dimensions indicated by this value.
unk_word : object
The object to treat as UNK in your vector space. If this is not
in your items dictionary after loading, we add it with a zero
vector.
Returns
-------
r : Reach
An initialized Reach instance.
""" |
vectors, items = Reach._load(pathtovector,
wordlist,
num_to_load,
truncate_embeddings,
sep)
if unk_word is not None:
if unk_word not in set(items):
unk_vec = np.zeros((1, vectors.shape[1]))
vectors = np.concatenate([unk_vec, vectors], 0)
items = [unk_word] + items
unk_index = 0
else:
unk_index = items.index(unk_word)
else:
unk_index = None
return Reach(vectors,
items,
name=os.path.split(pathtovector)[-1],
unk_index=unk_index) |
<SYSTEM_TASK:>
Load a matrix and wordlist from a .vec file.
<END_TASK>
<USER_TASK:>
Description:
def _load(pathtovector,
wordlist,
num_to_load=None,
truncate_embeddings=None,
sep=" "):
"""Load a matrix and wordlist from a .vec file.""" |
vectors = []
addedwords = set()
words = []
try:
wordlist = set(wordlist)
except ValueError:
wordlist = set()
logger.info("Loading {0}".format(pathtovector))
with open(pathtovector, encoding='utf-8') as f:
    firstline = f.readline().strip()
try:
num, size = firstline.split(sep)
num, size = int(num), int(size)
logger.info("Vector space: {} by {}".format(num, size))
header = True
except ValueError:
size = len(firstline.split(sep)) - 1
logger.info("Vector space: {} dim, # items unknown".format(size))
word, rest = firstline.split(sep, 1)
# If the first line is correctly parseable, set header to False.
header = False
if truncate_embeddings is None or truncate_embeddings == 0:
truncate_embeddings = size
for idx, line in enumerate(open(pathtovector, encoding='utf-8')):
if header and idx == 0:
continue
word, rest = line.rstrip(" \n").split(sep, 1)
if wordlist and word not in wordlist:
continue
if word in addedwords:
raise ValueError("Duplicate: {} on line {} was in the "
"vector space twice".format(word, idx))
if len(rest.split(sep)) != size:
raise ValueError("Incorrect input at index {}, size "
"is {}, expected "
"{}".format(idx+1,
len(rest.split(sep)), size))
words.append(word)
addedwords.add(word)
vectors.append(np.fromstring(rest, sep=sep)[:truncate_embeddings])
if num_to_load is not None and len(addedwords) >= num_to_load:
break
vectors = np.array(vectors).astype(np.float32)
logger.info("Loading finished")
if wordlist:
diff = wordlist - addedwords
if diff:
logger.info("Not all items from your wordlist were in your "
"vector space: {}.".format(diff))
return vectors, words |
<SYSTEM_TASK:>
Vectorize a sentence by replacing all items with their vectors.
<END_TASK>
<USER_TASK:>
Description:
def vectorize(self, tokens, remove_oov=False, norm=False):
"""
Vectorize a sentence by replacing all items with their vectors.
Parameters
----------
tokens : object or list of objects
The tokens to vectorize.
remove_oov : bool, optional, default False
Whether to remove OOV items. If False, OOV items are replaced by
the UNK glyph. If this is True, the returned sequence might
have a different length than the original sequence.
norm : bool, optional, default False
Whether to return the unit vectors, or the regular vectors.
Returns
-------
s : numpy array
An M * N matrix, where every item has been replaced by
its vector. OOV items are either removed, or replaced
by the value of the UNK glyph.
""" |
if not tokens:
raise ValueError("You supplied an empty list.")
index = list(self.bow(tokens, remove_oov=remove_oov))
if not index:
raise ValueError("You supplied a list with only OOV tokens: {}, "
"which then got removed. Set remove_oov to False,"
" or filter your sentences to remove any in which"
" all items are OOV.")
if norm:
return np.stack([self.norm_vectors[x] for x in index])
else:
return np.stack([self.vectors[x] for x in index]) |
<SYSTEM_TASK:>
Create a bow representation of a list of tokens.
<END_TASK>
<USER_TASK:>
Description:
def bow(self, tokens, remove_oov=False):
"""
Create a bow representation of a list of tokens.
Parameters
----------
tokens : list.
The list of items to change into a bag of words representation.
remove_oov : bool.
Whether to remove OOV items from the input.
If this is True, the length of the returned BOW representation
might not be the length of the original representation.
Returns
-------
bow : generator
A BOW representation of the list of items.
""" |
if remove_oov:
tokens = [x for x in tokens if x in self.items]
for t in tokens:
try:
yield self.items[t]
except KeyError:
if self.unk_index is None:
raise ValueError("You supplied OOV items but didn't "
"provide the index of the replacement "
"glyph. Either set remove_oov to True, "
"or set unk_index to the index of the "
"item which replaces any OOV items.")
yield self.unk_index |
<SYSTEM_TASK:>
Transform a corpus by repeated calls to vectorize, defined above.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, corpus, remove_oov=False, norm=False):
"""
Transform a corpus by repeated calls to vectorize, defined above.
Parameters
----------
corpus : A list of strings, list of list of strings.
Represents a corpus as a list of sentences, where sentences
can either be strings or lists of tokens.
remove_oov : bool, optional, default False
If True, removes OOV items from the input before vectorization.
Returns
-------
c : list
A list of numpy arrays, where each array represents the transformed
sentence in the original list. The list is guaranteed to be the
same length as the input list, but the arrays in the list may be
of different lengths, depending on whether remove_oov is True.
""" |
return [self.vectorize(s, remove_oov=remove_oov, norm=norm)
for s in corpus] |
<SYSTEM_TASK:>
Return the num most similar items to a given list of items.
<END_TASK>
<USER_TASK:>
Description:
def most_similar(self,
items,
num=10,
batch_size=100,
show_progressbar=False,
return_names=True):
"""
Return the num most similar items to a given list of items.
Parameters
----------
items : list of objects or a single object.
The items to get the most similar items to.
num : int, optional, default 10
The number of most similar items to retrieve.
batch_size : int, optional, default 100.
The batch size to use. 100 is a good default option. Increasing
the batch size may increase the speed.
show_progressbar : bool, optional, default False
Whether to show a progressbar.
return_names : bool, optional, default True
Whether to return the item names, or just the distances.
Returns
-------
sim : array
For each items in the input the num most similar items are returned
in the form of (NAME, DISTANCE) tuples. If return_names is false,
the returned list just contains distances.
""" |
# This line allows users to input single items.
# We used to rely on string identities, but we now also allow
# anything hashable as keys.
# Might fail if a list of passed items is also in the vocabulary.
# but I can't think of cases when this would happen, and what
# user expectations are.
try:
if items in self.items:
items = [items]
except TypeError:
pass
x = np.stack([self.norm_vectors[self.items[x]] for x in items])
result = self._batch(x,
batch_size,
num+1,
show_progressbar,
return_names)
# list call consumes the generator.
return [x[1:] for x in result] |
<SYSTEM_TASK:>
Return all items whose similarity is higher than threshold.
<END_TASK>
<USER_TASK:>
Description:
def threshold(self,
items,
threshold=.5,
batch_size=100,
show_progressbar=False,
return_names=True):
"""
Return all items whose similarity is higher than threshold.
Parameters
----------
items : list of objects or a single object.
The items to get the most similar items to.
threshold : float, optional, default .5
The radius within which to retrieve items.
batch_size : int, optional, default 100.
The batch size to use. 100 is a good default option. Increasing
the batch size may increase the speed.
show_progressbar : bool, optional, default False
Whether to show a progressbar.
return_names : bool, optional, default True
Whether to return the item names, or just the distances.
Returns
-------
sim : array
For each item in the input, all items whose similarity is higher
than threshold are returned in the form of (NAME, DISTANCE) tuples.
If return_names is false, the returned list just contains distances.
""" |
# This line allows users to input single items.
# We used to rely on string identities, but we now also allow
# anything hashable as keys.
# Might fail if a list of passed items is also in the vocabulary.
# but I can't think of cases when this would happen, and what
# user expectations are.
try:
if items in self.items:
items = [items]
except TypeError:
pass
x = np.stack([self.norm_vectors[self.items[x]] for x in items])
result = self._threshold_batch(x,
batch_size,
threshold,
show_progressbar,
return_names)
# list call consumes the generator.
return [x[1:] for x in result] |
<SYSTEM_TASK:>
Normalize a matrix of row vectors to unit length.
<END_TASK>
<USER_TASK:>
Description:
def normalize(vectors):
"""
Normalize a matrix of row vectors to unit length.
Contains a shortcut if there are no zero vectors in the matrix.
If there are zero vectors, we do some indexing tricks to avoid
dividing by 0.
Parameters
----------
vectors : np.array
The vectors to normalize.
Returns
-------
vectors : np.array
The input vectors, normalized to unit length.
""" |
if np.ndim(vectors) == 1:
norm = np.linalg.norm(vectors)
if norm == 0:
return np.zeros_like(vectors)
return vectors / norm
norm = np.linalg.norm(vectors, axis=1)
if np.any(norm == 0):
nonzero = norm > 0
result = np.zeros_like(vectors)
n = norm[nonzero]
p = vectors[nonzero]
result[nonzero] = p / n[:, None]
return result
else:
return vectors / norm[:, None] |
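A self-contained check of the zero-vector branch, assuming normalize above is callable as a plain function (in the class it may need to be reached as a static method):

import numpy as np

vecs = np.array([[3.0, 4.0], [0.0, 0.0]])
print(normalize(vecs))
# [[0.6 0.8]
#  [0.  0. ]]  <- the zero row stays zero instead of dividing by 0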
<SYSTEM_TASK:>
Compute the similarity between a vector and a set of items.
<END_TASK>
<USER_TASK:>
Description:
def vector_similarity(self, vector, items):
"""Compute the similarity between a vector and a set of items.""" |
vector = self.normalize(vector)
items_vec = np.stack([self.norm_vectors[self.items[x]] for x in items])
return vector.dot(items_vec.T) |
<SYSTEM_TASK:>
Compute the similarity between two sets of items.
<END_TASK>
<USER_TASK:>
Description:
def similarity(self, i1, i2):
"""
Compute the similarity between two sets of items.
Parameters
----------
i1 : object
The first set of items.
i2 : object
The second set of items.
Returns
-------
sim : array of floats
An array of similarity scores between 1 and 0.
""" |
try:
if i1 in self.items:
i1 = [i1]
except TypeError:
pass
try:
if i2 in self.items:
i2 = [i2]
except TypeError:
pass
i1_vec = np.stack([self.norm_vectors[self.items[x]] for x in i1])
i2_vec = np.stack([self.norm_vectors[self.items[x]] for x in i2])
return i1_vec.dot(i2_vec.T) |
<SYSTEM_TASK:>
Prune the current reach instance by removing items.
<END_TASK>
<USER_TASK:>
Description:
def prune(self, wordlist):
"""
Prune the current reach instance by removing items.
Parameters
----------
wordlist : list of str
A list of words to keep. Note that this wordlist need not include
all words in the Reach instance. Any words which are in the
wordlist, but not in the reach instance are ignored.
""" |
# Remove duplicates and words that are not in the vector space
wordlist = [w for w in set(wordlist) if w in self.items]
indices = [self.items[w] for w in wordlist]
if self.unk_index is not None and self.unk_index not in indices:
    raise ValueError("Your unknown item is not in your list of items. "
                     "Set it to None before pruning, or pass your "
                     "unknown item.")
if self.unk_index is not None:
    # remember the unknown word now; its index changes after pruning
    unk_word = self.indices[self.unk_index]
self.vectors = self.vectors[indices]
self.norm_vectors = self.norm_vectors[indices]
self.items = {w: idx for idx, w in enumerate(wordlist)}
self.indices = {v: k for k, v in self.items.items()}
if self.unk_index is not None:
    self.unk_index = self.items[unk_word] |
<SYSTEM_TASK:>
Save the current vector space in word2vec format.
<END_TASK>
<USER_TASK:>
Description:
def save(self, path, write_header=True):
"""
Save the current vector space in word2vec format.
Parameters
----------
path : str
The path to save the vector file to.
write_header : bool, optional, default True
Whether to write a word2vec-style header as the first line of the
file
""" |
with open(path, 'w') as f:
if write_header:
f.write(u"{0} {1}\n".format(str(self.vectors.shape[0]),
str(self.vectors.shape[1])))
for i in range(len(self.items)):
w = self.indices[i]
vec = self.vectors[i]
f.write(u"{0} {1}\n".format(w,
" ".join([str(x) for x in vec]))) |
<SYSTEM_TASK:>
Save a reach instance in a fast format.
<END_TASK>
<USER_TASK:>
Description:
def save_fast_format(self, filename):
"""
Save a reach instance in a fast format.
The reach fast format stores the words and vectors of a Reach instance
separately in a JSON and numpy format, respectively.
Parameters
----------
filename : str
The prefix to add to the saved filename. Note that this is not the
real filename under which these items are stored.
The words and unk_index are stored under "{filename}_items.json",
and the numpy matrix is saved under "{filename}_vectors.npy".
""" |
items, _ = zip(*sorted(self.items.items(), key=lambda x: x[1]))
items = {"items": items,
"unk_index": self.unk_index,
"name": self.name}
with open("{}_items.json".format(filename), 'w') as f:
    json.dump(items, f)
with open("{}_vectors.npy".format(filename), 'wb') as f:
    np.save(f, self.vectors) |
<SYSTEM_TASK:>
Load a reach instance in fast format.
<END_TASK>
<USER_TASK:>
Description:
def load_fast_format(filename):
"""
Load a reach instance in fast format.
As described above, the fast format stores the words and vectors of the
Reach instance separately, and is drastically faster than loading from
.txt files.
Parameters
----------
filename : str
The filename prefix from which to load. Note that this is not a
real filepath as such, but a shared prefix for both files.
In order for this to work, both {filename}_items.json and
{filename}_vectors.npy should be present.
""" |
words, unk_index, name, vectors = Reach._load_fast(filename)
return Reach(vectors, words, unk_index=unk_index, name=name) |
<SYSTEM_TASK:>
For a GitHub URI, walk all the pages until there's no more content
<END_TASK>
<USER_TASK:>
Description:
def api_walk(uri, per_page=100, key="login"):
"""
For a GitHub URI, walk all the pages until there's no more content
""" |
page = 1
result = []
while True:
response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page))
if len(response) == 0:
break
else:
page += 1
for r in response:
if key == USER_LOGIN:
result.append(user_login(r))
else:
result.append(r[key])
return list(set(result)) |
<SYSTEM_TASK:>
Simple API endpoint get, return only the keys we care about
<END_TASK>
<USER_TASK:>
Description:
def api_get(uri, key=None):
"""
Simple API endpoint get, return only the keys we care about
""" |
response = get_json(uri)
if response:
if isinstance(response, list):
    r = response[0]
elif isinstance(response, dict):
    r = response
else:
    return None
if isinstance(r, dict):
    # Special nested value we care about
    if key == USER_LOGIN:
        return user_login(r)
    if key in r:
        return r[key] |
<SYSTEM_TASK:>
Not sure if there's a better way to walk the ... interesting result
<END_TASK>
<USER_TASK:>
Description:
def reducejson(j):
"""
Not sure if there's a better way to walk the ... interesting result
""" |
authors = []
for key in j["data"]["repository"]["commitComments"]["edges"]:
authors.append(key["node"]["author"])
for key in j["data"]["repository"]["issues"]["nodes"]:
authors.append(key["author"])
for c in key["comments"]["nodes"]:
authors.append(c["author"])
for key in j["data"]["repository"]["pullRequests"]["edges"]:
authors.append(key["node"]["author"])
for c in key["node"]["comments"]["nodes"]:
authors.append(c["author"])
unique = list({v['login']:v for v in authors if v is not None}.values())
return unique |
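A hand-built input showing the minimal GraphQL result shape the walker expects; the logins are illustrative:

j = {"data": {"repository": {
    "commitComments": {"edges": [{"node": {"author": {"login": "alice"}}}]},
    "issues": {"nodes": [{"author": {"login": "bob"},
                          "comments": {"nodes": [{"author": None}]}}]},
    "pullRequests": {"edges": [{"node": {"author": {"login": "alice"},
                                         "comments": {"nodes": []}}}]},
}}}
print(reducejson(j))  # [{'login': 'alice'}, {'login': 'bob'}] - None authors dropped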
<SYSTEM_TASK:>
execute a command at the device using the RESTful API
<END_TASK>
<USER_TASK:>
Description:
def _exec(self, cmd, url, json_data=None):
"""
execute a command at the device using the RESTful API
:param str cmd: one of the REST commands, e.g. GET or POST
:param str url: URL of the REST API the command should be applied to
:param dict json_data: json data that should be attached to the command
""" |
assert(cmd in ("GET", "POST", "PUT", "DELETE"))
assert(self.dev is not None)
if json_data is None:
json_data = {}
# add device address to the URL
url = url.format(self.dev["ipv4_internal"])
# set basic authentication
auth = HTTPBasicAuth("dev", self.dev["api_key"])
# execute HTTP request
res = None
if cmd == "GET":
res = self._local_session.session.get(
url, auth=auth, verify=False
)
elif cmd == "POST":
res = self._local_session.session.post(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "PUT":
res = self._local_session.session.put(
url, auth=auth, json=json_data, verify=False
)
elif cmd == "DELETE":
res = self._local_session.session.delete(
url, auth=auth, verify=False
)
if res is not None:
# raise an exception on error
res.raise_for_status()
return res.json() |
<SYSTEM_TASK:>
returns widget_id for given package_name does not care
<END_TASK>
<USER_TASK:>
Description:
def _get_widget_id(self, package_name):
"""
returns widget_id for the given package_name; does not care
about multiple widget ids at the moment, it just picks the first
:param str package_name: package to check for
:return: id of first widget which belongs to the given package_name
:rtype: str
""" |
widget_id = ""
for app in self.get_apps_list():
if app.package == package_name:
widget_id = list(app.widgets.keys())[0]
return widget_id |
<SYSTEM_TASK:>
get the user details via the cloud
<END_TASK>
<USER_TASK:>
Description:
def get_user(self):
"""
get the user details via the cloud
""" |
log.debug("getting user information from LaMetric cloud...")
_, url = CLOUD_URLS["get_user"]
res = self._cloud_session.session.get(url)
if res is not None:
# raise an exception on error
res.raise_for_status()
return res.json() |
<SYSTEM_TASK:>
get all devices that are linked to the user, if the local device
<END_TASK>
<USER_TASK:>
Description:
def get_devices(self, force_reload=False, save_devices=True):
"""
get all devices that are linked to the user. If the local device
file does not exist, the devices will be obtained from the LaMetric
cloud; otherwise, the local device file will be read.
:param bool force_reload: When True, devices are read again from cloud
:param bool save_devices: When True, devices obtained from the LaMetric
cloud are stored locally
""" |
if (
(not os.path.exists(self._devices_filename)) or
(force_reload is True)
):
# -- load devices from LaMetric cloud --
log.debug("getting devices from LaMetric cloud...")
_, url = CLOUD_URLS["get_devices"]
res = self._cloud_session.session.get(url)
if res is not None:
# raise an exception on error
res.raise_for_status()
# store obtained devices internally
self._devices = res.json()
if save_devices is True:
# save obtained devices to the local file
self.save_devices()
return self._devices
else:
# -- load devices from local file --
log.debug(
"getting devices from '{}'...".format(self._devices_filename)
)
return self.load_devices() |
<SYSTEM_TASK:>
save devices that have been obtained from LaMetric cloud
<END_TASK>
<USER_TASK:>
Description:
def save_devices(self):
"""
save devices that have been obtained from LaMetric cloud
to a local file
""" |
log.debug("saving devices to ''...".format(self._devices_filename))
if self._devices != []:
with codecs.open(self._devices_filename, "wb", "utf-8") as f:
json.dump(self._devices, f) |
<SYSTEM_TASK:>
load stored devices from the local file
<END_TASK>
<USER_TASK:>
Description:
def load_devices(self):
"""
load stored devices from the local file
""" |
self._devices = []
if os.path.exists(self._devices_filename):
log.debug(
"loading devices from '{}'...".format(self._devices_filename)
)
with codecs.open(self._devices_filename, "rb", "utf-8") as f:
self._devices = json.load(f)
return self._devices |
<SYSTEM_TASK:>
returns the full device state
<END_TASK>
<USER_TASK:>
Description:
def get_device_state(self):
"""
returns the full device state
""" |
log.debug("getting device state...")
cmd, url = DEVICE_URLS["get_device_state"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
sends new notification to the device
<END_TASK>
<USER_TASK:>
Description:
def send_notification(
self, model, priority="warning", icon_type=None, lifetime=None
):
"""
sends new notification to the device
:param Model model: an instance of the Model class that should be used
:param str priority: the priority of the notification
[info, warning or critical] (default: warning)
:param str icon_type: the icon type of the notification
[none, info or alert] (default: None)
:param int lifetime: the lifetime of the notification in ms
(default: 2 min)
""" |
assert(priority in ("info", "warning", "critical"))
assert(icon_type in (None, "none", "info", "alert"))
assert((lifetime is None) or (lifetime > 0))
log.debug("sending notification...")
cmd, url = DEVICE_URLS["send_notification"]
json_data = {"model": model.json(), "priority": priority}
if icon_type is not None:
json_data["icon_type"] = icon_type
if lifetime is not None:
json_data["lifetime"] = lifetime
return self._exec(cmd, url, json_data=json_data) |
<SYSTEM_TASK:>
returns the list of all notifications in queue
<END_TASK>
<USER_TASK:>
Description:
def get_notifications(self):
"""
returns the list of all notifications in queue
""" |
log.debug("getting notifications in queue...")
cmd, url = DEVICE_URLS["get_notifications_queue"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
returns a specific notification by given id
<END_TASK>
<USER_TASK:>
Description:
def get_notification(self, notification_id):
"""
returns a specific notification by given id
:param str notification_id: the ID of the notification
""" |
log.debug("getting notification '{}'...".format(notification_id))
cmd, url = DEVICE_URLS["get_notification"]
return self._exec(cmd, url.replace(":id", notification_id)) |
<SYSTEM_TASK:>
returns information about the display, including
<END_TASK>
<USER_TASK:>
Description:
def get_display(self):
"""
returns information about the display, including
brightness, screensaver etc.
""" |
log.debug("getting display information...")
cmd, url = DEVICE_URLS["get_display"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
set the display's screensaver mode
<END_TASK>
<USER_TASK:>
Description:
def set_screensaver(
self, mode, is_mode_enabled, start_time=None, end_time=None,
is_screensaver_enabled=True
):
"""
set the display's screensaver mode
:param str mode: mode of the screensaver
[when_dark, time_based]
:param bool is_mode_enabled: specifies if mode is enabled or disabled
:param str start_time: start time, only used in time_based mode
(format: %H:%M:%S)
:param str end_time: end time, only used in time_based mode
(format: %H:%M:%S)
:param bool is_screensaver_enabled: whether the overall screensaver is
turned on; overrules mode-specific settings
""" |
assert(mode in ("when_dark", "time_based"))
log.debug("setting screensaver to '{}'...".format(mode))
cmd, url = DEVICE_URLS["set_display"]
json_data = {
"screensaver": {
"enabled": is_screensaver_enabled,
"mode": mode,
"mode_params": {
"enabled": is_mode_enabled
},
}
}
if mode == "time_based":
# TODO: add time checks
assert((start_time is not None) and (end_time is not None))
json_data["screensaver"]["mode_params"]["start_time"] = start_time
json_data["screensaver"]["mode_params"]["end_time"] = end_time
return self._exec(cmd, url, json_data=json_data) |
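For example, a time-based screensaver between 22:00 and 07:00 could be configured like this (sketch; `device` stands for an instance of the class above):

# enable the time-based screensaver from 22:00 to 07:00
device.set_screensaver("time_based", is_mode_enabled=True,
                       start_time="22:00:00", end_time="07:00:00")
# or simply activate the ambient-light mode
device.set_screensaver("when_dark", is_mode_enabled=True)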
<SYSTEM_TASK:>
returns the current volume
<END_TASK>
<USER_TASK:>
Description:
def get_volume(self):
"""
returns the current volume
""" |
log.debug("getting volume...")
cmd, url = DEVICE_URLS["get_volume"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
allows to change the volume
<END_TASK>
<USER_TASK:>
Description:
def set_volume(self, volume=50):
"""
allows changing the volume
:param int volume: volume to be set for the current device
[0..100] (default: 50)
""" |
assert(volume in range(101))
log.debug("setting volume...")
cmd, url = DEVICE_URLS["set_volume"]
json_data = {
"volume": volume,
}
return self._exec(cmd, url, json_data=json_data) |
<SYSTEM_TASK:>
returns the current Wi-Fi state the device is connected to
<END_TASK>
<USER_TASK:>
Description:
def get_wifi_state(self):
"""
returns the current Wi-Fi state the device is connected to
""" |
log.debug("getting wifi state...")
cmd, url = DEVICE_URLS["get_wifi_state"]
return self._exec(cmd, url) |
<SYSTEM_TASK:>
gets installed apps and puts them into the available_apps list
<END_TASK>
<USER_TASK:>
Description:
def set_apps_list(self):
"""
gets installed apps and puts them into the available_apps list
""" |
log.debug("getting apps and setting them in the internal app list...")
cmd, url = DEVICE_URLS["get_apps_list"]
result = self._exec(cmd, url)
self.available_apps = [
AppModel(result[app])
for app in result
] |
<SYSTEM_TASK:>
activates an app that is specified by package. Selects the first
<END_TASK>
<USER_TASK:>
Description:
def switch_to_app(self, package):
"""
activates an app that is specified by package. Selects the first
app it finds in the app list
:param package: name of package/app
:type package: str
:return: None
:rtype: None
""" |
log.debug("switching to app '{}'...".format(package))
cmd, url = DEVICE_URLS["switch_to_app"]
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
self.result = self._exec(cmd, url) |
<SYSTEM_TASK:>
switches to the next app
<END_TASK>
<USER_TASK:>
Description:
def switch_to_next_app(self):
"""
switches to the next app
""" |
log.debug("switching to next app...")
cmd, url = DEVICE_URLS["switch_to_next_app"]
self.result = self._exec(cmd, url) |
<SYSTEM_TASK:>
activate the widget of the given package
<END_TASK>
<USER_TASK:>
Description:
def activate_widget(self, package):
"""
activate the widget of the given package
:param str package: name of the package
""" |
cmd, url = DEVICE_URLS["activate_widget"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
self.result = self._exec(cmd, url) |
<SYSTEM_TASK:>
meta method for all interactions with apps
<END_TASK>
<USER_TASK:>
Description:
def _app_exec(self, package, action, params=None):
"""
meta method for all interactions with apps
:param package: name of package/app
:type package: str
:param action: the action to be executed
:type action: str
:param params: optional parameters for this action
:type params: dict
:return: None
:rtype: None
""" |
# get list of possible commands from app.actions
allowed_commands = []
for app in self.get_apps_list():
if app.package == package:
allowed_commands = list(app.actions.keys())
break
# check if action is in this list
assert(action in allowed_commands)
cmd, url = DEVICE_URLS["do_action"]
# get widget id for the package
widget_id = self._get_widget_id(package)
url = url.format('{}', package, widget_id)
json_data = {"id": action}
if params is not None:
json_data["params"] = params
self.result = self._exec(cmd, url, json_data=json_data) |
<SYSTEM_TASK:>
set the alarm clock
<END_TASK>
<USER_TASK:>
Description:
def alarm_set(self, time, wake_with_radio=False):
"""
set the alarm clock
:param str time: time of the alarm (format: %H:%M:%S)
:param bool wake_with_radio: if True, radio will be used for the alarm
instead of beep sound
""" |
# TODO: check for correct time format
log.debug("alarm => set...")
params = {
"enabled": True,
"time": time,
"wake_with_radio": wake_with_radio
}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) |
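A minimal sketch for the TODO'd format check, assuming the documented %H:%M:%S format; `_is_valid_time` is a hypothetical helper, not part of the code above:

from datetime import datetime

def _is_valid_time(value):
    # hypothetical helper: True if value matches %H:%M:%S, e.g. "07:30:00"
    try:
        datetime.strptime(value, "%H:%M:%S")
        return True
    except ValueError:
        return False

assert _is_valid_time("07:30:00")
assert not _is_valid_time("7:30")  # missing seconds -> rejected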
<SYSTEM_TASK:>
disable the alarm
<END_TASK>
<USER_TASK:>
Description:
def alarm_disable(self):
"""
disable the alarm
""" |
log.debug("alarm => disable...")
params = {"enabled": False}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) |
<SYSTEM_TASK:>
set the countdown
<END_TASK>
<USER_TASK:>
Description:
def countdown_set(self, duration, start_now):
"""
set the countdown
:param str duration:
:param str start_now:
""" |
log.debug("countdown => set...")
params = {'duration': duration, 'start_now': start_now}
self._app_exec(
"com.lametric.countdown", "countdown.configure", params
) |
<SYSTEM_TASK:>
Call external script.
<END_TASK>
<USER_TASK:>
Description:
def action(self, includes: dict, variables: dict) -> tuple:
"""
Call external script.
:param includes: testcase's includes
:param variables: variables
:return: script's output
""" |
json_args = fill_template_str(json.dumps(self.data), variables)
p = subprocess.Popen([self.module, json_args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# read all output before waiting, so large outputs cannot deadlock the pipe
out = p.stdout.read().decode()
if p.wait() == 0:
debug(out)
return variables, json.loads(out)
else:
warning(out)
raise Exception('Execution failed.') |
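The code above implies a simple contract for the external script: it receives the filled-in JSON as its single command-line argument and must print JSON to stdout, exiting with status 0 on success. A hypothetical script honoring that contract:

#!/usr/bin/env python
# hypothetical external script for the `action` step above
import json
import sys

args = json.loads(sys.argv[1])   # the filled-in template data
result = {"echo": args}          # do the real work here
print(json.dumps(result))        # captured and json-parsed by the caller
sys.exit(0)                      # a non-zero exit raises in the caller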
<SYSTEM_TASK:>
set given credentials and reset the session
<END_TASK>
<USER_TASK:>
Description:
def set_credentials(self, client_id=None, client_secret=None):
"""
set given credentials and reset the session
""" |
self._client_id = client_id
self._client_secret = client_secret
# make sure to reset session due to credential change
self._session = None |
<SYSTEM_TASK:>
init a new oauth2 session that is required to access the cloud
<END_TASK>
<USER_TASK:>
Description:
def init_session(self, get_token=True):
"""
init a new oauth2 session that is required to access the cloud
:param bool get_token: if True, a token will be obtained, after
the session has been created
""" |
if (self._client_id is None) or (self._client_secret is None):
sys.exit(
"Please make sure to set the client id and client secret "
"via the constructor, the environment variables or the config "
"file; otherwise, the LaMetric cloud cannot be accessed. "
"Abort!"
)
self._session = OAuth2Session(
client=BackendApplicationClient(client_id=self._client_id)
)
if get_token is True:
# get oauth token
self.get_token() |
<SYSTEM_TASK:>
get current oauth token
<END_TASK>
<USER_TASK:>
Description:
def get_token(self):
"""
get current oauth token
""" |
self.token = self._session.fetch_token(
token_url=CLOUD_URLS["get_token"][1],
client_id=self._client_id,
client_secret=self._client_secret
) |
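A sketch of the three credential/session methods in sequence; `cloud` stands for an instance of the class above, and the credential values are placeholders:

# hedged usage sketch of the client-credentials flow
cloud.set_credentials(client_id="<client_id>",
                      client_secret="<client_secret>")
cloud.init_session()  # creates the OAuth2 session and fetches a token
# cloud.token now holds the token used for subsequent cloud requests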
<SYSTEM_TASK:>
Use this method to get simple input as python object, with all
<END_TASK>
<USER_TASK:>
Description:
def simple_input(self, variables):
"""
Use this method to get simple input as python object, with all
templates filled in
:param variables:
:return: python object
""" |
json_args = fill_template_str(json.dumps(self.data), variables)
return try_get_objects(json_args) |
<SYSTEM_TASK:>
creates an empty configuration file
<END_TASK>
<USER_TASK:>
Description:
def create(self):
"""
creates an empty configuration file
""" |
if not self.exists():
# create new empyt config file based on template
self.config.add_section("lametric")
self.config.set("lametric", "client_id", "")
self.config.set("lametric", "client_secret", "")
# save new config
self.save()
# stop here, so the user can fill in the config
sys.exit(
"An empty config file '{}' has been created. Please set "
"the corresponding LaMetric API credentials.".format(
self._filename
)
) |
<SYSTEM_TASK:>
save current config to the file
<END_TASK>
<USER_TASK:>
Description:
def save(self):
"""
save current config to the file
""" |
with open(self._filename, "w") as f:
self.config.write(f) |
<SYSTEM_TASK:>
Sleep if rate limiting is required based on current time and last
<END_TASK>
<USER_TASK:>
Description:
def rate_limit_wait(self):
"""
Sleep if rate limiting is required based on current time and last
query.
""" |
if self._rate_limit_dt and self._last_query is not None:
dt = time.time() - self._last_query
wait = self._rate_limit_dt - dt
if wait > 0:
time.sleep(wait) |
<SYSTEM_TASK:>
Query a route.
<END_TASK>
<USER_TASK:>
Description:
def route(self, arg, destination=None, waypoints=None, raw=False, **kwargs):
"""
Query a route.
route(locations): points can be
- a sequence of locations
- a Shapely LineString
route(origin, destination, waypoints=None)
- origin and destination are each a single location
- waypoints are the points to be inserted between the
origin and destination
If waypoints is specified, destination must also be specified
Each location can be:
- string (will be geocoded by the routing provider. Not all
providers accept this as input)
- (longitude, latitude) sequence (tuple, list, numpy array, etc.)
- Shapely Point with x as longitude, y as latitude
Additional parameters
---------------------
raw : bool, default False
Return the raw json dict response from the service
Returns
-------
list of Route objects
If raw is True, returns the json dict instead of converting to Route
objects
Examples
--------
mq = directions.Mapquest(key)
routes = mq.route('1 magazine st. cambridge, ma',
'south station boston, ma')
routes = mq.route('1 magazine st. cambridge, ma',
'south station boston, ma',
waypoints=['700 commonwealth ave. boston, ma'])
# Uses each point in the line as a waypoint. There is a limit to the
# number of waypoints for each service. Consult the docs.
line = LineString(...)
routes = mq.route(line)
# Feel free to mix different location types
routes = mq.route(line.coords[0], 'south station boston, ma',
waypoints=[(-71.103972, 42.349324)])
""" |
points = _parse_points(arg, destination, waypoints)
if len(points) < 2:
raise ValueError('You must specify at least 2 points')
self.rate_limit_wait()
data = self.raw_query(points, **kwargs)
self._last_query = time.time()
if raw:
return data
return self.format_output(data) |
<SYSTEM_TASK:>
sends an SSDP discovery packet to the network and collects
<END_TASK>
<USER_TASK:>
Description:
def discover_upnp_devices(
self, st="upnp:rootdevice", timeout=2, mx=1, retries=1
):
"""
sends an SSDP discovery packet to the network and collects
the devices that reply to it. A dictionary is returned,
using each device's unique USN as key
""" |
# prepare UDP socket to transfer the SSDP packets
s = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
s.settimeout(timeout)
# prepare SSDP discover message
msg = SSDPDiscoveryMessage(mx=mx, st=st)
# try to get devices with multiple retries in case of failure;
# responses accumulate across retries (keyed by the device's usn)
devices = {}
for _ in range(retries):
# send SSDP discovery message
s.sendto(msg.bytes, SSDP_MULTICAST_ADDR)
try:
while True:
# parse response and store it in dict
r = SSDPResponse(s.recvfrom(65507))
devices[r.usn] = r
except socket.timeout:
break
return devices |
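A short usage sketch (`scanner` stands for an instance of the class above):

# discover all UPnP root devices and print their unique service names
devices = scanner.discover_upnp_devices(timeout=2, retries=2)
for usn, response in devices.items():
    print(usn, response.location)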
<SYSTEM_TASK:>
returns a dict of devices that contain the given model name
<END_TASK>
<USER_TASK:>
Description:
def get_filtered_devices(
self, model_name, device_types="upnp:rootdevice", timeout=2
):
"""
returns a dict of devices that contain the given model name
""" |
# get list of all UPNP devices in the network
upnp_devices = self.discover_upnp_devices(st=device_types)
# go through all UPNP devices and filter wanted devices
filtered_devices = collections.defaultdict(dict)
for dev in upnp_devices.values():
try:
# download XML file with information about the device
# from the device's location
r = requests.get(dev.location, timeout=timeout)
if r.status_code == requests.codes.ok:
# parse returned XML
root = ET.fromstring(r.text)
# add shortcut for XML namespace to access sub nodes
ns = {"upnp": "urn:schemas-upnp-org:device-1-0"}
# get device element
device = root.find("upnp:device", ns)
if model_name in device.find(
"upnp:modelName", ns
).text:
# model name is wanted => add to list
# get unique UDN of the device that is used as key
udn = device.find("upnp:UDN", ns).text
# add url base
url_base = root.find("upnp:URLBase", ns)
if url_base is not None:
filtered_devices[udn][
"URLBase"
] = url_base.text
# add interesting device attributes and
# use unique UDN as key
for attr in (
"deviceType", "friendlyName", "manufacturer",
"manufacturerURL", "modelDescription",
"modelName", "modelNumber"
):
el = device.find("upnp:%s" % attr, ns)
if el is not None:
filtered_devices[udn][
attr
] = el.text.strip()
except ET.ParseError:
# just skip devices that are invalid xml
pass
except requests.exceptions.ConnectTimeout:
# just skip devices that are not replying in time
print("Timeout for '%s'. Skipping." % dev.location)
return filtered_devices |
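For instance, LaMetric devices could be located like this (sketch; the "LaMetric" model-name filter is an assumption):

# filter discovered devices down to those whose model name
# contains "LaMetric"
for udn, info in scanner.get_filtered_devices("LaMetric").items():
    print(udn, info.get("friendlyName"), info.get("modelName"))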
<SYSTEM_TASK:>
A variant of multiprocessing.Pool.map that supports lazy evaluation
<END_TASK>
<USER_TASK:>
Description:
def lazy_map(data_processor, data_generator, n_cpus=1, stepsize=None):
"""A variant of multiprocessing.Pool.map that supports lazy evaluation
As with the regular multiprocessing.Pool.map, the processes are spawned off
asynchronously while the results are returned in order. In contrast to
multiprocessing.Pool.map, the iterator (here: data_generator) is not
consumed at once but evaluated lazily which is useful if the iterator
(for example, a generator) contains objects with a large memory footprint.
Parameters
==========
data_processor : func
A processing function that is applied to objects in `data_generator`
data_generator : iterator or generator
A python iterator or generator that yields objects to be fed into the
`data_processor` function for processing.
n_cpus=1 : int (default: 1)
Number of processes to run in parallel.
- If `n_cpus` > 0, the specified number of CPUs will be used.
- If `n_cpus=0`, all available CPUs will be used.
- If `n_cpus` < 0, all available CPUs minus |`n_cpus`| will be used.
stepsize : int or None (default: None)
The number of items to fetch from the iterator to pass on to the
workers at a time.
If `stepsize=None` (default), the stepsize size will
be set equal to `n_cpus`.
Returns
=========
list : A Python list containing the results returned
by the `data_processor` function when called on
all elements yielded by the `data_generator`, in
sorted order. The generator is consumed internally
in chunks of `stepsize` items at a time.
""" |
if not n_cpus:
n_cpus = mp.cpu_count()
elif n_cpus < 0:
n_cpus = mp.cpu_count() + n_cpus  # n_cpus is negative here
if stepsize is None:
stepsize = n_cpus
results = []
with mp.Pool(processes=n_cpus) as p:
while True:
r = p.map(data_processor, islice(data_generator, stepsize))
if r:
results.extend(r)
else:
break
return results |
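A usage sketch, assuming `lazy_map` from above is defined in the current module; the worker must be picklable, i.e. defined at module level:

def square(x):      # top-level function, so it can be pickled by mp.Pool
    return x * x

if __name__ == "__main__":          # required guard for multiprocessing
    gen = (i for i in range(10))    # a lazily evaluated source
    print(lazy_map(square, gen, n_cpus=2, stepsize=4))
    # -> [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]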
<SYSTEM_TASK:>
A variant of multiprocessing.Pool.imap that supports lazy evaluation
<END_TASK>
<USER_TASK:>
Description:
def lazy_imap(data_processor, data_generator, n_cpus=1, stepsize=None):
"""A variant of multiprocessing.Pool.imap that supports lazy evaluation
As with the regular multiprocessing.Pool.imap, the processes are spawned
off asynchronously while the results are returned in order. In contrast to
multiprocessing.Pool.imap, the iterator (here: data_generator) is not
consumed at once but evaluated lazily which is useful if the iterator
(for example, a generator) contains objects with a large memory footprint.
Parameters
==========
data_processor : func
A processing function that is applied to objects in `data_generator`
data_generator : iterator or generator
A python iterator or generator that yields objects to be fed into the
`data_processor` function for processing.
n_cpus=1 : int (default: 1)
Number of processes to run in parallel.
- If `n_cpus` > 0, the specified number of CPUs will be used.
- If `n_cpus=0`, all available CPUs will be used.
- If `n_cpus` < 0, all available CPUs minus |`n_cpus`| will be used.
stepsize : int or None (default: None)
The number of items to fetch from the iterator to pass on to the
workers at a time.
If `stepsize=None` (default), the stepsize size will
be set equal to `n_cpus`.
Yields
=========
list : A Python list containing the *n* results returned
by the `data_processor` function when called on
elements yielded by the `data_generator`, in
sorted order; *n* is equal to `stepsize` (the last
batch may contain fewer items). If `stepsize` is None,
*n* is equal to `n_cpus`.
""" |
if not n_cpus:
n_cpus = mp.cpu_count()
elif n_cpus < 0:
n_cpus = mp.cpu_count() + n_cpus  # n_cpus is negative here
if stepsize is None:
stepsize = n_cpus
with mp.Pool(processes=n_cpus) as p:
while True:
r = p.map(data_processor, islice(data_generator, stepsize))
if r:
yield r
else:
break |
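The generator variant yields one batch of results per iteration (sketch, reusing the `square` worker from the previous example):

if __name__ == "__main__":
    for batch in lazy_imap(square, (i for i in range(6)),
                           n_cpus=2, stepsize=2):
        print(batch)   # [0, 1], then [4, 9], then [16, 25]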