| text_prompt (string, 100-17.7k chars) | code_prompt (string, 7-9.86k chars) |
|---|---|
<SYSTEM_TASK:>
Enumerates the given list of strings into the returned menu.
<END_TASK>
<USER_TASK:>
Description:
def enum_menu(strs, menu=None, *args, **kwargs):
"""Enumerates the given list of strings into returned menu.
**Params**:
      - menu (Menu) - Existing menu to append to. If not provided, a new menu will
be created.
""" |
if not menu:
menu = Menu(*args, **kwargs)
for s in strs:
menu.enum(s)
return menu |
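A minimal usage sketch for the helper above. It assumes the `Menu` class shown here exposes a `show()` method that displays the entries and returns the user's selection (a qprompt-style assumption, not confirmed by this snippet):

# Hypothetical usage; Menu.show() is assumed to exist.
menu = enum_menu(["Create", "Delete", "Quit"])   # entries auto-named "1", "2", "3"
menu.enum("Help")                                # appended as entry "4"
choice = menu.show()                             # assumed to prompt for and return a selection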
<SYSTEM_TASK:>
Prompts the user for input and returns the given answer. Optionally
<END_TASK>
<USER_TASK:>
Description:
def ask(msg="Enter input", fmt=None, dft=None, vld=None, shw=True, blk=False, hlp=None, qstr=True):
"""Prompts the user for input and returns the given answer. Optionally
checks if answer is valid.
**Params**:
- msg (str) - Message to prompt the user with.
- fmt (func) - Function used to format user input.
- dft (int|float|str) - Default value if input is left blank.
- vld ([int|float|str|func]) - Valid input entries.
- shw (bool) - If true, show the user's input as typed.
      - blk (bool) - If true, accept a blank string as valid input. Note that
        supplying a default value will disable accepting blank input.
      - hlp (str) - Extra help text shown when the user enters '?'.
      - qstr (bool) - If true, prefix the message with the question prompt string.
    """ |
global _AUTO
def print_help():
lst = [v for v in vld if not callable(v)]
if blk:
lst.remove("")
for v in vld:
if not callable(v):
continue
if int == v:
lst.append("<int>")
elif float == v:
lst.append("<float>")
elif str == v:
lst.append("<str>")
else:
lst.append("(" + v.__name__ + ")")
if lst:
echo("[HELP] Valid input: %s" % (" | ".join([str(l) for l in lst])))
if hlp:
echo("[HELP] Extra notes: " + hlp)
if blk:
echo("[HELP] Input may be blank.")
vld = vld or []
hlp = hlp or ""
if not hasattr(vld, "__iter__"):
vld = [vld]
if not hasattr(fmt, "__call__"):
        fmt = lambda x: x # NOTE: Defaults to the identity function (input returned unchanged).
msg = "%s%s" % (QSTR if qstr else "", msg)
dft = fmt(dft) if dft != None else None # Prevents showing [None] default.
if dft != None:
msg += " [%s]" % (dft if type(dft) is str else repr(dft))
vld.append(dft)
blk = False
if vld:
# Sanitize valid inputs.
vld = list(set([fmt(v) if fmt(v) else v for v in vld]))
if blk and "" not in vld:
vld.append("")
# NOTE: The following fixes a Py3 related bug found in `0.8.1`.
try: vld = sorted(vld)
except: pass
msg += ISTR
ans = None
while ans is None:
get_input = _input if shw else getpass
ans = get_input(msg)
if _AUTO:
echo(ans)
if "?" == ans:
print_help()
ans = None
continue
if "" == ans:
if dft != None:
ans = dft if not fmt else fmt(dft)
break
if "" not in vld:
ans = None
continue
try:
ans = ans if not fmt else fmt(ans)
except:
ans = None
if vld:
for v in vld:
if type(v) is type and cast(ans, v) is not None:
ans = cast(ans, v)
break
elif hasattr(v, "__call__"):
try:
if v(ans):
break
except:
pass
elif ans in vld:
break
else:
ans = None
return ans |
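A short usage sketch for `ask()`, grounded in the parameter handling shown above (the exact prompt text depends on module-level constants such as QSTR and ISTR that are not shown here):

# Illustrative calls; behavior follows the validation logic above.
port = ask("Port number", vld=int, dft=8080)          # blank input falls back to 8080
name = ask("Username", vld=lambda s: len(s) >= 3)     # validator callables must return truthy
color = ask("Color", vld=["red", "green", "blue"])    # whitelist; entering '?' prints the valid values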
<SYSTEM_TASK:>
Prompts the user for a yes or no answer. Returns True for yes, False
<END_TASK>
<USER_TASK:>
Description:
def ask_yesno(msg="Proceed?", dft=None):
"""Prompts the user for a yes or no answer. Returns True for yes, False
for no.""" |
yes = ["y", "yes", "Y", "YES"]
no = ["n", "no", "N", "NO"]
if dft != None:
dft = yes[0] if (dft in yes or dft == True) else no[0]
return ask(msg, dft=dft, vld=yes+no) in yes |
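A one-line confirmation example for `ask_yesno()`; with dft="n", pressing Enter answers no:

if ask_yesno("Delete all temporary files?", dft="n"):
    print("deleting...")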
<SYSTEM_TASK:>
Prompts the user for a float.
<END_TASK>
<USER_TASK:>
Description:
def ask_float(msg="Enter a float", dft=None, vld=None, hlp=None):
"""Prompts the user for a float.""" |
vld = vld or [float]
return ask(msg, dft=dft, vld=vld, fmt=partial(cast, typ=float), hlp=hlp) |
<SYSTEM_TASK:>
Clears the console.
<END_TASK>
<USER_TASK:>
Description:
def clear():
"""Clears the console.""" |
if sys.platform.startswith("win"):
call("cls", shell=True)
else:
call("clear", shell=True) |
<SYSTEM_TASK:>
Prints a status message at the start and finish of an associated
<END_TASK>
<USER_TASK:>
Description:
def status(*args, **kwargs):
"""Prints a status message at the start and finish of an associated
function. Can be used as a function decorator or as a function that accepts
another function as the first parameter.
**Params**:
The following parameters are available when used as a decorator:
- msg (str) [args] - Message to print at start of `func`.
The following parameters are available when used as a function:
- msg (str) [args] - Message to print at start of `func`.
- func (func) - Function to call. First `args` if using `status()` as a
function. Automatically provided if using `status()` as a decorator.
- fargs (list) - List of `args` passed to `func`.
- fkrgs (dict) - Dictionary of `kwargs` passed to `func`.
- fin (str) [kwargs] - Message to print when `func` finishes.
**Examples**:
::
@qprompt.status("Something is happening...")
def do_something(a):
time.sleep(a)
do_something()
# [!] Something is happening... DONE.
qprompt.status("Doing a thing...", myfunc, [arg1], {krgk: krgv})
# [!] Doing a thing... DONE.
""" |
def decor(func):
@wraps(func)
def wrapper(*args, **krgs):
echo("[!] " + msg, end=" ", flush=True)
result = func(*args, **krgs)
echo(fin, flush=True)
return result
return wrapper
fin = kwargs.pop('fin', "DONE.")
args = list(args)
if len(args) > 1 and callable(args[1]):
msg = args.pop(0)
func = args.pop(0)
try: fargs = args.pop(0)
except: fargs = []
try: fkrgs = args.pop(0)
except: fkrgs = {}
return decor(func)(*fargs, **fkrgs)
msg = args.pop(0)
return decor |
<SYSTEM_TASK:>
Prints a message then exits the program. Optionally pause before exit
<END_TASK>
<USER_TASK:>
Description:
def fatal(msg, exitcode=1, **kwargs):
"""Prints a message then exits the program. Optionally pause before exit
with `pause=True` kwarg.""" |
# NOTE: Can't use normal arg named `pause` since function has same name.
    pause_before_exit = kwargs.pop("pause", False)
echo("[FATAL] " + msg, **kwargs)
if pause_before_exit:
pause()
sys.exit(exitcode) |
<SYSTEM_TASK:>
Outputs or returns a horizontal line of the given character and width.
<END_TASK>
<USER_TASK:>
Description:
def hrule(width=None, char=None):
"""Outputs or returns a horizontal line of the given character and width.
Returns printed string.""" |
width = width or HRWIDTH
char = char or HRCHAR
return echo(getline(char, width)) |
<SYSTEM_TASK:>
Sets the title of the console window.
<END_TASK>
<USER_TASK:>
Description:
def title(msg):
"""Sets the title of the console window.""" |
if sys.platform.startswith("win"):
ctypes.windll.kernel32.SetConsoleTitleW(tounicode(msg)) |
<SYSTEM_TASK:>
Wraps the given item content between horizontal lines. Item can be a
<END_TASK>
<USER_TASK:>
Description:
def wrap(item, args=None, krgs=None, **kwargs):
"""Wraps the given item content between horizontal lines. Item can be a
string or a function.
**Examples**:
::
qprompt.wrap("Hi, this will be wrapped.") # String item.
qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item.
""" |
with Wrap(**kwargs):
if callable(item):
args = args or []
krgs = krgs or {}
item(*args, **krgs)
else:
echo(item) |
<SYSTEM_TASK:>
Attempts to guess the menu entry name from the function name.
<END_TASK>
<USER_TASK:>
Description:
def _guess_name(desc, taken=None):
"""Attempts to guess the menu entry name from the function name.""" |
taken = taken or []
name = ""
# Try to find the shortest name based on the given description.
for word in desc.split():
c = word[0].lower()
if not c.isalnum():
continue
name += c
if name not in taken:
break
# If name is still taken, add a number postfix.
count = 2
while name in taken:
name = name + str(count)
count += 1
return name |
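A worked trace of `_guess_name()` following the logic above:

assert _guess_name("Open file") == "o"                   # first letter is free
assert _guess_name("Open recent", taken=["o"]) == "or"   # extends letter by letter until unique
assert _guess_name("Quit", taken=["q"]) == "q2"          # falls back to a numeric suffix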
<SYSTEM_TASK:>
Add a menu entry whose name will be an auto indexed number.
<END_TASK>
<USER_TASK:>
Description:
def enum(self, desc, func=None, args=None, krgs=None):
"""Add a menu entry whose name will be an auto indexed number.""" |
name = str(len(self.entries)+1)
self.entries.append(MenuEntry(name, desc, func, args or [], krgs or {})) |
<SYSTEM_TASK:>
Runs the function associated with the given entry `name`.
<END_TASK>
<USER_TASK:>
Description:
def run(self, name):
"""Runs the function associated with the given entry `name`.""" |
for entry in self.entries:
if entry.name == name:
run_func(entry)
break |
<SYSTEM_TASK:>
Return two lists of scripts out of the original `scripts` list.
<END_TASK>
<USER_TASK:>
Description:
def partition_scripts(scripts, start_type1, start_type2):
"""Return two lists of scripts out of the original `scripts` list.
    Scripts that begin with a `start_type1` or `start_type2` block are
    returned first. All other scripts are returned second.
""" |
match, other = [], []
for script in scripts:
if (HairballPlugin.script_start_type(script) == start_type1 or
HairballPlugin.script_start_type(script) == start_type2):
match.append(script)
else:
other.append(script)
return match, other |
<SYSTEM_TASK:>
Return a mapping of attributes to whether or not they were initialized.
<END_TASK>
<USER_TASK:>
Description:
def attribute_result(cls, sprites):
"""Return mapping of attributes to if they were initialized or not.""" |
retval = dict((x, True) for x in cls.ATTRIBUTES)
for properties in sprites.values():
for attribute, state in properties.items():
retval[attribute] &= state != cls.STATE_MODIFIED
return retval |
<SYSTEM_TASK:>
Return the state of the scripts for the given attribute.
<END_TASK>
<USER_TASK:>
Description:
def attribute_state(cls, scripts, attribute):
"""Return the state of the scripts for the given attribute.
If there is more than one 'when green flag clicked' script and they
both modify the attribute, then the attribute is considered to not be
initialized.
""" |
green_flag, other = partition_scripts(scripts, cls.HAT_GREEN_FLAG, cls.HAT_CLONE)
block_set = cls.BLOCKMAPPING[attribute]
state = cls.STATE_NOT_MODIFIED
# TODO: Any regular broadcast blocks encountered in the initialization
# zone should be added to this loop for conflict checking.
for script in green_flag:
in_zone = True
for name, level, _ in cls.iter_blocks(script.blocks):
if name == 'broadcast %s and wait':
# TODO: Follow the broadcast and wait scripts that occur in
# the initialization zone
in_zone = False
if (name, 'absolute') in block_set:
if in_zone and level == 0: # Success!
if state == cls.STATE_NOT_MODIFIED:
state = cls.STATE_INITIALIZED
else: # Multiple when green flag clicked conflict
state = cls.STATE_MODIFIED
elif in_zone:
continue # Conservative ignore for nested absolutes
else:
state = cls.STATE_MODIFIED
break # The state of the script has been determined
elif (name, 'relative') in block_set:
state = cls.STATE_MODIFIED
break
if state != cls.STATE_NOT_MODIFIED:
return state
# Check the other scripts to see if the attribute was ever modified
for script in other:
for name, _, _ in cls.iter_blocks(script.blocks):
if name in [x[0] for x in block_set]:
return cls.STATE_MODIFIED
return cls.STATE_NOT_MODIFIED |
<SYSTEM_TASK:>
Output whether or not each attribute was correctly initialized.
<END_TASK>
<USER_TASK:>
Description:
def output_results(cls, sprites):
"""Output whether or not each attribute was correctly initialized.
Attributes that were not modified at all are considered to be properly
initialized.
""" |
print(' '.join(cls.ATTRIBUTES))
format_strs = ['{{{}!s:^{}}}'.format(x, len(x)) for x in
cls.ATTRIBUTES]
print(' '.join(format_strs).format(**cls.attribute_result(sprites))) |
<SYSTEM_TASK:>
Return a mapping of attributes to their initialization state.
<END_TASK>
<USER_TASK:>
Description:
def sprite_changes(cls, sprite):
"""Return a mapping of attributes to their initilization state.""" |
retval = dict((x, cls.attribute_state(sprite.scripts, x)) for x in
(x for x in cls.ATTRIBUTES if x != 'background'))
return retval |
<SYSTEM_TASK:>
Run and return the results of the AttributeInitialization plugin.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
"""Run and return the results of the AttributeInitialization plugin.""" |
changes = dict((x.name, self.sprite_changes(x)) for x in
scratch.sprites)
changes['stage'] = {
'background': self.attribute_state(scratch.stage.scripts,
'costume')}
# self.output_results(changes)
return {'initialized': changes} |
<SYSTEM_TASK:>
Return the initialization state for each variable in variables.
<END_TASK>
<USER_TASK:>
Description:
def variable_state(cls, scripts, variables):
"""Return the initialization state for each variable in variables.
The state is determined based on the scripts passed in via the scripts
parameter.
If there is more than one 'when green flag clicked' script and they
both modify the attribute, then the attribute is considered to not be
initialized.
""" |
def conditionally_set_not_modified():
"""Set the variable to modified if it hasn't been altered."""
state = variables.get(block.args[0], None)
if state == cls.STATE_NOT_MODIFIED:
variables[block.args[0]] = cls.STATE_MODIFIED
green_flag, other = partition_scripts(scripts, cls.HAT_GREEN_FLAG)
variables = dict((x, cls.STATE_NOT_MODIFIED) for x in variables)
for script in green_flag:
in_zone = True
for name, level, block in cls.iter_blocks(script.blocks):
if name == 'broadcast %s and wait':
in_zone = False
if name == 'set %s effect to %s':
state = variables.get(block.args[0], None)
if state is None:
continue # Not a variable we care about
if in_zone and level == 0: # Success!
if state == cls.STATE_NOT_MODIFIED:
state = cls.STATE_INITIALIZED
else: # Multiple when green flag clicked conflict
# TODO: Need to allow multiple sets of a variable
# within the same script
# print 'CONFLICT', script
state = cls.STATE_MODIFIED
elif in_zone:
continue # Conservative ignore for nested absolutes
elif state == cls.STATE_NOT_MODIFIED:
state = cls.STATE_MODIFIED
variables[block.args[0]] = state
elif name == 'change %s effect by %s':
conditionally_set_not_modified()
for script in other:
for name, _, block in cls.iter_blocks(script.blocks):
if name in ('change %s effect by %s', 'set %s effect to %s'):
conditionally_set_not_modified()
return variables |
<SYSTEM_TASK:>
Run and return the results of the VariableInitialization plugin.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
"""Run and return the results of the VariableInitialization plugin.""" |
variables = dict((x, self.variable_state(x.scripts, x.variables))
for x in scratch.sprites)
variables['global'] = self.variable_state(self.iter_scripts(scratch),
scratch.stage.variables)
# Output for now
import pprint
pprint.pprint(variables)
return {'variables': variables} |
<SYSTEM_TASK:>
Output the default sprite names found in the project.
<END_TASK>
<USER_TASK:>
Description:
def finalize(self):
"""Output the default sprite names found in the project.""" |
print('{} default sprite names found:'.format(self.total_default))
for name in self.list_default:
print(name) |
<SYSTEM_TASK:>
Run and return the results from the SpriteNaming plugin.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
"""Run and return the results from the SpriteNaming plugin.""" |
for sprite in self.iter_sprites(scratch):
for default in self.default_names:
if default in sprite.name:
self.total_default += 1
self.list_default.append(sprite.name) |
<SYSTEM_TASK:>
Prepare query and update params.
<END_TASK>
<USER_TASK:>
Description:
def prepare_request_params(self, _query_params, _json_params):
""" Prepare query and update params. """ |
self._query_params = dictset(
_query_params or self.request.params.mixed())
self._json_params = dictset(_json_params)
ctype = self.request.content_type
if self.request.method in ['POST', 'PUT', 'PATCH']:
if ctype == 'application/json':
try:
self._json_params.update(self.request.json)
except simplejson.JSONDecodeError:
log.error(
"Expecting JSON. Received: '{}'. "
"Request: {} {}".format(
self.request.body, self.request.method,
self.request.url))
self._json_params = BaseView.convert_dotted(self._json_params)
self._query_params = BaseView.convert_dotted(self._query_params)
self._params = self._query_params.copy()
self._params.update(self._json_params) |
<SYSTEM_TASK:>
Set self.request.override_renderer if needed.
<END_TASK>
<USER_TASK:>
Description:
def set_override_rendered(self):
""" Set self.request.override_renderer if needed. """ |
if '' in self.request.accept:
self.request.override_renderer = self._default_renderer
elif 'application/json' in self.request.accept:
self.request.override_renderer = 'nefertari_json'
elif 'text/plain' in self.request.accept:
self.request.override_renderer = 'string' |
<SYSTEM_TASK:>
Wrap `self.index` method with ESAggregator.
<END_TASK>
<USER_TASK:>
Description:
def _setup_aggregation(self, aggregator=None):
""" Wrap `self.index` method with ESAggregator.
        This makes `self.index` first try to run aggregation and fall back to
        the original method only on failure. The method is wrapped only if it is
        defined and the `elasticsearch.enable_aggregations` setting is true.
""" |
from nefertari.elasticsearch import ES
if aggregator is None:
aggregator = ESAggregator
aggregations_enabled = (
ES.settings and ES.settings.asbool('enable_aggregations'))
if not aggregations_enabled:
log.debug('Elasticsearch aggregations are not enabled')
return
index = getattr(self, 'index', None)
index_defined = index and index != self.not_allowed_action
if index_defined:
self.index = aggregator(self).wrap(self.index) |
<SYSTEM_TASK:>
Query ES collection and return results.
<END_TASK>
<USER_TASK:>
Description:
def get_collection_es(self):
""" Query ES collection and return results.
        This is the default implementation of querying the ES collection with
        `self._query_params`. It must return the found ES collection
        results for the default response renderers to work properly.
""" |
from nefertari.elasticsearch import ES
return ES(self.Model.__name__).get_collection(**self._query_params) |
<SYSTEM_TASK:>
Set public limits if auth is enabled and user is not
<END_TASK>
<USER_TASK:>
Description:
def set_public_limits(self):
""" Set public limits if auth is enabled and user is not
authenticated.
Also sets default limit for GET, HEAD requests.
""" |
if self.request.method.upper() in ['GET', 'HEAD']:
self._query_params.process_int_param('_limit', 20)
if self._auth_enabled and not getattr(self.request, 'user', None):
wrappers.set_public_limits(self) |
<SYSTEM_TASK:>
Convert object IDs from `self._json_params` to objects if needed.
<END_TASK>
<USER_TASK:>
Description:
def convert_ids2objects(self):
""" Convert object IDs from `self._json_params` to objects if needed.
Only IDs that belong to relationship field of `self.Model`
are converted.
""" |
if not self.Model:
log.info("%s has no model defined" % self.__class__.__name__)
return
for field in self._json_params.keys():
if not engine.is_relationship_field(field, self.Model):
continue
rel_model_cls = engine.get_relationship_cls(field, self.Model)
self.id2obj(field, rel_model_cls) |
<SYSTEM_TASK:>
Set up default wrappers.
<END_TASK>
<USER_TASK:>
Description:
def setup_default_wrappers(self):
""" Setup defaulf wrappers.
Wrappers are applied when view method does not return instance
of Response. In this case nefertari renderers call wrappers and
handle response generation.
""" |
# Index
self._after_calls['index'] = [
wrappers.wrap_in_dict(self.request),
wrappers.add_meta(self.request),
wrappers.add_object_url(self.request),
]
# Show
self._after_calls['show'] = [
wrappers.wrap_in_dict(self.request),
wrappers.add_meta(self.request),
wrappers.add_object_url(self.request),
]
# Create
self._after_calls['create'] = [
wrappers.wrap_in_dict(self.request),
wrappers.add_meta(self.request),
wrappers.add_object_url(self.request),
]
# Update
self._after_calls['update'] = [
wrappers.wrap_in_dict(self.request),
wrappers.add_meta(self.request),
wrappers.add_object_url(self.request),
]
# Replace
self._after_calls['replace'] = [
wrappers.wrap_in_dict(self.request),
wrappers.add_meta(self.request),
wrappers.add_object_url(self.request),
]
# Privacy wrappers
if self._auth_enabled:
for meth in ('index', 'show', 'create', 'update', 'replace'):
self._after_calls[meth] += [
wrappers.apply_privacy(self.request),
]
for meth in ('update', 'replace', 'update_many'):
self._before_calls[meth] += [
wrappers.apply_request_privacy(
self.Model, self._json_params),
] |
<SYSTEM_TASK:>
Register new user by POSTing all required data.
<END_TASK>
<USER_TASK:>
Description:
def register(self):
""" Register new user by POSTing all required data. """ |
user, created = self.Model.create_account(
self._json_params)
if not created:
raise JHTTPConflict('Looks like you already have an account.')
self.request._user = user
pk_field = user.pk_field()
headers = remember(self.request, getattr(user, pk_field))
return JHTTPOk('Registered', headers=headers) |
<SYSTEM_TASK:>
Register a new user by POSTing all required data.
<END_TASK>
<USER_TASK:>
Description:
def register(self):
""" Register a new user by POSTing all required data.
User's `Authorization` header value is returned in `WWW-Authenticate`
header.
""" |
user, created = self.Model.create_account(self._json_params)
if user.api_key is None:
raise JHTTPBadRequest('Failed to generate ApiKey for user')
if not created:
raise JHTTPConflict('Looks like you already have an account.')
self.request._user = user
headers = remember(self.request, user.username)
return JHTTPOk('Registered', headers=headers) |
<SYSTEM_TASK:>
Claim current token by POSTing 'login' and 'password'.
<END_TASK>
<USER_TASK:>
Description:
def claim_token(self, **params):
"""Claim current token by POSTing 'login' and 'password'.
User's `Authorization` header value is returned in `WWW-Authenticate`
header.
""" |
self._json_params.update(params)
success, self.user = self.Model.authenticate_by_password(
self._json_params)
if success:
headers = remember(self.request, self.user.username)
return JHTTPOk('Token claimed', headers=headers)
if self.user:
raise JHTTPUnauthorized('Wrong login or password')
else:
raise JHTTPNotFound('User not found') |
<SYSTEM_TASK:>
Reset current token by POSTing 'login' and 'password'.
<END_TASK>
<USER_TASK:>
Description:
def reset_token(self, **params):
""" Reset current token by POSTing 'login' and 'password'.
User's `Authorization` header value is returned in `WWW-Authenticate`
header.
""" |
response = self.claim_token(**params)
if not self.user:
return response
self.user.api_key.reset_token()
headers = remember(self.request, self.user.username)
return JHTTPOk('Registered', headers=headers) |
<SYSTEM_TASK:>
Apply privacy to nested documents.
<END_TASK>
<USER_TASK:>
Description:
def _apply_nested_privacy(self, data):
""" Apply privacy to nested documents.
:param data: Dict of data to which privacy is already applied.
""" |
kw = {
'is_admin': self.is_admin,
'drop_hidden': self.drop_hidden,
}
for key, val in data.items():
if is_document(val):
data[key] = apply_privacy(self.request)(result=val, **kw)
elif isinstance(val, list) and val and is_document(val[0]):
data[key] = [apply_privacy(self.request)(result=doc, **kw)
for doc in val]
return data |
<SYSTEM_TASK:>
Add a resource with all of its child resources to the current
<END_TASK>
<USER_TASK:>
Description:
def add_from_child(self, resource, **kwargs):
""" Add a resource with its all children resources to the current
resource.
""" |
new_resource = self.add(
resource.member_name, resource.collection_name, **kwargs)
for child in resource.children:
new_resource.add_from_child(child, **kwargs) |
<SYSTEM_TASK:>
Add the path of a data set to the list of available sets
<END_TASK>
<USER_TASK:>
Description:
def add(self, path):
"""
Add the path of a data set to the list of available sets
NOTE: a data set is assumed to be a pickled
and gzip compressed Pandas DataFrame
Parameters
----------
path : str
""" |
name_with_ext = os.path.split(path)[1] # split directory and filename
name = name_with_ext.split('.')[0] # remove extension
self.list.update({name: path}) |
<SYSTEM_TASK:>
Unpacks a data set to a Pandas DataFrame
<END_TASK>
<USER_TASK:>
Description:
def unpack(self, name):
"""
Unpacks a data set to a Pandas DataFrame
Parameters
----------
name : str
        call `.list` to see all available datasets
Returns
-------
pd.DataFrame
""" |
path = self.list[name]
df = pd.read_pickle(path, compression='gzip')
return df |
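A hypothetical usage sketch for the two methods above, assuming they belong to a small catalog object that keeps the name-to-path mapping in `self.list` (the path and name are illustrative only):

catalog.add("/data/archive/prices.pkl.gz")   # registered under the name "prices"
df = catalog.unpack("prices")                # loads the gzip-compressed, pickled DataFrame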
<SYSTEM_TASK:>
translate each sequence into six reading frames
<END_TASK>
<USER_TASK:>
Description:
def six_frame(genome, table, minimum = 10):
"""
translate each sequence into six reading frames
""" |
for seq in parse_fasta(genome):
dna = Seq(seq[1].upper().replace('U', 'T'), IUPAC.ambiguous_dna)
counter = 0
for sequence in ['f', dna], ['rc', dna.reverse_complement()]:
direction, sequence = sequence
for frame in range(0, 3):
for prot in \
sequence[frame:].\
translate(table = table, to_stop = False).split('*'):
if len(prot) < minimum:
continue
counter += 1
header = '%s_%s table=%s frame=%s-%s %s' % \
(seq[0].split()[0], counter, table, frame+1, \
direction, ' '.join(seq[0].split()[1:]))
yield [header, prot] |
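A hedged usage sketch for `six_frame()`; it assumes `parse_fasta`, Biopython's `Seq`, and the IUPAC alphabet are importable as used above, and the file name is illustrative:

# Translate a genome in all six frames with translation table 11,
# keeping only peptides of at least 30 residues.
for header, protein in six_frame("genome.fna", table=11, minimum=30):
    print(header, len(protein))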
<SYSTEM_TASK:>
check for large gaps between alignment windows
<END_TASK>
<USER_TASK:>
Description:
def check_gaps(matches, gap_threshold = 0):
"""
check for large gaps between alignment windows
""" |
gaps = []
prev = None
for match in sorted(matches, key = itemgetter(0)):
if prev is None:
prev = match
continue
if match[0] - prev[1] >= gap_threshold:
gaps.append([prev, match])
prev = match
return [[i[0][1], i[1][0]] for i in gaps] |
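A worked example of `check_gaps()`: two alignment windows separated by 40 unaligned columns, with a 20-column threshold, yield one reported gap spanning the uncovered stretch.

matches = [[0, 10], [50, 60]]
check_gaps(matches, gap_threshold=20)   # -> [[10, 50]]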
<SYSTEM_TASK:>
determine if sequence has already hit the same part of the model,
<END_TASK>
<USER_TASK:>
Description:
def check_overlap(current, hit, overlap = 200):
"""
determine if sequence has already hit the same part of the model,
indicating that this hit is for another 16S rRNA gene
""" |
for prev in current:
p_coords = prev[2:4]
coords = hit[2:4]
if get_overlap(coords, p_coords) >= overlap:
return True
return False |
<SYSTEM_TASK:>
get info from either ssu-cmsearch or cmsearch output
<END_TASK>
<USER_TASK:>
Description:
def get_info(line, bit_thresh):
"""
get info from either ssu-cmsearch or cmsearch output
""" |
if len(line) >= 18: # output is from cmsearch
id, model, bit, inc = line[0].split()[0], line[2], float(line[14]), line[16]
sstart, send, strand = int(line[7]), int(line[8]), line[9]
mstart, mend = int(line[5]), int(line[6])
elif len(line) == 9: # output is from ssu-cmsearch
if bit_thresh == 0:
print('# ssu-cmsearch does not include a model-specific inclusion threshold, ', file=sys.stderr)
print('# please specify a bit score threshold', file=sys.stderr)
exit()
id, model, bit = line[1].split()[0], line[0], float(line[6])
inc = '!' # this is not a feature of ssu-cmsearch
sstart, send = int(line[2]), int(line[3])
mstart, mend = int(4), int(5)
if send >= sstart:
strand = '+'
else:
strand = '-'
else:
print('# unsupported hmm format:', file=sys.stderr)
print('# provide tabular output from ssu-cmsearch and cmsearch supported', file=sys.stderr)
exit()
coords = [sstart, send]
sstart, send = min(coords), max(coords)
mcoords = [mstart, mend]
mstart, mend = min(mcoords), max(mcoords)
return id, model, bit, sstart, send, mstart, mend, strand, inc |
<SYSTEM_TASK:>
check to see how much of the buffer is being used
<END_TASK>
<USER_TASK:>
Description:
def check_buffer(coords, length, buffer):
"""
check to see how much of the buffer is being used
""" |
s = min(coords[0], buffer)
e = min(length - coords[1], buffer)
return [s, e] |
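A worked example of `check_buffer()`: a hit spanning positions 150-900 on a 1000 bp sequence with a 200 bp buffer leaves 150 bp of usable buffer on the left and only 100 bp on the right.

check_buffer([150, 900], 1000, 200)   # -> [150, 100]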
<SYSTEM_TASK:>
Lazy imports to prevent circular dependencies between this module and utils
<END_TASK>
<USER_TASK:>
Description:
def _import_parsers():
""" Lazy imports to prevent circular dependencies between this module and utils """ |
global ARCGIS_NODES
global ARCGIS_ROOTS
global ArcGISParser
global FGDC_ROOT
global FgdcParser
global ISO_ROOTS
global IsoParser
global VALID_ROOTS
if ARCGIS_NODES is None or ARCGIS_ROOTS is None or ArcGISParser is None:
from gis_metadata.arcgis_metadata_parser import ARCGIS_NODES
from gis_metadata.arcgis_metadata_parser import ARCGIS_ROOTS
from gis_metadata.arcgis_metadata_parser import ArcGISParser
if FGDC_ROOT is None or FgdcParser is None:
from gis_metadata.fgdc_metadata_parser import FGDC_ROOT
from gis_metadata.fgdc_metadata_parser import FgdcParser
if ISO_ROOTS is None or IsoParser is None:
from gis_metadata.iso_metadata_parser import ISO_ROOTS
from gis_metadata.iso_metadata_parser import IsoParser
if VALID_ROOTS is None:
VALID_ROOTS = {FGDC_ROOT}.union(ARCGIS_ROOTS + ISO_ROOTS) |
<SYSTEM_TASK:>
Dynamically sets attributes from a Dictionary passed in by children.
<END_TASK>
<USER_TASK:>
Description:
def _init_metadata(self):
"""
Dynamically sets attributes from a Dictionary passed in by children.
The Dictionary will contain the name of each attribute as keys, and
either an XPATH mapping to a text value in _xml_tree, or a function
that takes no parameters and returns the intended value.
""" |
if self._data_map is None:
self._init_data_map()
validate_properties(self._data_map, self._metadata_props)
# Parse attribute values and assign them: key = parse(val)
for prop in self._data_map:
setattr(self, prop, parse_property(self._xml_tree, None, self._data_map, prop))
self.has_data = any(getattr(self, prop) for prop in self._data_map) |
<SYSTEM_TASK:>
Default parsing operation for a complex struct
<END_TASK>
<USER_TASK:>
Description:
def _parse_complex(self, prop):
""" Default parsing operation for a complex struct """ |
xpath_root = None
xpath_map = self._data_structures[prop]
return parse_complex(self._xml_tree, xpath_root, xpath_map, prop) |
<SYSTEM_TASK:>
Default parsing operation for lists of complex structs
<END_TASK>
<USER_TASK:>
Description:
def _parse_complex_list(self, prop):
""" Default parsing operation for lists of complex structs """ |
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
return parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop) |
<SYSTEM_TASK:>
Creates and returns a Date Types data structure parsed from the metadata
<END_TASK>
<USER_TASK:>
Description:
def _parse_dates(self, prop=DATES):
""" Creates and returns a Date Types data structure parsed from the metadata """ |
return parse_dates(self._xml_tree, self._data_structures[prop]) |
<SYSTEM_TASK:>
Default update operation for lists of complex structs
<END_TASK>
<USER_TASK:>
Description:
def _update_complex_list(self, **update_props):
""" Default update operation for lists of complex structs """ |
prop = update_props['prop']
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
return update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props) |
<SYSTEM_TASK:>
Regex-escaped string with all one-symbol operators
<END_TASK>
<USER_TASK:>
Description:
def one_symbol_ops_str(self) -> str:
"""Regex-escaped string with all one-symbol operators""" |
return re.escape(''.join((key for key in self.ops.keys() if len(key) == 1))) |
<SYSTEM_TASK:>
plot % of gaps at each position
<END_TASK>
<USER_TASK:>
Description:
def plot_gaps(plot, columns):
"""
plot % of gaps at each position
""" |
from plot_window import window_plot_convolve as plot_window
# plot_window([columns], len(columns)*.01, plot)
plot_window([[100 - i for i in columns]], len(columns)*.01, plot) |
<SYSTEM_TASK:>
Iterate through all categories in an OrderedDict and return category name if SampleID
<END_TASK>
<USER_TASK:>
Description:
def sample_group(sid, groups):
"""
Iterate through all categories in an OrderedDict and return category name if SampleID
present in that category.
:type sid: str
:param sid: SampleID from dataset.
:type groups: OrderedDict
:param groups: Returned dict from phylotoast.util.gather_categories() function.
:return type: str
:return: Category name used to classify `sid`.
""" |
for name in groups:
if sid in groups[name].sids:
return name |
<SYSTEM_TASK:>
Combine multiple sets to create a single larger set.
<END_TASK>
<USER_TASK:>
Description:
def combine_sets(*sets):
"""
Combine multiple sets to create a single larger set.
""" |
combined = set()
for s in sets:
combined.update(s)
return combined |
<SYSTEM_TASK:>
Get unique OTUIDs of each category.
<END_TASK>
<USER_TASK:>
Description:
def unique_otuids(groups):
"""
Get unique OTUIDs of each category.
:type groups: Dict
:param groups: {Category name: OTUIDs in category}
:return type: dict
:return: Dict keyed on category name and unique OTUIDs as values.
""" |
    uniques = {key: set() for key in groups}
    group_otuids = list(groups.values())  # dict views are not sliceable in Python 3
    for i, group in enumerate(groups):
        to_combine = group_otuids[:i] + group_otuids[i+1:]
        combined = combine_sets(*to_combine)
        uniques[group] = groups[group].difference(combined)
    return uniques |
<SYSTEM_TASK:>
Get shared OTUIDs between all unique combinations of groups.
<END_TASK>
<USER_TASK:>
Description:
def shared_otuids(groups):
"""
Get shared OTUIDs between all unique combinations of groups.
:type groups: Dict
:param groups: {Category name: OTUIDs in category}
:return type: dict
:return: Dict keyed on group combination and their shared OTUIDs as values.
""" |
for g in sorted(groups):
print("Number of OTUs in {0}: {1}".format(g, len(groups[g].results["otuids"])))
number_of_categories = len(groups)
shared = defaultdict()
for i in range(2, number_of_categories+1):
for j in combinations(sorted(groups), i):
combo_name = " & ".join(list(j))
            # initialize combo values
            shared[combo_name] = groups[j[0]].results["otuids"].copy()
"""iterate through all groups and keep updating combo OTUIDs with set
intersection_update"""
for grp in j[1:]:
shared[combo_name].intersection_update(groups[grp].results["otuids"])
return shared |
<SYSTEM_TASK:>
Given a path, the method writes out one file for each group name in the
<END_TASK>
<USER_TASK:>
Description:
def write_uniques(path, prefix, uniques):
"""
Given a path, the method writes out one file for each group name in the
uniques dictionary with the file name in the pattern
PATH/prefix_group.txt
with each file containing the unique OTUIDs found when comparing that group
to all the other groups in uniques.
:type path: str
:param path: Output files will be saved in this PATH.
:type prefix: str
:param prefix: Prefix name added in front of output filename.
:type uniques: dict
:param uniques: Output from unique_otus() function.
""" |
for group in uniques:
fp = osp.join(path, "{}_{}.txt".format(prefix, group))
with open(fp, "w") as outf:
outf.write("\n".join(uniques[group])) |
<SYSTEM_TASK:>
Parse the records in a FASTA-format file by first reading the entire file into memory.
<END_TASK>
<USER_TASK:>
Description:
def storeFASTA(fastaFNH):
"""
Parse the records in a FASTA-format file by first reading the entire file into memory.
    :type fastaFNH: path to FASTA file or open file handle
    :param fastaFNH: The data source from which to parse the FASTA records. Expects the
                     input to resolve to a collection that can be iterated through, such as
                     an open file handle.
:rtype: tuple
:return: FASTA records containing entries for id, description and data.
""" |
fasta = file_handle(fastaFNH).read()
return [FASTARecord(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:]))
for rec in (x.strip().split("\n") for x in fasta.split(">")[1:])] |
<SYSTEM_TASK:>
Parse the records in a FASTA-format file keeping the file open, and reading through
<END_TASK>
<USER_TASK:>
Description:
def parseFASTA(fastaFNH):
"""
Parse the records in a FASTA-format file keeping the file open, and reading through
one line at a time.
    :type fastaFNH: path to FASTA file or open file handle
    :param fastaFNH: The data source from which to parse the FASTA records.
                     Expects the input to resolve to a collection that can be iterated
                     through, such as an open file handle.
:rtype: tuple
:return: FASTA records containing entries for id, description and data.
""" |
recs = []
seq = []
seqID = ""
descr = ""
for line in file_handle(fastaFNH):
        line = line.strip()
        if not line or line[0] == ";":
            continue
if line[0] == ">":
# conclude previous record
if seq:
recs.append(FASTARecord(seqID, descr, "".join(seq)))
seq = []
# start new record
            line = line[1:].split(None, 1)
            seqID = line[0]
            descr = line[1] if len(line) > 1 else ""
else:
seq.append(line)
# catch last seq in file
if seq:
recs.append(FASTARecord(seqID, descr, "".join(seq)))
return recs |
<SYSTEM_TASK:>
Trains a k-nearest neighbors classifier for face recognition.
<END_TASK>
<USER_TASK:>
Description:
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
"""
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
    :param knn_algo: (optional) underlying data structure to support knn. Default is ball_tree.
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
""" |
X = []
y = []
# Loop through each person in the training set
for class_dir in os.listdir(train_dir):
if not os.path.isdir(os.path.join(train_dir, class_dir)):
continue
# Loop through each training image for the current person
for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
face_bounding_boxes = face_recognition.face_locations(image)
if len(face_bounding_boxes) != 1:
# If there are no people (or too many people) in a training image, skip the image.
if verbose:
print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
else:
# Add face encoding for current image to the training set
X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])
y.append(class_dir)
# Determine how many neighbors to use for weighting in the KNN classifier
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Create and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f:
pickle.dump(knn_clf, f)
return knn_clf |
<SYSTEM_TASK:>
Recognizes faces in given image using a trained KNN classifier
<END_TASK>
<USER_TASK:>
Description:
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
"""
Recognizes faces in given image using a trained KNN classifier
:param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be specified.
:param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
of mis-classifying an unknown person as a known one.
:return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
For faces of unrecognized persons, the name 'unknown' will be returned.
""" |
if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
raise Exception("Invalid image path: {}".format(X_img_path))
if knn_clf is None and model_path is None:
raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")
# Load a trained KNN model (if one was passed in)
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
# Load image file and find face locations
X_img = face_recognition.load_image_file(X_img_path)
X_face_locations = face_recognition.face_locations(X_img)
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
return []
    # Find encodings for faces in the test image
faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)
# Use the KNN model to find the best matches for the test face
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
# Predict classes and remove classifications that aren't within the threshold
return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)] |
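A usage sketch tying `train()` and `predict()` together. The directory layout and file names are hypothetical, and it assumes the two helpers above are importable in the current module alongside `os`:

# Train once, then classify every image in a test folder.
classifier = train("knn_examples/train", model_save_path="trained_knn_model.clf", n_neighbors=2)
for image_file in os.listdir("knn_examples/test"):
    full_path = os.path.join("knn_examples/test", image_file)
    for name, (top, right, bottom, left) in predict(full_path, model_path="trained_knn_model.clf"):
        print("- Found {} at ({}, {})".format(name, left, top))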
<SYSTEM_TASK:>
Shows the face recognition results visually.
<END_TASK>
<USER_TASK:>
Description:
def show_prediction_labels_on_image(img_path, predictions):
"""
Shows the face recognition results visually.
:param img_path: path to image to be recognized
:param predictions: results of the predict function
:return:
""" |
pil_image = Image.open(img_path).convert("RGB")
draw = ImageDraw.Draw(pil_image)
for name, (top, right, bottom, left) in predictions:
# Draw a box around the face using the Pillow module
draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
# There's a bug in Pillow where it blows up with non-UTF-8 text
# when using the default bitmap font
name = name.encode("UTF-8")
# Draw a label with a name below the face
text_width, text_height = draw.textsize(name)
draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
pil_image.show() |
<SYSTEM_TASK:>
Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
<END_TASK>
<USER_TASK:>
Description:
def face_distance(face_encodings, face_to_compare):
"""
Given a list of face encodings, compare them to a known face encoding and get a euclidean distance
for each comparison face. The distance tells you how similar the faces are.
    :param face_encodings: List of face encodings to compare
:param face_to_compare: A face encoding to compare against
:return: A numpy ndarray with the distance for each face in the same order as the 'faces' array
""" |
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare, axis=1) |
<SYSTEM_TASK:>
Returns a 2d array of bounding boxes of human faces in an image using the cnn face detector
<END_TASK>
<USER_TASK:>
Description:
def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128):
"""
    Returns a 2d array of bounding boxes of human faces in an image using the cnn face detector
If you are using a GPU, this can give you much faster results since the GPU
can process batches of images at once. If you aren't using a GPU, you don't need this function.
    :param images: A list of images (each as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:param batch_size: How many images to include in each GPU processing batch.
:return: A list of tuples of found face locations in css (top, right, bottom, left) order
""" |
def convert_cnn_detections_to_css(detections):
return [_trim_css_to_bounds(_rect_to_css(face.rect), images[0].shape) for face in detections]
raw_detections_batched = _raw_face_locations_batched(images, number_of_times_to_upsample, batch_size)
return list(map(convert_cnn_detections_to_css, raw_detections_batched)) |
<SYSTEM_TASK:>
Given an image, return the 128-dimension face encoding for each face in the image.
<END_TASK>
<USER_TASK:>
Description:
def face_encodings(face_image, known_face_locations=None, num_jitters=1):
"""
Given an image, return the 128-dimension face encoding for each face in the image.
:param face_image: The image that contains one or more faces
:param known_face_locations: Optional - the bounding boxes of each face if you already know them.
:param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
:return: A list of 128-dimensional face encodings (one for each face in the image)
""" |
raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small")
return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks] |
<SYSTEM_TASK:>
Return the Catalyst datatype from the size of integers.
<END_TASK>
<USER_TASK:>
Description:
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
""" |
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType |
<SYSTEM_TASK:>
Return whether there is NullType in `dt` or not
<END_TASK>
<USER_TASK:>
Description:
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """ |
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
        return _has_nulltype(dt.elementType)
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType) |
<SYSTEM_TASK:>
Create a converter to drop the names of fields in obj
<END_TASK>
<USER_TASK:>
Description:
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """ |
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct |
<SYSTEM_TASK:>
Convert Spark data type to pyarrow type
<END_TASK>
<USER_TASK:>
Description:
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
""" |
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
elif type(dt) == StructType:
if any(type(field.dataType) == StructType for field in dt):
raise TypeError("Nested StructType not supported in conversion to Arrow")
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in dt]
arrow_type = pa.struct(fields)
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type |
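A quick illustration of `to_arrow_type()`, assuming pyarrow is installed and the Spark type classes referenced above are imported from `pyspark.sql.types`:

from pyspark.sql.types import IntegerType, ArrayType
to_arrow_type(IntegerType())              # pyarrow int32 type
to_arrow_type(ArrayType(IntegerType()))   # pyarrow list<item: int32> type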
<SYSTEM_TASK:>
Convert a schema from Spark to Arrow
<END_TASK>
<USER_TASK:>
Description:
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
""" |
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields) |
<SYSTEM_TASK:>
Convert pyarrow type to Spark data type.
<END_TASK>
<USER_TASK:>
Description:
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
""" |
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
elif types.is_struct(at):
if any(types.is_struct(field.type) for field in at):
raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at))
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in at])
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type |
<SYSTEM_TASK:>
Convert schema from Arrow to Spark.
<END_TASK>
<USER_TASK:>
Description:
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
""" |
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema]) |
<SYSTEM_TASK:>
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
<END_TASK>
<USER_TASK:>
Description:
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
""" |
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s |
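A small illustration of the conversion above (assumes pandas and pyspark are installed, since the helper calls `require_minimum_pandas_version`):

import pandas as pd
s = pd.Series(pd.to_datetime(["2015-06-15 16:00:00"])).dt.tz_localize("UTC")
_check_series_localize_timestamps(s, "America/New_York")
# -> 2015-06-15 12:00:00 as a tz-naive timestamp (New York wall-clock time)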
<SYSTEM_TASK:>
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
<END_TASK>
<USER_TASK:>
Description:
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
""" |
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf |
<SYSTEM_TASK:>
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
<END_TASK>
<USER_TASK:>
Description:
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
""" |
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
        # When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp is during the hour when the clock is adjusted backward due to
        # daylight saving time (dst).
# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
        # a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
        # dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
# Here we explicit choose to use standard time. This matches the default behavior of
# pytz.
#
# Here are some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s |
<SYSTEM_TASK:>
Convert timestamp to timezone-naive in the specified timezone or local timezone
<END_TASK>
<USER_TASK:>
Description:
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
""" |
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s |
<SYSTEM_TASK:>
Get all the directories
<END_TASK>
<USER_TASK:>
Description:
def _get_local_dirs(sub):
""" Get all the directories """ |
path = os.environ.get("SPARK_LOCAL_DIRS", "/tmp")
dirs = path.split(",")
if len(dirs) > 1:
# different order in different processes and instances
rnd = random.Random(os.getpid() + id(dirs))
random.shuffle(dirs, rnd.random)
return [os.path.join(d, "python", str(os.getpid()), sub) for d in dirs] |
<SYSTEM_TASK:>
Combine the items by creator and combiner
<END_TASK>
<USER_TASK:>
Description:
def mergeValues(self, iterator):
""" Combine the items by creator and combiner """ |
# speedup attribute lookup
creator, comb = self.agg.createCombiner, self.agg.mergeValue
c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch
limit = self.memory_limit
for k, v in iterator:
d = pdata[hfun(k)] if pdata else data
d[k] = comb(d[k], v) if k in d else creator(v)
c += 1
if c >= batch:
if get_used_memory() >= limit:
self._spill()
limit = self._next_limit()
batch /= 2
c = 0
else:
batch *= 1.5
if get_used_memory() >= limit:
self._spill() |
<SYSTEM_TASK:>
Return all merged items as iterator
<END_TASK>
<USER_TASK:>
Description:
def items(self):
""" Return all merged items as iterator """ |
if not self.pdata and not self.spills:
return iter(self.data.items())
return self._external_items() |
<SYSTEM_TASK:>
Return all partitioned items as iterator
<END_TASK>
<USER_TASK:>
Description:
def _external_items(self):
""" Return all partitioned items as iterator """ |
assert not self.data
if any(self.pdata):
self._spill()
# disable partitioning and spilling when merging combiners from disk
self.pdata = []
try:
for i in range(self.partitions):
for v in self._merged_items(i):
yield v
self.data.clear()
# remove the merged partition
for j in range(self.spills):
path = self._get_spill_dir(j)
os.remove(os.path.join(path, str(i)))
finally:
self._cleanup() |
<SYSTEM_TASK:>
merge the partitioned items and return them as an iterator
<END_TASK>
<USER_TASK:>
Description:
def _recursive_merged_items(self, index):
"""
merge the partitioned items and return them as an iterator
If one partition cannot fit in memory, it will be further
partitioned and merged recursively.
""" |
subdirs = [os.path.join(d, "parts", str(index)) for d in self.localdirs]
m = ExternalMerger(self.agg, self.memory_limit, self.serializer, subdirs,
self.scale * self.partitions, self.partitions, self.batch)
m.pdata = [{} for _ in range(self.partitions)]
limit = self._next_limit()
for j in range(self.spills):
path = self._get_spill_dir(j)
p = os.path.join(path, str(index))
with open(p, 'rb') as f:
m.mergeCombiners(self.serializer.load_stream(f), 0)
if get_used_memory() > limit:
m._spill()
limit = self._next_limit()
return m._external_items() |
<SYSTEM_TASK:>
Sort the elements in the iterator, doing an external sort when the memory
<END_TASK>
<USER_TASK:>
Description:
def sorted(self, iterator, key=None, reverse=False):
"""
Sort the elements in the iterator, doing an external sort when memory
usage goes above the limit.
""" |
global MemoryBytesSpilled, DiskBytesSpilled
batch, limit = 100, self._next_limit()
chunks, current_chunk = [], []
iterator = iter(iterator)
while True:
# pick elements in batch
chunk = list(itertools.islice(iterator, batch))
current_chunk.extend(chunk)
if len(chunk) < batch:
break
used_memory = get_used_memory()
if used_memory > limit:
# sorting them in place saves memory
current_chunk.sort(key=key, reverse=reverse)
path = self._get_path(len(chunks))
with open(path, 'wb') as f:
self.serializer.dump_stream(current_chunk, f)
def load(f):
for v in self.serializer.load_stream(f):
yield v
# close the file explicitly once we consume all the items
# to avoid a ResourceWarning in Python 3
f.close()
chunks.append(load(open(path, 'rb')))
current_chunk = []
MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
DiskBytesSpilled += os.path.getsize(path)
os.unlink(path) # data will be deleted after close
elif not chunks:
batch = min(int(batch * 1.5), 10000)
current_chunk.sort(key=key, reverse=reverse)
if not chunks:
return current_chunk
if current_chunk:
chunks.append(iter(current_chunk))
return heapq.merge(chunks, key=key, reverse=reverse) |
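The core idea (sort fixed-size chunks, spill them, then lazily merge the sorted streams) can be reproduced with the standard library alone. A toy in-memory sketch, with no disk spilling and an arbitrary chunk size:

import heapq
import itertools

def chunked_sort(items, chunk_size=3, key=None):
    it = iter(items)
    chunks = []
    while True:
        chunk = sorted(itertools.islice(it, chunk_size), key=key)
        if not chunk:
            break
        chunks.append(chunk)
    return heapq.merge(*chunks, key=key)   # standard-library merge of sorted runs

print(list(chunked_sort([5, 1, 4, 2, 9, 3, 7])))   # [1, 2, 3, 4, 5, 7, 9]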
<SYSTEM_TASK:>
load a partition from disk, then sort and group by key
<END_TASK>
<USER_TASK:>
Description:
def _merge_sorted_items(self, index):
""" load a partition from disk, then sort and group by key """ |
def load_partition(j):
path = self._get_spill_dir(j)
p = os.path.join(path, str(index))
with open(p, 'rb', 65536) as f:
for v in self.serializer.load_stream(f):
yield v
disk_items = [load_partition(j) for j in range(self.spills)]
if self._sorted:
# all the partitions are already sorted
sorted_items = heapq.merge(disk_items, key=operator.itemgetter(0))
else:
# Flatten the combined values so they do not consume huge amounts of
# memory during the merge sort.
ser = self.flattened_serializer()
sorter = ExternalSorter(self.memory_limit, ser)
sorted_items = sorter.sorted(itertools.chain(*disk_items),
key=operator.itemgetter(0))
return ((k, vs) for k, vs in GroupByKey(sorted_items)) |
<SYSTEM_TASK:>
This function returns a consistent hash code for builtin types, especially
<END_TASK>
<USER_TASK:>
Description:
def portable_hash(x):
"""
This function returns a consistent hash code for builtin types, especially
for None and tuples containing None.
The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
""" |
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x) |
<SYSTEM_TASK:>
Ignore the 'u' prefix of strings in doc tests, to make them work
<END_TASK>
<USER_TASK:>
Description:
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of strings in doc tests, so that they work
in both Python 2 and 3
""" |
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f |
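A quick standalone check of what the regex does; the sample docstring is made up:

import re

literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
doc = ">>> rdd.collect()\n[u'a', u'b']"
print(literal_re.sub(r'\1\2', doc))   # prints the doctest with the u prefixes removed:
                                      # >>> rdd.collect()
                                      # ['a', 'b']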
<SYSTEM_TASK:>
Mark the RDD as non-persistent, and remove all blocks for it from
<END_TASK>
<USER_TASK:>
Description:
def unpersist(self, blocking=False):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
""" |
self.is_cached = False
self._jrdd.unpersist(blocking)
return self |
<SYSTEM_TASK:>
Gets the name of the file to which this RDD was checkpointed
<END_TASK>
<USER_TASK:>
Description:
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
""" |
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get() |
<SYSTEM_TASK:>
Return a new RDD by applying a function to each element of this RDD.
<END_TASK>
<USER_TASK:>
Description:
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
""" |
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning) |
<SYSTEM_TASK:>
Return a new RDD by first applying a function to all elements of this
<END_TASK>
<USER_TASK:>
Description:
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
""" |
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning) |
<SYSTEM_TASK:>
Return a new RDD by applying a function to each partition of this RDD.
<END_TASK>
<USER_TASK:>
Description:
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
""" |
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning) |
<SYSTEM_TASK:>
Return a new RDD containing the distinct elements in this RDD.
<END_TASK>
<USER_TASK:>
Description:
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
""" |
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0]) |
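The map/reduceByKey trick above can be mimicked with a plain dict: pairing every element with None and collapsing duplicate keys leaves exactly the distinct values. A toy stand-in, no Spark needed:

data = [1, 1, 2, 3]
paired = {x: None for x in data}   # duplicates collapse, like reduceByKey(lambda x, _: x)
print(sorted(paired))              # [1, 2, 3]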
<SYSTEM_TASK:>
Return a sampled subset of this RDD.
<END_TASK>
<USER_TASK:>
Description:
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
""" |
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True) |
<SYSTEM_TASK:>
Randomly splits this RDD with the provided weights.
<END_TASK>
<USER_TASK:>
Description:
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
""" |
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])] |
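How the cumulative weight boundaries are derived, e.g. for weights=[2, 3]; this is pure arithmetic, independent of Spark, and each (lb, ub) pair drives one range sampler:

weights = [2, 3]
s = float(sum(weights))
cweights = [0.0]
for w in weights:
    cweights.append(cweights[-1] + w / s)
print(list(zip(cweights, cweights[1:])))   # [(0.0, 0.4), (0.4, 1.0)]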
<SYSTEM_TASK:>
Return a fixed-size sampled subset of this RDD.
<END_TASK>
<USER_TASK:>
Description:
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
""" |
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num] |
<SYSTEM_TASK:>
Returns a sampling rate that guarantees a sample of
<END_TASK>
<USER_TASK:>
Description:
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
""" |
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction)) |
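A worked example of the without-replacement branch; the numbers are illustrative only:

from math import log, sqrt

num, total = 50, 10000
fraction = float(num) / total                      # 0.005
delta = 0.00005
gamma = -log(delta) / total
q = min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
print(q)   # a rate slightly above 0.005, so that >= 50 rows are sampled with high probability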
<SYSTEM_TASK:>
Return the union of this RDD and another one.
<END_TASK>
<USER_TASK:>
Description:
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
""" |
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd |
<SYSTEM_TASK:>
Return the intersection of this RDD and another one. The output will
<END_TASK>
<USER_TASK:>
Description:
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
""" |
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys() |
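The cogroup-and-filter trick in plain Python: a key survives only if it has matches on both sides. Toy data, no Spark required:

left = {1: [None], 10: [None], 2: [None], 3: [None], 4: [None], 5: [None]}
right = {1: [None], 6: [None], 2: [None], 3: [None], 7: [None], 8: [None]}
cogrouped = {k: (left.get(k, []), right.get(k, [])) for k in set(left) | set(right)}
print(sorted(k for k, vs in cogrouped.items() if all(vs)))   # [1, 2, 3]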
<SYSTEM_TASK:>
Repartition the RDD according to the given partitioner and, within each resulting partition,
<END_TASK>
<USER_TASK:>
Description:
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
""" |
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True) |
<SYSTEM_TASK:>
Return an RDD created by piping elements to a forked external process.
<END_TASK>
<USER_TASK:>
Description:
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
""" |
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = unicode(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func) |
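shlex.split is what turns the command string into the argv list handed to Popen; a standalone example with an arbitrary command:

import shlex
print(shlex.split("grep -i 'hello world'"))   # ['grep', '-i', 'hello world']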
<SYSTEM_TASK:>
Applies a function to all elements of this RDD.
<END_TASK>
<USER_TASK:>
Description:
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
""" |
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() |
<SYSTEM_TASK:>
Applies a function to each partition of this RDD.
<END_TASK>
<USER_TASK:>
Description:
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
""" |
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() |
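Why the wrapper calls iter(r): the user function may return None (nothing iterable) or an iterable, and both are tolerated. A standalone illustration of that fallback; the helper name is made up:

def as_iter_or_empty(r):
    try:
        return iter(r)
    except TypeError:
        return iter([])

print(list(as_iter_or_empty(None)))      # []
print(list(as_iter_or_empty([1, 2])))    # [1, 2]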