text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_image_tar(image_path):
    """Export *image_path* to a tar archive.

    Returns a ``(file_obj, tar)`` pair where ``file_obj`` is whatever the
    client export produced (written in memory or to the file system — a
    file object or the file itself) and ``tar`` is the opened
    ``tarfile.TarFile``. Exits the process when the export fails.
    """
    bot.debug('Generate file system tar...')
    exported = Client.image.export(image_path=image_path)
    if exported is None:
        bot.error("Error generating tar, exiting.")
        sys.exit(1)
    return exported, tarfile.open(exported)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_content(image_path, member_name, return_hash=False):
    '''extract content from an image using cat.

    Parameters
    ==========
    image_path: full path to the image
    member_name: the member (file path inside the image) to extract
    return_hash: if True, return an md5 hexdigest of the content instead
                 of the content itself

    Returns the content as bytes (or its md5 hexdigest), or None when
    the content cannot be read or is empty.
    '''
    # "./usr/bin/foo" -> "/usr/bin/foo" so cat gets an absolute path
    if member_name.startswith('./'):
        member_name = member_name.replace('.', '', 1)
    if return_hash:
        hashy = hashlib.md5()
    try:
        content = Client.execute(image_path, 'cat %s' % (member_name))
    except Exception:
        # was a bare except: that would also swallow SystemExit and
        # KeyboardInterrupt; only operational errors should map to None
        return None
    if not isinstance(content, bytes):
        content = content.encode('utf-8')
    content = bytes(content)
    # If permissions don't allow read, return None
    if len(content) == 0:
        return None
    if return_hash:
        hashy.update(content)
        return hashy.hexdigest()
    return content
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_unicode_dict(input_dict):
    '''remove unicode keys and values from dict, encoding in utf8

    Recursively rebuilds mappings and iterables; scalars (and strings,
    which must not be exploded character-by-character) pass through.
    '''
    # local import keeps this self-contained; collections.Mapping /
    # collections.Iterable aliases were removed in Python 3.10
    import collections.abc
    if isinstance(input_dict, collections.abc.Mapping):
        # iteritems() only exists on Python 2 dicts; items() works on both
        return dict(map(remove_unicode_dict, input_dict.items()))
    if isinstance(input_dict, (str, bytes)):
        # strings are Iterable: without this guard they would be passed
        # through type(...)(map(...)) and destroyed
        return input_dict
    if isinstance(input_dict, collections.abc.Iterable):
        return type(input_dict)(map(remove_unicode_dict, input_dict))
    return input_dict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def RSA(m1, m2):
    '''RSA (representational similarity analysis) will compare the
    similarity of two matrices.

    Takes the strictly-lower triangle of each matrix (the diagonal and
    upper half are masked to NaN), drops entries undefined in either
    vector, and returns the Pearson correlation of the remainder.

    m1, m2: square pandas DataFrames of the same shape
        (assumed — .mask/.values are DataFrame API; confirm at call sites)
    '''
    from scipy.stats import pearsonr
    import numpy
    # numpy.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement.
    vectorm1 = m1.mask(
        numpy.triu(numpy.ones(m1.shape)).astype(bool)).values.flatten()
    vectorm2 = m2.mask(
        numpy.triu(numpy.ones(m2.shape)).astype(bool)).values.flatten()
    # Keep only the indices that are defined (non-NaN) in both vectors
    m1defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm1, dtype=float)))
    m2defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm2, dtype=float)))
    idx = numpy.intersect1d(m1defined, m2defined)
    return pearsonr(vectorm1[idx], vectorm2[idx])[0]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rsync(*args, **kwargs):
    """Wrapper around the rsync command.

    The ssh connection arguments are set automatically; positional args
    are passed directly to rsync and may use ``{host_string}`` in place
    of the server. Keyword args go to the 'local' fabric command, with
    'capture' defaulting to False.

    Example:
        rsync('-pthrvz', "{host_string}:/some/src/directory", "some/destination/")
    """
    kwargs.setdefault('capture', False)
    config = env.instance.config
    host = config.get('host', config.get('ip', env.instance.uid))
    host_string = "{user}@{host}".format(
        user=config.get('user', 'root'), host=host)
    formatted_args = [arg.format(host_string=host_string) for arg in args]
    # host/user are carried in the rsync target, not the ssh options
    ssh_info = env.instance.init_ssh_key()
    ssh_info.pop('host')
    ssh_info.pop('user')
    ssh_args = env.instance.ssh_args_from_info(ssh_info)
    cmd_parts = ['rsync', '-e', "ssh %s" % shjoin(ssh_args)]
    cmd_parts.extend(formatted_args)
    return local(shjoin(cmd_parts), **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def devices(self):
    """Compute the names of disk devices suitable as installation targets.

    Starts from the configured 'bootstrap-system-devices' list (or from
    all sysctl devices) and subtracts any device that a CDROM/USB
    install medium lives on.
    """
    install_devices = self.install_devices
    if 'bootstrap-system-devices' in env.instance.config:
        devices = set(env.instance.config['bootstrap-system-devices'].split())
    else:
        devices = set(self.sysctl_devices)
    for sysctl_device in self.sysctl_devices:
        for install_device in install_devices:
            if install_device.startswith(sysctl_device):
                # discard, not remove: the device may already have been
                # dropped (several install media on one disk) or may not
                # be in an explicitly configured device list at all —
                # remove() would raise KeyError in both cases
                devices.discard(sysctl_device)
    return devices
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_assets(self):
    """Download bootstrap assets to the control host.

    If present on the control host they will be uploaded to the target
    host during bootstrapping.
    """
    # allow overwrites from the commandline
    packages = set(
        env.instance.config.get('bootstrap-packages', '').split())
    packages.update(['python27'])
    cmd = env.instance.config.get('bootstrap-local-download-cmd', 'wget -c -O "{0.local}" "{0.url}"')
    items = sorted(self.bootstrap_files.items())
    # NOTE: `items` is deliberately extended *while* being iterated —
    # Python list iterators see elements appended during iteration, so
    # package files discovered from packagesite.txz are downloaded by
    # this same loop.
    for filename, asset in items:
        if asset.url:
            if not exists(dirname(asset.local)):
                os.makedirs(dirname(asset.local))
            local(cmd.format(asset))
        if filename == 'packagesite.txz':
            # add packages to download
            items.extend(self._fetch_packages(asset.local, packages))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_resource_definition(resource_name, resource_dct):
    """Return all the info extracted from a resource section of the apipie json.

    :param resource_name: Name of the resource that is defined by the section
    :param resource_dct: Dictionary as generated by apipie of the resource
        definition
    :returns: ``(new_dict, foreign_methods)`` — ``new_dict`` is the class
        namespace built for this resource, ``foreign_methods`` maps other
        resource names to the functions that belong to them.
    """
    new_dict = {
        '__module__': resource_dct.get('__module__', __name__),
        '__doc__': resource_dct['full_description'],
        '_resource_name': resource_name,
        '_own_methods': set(),
        '_conflicting_methods': [],
    }
    # methods in foreign_methods are meant for other resources,
    # that is, the url and the resource field do not match /api/{resource}
    foreign_methods = {}
    # as defined per apipie gem, each method can have more than one api,
    # for example, /api/hosts can have the GET /api/hosts api and the GET
    # /api/hosts/:id api or DELETE /api/hosts
    for method in resource_dct['methods']:
        # set the docstring if it only has one api
        if not new_dict['__doc__'] and len(method['apis']) == 1:
            new_dict['__doc__'] = \
                method['apis'][0]['short_description']
        for api in method['apis']:
            api = MethodAPIDescription(resource_name, method, api)
            if api.resource != resource_name:
                # this means that the json apipie passed says that an
                # endpoint in the form: /api/{resource}/* belongs to
                # {different_resource}, we just put it under {resource}
                # later, storing it under _foreign_methods for now as we
                # might not have parsed {resource} yet
                functions = foreign_methods.setdefault(api.resource, {})
                if api.name in functions:
                    # conflict: a function with this name was already
                    # generated for that foreign resource
                    old_api = functions.get(api.name).defs
                    # show only in debug the repeated but identical definitions
                    log_method = logger.warning
                    if api.url == old_api.url:
                        log_method = logger.debug
                    log_method(
                        "There is a conflict trying to redefine a method "
                        "for a foreign resource (%s): \n"
                        "\tresource:\n"
                        "\tapipie_resource: %s\n"
                        "\tnew_api: %s\n"
                        "\tnew_url: %s\n"
                        "\told_api: %s\n"
                        "\told_url: %s",
                        api.name,
                        resource_name,
                        pprint.pformat(api),
                        api.url,
                        pprint.pformat(old_api),
                        old_api.url,
                    )
                    new_dict['_conflicting_methods'].append(api)
                    continue
                functions[api.name] = api.generate_func()
            else:
                # it's an own method, resource and url match
                if api.name in new_dict['_own_methods']:
                    old_api = new_dict.get(api.name).defs
                    log_method = logger.warning
                    # show only in debug the repeated but identical definitions
                    if api.url == old_api.url:
                        log_method = logger.debug
                    log_method(
                        "There is a conflict trying to redefine method "
                        "(%s): \n"
                        "\tapipie_resource: %s\n"
                        "\tnew_api: %s\n"
                        "\tnew_url: %s\n"
                        "\told_api: %s\n"
                        "\told_url: %s",
                        api.name,
                        resource_name,
                        pprint.pformat(api),
                        api.url,
                        pprint.pformat(old_api),
                        old_api.url,
                    )
                    new_dict['_conflicting_methods'].append(api)
                    continue
                new_dict['_own_methods'].add(api.name)
                new_dict[api.name] = api.generate_func()
    return new_dict, foreign_methods
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_resource_from_url(self, url):
    """Return the appropriate resource name for the given URL.

    :param url: API URL stub, like: '/api/hosts'
    :return: Resource name, like 'hosts', or None if not found
    """
    # hard-coded API roots that the resource regex does not cover
    special_roots = {'/api': 'api', '/katello': 'katello'}
    if url in special_roots:
        return special_roots[url]
    match = self.resource_pattern.match(url)
    if not match:
        return None
    return match.groupdict().get('resource', None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_func(self, as_global=False):
    """Generate a function for this method using one specific api.

    :param as_global: if set, will use the global function name, instead
        of the class method name (usually {resource}_{class_method})
        when defining the function
    :returns: the generated function object, with this description
        attached as its ``defs`` attribute
    """
    keywords = []
    params_def = []
    params_doc = ""
    original_names = {}
    # index the declared parameters by name
    params = dict(
        (param['name'], param)
        for param in self.params
    )
    # parse the url required params, as sometimes they are skipped in the
    # parameters list of the definition
    for param in self.url_params:
        if param not in params:
            # synthesize a minimal required-parameter entry
            param = {
                'name': param,
                'required': True,
                'description': '',
                'validator': '',
            }
            params[param['name']] = param
        else:
            # a url param is always required, whatever the definition says
            params[param]['required'] = True
    # split required and non-required params for the definition
    req_params = []
    nonreq_params = []
    for param in six.itervalues(params):
        if param['required']:
            req_params.append(param)
        else:
            nonreq_params.append(param)
    # required params first so they can be positional in the signature
    for param in req_params + nonreq_params:
        params_doc += self.create_param_doc(param) + "\n"
        local_name = param['name']
        # some params collide with python keywords, that's why we do
        # this switch (and undo it inside the function we generate)
        if param['name'] == 'except':
            local_name = 'except_'
        original_names[local_name] = param['name']
        keywords.append(local_name)
        if param['required']:
            params_def.append("%s" % local_name)
        else:
            params_def.append("%s=None" % local_name)
    # assemble the source of the function we are about to exec
    func_head = 'def {0}(self, {1}):'.format(
        as_global and self.get_global_method_name() or self.name,
        ', '.join(params_def)
    )
    code_body = (
        ' _vars_ = locals()\n'
        ' _url = self._fill_url("{url}", _vars_, {url_params})\n'
        ' _original_names = {original_names}\n'
        ' _kwargs = dict((_original_names[k], _vars_[k])\n'
        ' for k in {keywords} if _vars_[k])\n'
        ' return self._foreman.do_{http_method}(_url, _kwargs)')
    code_body = code_body.format(
        http_method=self.http_method.lower(),
        url=self.url,
        url_params=self.url_params,
        keywords=keywords,
        original_names=original_names,
    )
    code = [
        func_head,
        ' """',
        self.short_desc,
        '',
        params_doc,
        ' """',
        code_body,
    ]
    code = '\n'.join(code)
    six.exec_(code)
    # NOTE(review): the lookup below uses self.name even when as_global
    # defined the function under get_global_method_name() — confirm the
    # two coincide on the as_global path.
    function = locals()[self.name]
    # to ease debugging, all the funcs have the definitions attached
    setattr(function, 'defs', self)
    return function
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_plugin_def(http_method, funcs):
    """Parse one element of a plugin's definitions dict.

    :param http_method: HTTP verb the functions are registered under
    :param funcs: functions related to that HTTP method
    :returns: list of apipie-style method descriptions (empty when the
        HTTP method is unsupported)
    """
    if http_method not in ('GET', 'PUT', 'POST', 'DELETE'):
        logger.error(
            'Plugin load failure, HTTP method %s unsupported.',
            http_method,
        )
        return []
    methods = []
    for fname, params in six.iteritems(funcs):
        api = {
            'short_description': 'no-doc',
            'http_method': http_method,
            'api_url': '/api/' + fname,
        }
        method = {'apis': [api], 'params': [], 'name': fname}
        for pname, pdef in six.iteritems(params):
            method['params'].append({
                'name': pname,
                'validator': "Must be %s" % pdef['ptype'],
                'description': '',
                'required': pdef['required'],
            })
        methods.append(method)
    return methods
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_authors(repo_path, from_commit):
    """Return the sorted, newline-joined set of authors of a repo.

    :param repo_path: path to the git repository
    :param from_commit: optional base revision; commits before the first
        match (by sha prefix or fuzzy ref match) are excluded. When
        ``None``, every commit is included.
    """
    repo = dulwich.repo.Repo(repo_path)
    refs = get_refs(repo)
    authors = set()
    including = from_commit is None
    children_map = get_children_per_first_parent(repo_path)
    # walk first-parent history oldest-first
    for commit_sha, children in reversed(children_map.items()):
        commit = get_repo_object(repo, commit_sha)
        if not including:
            including = (
                commit_sha.startswith(from_commit)
                or fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
            )
        if including:
            authors.add(commit.author.decode())
            for child in children:
                authors.add(child.author.decode())
    return '\n'.join(sorted(authors))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def emit(self, tup, tup_id=None, stream=None, direct_task=None,
         need_task_ids=False):
    """Emit a spout Tuple message.

    :param tup: the Tuple to send to Storm; JSON-serializable data only.
    :type tup: list or tuple
    :param tup_id: the ID for the Tuple; leave blank for an unreliable emit.
    :type tup_id: str
    :param stream: ID of the stream to emit to; leave empty for the
        default stream.
    :type stream: str
    :param direct_task: the task to send the Tuple to for a direct emit.
    :type direct_task: int
    :param need_task_ids: whether to return the task IDs the Tuple was
        emitted to (default: ``False``).
    :type need_task_ids: bool
    :returns: ``None``, unless ``need_task_ids=True``, in which case a
        ``list`` of task IDs the Tuple was sent to (equal to
        ``[direct_task]`` for a direct emit).
    """
    # pure delegation to the generic component emit
    forwarded = dict(tup_id=tup_id, stream=stream,
                     direct_task=direct_task, need_task_ids=need_task_ids)
    return super(Spout, self).emit(tup, **forwarded)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ack(self, tup_id):
    """Called when a bolt acknowledges a Tuple in the topology.

    :param tup_id: the ID of the Tuple that has been fully acknowledged
        in the topology.
    :type tup_id: str
    """
    # forget any failure count recorded for this tuple
    self.failed_tuples.pop(tup_id, None)
    _missing = object()
    if self.unacked_tuples.pop(tup_id, _missing) is _missing:
        self.logger.error("Received ack for unknown tuple ID: %r", tup_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fail(self, tup_id):
    """Called when a Tuple fails in the topology.

    A reliable spout replays a failed tuple up to ``max_fails`` times,
    after which it is acked as if it had succeeded and dropped.

    :param tup_id: the ID of the Tuple that has failed in the topology
        either due to a bolt calling ``fail()`` or a Tuple timing out.
    :type tup_id: str
    """
    saved_args = self.unacked_tuples.get(tup_id)
    if saved_args is None:
        self.logger.error("Received fail for unknown tuple ID: %r", tup_id)
        return
    tup, stream, direct_task, need_task_ids = saved_args
    if self.failed_tuples[tup_id] >= self.max_fails:
        # Just pretend we got an ack when we exceed retry limit
        self.logger.info(
            "Acking tuple ID %r after it exceeded retry limit " "(%r)",
            tup_id,
            self.max_fails,
        )
        self.ack(tup_id)
        return
    self.emit(
        tup,
        tup_id=tup_id,
        stream=stream,
        direct_task=direct_task,
        need_task_ids=need_task_ids,
    )
    self.failed_tuples[tup_id] += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def emit(self, tup, tup_id=None, stream=None, direct_task=None,
         need_task_ids=False):
    """Emit a spout Tuple and record its metadata in `unacked_tuples`.

    `tup_id` is a required parameter here: without it the tuple cannot
    be tracked for replay. See :meth:`Bolt.emit`.
    """
    if tup_id is None:
        raise ValueError(
            "You must provide a tuple ID when emitting with a "
            "ReliableSpout in order for the tuple to be "
            "tracked."
        )
    # remember everything needed to re-emit this tuple on failure
    self.unacked_tuples[tup_id] = (tup, stream, direct_task, need_task_ids)
    return super(ReliableSpout, self).emit(
        tup,
        tup_id=tup_id,
        stream=stream,
        direct_task=direct_task,
        need_task_ids=need_task_ids,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remote_pdb_handler(signum, frame):
    """Signal handler that drops us into a remote debugger on SIGUSR1."""
    try:
        from remote_pdb import RemotePdb
        # port=0 lets the OS pick a free port on localhost
        RemotePdb(host="127.0.0.1", port=0).set_trace(frame=frame)
    except ImportError:
        log.warning(
            "remote_pdb unavailable. Please install remote_pdb to "
            "allow remote debugging."
        )
    # Restore signal handler for later
    signal.signal(signum, remote_pdb_handler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_component(self, storm_conf, context):
    """Add helpful instance variables to component after initial handshake
    with Storm. Also configure logging.
    """
    self.topology_name = storm_conf.get("topology.name", "")
    self.task_id = context.get("taskid", "")
    self.component_name = context.get("componentid")
    # If using Storm before 0.10.0 componentid is not available
    if self.component_name is None:
        self.component_name = context.get("task->component", {}).get(
            str(self.task_id), ""
        )
    self.debug = storm_conf.get("topology.debug", False)
    self.storm_conf = storm_conf
    self.context = context
    # Set up logging
    self.logger = logging.getLogger(".".join((__name__, self.component_name)))
    log_path = self.storm_conf.get("pystorm.log.path")
    log_file_name = self.storm_conf.get(
        "pystorm.log.file",
        "pystorm_{topology_name}" "_{component_name}" "_{task_id}" "_{pid}.log",
    )
    root_log = logging.getLogger()
    log_level = self.storm_conf.get("pystorm.log.level", "info")
    if log_path:
        # File logging requested: write rotated logs under log_path
        max_bytes = self.storm_conf.get("pystorm.log.max_bytes", 1000000)  # 1 MB
        backup_count = self.storm_conf.get("pystorm.log.backup_count", 10)
        log_file = join(
            log_path,
            (
                log_file_name.format(
                    topology_name=self.topology_name,
                    component_name=self.component_name,
                    task_id=self.task_id,
                    pid=self.pid,
                )
            ),
        )
        handler = RotatingFileHandler(
            log_file, maxBytes=max_bytes, backupCount=backup_count
        )
        log_format = self.storm_conf.get(
            "pystorm.log.format",
            "%(asctime)s - %(name)s - " "%(levelname)s - %(message)s",
        )
    else:
        # No log path configured: forward log records to Storm itself
        self.log(
            "pystorm StormHandler logging enabled, so all messages at "
            'levels greater than "pystorm.log.level" ({}) will be sent'
            " to Storm.".format(log_level)
        )
        handler = StormHandler(self.serializer)
        log_format = self.storm_conf.get(
            "pystorm.log.format", "%(asctime)s - %(name)s - " "%(message)s"
        )
    formatter = logging.Formatter(log_format)
    log_level = _PYTHON_LOG_LEVELS.get(log_level, logging.INFO)
    if self.debug:
        # potentially override logging that was provided if
        # topology.debug was set to true
        log_level = logging.DEBUG
    handler.setLevel(log_level)
    handler.setFormatter(formatter)
    root_log.addHandler(handler)
    self.logger.setLevel(log_level)
    logging.getLogger("pystorm").setLevel(log_level)
    # Redirect stdout to ensure that print statements/functions
    # won't disrupt the multilang protocol
    if self.serializer.output_stream == sys.stdout:
        sys.stdout = LogStream(logging.getLogger("pystorm.stdout"))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_handshake(self):
    """Read and process an initial handshake message from Storm.

    Writes a blank PID file into the supplied pidDir, reports our PID
    back to Storm, and returns the ``(conf, context)`` pair.
    """
    msg = self.read_message()
    pid_dir = msg["pidDir"]
    # Write a blank PID file out to the pidDir
    open(join(pid_dir, str(self.pid)), "w").close()
    self.send_message({"pid": self.pid})
    return msg["conf"], msg["context"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_message(self, message):
    """Send a message to Storm via stdout.

    Non-dict messages are logged as errors and dropped instead of being
    sent, to keep the multilang protocol well-formed.
    """
    if isinstance(message, dict):
        self.serializer.send_message(message)
        return
    logger = self.logger if self.logger else log
    logger.error(
        "%s.%d attempted to send a non dict message to Storm: " "%r",
        self.component_name,
        self.pid,
        message,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raise_exception(self, exception, tup=None):
    """Report an exception back to Storm via logging.

    :param exception: a Python exception.
    :param tup: a :class:`Tuple` object the exception relates to, if any.
    """
    if tup:
        template = (
            "Python {exception_name} raised while processing Tuple "
            "{tup!r}\n{traceback}"
        )
    else:
        template = "Python {exception_name} raised\n{traceback}"
    message = template.format(
        exception_name=exception.__class__.__name__,
        tup=tup,
        traceback=format_exc(),
    )
    self.send_message({"command": "error", "msg": str(message)})
    # ask Storm for the next message so processing can continue
    self.send_message({"command": "sync"})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log(self, message, level=None):
    """Log a message to Storm, optionally providing a logging level.

    :param message: the log message to send to Storm.
    :type message: str
    :param level: the logging level Storm should use when writing the
        ``message``; one of trace, debug, info, warn, or error
        (default: ``info``).
    :type level: str

    .. warning:: The message is sent to Storm regardless of the level you
        specify. In almost all cases you are better off using
        ``Component.logger`` without setting ``pystorm.log.path``, so
        filtering happens on the Python side via
        :class:`pystorm.component.StormHandler`.
    """
    storm_level = _STORM_LOG_LEVELS.get(level, _STORM_LOG_INFO)
    self.send_message(
        {"command": "log", "msg": str(message), "level": storm_level})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """Main run loop for all components.

    Performs initial handshake with Storm and reads Tuples handing them
    off to subclasses. Any exceptions are caught and logged back to
    Storm prior to the Python process exiting.

    .. warning:: Subclasses should **not** override this method.
    """
    storm_conf, context = self.read_handshake()
    self._setup_component(storm_conf, context)
    self.initialize(storm_conf, context)
    while True:
        try:
            self._run()
        except StormWentAwayError:
            log.info("Exiting because parent Storm process went away.")
            self._exit(2)
        except Exception as e:
            log_msg = "Exception in {}.run()".format(self.__class__.__name__)
            # capture exc_info now: the handler below may itself raise
            exc_info = sys.exc_info()
            try:
                self.logger.error(log_msg, exc_info=True)
                self._handle_run_exception(e)
            except StormWentAwayError:
                log.error(log_msg, exc_info=exc_info)
                log.info("Exiting because parent Storm process went away.")
                self._exit(2)
            except:
                # bare except: even handler failures (including
                # non-Exception errors) must not kill the loop here
                log.error(log_msg, exc_info=exc_info)
                log.error(
                    "While trying to handle previous exception...",
                    exc_info=sys.exc_info(),
                )
            # only exit when configured to; otherwise keep processing
            if self.exit_on_exception:
                self._exit(1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _exit(self, status_code):
    """Properly kill the Python process, including zombie threads."""
    # sys.exit won't terminate threads stuck in infinite loops, but
    # os._exit will — at the cost of skipping cleanup handlers and
    # stdio buffer flushing, so only use it when other threads exist.
    if threading.active_count() > 1:
        os._exit(status_code)
    else:
        sys.exit(status_code)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_message(self):
    """Read one complete multilang message and decode it from JSON.

    The Storm multilang protocol frames every message (a command for a
    bolt/spout, or the task IDs from a prior emit) as JSON lines
    terminated by a line containing only ``end``. Reading ``''`` from
    the input stream means EOF: communication with the supervisor has
    been severed, so :class:`StormWentAwayError` is raised.
    """
    parts = []
    num_blank_lines = 0
    while True:
        # readline keeps the trailing \n, so '' unambiguously means EOF
        with self._reader_lock:
            line = self.input_stream.readline()
        if line == "end\n":
            break
        if line == "":
            raise StormWentAwayError()
        if line == "\n":
            num_blank_lines += 1
            if num_blank_lines % 1000 == 0:
                log.warn(
                    "While trying to read a command or pending task "
                    "ID, Storm has instead sent %s '\\n' messages.",
                    num_blank_lines,
                )
            continue
        parts.append(line[0:-1])
    msg = "".join("{}\n".format(part) for part in parts)
    try:
        return json.loads(msg)
    except Exception:
        log.error("JSON decode error for message: %r", msg, exc_info=True)
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_dict(self, msg_dict):
    """Serialize to JSON a message dictionary.

    Returns the serialized message followed by the multilang
    ``\\nend\\n`` terminator, always as text (unicode on Python 2).
    """
    # NOTE(review): namedtuple_as_object is a simplejson-only keyword;
    # this assumes `json` is simplejson rather than the stdlib module —
    # confirm against the file's imports.
    serialized = json.dumps(msg_dict, namedtuple_as_object=False)
    if PY2:
        serialized = serialized.decode("utf-8")
    serialized = "{}\nend\n".format(serialized)
    return serialized
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_tuple(self):
    """Read a tuple from the pipe to Storm and wrap it in a ``Tuple``.

    When a tuple type is registered for the (source, stream) pair the
    values are converted to that type; otherwise a plain tuple is used.
    """
    cmd = self.read_command()
    source = cmd["comp"]
    stream = cmd["stream"]
    values = cmd["tuple"]
    val_type = self._source_tuple_types[source].get(stream)
    wrapped = tuple(values) if val_type is None else val_type(*values)
    return Tuple(cmd["id"], source, stream, cmd["task"], wrapped)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ack(self, tup):
    """Indicate that processing of a Tuple has succeeded.

    :param tup: the Tuple to acknowledge (or its ``id`` as a string).
    :type tup: :class:`str` or :class:`pystorm.component.Tuple`
    """
    if isinstance(tup, Tuple):
        tup = tup.id
    self.send_message({"command": "ack", "id": tup})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fail(self, tup):
    """Indicate that processing of a Tuple has failed.

    :param tup: the Tuple to fail (or its ``id`` as a string).
    :type tup: :class:`str` or :class:`pystorm.component.Tuple`
    """
    if isinstance(tup, Tuple):
        tup = tup.id
    self.send_message({"command": "fail", "id": tup})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def emit(self, tup, **kwargs):
    """Modified emit that never returns task IDs after emitting.

    See :class:`pystorm.component.Bolt` for more information.

    :returns: ``None``.
    """
    # force the flag off regardless of what the caller passed
    kwargs.update(need_task_ids=False)
    return super(BatchingBolt, self).emit(tup, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_tick(self, tick_tup):
    """Count ticks and flush all batches every ``ticks_between_batches``.

    See :class:`pystorm.component.Bolt` for more information.

    .. warning:: This method should **not** be overriden. To tweak how
        Tuples are grouped into batches, override ``group_key``.
    """
    self._tick_counter += 1
    # ACK tick Tuple immediately, since it's just responsible for counter
    self.ack(tick_tup)
    if self._tick_counter <= self.ticks_between_batches or not self._batches:
        return
    self.process_batches()
    self._tick_counter = 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_batches(self):
    """Iterate through all batches, call process_batch on them, and ack.

    Separated out for the rare instances when we want to subclass
    BatchingBolt and customize what mechanism causes batches to be
    processed.
    """
    # only values are mutated during iteration (keys untouched), which
    # is safe with iteritems
    for key, batch in iteritems(self._batches):
        self._current_tups = batch
        self._current_key = key
        self.process_batch(key, batch)
        if self.auto_ack:
            for tup in batch:
                self.ack(tup)
        # Set current batch to [] so that we know it was acked if a
        # later batch raises an exception
        self._current_key = None
        self._batches[key] = []
    self._batches = defaultdict(list)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process(self, tup):
    """Group non-tick Tuples into batches by ``group_key``.

    .. warning:: This method should **not** be overriden. To tweak how
        Tuples are grouped into batches, override ``group_key``.
    """
    # Append latest Tuple to the batch it belongs to
    self._batches[self.group_key(tup)].append(tup)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _batch_entry_run(self):
"""The inside of ``_batch_entry``'s infinite loop. Separated out so it can be properly unit tested. """ |
time.sleep(self.secs_between_batches)
with self._batch_lock:
self.process_batches() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _batch_entry(self):
    """Entry point for the batcher thread.

    Loops forever; when anything escapes the loop (the bare except
    deliberately catches even SystemExit/KeyboardInterrupt raised in
    this thread), the exception info is stashed on ``self.exc_info``
    and the main thread is signaled with SIGUSR1 so it can handle it.
    """
    try:
        while True:
            self._batch_entry_run()
    except:
        self.exc_info = sys.exc_info()
        # wake the main thread's signal handler to surface the error
        os.kill(self.pid, signal.SIGUSR1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_message(self, msg_dict):
    """Serialize a message dictionary and write it to the output stream.

    Raises :class:`StormWentAwayError` when the stream is gone (broken
    pipe); any other serialization/write failure is logged and swallowed
    so a single bad message cannot kill the component.
    """
    with self._writer_lock:
        try:
            self.output_stream.flush()
            self.output_stream.write(self.serialize_dict(msg_dict))
            self.output_stream.flush()
        except IOError:
            raise StormWentAwayError()
        except Exception:
            # was a bare except: that also swallowed SystemExit and
            # KeyboardInterrupt, which should propagate
            log.exception("Failed to send message: %r", msg_dict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _void_array_to_list(restuple, _func, _args):
    """Convert the FFI result to Python data structures.

    Returns ``(ls_e, ls_n)`` — the easting and northing arrays of the
    result struct as plain Python lists.
    """
    count = np.prod((restuple.e.len, 1))
    # 8 bytes per element — assumes the FFI buffers hold float64 values;
    # TODO confirm against the C side's element type
    nbytes = 8 * count
    raw_e = string_at(restuple.e.data, nbytes)
    raw_n = string_at(restuple.n.data, nbytes)
    ls_e = np.frombuffer(raw_e, float, count).tolist()
    ls_n = np.frombuffer(raw_n, float, count).tolist()
    return ls_e, ls_n
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_data_file(filename, encoding='utf-8'):
    """Load a packaged data file and return its contents as a list of lines.

    Parameters:
        filename: The name of the file (no directories included).
        encoding: The file encoding. Defaults to utf-8.
    """
    resource_path = os.path.join(DATA_DIR, filename)
    raw = pkgutil.get_data(PACKAGE_NAME, resource_path)
    return raw.decode(encoding).splitlines()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_data():
    """Load the word and character mapping data into a dictionary.

    Each line in the data files looks like ``HANZI<TAB>PINYIN/PINYIN``, so
    lines are split on tabs and the Pinyin readings on slashes.
    """
    sources = (('words', 'hanzi_pinyin_words.tsv'),
               ('characters', 'hanzi_pinyin_characters.tsv'))
    data = {}
    for name, file_name in sources:
        # Build {hanzi: [pinyin, pinyin, ...]} for this data file.
        mapping = {}
        for line in dragonmapper.data.load_data_file(file_name):
            hanzi, readings = line.split('\t')
            mapping[hanzi] = readings.split('/')
        data[name] = mapping
    return data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _hanzi_to_pinyin(hanzi):
    """Return the Pinyin reading for a Chinese word.

    If *hanzi* matches a CC-CEDICT word, the list of that word's readings is
    returned; otherwise a per-character list is returned, where characters
    without a known reading pass through unchanged.
    """
    word_readings = _HANZI_PINYIN_MAP['words']
    if hanzi in word_readings:
        return word_readings[hanzi]
    return [_CHARACTERS.get(ch, ch) for ch in hanzi]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_pinyin(s, delimiter=' ', all_readings=False, container='[]', accented=True):
    """Convert a string's Chinese characters to Pinyin readings. *s* is a string containing Chinese characters. *accented* is a boolean value indicating whether to return accented or numbered Pinyin readings. *delimiter* is the character used to indicate word boundaries in *s*. This is used to differentiate between words and characters so that a more accurate reading can be returned. *all_readings* is a boolean value indicating whether or not to return all possible readings in the case of words/characters that have multiple readings. *container* is a two character string that is used to enclose words/characters if *all_readings* is ``True``. The default ``'[]'`` is used like this: ``'[READING1/READING2]'``. Characters not recognized as Chinese are left untouched. """ |
    hanzi = s
    pinyin = ''
    # Process the given string.
    while hanzi:
        # Get the next match in the given string.
        match = re.search('[^%s%s]+' % (delimiter, zhon.hanzi.punctuation),
                          hanzi)
        # There are no more matches, but the string isn't finished yet.
        if match is None and hanzi:
            pinyin += hanzi
            break
        match_start, match_end = match.span()
        # Process the punctuation marks that occur before the match.
        if match_start > 0:
            pinyin += hanzi[0:match_start]
        # Get the Chinese word/character readings.
        # ``readings`` is a list of word readings, or a per-character list
        # when the match wasn't a known word (see _hanzi_to_pinyin).
        readings = _hanzi_to_pinyin(match.group())
        # Process the returned word readings.
        if match.group() in _WORDS:
            if all_readings:
                reading = _enclose_readings(container,
                                            _READING_SEPARATOR.join(readings))
            else:
                reading = readings[0]
            pinyin += reading
        # Process the returned character readings.
        else:
            # Process each character individually.
            for character in readings:
                # Don't touch unrecognized characters.
                if isinstance(character, str):
                    pinyin += character
                # Format multiple readings.
                elif isinstance(character, list) and all_readings:
                    pinyin += _enclose_readings(
                        container, _READING_SEPARATOR.join(character))
                # Select and format the most common reading.
                elif isinstance(character, list) and not all_readings:
                    # Add an apostrophe to separate syllables.
                    if (pinyin and character[0][0] in zhon.pinyin.vowels and
                            pinyin[-1] in zhon.pinyin.lowercase):
                        pinyin += "'"
                    pinyin += character[0]
        # Move ahead in the given string.
        hanzi = hanzi[match_end:]
    if accented:
        return pinyin
    else:
        # Convert the accumulated accented Pinyin to numbered form.
        return accented_to_numbered(pinyin) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_zhuyin(s, delimiter=' ', all_readings=False, container='[]'):
    """Convert a string's Chinese characters to Zhuyin readings.

    Arguments mirror :func:`to_pinyin`: *delimiter* marks word boundaries in
    *s*, *all_readings* returns every known reading enclosed in *container*.
    Characters not recognized as Chinese are left untouched.
    """
    # Go through numbered Pinyin as an intermediate representation.
    return pinyin_to_zhuyin(to_pinyin(s, delimiter, all_readings, container,
                                      False))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_ipa(s, delimiter=' ', all_readings=False, container='[]'):
    """Convert a string's Chinese characters to IPA.

    Arguments mirror :func:`to_pinyin`: *delimiter* marks word boundaries in
    *s*, *all_readings* returns every known reading enclosed in *container*.
    Characters not recognized as Chinese are left untouched.
    """
    # Go through numbered Pinyin as an intermediate representation.
    return pinyin_to_ipa(to_pinyin(s, delimiter, all_readings, container,
                                   False))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_data():
    """Load the transcription mapping data into three lookup dictionaries."""
    pinyin_map = {}
    zhuyin_map = {}
    ipa_map = {}
    # Each CSV row holds one syllable in all three systems.
    for line in dragonmapper.data.load_data_file('transcriptions.csv'):
        pinyin, zhuyin, ipa = line.split(',')
        pinyin_map[pinyin] = {'Zhuyin': zhuyin, 'IPA': ipa}
        zhuyin_map[zhuyin] = {'Pinyin': pinyin, 'IPA': ipa}
        ipa_map[ipa] = {'Pinyin': pinyin, 'Zhuyin': zhuyin}
    return pinyin_map, zhuyin_map, ipa_map
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _numbered_vowel_to_accented(vowel, tone):
    """Convert a numbered Pinyin vowel to an accented Pinyin vowel."""
    tone_digit = str(tone) if isinstance(tone, int) else tone
    # The lookup table is keyed on vowel + tone digit, e.g. 'a1'.
    return _PINYIN_TONES[vowel + tone_digit]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _accented_vowel_to_numbered(vowel):
    """Convert an accented Pinyin vowel to a (vowel, tone) pair.

    Returns None when the vowel is not in the tone table.
    """
    matches = [numbered for numbered, accented in _PINYIN_TONES.items()
               if accented == vowel]
    if matches:
        # Keys look like 'a1', so this splits into ('a', '1').
        return tuple(matches[0])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_numbered_syllable(unparsed_syllable):
"""Return the syllable and tone of a numbered Pinyin syllable.""" |
tone_number = unparsed_syllable[-1]
if not tone_number.isdigit():
syllable, tone = unparsed_syllable, '5'
elif tone_number == '0':
syllable, tone = unparsed_syllable[:-1], '5'
elif tone_number in '12345':
syllable, tone = unparsed_syllable[:-1], tone_number
else:
raise ValueError("Invalid syllable: %s" % unparsed_syllable)
return syllable, tone |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_accented_syllable(unparsed_syllable):
"""Return the syllable and tone of an accented Pinyin syllable. Any accented vowels are returned without their accents. Implements the following algorithm: 1. If the syllable has an accent mark, convert that vowel to a regular vowel and add the tone to the end of the syllable. 2. Otherwise, assume the syllable is tone 5 (no accent marks). """ |
if unparsed_syllable[0] == '\u00B7':
# Special case for middle dot tone mark.
return unparsed_syllable[1:], '5'
for character in unparsed_syllable:
if character in _ACCENTED_VOWELS:
vowel, tone = _accented_vowel_to_numbered(character)
return unparsed_syllable.replace(character, vowel), tone
return unparsed_syllable, '5' |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_zhuyin_syllable(unparsed_syllable):
    """Return the syllable and tone of a Zhuyin syllable.

    A final Zhuyin character means the first tone; a recognized tone mark
    yields its tone number. Raises ValueError when the final character is
    neither a Zhuyin character nor a known tone mark.
    """
    zhuyin_tone = unparsed_syllable[-1]
    if zhuyin_tone in zhon.zhuyin.characters:
        # No explicit tone mark means the first tone.
        return unparsed_syllable, '1'
    if zhuyin_tone in zhon.zhuyin.marks:
        for tone_number, tone_mark in _ZHUYIN_TONES.items():
            if zhuyin_tone == tone_mark:
                # Return as soon as the mark is found. (The original kept
                # looping without a break, and a mark missing from
                # _ZHUYIN_TONES left ``syllable``/``tone`` unbound, raising
                # UnboundLocalError instead of the intended ValueError.)
                return unparsed_syllable[:-1], tone_number
    raise ValueError("Invalid syllable: %s" % unparsed_syllable)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_ipa_syllable(unparsed_syllable):
    """Return the syllable and tone of an IPA syllable.""" |
    # Look for a run of IPA tone-mark characters in the syllable.
    ipa_tone = re.search('[%(marks)s]+' % {'marks': _IPA_MARKS},
                         unparsed_syllable)
    if not ipa_tone:
        # No tone marks -> neutral tone.
        syllable, tone = unparsed_syllable, '5'
    else:
        # NOTE(review): if the matched marks are not present in _IPA_TONES,
        # ``tone`` is never assigned and this raises UnboundLocalError --
        # confirm _IPA_MARKS and _IPA_TONES always agree.
        for tone_number, tone_mark in _IPA_TONES.items():
            if ipa_tone.group() == tone_mark:
                tone = tone_number
                break
        # The syllable is everything before the tone marks.
        syllable = unparsed_syllable[0:ipa_tone.start()]
    return syllable, tone |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _restore_case(s, memory):
"""Restore a lowercase string's characters to their original case.""" |
cased_s = []
for i, c in enumerate(s):
if i + 1 > len(memory):
break
cased_s.append(c if memory[i] else c.upper())
return ''.join(cased_s) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _convert(s, re_pattern, syllable_function, add_apostrophes=False, remove_apostrophes=False, separate_syllables=False):
    """Convert a string's syllables to a different transcription system.""" |
    # add_apostrophes: insert "'" before a vowel-initial syllable that
    #   directly follows converted output (Pinyin syllable-separation rule).
    # remove_apostrophes: drop a single "'" found between syllables.
    # separate_syllables: join converted syllables with single spaces.
    original = s
    new = ''
    while original:
        match = re.search(re_pattern, original, re.IGNORECASE | re.UNICODE)
        if match is None and original:
            # There are no more matches, but the given string isn't fully
            # processed yet.
            new += original
            break
        match_start, match_end = match.span()
        if match_start > 0:  # Handle extra characters before matched syllable.
            if (new and remove_apostrophes and match_start == 1 and
                    original[0] == "'"):
                pass  # Remove the apostrophe between Pinyin syllables.
                if separate_syllables:  # Separate syllables by a space.
                    new += ' '
            else:
                new += original[0:match_start]
        else:  # Matched syllable starts immediately.
            if new and separate_syllables:  # Separate syllables by a space.
                new += ' '
            elif (new and add_apostrophes and
                    match.group()[0].lower() in _UNACCENTED_VOWELS):
                new += "'"
        # Convert the matched syllable.
        new += syllable_function(match.group())
        original = original[match_end:]
    return new |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_pattern_match(re_pattern, s):
"""Check if a re pattern expression matches an entire string.""" |
match = re.match(re_pattern, s, re.I)
return match.group() == s if match else False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def identify(s):
    """Identify a given string's transcription system.

    Returns PINYIN, ZHUYIN, or IPA when *s* consists of valid Pinyin or
    Zhuyin syllables or of Chinese IPA characters, checked in that order;
    otherwise returns UNKNOWN.
    """
    checks = ((is_pinyin, PINYIN), (is_zhuyin, ZHUYIN), (is_ipa, IPA))
    for predicate, system in checks:
        if predicate(s):
            return system
    return UNKNOWN
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prepare(self, f):
    """Accept an objective function for optimization.""" |
    # Build the gradient and Hessian functions of ``f`` once (via autograd)
    # so they can be reused on every optimization step.
    self.g = autograd.grad(f)
    self.h = autograd.hessian(f) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve(self, angles):
    """Calculate a position of the end-effector and return it."""
    # Start from the homogeneous origin and apply each joint matrix in
    # reversed order, then drop the homogeneous coordinate.
    position = np.array([0., 0., 0., 1.])
    for transform in reversed(self._matrices(angles)):
        position = np.dot(transform, position)
    return position[:3]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve(self, angles0, target):
    """Calculate joint angles and returns it."""
    initial_angles = np.array(angles0)
    # Delegate the actual search to the configured optimizer.
    return self.optimizer.optimize(initial_angles, target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def matrix(self, _):
    """Return translation matrix in homogeneous coordinates.

    The angle argument is ignored; the translation comes from self.coord.
    """
    x, y, z = self.coord
    translation = np.eye(4)
    translation[:3, 3] = [x, y, z]
    return translation
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def matrix(self, angle):
    """Return rotation matrix in homogeneous coordinates."""
    # Dispatch on the configured rotation axis ('x', 'y' or 'z').
    rotation_builders = {
        'x': self._x_rot,
        'y': self._y_rot,
        'z': self._z_rot,
    }
    builder = rotation_builders[self.axis]
    return builder(angle)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_logger(self, logger):
    """ Set a logger to send debug messages to.

    Parameters
    ----------
    logger : `Logger <http://docs.python.org/2/library/logging.html>`_
        A python logger used to get debugging output from this module.
    """
    # Remember the logger and share it with the underlying session.
    self.__logger = logger
    self.session.set_logger(logger)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def version(self):
    """ Return the version number of the Lending Club Investor tool.

    Reads the VERSION file that sits next to this module.

    Returns
    -------
    string
        The version number string
    """
    this_path = os.path.dirname(os.path.realpath(__file__))
    version_file = os.path.join(this_path, 'VERSION')
    # Use a context manager so the file handle is closed promptly
    # (the original leaked the handle returned by open()).
    with open(version_file) as f:
        return f.read().strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self, email=None, password=None):
    """ Attempt to authenticate the user.

    Parameters
    ----------
    email : string
        The email of a user on Lending Club
    password : string
        The user's password, for authentication.

    Returns
    -------
    boolean
        True if the user authenticated; the underlying session raises
        AuthenticationError or NetworkError on failure.
    """
    return True if self.session.authenticate(email, password) else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cash_balance(self):
    """ Returns the account cash balance available for investing Returns ------- float The cash balance in your account. """ |
    # Returns False when the balance could not be parsed from the response.
    cash = False
    try:
        response = self.session.get('/browse/cashBalanceAj.action')
        json_response = response.json()
        if self.session.json_success(json_response):
            self.__log('Cash available: {0}'.format(json_response['cashBalance']))
            cash_value = json_response['cashBalance']
            # Convert currency to float value
            # Match values like $1,000.12 or 1,0000$
            cash_match = re.search('^[^0-9]?([0-9\.,]+)[^0-9]?', cash_value)
            if cash_match:
                cash_str = cash_match.group(1)
                cash_str = cash_str.replace(',', '')
                cash = float(cash_str)
        else:
            self.__log('Could not get cash balance: {0}'.format(response.text))
    except Exception as e:
        # NOTE(review): if session.get() itself raises, ``response`` is unbound
        # here and formatting response.text raises NameError instead -- verify.
        self.__log('Could not get the cash balance on the account: Error: {0}\nJSON: {1}'.format(str(e), response.text))
        raise e
    return cash |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def my_notes(self, start_index=0, limit=100, get_all=False, sort_by='loanId', sort_dir='asc'):
    """ Return all the loan notes you've already invested in. By default it'll return 100 results at a time. Parameters start_index : int, optional The result index to start on. By default only 100 records will be returned at a time, so use this to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200. (default is 0) limit : int, optional The number of results to return per request. (default is 100) get_all : boolean, optional Return all results in one request, instead of 100 per request. sort_by : string, optional What key to sort on sort_dir : {'asc', 'desc'}, optional Which direction to sort Returns ------- dict A dictionary with a list of matching notes on the `loans` key """ |
    index = start_index
    notes = {
        'loans': [],
        'total': 0,
        'result': 'success'
    }
    # Page through the results, accumulating each page into ``notes``.
    while True:
        payload = {
            'sortBy': sort_by,
            'dir': sort_dir,
            'startindex': index,
            'pagesize': limit,
            'namespace': '/account'
        }
        response = self.session.post('/account/loansAj.action', data=payload)
        json_response = response.json()
        # Notes returned
        if self.session.json_success(json_response):
            notes['loans'] += json_response['searchresult']['loans']
            notes['total'] = json_response['searchresult']['totalRecords']
        # Error: record the server's result code and stop paging.
        else:
            notes['result'] = json_response['result']
            break
        # Load more
        if get_all is True and len(notes['loans']) < notes['total']:
            index += limit
        # End
        else:
            break
    return notes |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_note(self, note_id):
    """ Get a loan note that you've invested in by ID Parameters note_id : int The note ID Returns ------- dict A dictionary representing the matching note or False Examples -------- True 100 630 100 630 """ |
    index = 0
    # Page through my_notes() sorted by noteId until the ID is found or
    # the pages have passed it.
    while True:
        notes = self.my_notes(start_index=index, sort_by='noteId')
        if notes['result'] != 'success':
            break
        # If the first note has a higher ID, we've passed it
        # NOTE(review): assumes every successful page has at least one loan;
        # an empty 'loans' list would raise IndexError here -- verify.
        if notes['loans'][0]['noteId'] > note_id:
            break
        # If the last note has a higher ID, it could be in this record set
        if notes['loans'][-1]['noteId'] >= note_id:
            for note in notes['loans']:
                if note['noteId'] == note_id:
                    return note
        # my_notes() pages are 100 records by default.
        index += 100
    return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, loan_id, amount):
    """ Add a loan and amount you want to invest, to your order. If this loan is already in your order, it's amount will be replaced with the this new amount Parameters loan_id : int or dict The ID of the loan you want to add or a dictionary containing a `loan_id` value amount : int % 25 The dollar amount you want to invest in this loan, as a multiple of 25. """ |
    # NOTE(review): validation is done with ``assert``, so it disappears under
    # ``python -O``; also ``amount > 0`` runs before the type check below.
    assert amount > 0 and amount % 25 == 0, 'Amount must be a multiple of 25'
    assert type(amount) in (float, int), 'Amount must be a number'
    if type(loan_id) is dict:
        loan = loan_id
        assert 'loan_id' in loan and type(loan['loan_id']) is int, 'loan_id must be a number or dictionary containing a loan_id value'
        loan_id = loan['loan_id']
    # ``unicode`` makes this Python 2 only.
    assert type(loan_id) in [str, unicode, int], 'Loan ID must be an integer number or a string'
    # Re-adding the same loan simply overwrites its amount.
    self.loans[loan_id] = amount |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_batch(self, loans, batch_amount=None):
    """ Add a batch of loans to your order. Parameters loans : list A list of dictionary objects representing each loan and the amount you want to invest in it (see examples below). batch_amount : int, optional The dollar amount you want to set on ALL loans in this batch. **NOTE:** This will override the invest_amount value for each loan. Examples -------- Each item in the loans list can either be a loan ID OR a dictionary object containing `loan_id` and `invest_amount` values. The invest_amount value is the dollar amount you wish to invest in this loan. **List of IDs**:: # Invest $50 in 3 loans order.add_batch([1234, 2345, 3456], 50) **List of Dictionaries**:: # Invest different amounts in each loans order.add_batch([ {'loan_id': 1234, invest_amount: 50}, {'loan_id': 2345, invest_amount: 25}, {'loan_id': 3456, invest_amount: 150} ]) """ |
    assert batch_amount is None or batch_amount % 25 == 0, 'batch_amount must be a multiple of 25'
    # Add each loan
    assert type(loans) is list, 'The loans property must be a list. (not {0})'.format(type(loans))
    for loan in loans:
        loan_id = loan
        amount = batch_amount
        # Extract ID and amount from loan dict
        if type(loan) is dict:
            assert 'loan_id' in loan, 'Each loan dict must have a loan_id value'
            assert batch_amount or 'invest_amount' in loan, 'Could not determine how much to invest in loan {0}'.format(loan['loan_id'])
            loan_id = loan['loan_id']
            # batch_amount, when given, overrides the loan's own invest_amount.
            if amount is None and 'invest_amount' in loan:
                amount = loan['invest_amount']
        assert amount is not None, 'Could not determine how much to invest in loan {0}'.format(loan_id)
        assert amount % 25 == 0, 'Amount to invest must be a multiple of 25 (loan_id: {0})'.format(loan_id)
        self.add(loan_id, amount) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(self, portfolio_name=None):
    """ Place the order with LendingClub Parameters portfolio_name : string The name of the portfolio to add the invested loan notes to. This can be a new or existing portfolio name. Raises ------ LendingClubError Returns ------- int The completed order ID """ |
    assert self.order_id == 0, 'This order has already been place. Start a new order.'
    assert len(self.loans) > 0, 'There aren\'t any loans in your order'
    # Place the order
    # Stage the loans, fetch the struts token, then submit the order.
    self.__stage_order()
    token = self.__get_strut_token()
    self.order_id = self.__place_order(token)
    self.__log('Order #{0} was successfully submitted'.format(self.order_id))
    # Assign to portfolio
    if portfolio_name:
        # NOTE(review): when a portfolio name is given, the return value is
        # whatever assign_to_portfolio() returns, not the order ID.
        return self.assign_to_portfolio(portfolio_name)
    return self.order_id |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assign_to_portfolio(self, portfolio_name=None):
    """ Assign all the notes in this order to a portfolio.

    Parameters
    ----------
    portfolio_name : string
        The name of the portfolio to assign it to (new or existing)

    Returns
    -------
    boolean
        True on success

    Raises
    ------
    LendingClubError
    """
    assert self.order_id > 0, 'You need to execute this order before you can assign to a portfolio.'
    loan_ids = self.loans.keys()
    # Pair every staged loan with this order's ID.
    order_ids = [self.order_id for _ in loan_ids]
    return self.lc.assign_to_portfolio(portfolio_name, loan_ids, order_ids)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __stage_order(self):
    """ Add all the loans in this order to the LendingClub staging session.

    Raises LendingClubError if the loans cannot be staged or added to the
    order. Returns True on success.
    """
    # Skip staging...probably not a good idea...you've been warned
    if self.__already_staged is True and self.__i_know_what_im_doing is True:
        self.__log('Not staging the order...I hope you know what you\'re doing...'.format(len(self.loans)))
        return
    self.__log('Staging order for {0} loan notes...'.format(len(self.loans)))
    # Create a fresh order session
    self.lc.session.clear_session_order()
    #
    # Stage all the loans to the order
    #
    loan_ids = self.loans.keys()
    self.__log('Staging loans {0}'.format(loan_ids))
    # LendingClub requires you to search for the loans before you can stage them
    f = FilterByLoanID(loan_ids)
    results = self.lc.search(f, limit=len(self.loans))
    if len(results['loans']) == 0 or results['totalRecords'] != len(self.loans):
        raise LendingClubError('Could not stage the loans. The number of loans in your batch does not match totalRecords. {0} != {1}'.format(len(self.loans), results['totalRecords']), results)
    # Stage each loan
    for loan_id, amount in self.loans.iteritems():
        payload = {
            'method': 'addToPortfolio',
            'loan_id': loan_id,
            'loan_amount': amount,
            'remove': 'false'
        }
        response = self.lc.session.get('/data/portfolio', query=payload)
        json_response = response.json()
        # Ensure it was successful before moving on
        if not self.lc.session.json_success(json_response):
            raise LendingClubError('Could not stage loan {0} on the order: {1}'.format(loan_id, response.text), response)
    #
    # Add all staged loans to the order
    #
    payload = {
        'method': 'addToPortfolioNew'
    }
    response = self.lc.session.get('/data/portfolio', query=payload)
    json_response = response.json()
    if self.lc.session.json_success(json_response):
        self.__log(json_response['message'])
        return True
    # BUG FIX: the original executed ``raise self.__log(...)``, which raises
    # the logger's return value (None -> TypeError) and made the intended
    # LendingClubError below unreachable. Log the failure, then raise.
    self.__log('Could not add loans to the order: {0}'.format(response.text))
    raise LendingClubError('Could not add loans to the order', response.text)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __place_order(self, token):
    """ Use the struts token to place the order. Parameters token : string The struts token received from the place order page Returns ------- int The completed order ID. """ |
    order_id = 0
    response = None
    if not token or token['value'] == '':
        raise LendingClubError('The token parameter is False, None or unknown.')
    # Process order confirmation page
    try:
        # Place the order
        payload = {}
        # NOTE(review): ``token`` is always truthy here because of the guard
        # above, so this inner check is redundant.
        if token:
            # Struts expects the token echoed back under its dynamic name.
            payload['struts.token.name'] = token['name']
            payload[token['name']] = token['value']
        response = self.lc.session.post('/portfolio/orderConfirmed.action', data=payload)
        # Process HTML for the order ID
        html = response.text
        soup = BeautifulSoup(html, 'html5lib')
        # Order num
        order_field = soup.find(id='order_id')
        if order_field:
            order_id = int(order_field['value'])
        # Did not find an ID
        if order_id == 0:
            self.__log('An investment order was submitted, but a confirmation ID could not be determined')
            raise LendingClubError('No order ID was found when placing the order.', response)
        else:
            return order_id
    except Exception as e:
        # Any failure (including the LendingClubError raised above) is
        # re-wrapped with the last HTTP response attached.
        raise LendingClubError('Could not place the order: {0}'.format(str(e)), response) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __continue_session(self):
    """ Check if the time since the last HTTP request is under the session
    timeout limit. If it's been too long since the last request, attempt to
    authenticate again. """
    # How long has it been since the last request?
    elapsed = abs(time.time() - self.last_request_time)
    # session_timeout is expressed in minutes.
    if elapsed >= self.session_timeout * 60:
        self.__log('Session timed out, attempting to authenticate')
        self.authenticate()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self, email=None, password=None):
    """ Authenticate with LendingClub and preserve the user session for future requests. This will raise an exception if the login appears to have failed, otherwise it returns True. Since Lending Club doesn't seem to have a login API, the code has to try to decide if the login worked or not by looking at the URL redirect and parsing the returned HTML for errors. Parameters email : string The email of a user on Lending Club password : string The user's password, for authentication. Returns ------- boolean True on success or throws an exception on failure. Raises ------ session.AuthenticationError If authentication failed session.NetworkError If a network error occurred """ |
    # Get email and password
    if email is None:
        email = self.email
    else:
        self.email = email
    if password is None:
        password = self.__pass
    else:
        self.__pass = password
    # Get them from the user
    # NOTE(review): ``raw_input`` / interactive prompting makes this Python 2
    # code; the plaintext password is kept on the instance for re-auth.
    if email is None:
        email = raw_input('Email:')
        self.email = email
    if password is None:
        password = getpass.getpass()
        self.__pass = password
    self.__log('Attempting to authenticate: {0}'.format(self.email))
    # Start session
    self.__session = requests.Session()
    self.__session.headers = {
        'Referer': 'https://www.lendingclub.com/',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31'
    }
    # Set last request time to now
    self.last_request_time = time.time()
    # Send login request to LC
    payload = {
        'login_email': email,
        'login_password': password
    }
    response = self.post('/account/login.action', data=payload, redirects=False)
    # Get URL redirect URL and save the last part of the path as the endpoint
    response_url = response.url
    if response.status_code == 302:
        response_url = response.headers['location']
    endpoint = response_url.split('/')[-1]
    # Debugging
    self.__log('Status code: {0}'.format(response.status_code))
    self.__log('Redirected to: {0}'.format(response_url))
    self.__log('Cookies: {0}'.format(str(response.cookies.keys())))
    # Show query and data that the server received
    if 'x-echo-query' in response.headers:
        self.__log('Query: {0}'.format(response.headers['x-echo-query']))
    if 'x-echo-data' in response.headers:
        self.__log('Data: {0}'.format(response.headers['x-echo-data']))
    # Parse any errors from the HTML
    soup = BeautifulSoup(response.text, "html5lib")
    errors = soup.find(id='master_error-list')
    if errors:
        errors = errors.text.strip()
        # Remove extra spaces and newlines from error message
        errors = re.sub('\t+', '', errors)
        errors = re.sub('\s*\n+\s*', ' * ', errors)
        if errors == '':
            errors = None
        # Raise error
        if errors is not None:
            raise AuthenticationError(errors)
    # Redirected back to the login page...must be an error
    if endpoint == 'login.action':
        raise AuthenticationError('Unknown! Redirected back to the login page without an error message')
    return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(self, method, path, query=None, data=None, redirects=True):
    """
    Sends HTTP request to LendingClub.

    Parameters
    ----------
    method : {GET, POST, HEAD, DELETE}
        The HTTP method to use: GET, POST, HEAD or DELETE
    path : string
        The path that will be appended to the domain defined in :attr:`base_url`.
    query : dict
        A dictionary of query string parameters
    data : dict
        A dictionary of POST data values
    redirects : boolean
        True to follow redirects, False to return the original response from the server.

    Returns
    -------
    requests.Response
        A `requests.Response <http://docs.python-requests.org/en/latest/api/#requests.Response>`_ object

    Raises
    ------
    SessionError
        If the HTTP method is not one of the supported verbs.
    NetworkError
        If the request fails or times out.
    """

    # Check session time
    self.__continue_session()

    # Build the URL and normalized method before the try block so the
    # error messages below never reference an unbound variable
    url = self.build_url(path)
    method = method.upper()

    try:
        self.__log('{0} request to: {1}'.format(method, url))

        if method == 'POST':
            response = self.__session.post(url, params=query, data=data, allow_redirects=redirects)
        elif method == 'GET':
            response = self.__session.get(url, params=query, data=data, allow_redirects=redirects)
        elif method == 'HEAD':
            response = self.__session.head(url, params=query, data=data, allow_redirects=redirects)
        elif method == 'DELETE':
            response = self.__session.delete(url, params=query, data=data, allow_redirects=redirects)
        else:
            raise SessionError('{0} is not a supported HTTP method'.format(method))

        self.last_response = response
        self.__log('Status code: {0}'.format(response.status_code))

        # Update session time
        self.last_request_time = time.time()

    # Timeout subclasses RequestException, so it must be caught FIRST;
    # the previous ordering made this branch unreachable and it also
    # referenced an unbound variable `e` (a latent NameError)
    except Timeout as e:
        raise NetworkError('{0} request timed out: {1}'.format(method, url), e)

    except (RequestException, ConnectionError, TooManyRedirects, HTTPError) as e:
        raise NetworkError('{0} failed to: {1}'.format(method, url), e)

    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def json_success(self, json):
    """
    Check the JSON response object for the success flag.

    Parameters
    ----------
    json : dict
        A dictionary representing a JSON object from lendingclub.com

    Returns
    -------
    boolean
        True only when *json* is a plain dict with ``result == 'success'``.
    """
    if type(json) is not dict:
        return False
    return json.get('result') == 'success'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __merge_values(self, from_dict, to_dict):
    """
    Merge dictionary objects recursively, by only updating keys existing in to_dict.

    Parameters
    ----------
    from_dict : dict
        The dictionary whose values are copied over.
    to_dict : dict
        The dictionary being updated in place; keys absent from it are ignored.

    Returns
    -------
    dict
        The updated ``to_dict``.
    """
    # Iterate keys directly: works identically on Python 2 and 3, unlike
    # the Python-2-only dict.iteritems() used before (the paired value was
    # never used anyway)
    for key in from_dict:
        # Only if the key already exists
        if key in to_dict:

            # Make sure the values are the same datatype
            assert type(to_dict[key]) is type(from_dict[key]), 'Data type for {0} is incorrect: {1}, should be {2}'.format(key, type(from_dict[key]), type(to_dict[key]))

            # Recursively dive into the next dictionary
            if type(to_dict[key]) is dict:
                to_dict[key] = self.__merge_values(from_dict[key], to_dict[key])

            # Replace value
            else:
                to_dict[key] = from_dict[key]

    return to_dict
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __normalize_grades(self):
    """
    Adjust the grades list: if any individual grade has been enabled,
    turn the 'All' flag off.
    """
    if 'grades' not in self:
        return
    grades = self['grades']
    if grades['All'] is not True:
        return
    # The first explicitly enabled grade invalidates the 'All' flag
    if any(name != 'All' and grades[name] is True for name in grades):
        grades['All'] = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __normalize_progress(self):
    """
    Snap the funding progress filter to the nearest multiple of 10.
    """
    value = self['funding_progress']
    if value % 10:
        # Round to the nearest ten, e.g. 54 -> 50
        value = int(round(float(value) / 10)) * 10
    self['funding_progress'] = value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __normalize(self):
    """
    Adjust the filter values so they are internally consistent.
    For example, if grade 'B' is set to True, then 'All' should be False.
    """
    # Skip while a normalize pass is already running or before initialization
    busy = self.__normalizing is True
    uninitialized = self.__initialized is False
    if busy or uninitialized:
        return

    self.__normalizing = True
    self.__normalize_grades()
    self.__normalize_progress()
    self.__normalizing = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_one(self, loan):
    """
    Validate a single loan result record against the filters.

    Parameters
    ----------
    loan : dict
        A single loan note record

    Returns
    -------
    boolean
        True or raises FilterValidationError

    Raises
    ------
    FilterValidationError
        If the loan does not match the filter criteria
    """
    assert type(loan) is dict, 'loan parameter must be a dictionary object'

    # Map the loan value keys to the filter keys.
    # NOTE: the filter-side names must match the keys actually stored on this
    # filter object ('grades', 'funding_progress'); the old values 'grade'
    # and 'progress' never matched, so the missing-key guard below was dead.
    req = {
        'loanGUID': 'loan_id',
        'loanGrade': 'grades',
        'loanLength': 'term',
        'loanUnfundedAmount': 'funding_progress',
        'loanAmountRequested': 'funding_progress',
        'alreadyInvestedIn': 'exclude_existing',
        'purpose': 'loan_purpose',
    }

    # Throw an error if the loan does not contain one of the criteria keys
    # that this filter has (.items() works on both Python 2 and 3)
    for key, criteria in req.items():
        if criteria in self and key not in loan:
            raise FilterValidationError('Loan does not have a "{0}" value.'.format(key), loan, criteria)

    # Loan ID
    if 'loan_id' in self:
        loan_ids = str(self['loan_id']).split(',')
        if str(loan['loanGUID']) not in loan_ids:
            raise FilterValidationError('Did not meet filter criteria for loan ID. {0} does not match {1}'.format(loan['loanGUID'], self['loan_id']), loan=loan, criteria='loan ID')

    # Grade: only examined when a grade filter is active, so loans without a
    # 'loanGrade' key no longer raise a bare KeyError
    if 'grades' in self and self['grades']['All'] is not True:
        grade = loan['loanGrade'][0]  # Extract the letter portion, e.g. 'B' from 'B3'
        if grade not in self['grades']:
            raise FilterValidationError('Loan grade "{0}" is unknown'.format(grade), loan, 'grade')
        elif self['grades'][grade] is False:
            raise FilterValidationError(loan=loan, criteria='grade')

    # Term
    if 'term' in self and self['term'] is not None:
        if loan['loanLength'] == 36 and self['term']['Year3'] is False:
            raise FilterValidationError(loan=loan, criteria='loan term')
        elif loan['loanLength'] == 60 and self['term']['Year5'] is False:
            raise FilterValidationError(loan=loan, criteria='loan term')

    # Progress
    if 'funding_progress' in self:
        loan_progress = (1 - (loan['loanUnfundedAmount'] / loan['loanAmountRequested'])) * 100
        if self['funding_progress'] > loan_progress:
            raise FilterValidationError(loan=loan, criteria='funding progress')

    # Exclude existing
    if 'exclude_existing' in self:
        if self['exclude_existing'] is True and loan['alreadyInvestedIn'] is True:
            raise FilterValidationError(loan=loan, criteria='exclude loans you are invested in')

    # Loan purpose (either an array or single value)
    if 'loan_purpose' in self and loan['purpose'] is not False:
        purpose = self['loan_purpose']
        if type(purpose) is not dict:
            purpose = {purpose: True}

        if 'All' not in purpose or purpose['All'] is False:
            if loan['purpose'] not in purpose:
                raise FilterValidationError(loan=loan, criteria='loan purpose')

    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_string(self):
    """Return the JSON string that LendingClub expects for its search."""
    self.__normalize()

    # Read the Handlebars template from disk
    tmpl_source = unicode(open(self.tmpl_file).read())

    # Render the template against this filter's values
    compiler = Compiler()
    render = compiler.compile(tmpl_source)
    rendered = render(self)
    if not rendered:
        return False
    out = ''.join(rendered)

    #
    # Collapse the rendered output into compact text by applying each
    # whitespace/punctuation cleanup, in order
    #
    cleanups = (
        ('\n', ''),                                         # remove newlines
        ('\s{3,}', ' '),                                    # collapse runs of spaces
        (',\s*([}\\]])', '\\1'),                            # hanging commas, i.e: [1, 2,]
        ('([{\\[}\\]])(,?)\s*([{\\[}\\]])', '\\1\\2\\3'),   # space between brackets, i.e: ], [
        ('\s*([{\\[\\]}:,])\s*', '\\1'),                    # space around [, {, }, ], : and ,
    )
    for pattern, replacement in cleanups:
        out = re.sub(pattern, replacement, out)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all_filters(lc):
    """
    Get a list of all your saved filters.

    Parameters
    ----------
    lc : :py:class:`lendingclub.LendingClub`
        An instance of the authenticated LendingClub class

    Returns
    -------
    list
        A list of lendingclub.filters.SavedFilter objects
    """
    response = lc.session.get('/browse/getSavedFiltersAj.action')
    payload = response.json()

    # Build a SavedFilter for every filter id the server reported;
    # an unsuccessful response yields an empty list
    if not lc.session.json_success(payload):
        return []
    return [SavedFilter(lc, saved['id']) for saved in payload['filters']]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self):
    """
    Load the filter from the server.

    Fetches the saved filter by ``self.id``, stores the raw HTTP response on
    ``self.response``, extracts the filter name and the *exact* filter JSON
    substring, then parses it via ``__analyze``.

    Raises
    ------
    SavedFilterError
        If no filter exists for this ID or the filter JSON cannot be parsed.
    """
    # Attempt to load the saved filter
    payload = {
        'id': self.id
    }
    response = self.lc.session.get('/browse/getSavedFilterAj.action', query=payload)
    self.response = response

    json_response = response.json()
    if self.lc.session.json_success(json_response) and json_response['filterName'] != 'No filters':
        self.name = json_response['filterName']

        #
        # Parse out the filter JSON string manually from the response JSON.
        # If the filter JSON is modified at all, or any value is out of order,
        # LendingClub will reject the filter and perform a wildcard search instead,
        # without any error. So we need to retain the filter JSON value exactly how it is given to us.
        #
        text = response.text

        # Cut off everything before "filter": [...]
        text = re.sub('\n', '', text)
        text = re.sub('^.*?,\s*["\']filter["\']:\s*\[(.*)', '[\\1', text)

        # Now loop through the string until we find the end of the filter block
        # This is a simple parser that keeps track of block elements, quotes and
        # escape characters
        blockTracker = []     # stack of closing chars we still expect, most recent first
        blockChars = {
            '[': ']',
            '{': '}'
        }
        inQuote = False       # holds the opening quote char while inside a string
        lastChar = None
        json_text = ""

        for char in text:
            json_text += char

            # Escape char: a single backslash escapes the next character;
            # two backslashes in a row cancel each other out
            if char == '\\':
                if lastChar == '\\':
                    lastChar = ''
                else:
                    lastChar = char
                continue

            # Quotes: toggle in/out of a quoted string.
            # NOTE(review): bracket characters *inside* a quoted string still
            # fall through to the block tracker below on later iterations —
            # presumably the filter JSON never contains brackets in string
            # values; verify against real responses.
            if char == "'" or char == '"':
                if inQuote is False:  # Starting a quote block
                    inQuote = char
                elif inQuote == char:  # Ending a quote block
                    inQuote = False
                lastChar = char
                continue

            # Start of a block
            if char in blockChars.keys():
                blockTracker.insert(0, blockChars[char])

            # End of a block, remove from block path
            elif len(blockTracker) > 0 and char == blockTracker[0]:
                blockTracker.pop(0)

            # No more blocks in the tracker which means we're at the end of the filter block
            if len(blockTracker) == 0 and lastChar is not None:
                break

            lastChar = char

        # Verify valid JSON
        try:
            if json_text.strip() == '':
                raise SavedFilterError('A saved filter could not be found for ID {0}'.format(self.id), response)

            json_test = json.loads(json_text)

            # Make sure it looks right
            assert type(json_test) is list, 'Expecting a list, instead received a {0}'.format(type(json_test))
            assert 'm_id' in json_test[0], 'Expecting a \'m_id\' property in each filter'
            assert 'm_value' in json_test[0], 'Expecting a \'m_value\' property in each filter'

            self.json = json_test
        except Exception as e:
            raise SavedFilterError('Could not parse filter from the JSON response: {0}'.format(str(e)))

        # Keep the verbatim substring: it is re-sent to LendingClub unmodified
        self.json_text = json_text
        self.__analyze()
    else:
        raise SavedFilterError('A saved filter could not be found for ID {0}'.format(self.id), response)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __analyze(self):
    """
    Analyze the filter JSON and attempt to parse out the individual filters.

    Walks ``self.json`` (the raw LendingClub filter list), maps numeric
    ``m_id`` codes to friendly filter names, and stores each parsed value
    on this dict-like object.

    Returns
    -------
    dict
        Mapping of filter name -> parsed value.
    """
    filter_values = {}

    # ID to filter name mapping
    name_map = {
        10: 'grades',
        11: 'loan_purpose',
        13: 'approved',
        15: 'funding_progress',
        38: 'exclude_existing',
        39: 'term',
        43: 'keyword'
    }

    if self.json is not None:
        filters = self.json
        for f in filters:
            if 'm_id' in f:

                # Unknown IDs keep the raw numeric id as the name
                name = f['m_id']
                # Get the name to represent this filter
                if f['m_id'] in name_map:
                    name = name_map[f['m_id']]

                # Get values
                if 'm_value' in f:
                    raw_values = f['m_value']
                    value = {}

                    # No value, skip it
                    if raw_values is None:
                        continue

                    # Loop through multiple values
                    if type(raw_values) is list:

                        # A single non string value, is THE value
                        if len(raw_values) == 1 and type(raw_values[0]['value']) not in [str, unicode]:
                            value = raw_values[0]['value']

                        # Create a dict of values: name = True
                        # (when the single-non-string branch above fired, this
                        # loop matches nothing, so the scalar value is kept)
                        for val in raw_values:
                            if type(val['value']) in [str, unicode]:
                                value[val['value']] = True

                    # A single value
                    else:
                        value = raw_values

                    # Normalize grades array
                    if name == 'grades':
                        if 'All' not in value:
                            value['All'] = False

                    # Add filter value
                    # dict.__setitem__ bypasses the subclass __setitem__ —
                    # presumably to avoid re-triggering normalization; confirm
                    filter_values[name] = value
                    dict.__setitem__(self, name, value)

    return filter_values
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _float_copy_to_out(out, origin):
""" Copy origin to out and return it. If ``out`` is None, a new copy (casted to floating point) is used. If ``out`` and ``origin`` are the same, we simply return it. Otherwise we copy the values. """ |
if out is None:
out = origin / 1 # The division forces cast to a floating point type
elif out is not origin:
np.copyto(out, origin)
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _distance_matrix_generic(x, centering, exponent=1):
    """Compute a centered distance matrix given a matrix."""
    _check_valid_dcov_exponent(exponent)

    data = _transform_to_2d(x)

    # Pairwise distances, then centered in place by the supplied function
    matrix = distances.pairwise_distances(data, exponent=exponent)
    return centering(matrix, out=matrix)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _af_inv_scaled(x):
    """Scale a random vector for using the affinely invariant measures."""
    data = _transform_to_2d(x)
    # Multiplying by the inverse square root of the covariance matrix
    # removes the effect of affine transformations of the data
    cov = np.atleast_2d(np.cov(data, rowvar=False))
    return data.dot(_mat_sqrt_inv(cov))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partial_distance_covariance(x, y, z):
    """
    Partial distance covariance estimator.

    Compute the estimator for the partial distance covariance of the
    random vectors corresponding to :math:`x` and :math:`y` with respect
    to the random variable corresponding to :math:`z`.

    Parameters
    ----------
    x: array_like
        First random vector. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.
    y: array_like
        Second random vector. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.
    z: array_like
        Random vector with respect to which the partial distance covariance
        is computed. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.

    Returns
    -------
    numpy scalar
        Value of the estimator of the partial distance covariance.

    See Also
    --------
    partial_distance_correlation
    """
    # U-centered distance matrices of each sample
    u_x = _u_distance_matrix(x)
    u_y = _u_distance_matrix(y)
    u_z = _u_distance_matrix(z)

    # Project both matrices onto the orthogonal complement of z's matrix
    complement = u_complementary_projection(u_z)
    return u_product(complement(u_x), complement(u_y))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partial_distance_correlation(x, y, z):
    # pylint:disable=too-many-locals
    """
    Partial distance correlation estimator.

    Compute the estimator for the partial distance correlation of the
    random vectors corresponding to :math:`x` and :math:`y` with respect
    to the random variable corresponding to :math:`z`.

    Parameters
    ----------
    x: array_like
        First random vector. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.
    y: array_like
        Second random vector. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.
    z: array_like
        Random vector with respect to which the partial distance correlation
        is computed. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.

    Returns
    -------
    numpy scalar
        Value of the estimator of the partial distance correlation.

    See Also
    --------
    partial_distance_covariance
    """
    u_x = _u_distance_matrix(x)
    u_y = _u_distance_matrix(y)
    u_z = _u_distance_matrix(z)

    def clipped_corr(inner_ab, inner_aa, inner_bb):
        """Correlation from U-inner products, clipped to [-1, 1]."""
        denom_sqr = inner_aa * inner_bb
        corr = inner_ab / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
        return np.clip(corr, -1, 1)

    aa = u_product(u_x, u_x)
    bb = u_product(u_y, u_y)
    cc = u_product(u_z, u_z)

    r_xy = clipped_corr(u_product(u_x, u_y), aa, bb)
    r_xz = clipped_corr(u_product(u_x, u_z), aa, cc)
    r_yz = clipped_corr(u_product(u_y, u_z), bb, cc)

    # Standard partial-correlation formula on the distance correlations
    denom = _sqrt(1 - r_xz ** 2) * _sqrt(1 - r_yz ** 2)
    return (r_xy - r_xz * r_yz) / denom if denom != 0 else denom
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _energy_distance_from_distance_matrices( distance_xx, distance_yy, distance_xy):
"""Compute energy distance with precalculated distance matrices.""" |
return (2 * np.mean(distance_xy) - np.mean(distance_xx) -
np.mean(distance_yy)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _distance_covariance_sqr_naive(x, y, exponent=1):
    """
    Naive biased estimator for distance covariance.

    Computes the biased estimator for distance covariance between two
    matrices, using an :math:`O(N^2)` algorithm.
    """
    matrix_x = _distance_matrix(x, exponent=exponent)
    matrix_y = _distance_matrix(y, exponent=exponent)
    return mean_product(matrix_x, matrix_y)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _u_distance_covariance_sqr_naive(x, y, exponent=1):
    """
    Naive unbiased estimator for distance covariance.

    Computes the unbiased estimator for distance covariance between two
    matrices, using an :math:`O(N^2)` algorithm.
    """
    matrix_x = _u_distance_matrix(x, exponent=exponent)
    matrix_y = _u_distance_matrix(y, exponent=exponent)
    return u_product(matrix_x, matrix_y)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _distance_sqr_stats_naive_generic(x, y, matrix_centered, product,
                                      exponent=1):
    """Compute generic squared stats."""
    matrix_a = matrix_centered(x, exponent=exponent)
    matrix_b = matrix_centered(y, exponent=exponent)

    cov_xy = product(matrix_a, matrix_b)
    var_x = product(matrix_a, matrix_a)
    var_y = product(matrix_b, matrix_b)

    denominator = _sqrt(np.absolute(var_x * var_y))

    # Comparisons using a tolerance can change results if the
    # covariance has a similar order of magnitude
    corr_xy = 0.0 if denominator == 0.0 else cov_xy / denominator

    return Stats(covariance_xy=cov_xy,
                 correlation_xy=corr_xy,
                 variance_x=var_x,
                 variance_y=var_y)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _distance_correlation_sqr_naive(x, y, exponent=1):
    """Biased distance correlation estimator between two matrices."""
    stats = _distance_sqr_stats_naive_generic(
        x, y,
        matrix_centered=_distance_matrix,
        product=mean_product,
        exponent=exponent)
    return stats.correlation_xy
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _u_distance_correlation_sqr_naive(x, y, exponent=1):
    """Bias-corrected distance correlation estimator between two matrices."""
    stats = _distance_sqr_stats_naive_generic(
        x, y,
        matrix_centered=_u_distance_matrix,
        product=u_product,
        exponent=exponent)
    return stats.correlation_xy
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _can_use_fast_algorithm(x, y, exponent=1):
    """
    Check if the fast algorithm for distance stats can be used.

    The fast algorithm has complexity :math:`O(NlogN)`, better than the
    complexity of the naive algorithm (:math:`O(N^2)`).

    The algorithm can only be used for random variables (not vectors) where
    the number of instances is greater than 3. Also, the exponent must be 1.
    """
    return (exponent == 1 and
            _is_random_variable(x) and
            _is_random_variable(y) and
            x.shape[0] > 3 and
            y.shape[0] > 3)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _dyad_update(y, c):
    # pylint:disable=too-many-locals
    # This function has many locals so it can be compared
    # with the original algorithm.
    """
    Inner function of the fast distance covariance.

    This function is compiled because otherwise it would become
    a bottleneck.

    Accumulates, for each position ``i``, sums of ``c`` over earlier
    elements selected through a dyadic (binary-tree) decomposition of the
    values in ``y``. NOTE(review): the index arithmetic assumes ``y``
    contains 1-based ranks — confirm against the callers.
    """
    n = y.shape[0]
    gamma = np.zeros(n, dtype=c.dtype)

    # Step 1: get the smallest l such that n <= 2^l
    l_max = int(math.ceil(np.log2(n)))

    # Step 2: assign s(l, k) = 0
    # s stores all dyadic levels flattened into one array
    s_len = 2 ** (l_max + 1)
    s = np.zeros(s_len, dtype=c.dtype)

    # pos_sums[l-1] is the offset of level l inside the flattened s array
    pos_sums = np.arange(l_max)
    pos_sums[:] = 2 ** (l_max - pos_sums)
    pos_sums = np.cumsum(pos_sums)

    # Step 3: iteration
    for i in range(1, n):

        # Step 3.a: update s(l, k)
        # Fold the previous element's weight c[i - 1] into every level
        for l in range(l_max):
            k = int(math.ceil(y[i - 1] / 2 ** l))
            pos = k - 1

            if l > 0:
                pos += pos_sums[l - 1]

            s[pos] += c[i - 1]

        # Steps 3.b and 3.c
        for l in range(l_max):
            k = int(math.floor((y[i] - 1) / 2 ** l))
            # True when k is odd (true division vs floor), i.e. this node
            # is a right child whose left sibling's sum must be added
            if k / 2 > math.floor(k / 2):
                pos = k - 1
                if l > 0:
                    pos += pos_sums[l - 1]

                gamma[i] = gamma[i] + s[pos]

    return gamma
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _distance_covariance_sqr_fast_generic(
        x, y, unbiased=False):
    # pylint:disable=too-many-locals
    # This function has many locals so it can be compared
    # with the original algorithm.
    """
    Fast algorithm for the squared distance covariance.

    Works only on one-dimensional samples (both inputs are flattened) with
    more than 3 observations; the steps below mirror the published
    :math:`O(N \\log N)` algorithm — presumably Huo & Székely (2016), confirm
    against the module's references.

    Parameters
    ----------
    x, y: array_like
        One-dimensional samples of equal length.
    unbiased: bool
        When True, use the bias-corrected denominators (n-1)(n-2)(n-3).
    """
    x = np.asarray(x)
    y = np.asarray(y)

    x = np.ravel(x)
    y = np.ravel(y)

    n = x.shape[0]
    assert n > 3
    assert n == y.shape[0]
    temp = range(n)

    # Step 1: sorted values (vx, vy) and each element's rank (ix, iy)
    ix0 = np.argsort(x)
    vx = x[ix0]

    ix = np.zeros(n, dtype=int)
    ix[ix0] = temp

    iy0 = np.argsort(y)
    vy = y[iy0]

    iy = np.zeros(n, dtype=int)
    iy[iy0] = temp

    # Step 2: cumulative sums of the sorted values
    sx = np.cumsum(vx)
    sy = np.cumsum(vy)

    # Step 3: alpha = rank, beta = sum of strictly smaller values
    alpha_x = ix
    alpha_y = iy

    beta_x = sx[ix] - vx[ix]
    beta_y = sy[iy] - vy[iy]

    # Step 4: grand totals
    x_dot = np.sum(x)
    y_dot = np.sum(y)

    # Step 5: row sums of the (implicit) distance matrices
    a_i_dot = x_dot + (2 * alpha_x - n) * x - 2 * beta_x
    b_i_dot = y_dot + (2 * alpha_y - n) * y - 2 * beta_y

    sum_ab = np.sum(a_i_dot * b_i_dot)

    # Step 6: grand sums of the distance matrices
    a_dot_dot = 2 * np.sum(alpha_x * x) - 2 * np.sum(beta_x)
    b_dot_dot = 2 * np.sum(alpha_y * y) - 2 * np.sum(beta_y)

    # Step 7: partial sums needed for the pairwise product term
    gamma_1 = _partial_sum_2d(x, y, np.ones(n, dtype=x.dtype))
    gamma_x = _partial_sum_2d(x, y, x)
    gamma_y = _partial_sum_2d(x, y, y)
    gamma_xy = _partial_sum_2d(x, y, x * y)

    # Step 8: sum over i,j of a_ij * b_ij without forming the matrices
    aijbij = np.sum(x * y * gamma_1 + gamma_xy - x * gamma_y - y * gamma_x)

    if unbiased:
        d3 = (n - 3)
        d2 = (n - 2)
        d1 = (n - 1)
    else:
        d3 = d2 = d1 = n

    # Step 9: combine the three terms with the (un)biased denominators
    d_cov = (aijbij / n / d3 - 2 * sum_ab / n / d2 / d3 +
             a_dot_dot / n * b_dot_dot / d1 / d2 / d3)

    return d_cov
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _distance_stats_sqr_fast_generic(x, y, dcov_function):
    """Compute the distance stats using the fast algorithm."""
    cov_xy = dcov_function(x, y)
    var_x = dcov_function(x, x)
    var_y = dcov_function(y, y)

    denominator = _sqrt(np.absolute(var_x * var_y))

    # Comparisons using a tolerance can change results if the
    # covariance has a similar order of magnitude
    if denominator == 0.0:
        # Preserve the dtype of the computation for the zero result
        corr_xy = denominator.dtype.type(0)
    else:
        corr_xy = cov_xy / denominator

    return Stats(covariance_xy=cov_xy,
                 correlation_xy=corr_xy,
                 variance_x=var_x,
                 variance_y=var_y)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def distance_correlation_af_inv_sqr(x, y):
    """
    Square of the affinely invariant distance correlation.

    Computes the estimator for the square of the affinely invariant distance
    correlation between two random vectors.

    .. warning:: The return value of this function is undefined when the
                 covariance matrix of :math:`x` or :math:`y` is singular.

    Parameters
    ----------
    x: array_like
        First random vector. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.
    y: array_like
        Second random vector. The columns correspond with the individual random
        variables while the rows are individual instances of the random vector.

    Returns
    -------
    numpy scalar
        Value of the estimator of the squared affinely invariant
        distance correlation.

    See Also
    --------
    distance_correlation
    u_distance_correlation
    """
    scaled_x = _af_inv_scaled(x)
    scaled_y = _af_inv_scaled(y)

    correlation = distance_correlation_sqr(scaled_x, scaled_y)
    # A singular covariance matrix yields NaN; report 0 in that case
    return 0 if np.isnan(correlation) else correlation
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.