<SYSTEM_TASK:> Generates all configuration file name combinations to read. <END_TASK> <USER_TASK:> Description: def generate_configfile_names(config_files, config_searchpath=None): """Generates all configuration file name combinations to read. .. sourcecode:: # -- ALGORITHM: # First basenames/directories are prefered and override other files. for config_path in reversed(config_searchpath): for config_basename in reversed(config_files): config_fname = os.path.join(config_path, config_basename) if os.path.isfile(config_fname): yield config_fname :param config_files: List of config file basenames. :param config_searchpath: List of directories to look for config files. :return: List of available configuration file names (as generator) """
    if config_searchpath is None:
        config_searchpath = ["."]

    for config_path in reversed(config_searchpath):
        for config_basename in reversed(config_files):
            config_fname = os.path.join(config_path, config_basename)
            if os.path.isfile(config_fname):
                # MAYBE: yield os.path.normpath(config_fname)
                yield config_fname
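A brief usage sketch (the file names and search directories below are hypothetical; only files that actually exist are yielded, in the reversed search-path order coded above):

import os

searchpath = [".", os.path.expanduser("~/.config/mytool")]
for config_fname in generate_configfile_names(["tool.cfg", "setup.cfg"], searchpath):
    print("Would read:", config_fname)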
<SYSTEM_TASK:> Indicates if this schema can be used for a config section <END_TASK> <USER_TASK:> Description: def matches_section(cls, section_name, supported_section_names=None): """Indicates if this schema can be used for a config section by using the section name. :param section_name: Config section name to check. :return: True, if this schema can be applied to the config section. :return: False, if this schema does not match the config section. """
    if supported_section_names is None:
        supported_section_names = getattr(cls, "section_names", None)
    # pylint: disable=invalid-name
    for supported_section_name_or_pattern in supported_section_names:
        if fnmatch(section_name, supported_section_name_or_pattern):
            return True
    # -- OTHERWISE:
    return False
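The wildcard matching relies on the standard library's ``fnmatch``; a quick illustration of the pattern semantics assumed here (the section names are hypothetical):

from fnmatch import fnmatch

assert fnmatch("behave.userdata.more", "behave.userdata*")
assert not fnmatch("other.section", "behave.userdata*")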
<SYSTEM_TASK:> Remove files or files selected by file patterns. <END_TASK> <USER_TASK:> Description: def cleanup_files(patterns, dry_run=False, workdir="."): """Remove files or files selected by file patterns. Skips removal if file does not exist. :param patterns: File patterns, like "**/*.pyc" (as list). :param dry_run: Dry-run mode indicator (as bool). :param workdir: Current work directory (default=".") """
    current_dir = Path(workdir)
    python_basedir = Path(Path(sys.executable).dirname()).joinpath("..").abspath()
    error_message = None
    error_count = 0
    for file_pattern in patterns:
        for file_ in path_glob(file_pattern, current_dir):
            if file_.abspath().startswith(python_basedir):
                # -- PROTECT CURRENTLY USED VIRTUAL ENVIRONMENT:
                continue
            if dry_run:
                print("REMOVE: %s (dry-run)" % file_)
            else:
                print("REMOVE: %s" % file_)
                try:
                    file_.remove_p()
                except os.error as e:
                    message = "%s: %s" % (e.__class__.__name__, e)
                    print(message + " basedir: " + python_basedir)
                    error_count += 1
                    if not error_message:
                        error_message = message
    # -- NOTE: raising CleanupError on failures is currently disabled ("if False and ...").
    if False and error_message:
        class CleanupError(RuntimeError):
            pass
        raise CleanupError(error_message)
<SYSTEM_TASK:> Stacks a redis command inside the object. <END_TASK> <USER_TASK:> Description: def stack_call(self, *args): """Stacks a redis command inside the object. The syntax is the same as the call() method of a Client class. Args: *args: full redis command as variable length argument list. Examples: >>> pipeline = Pipeline() >>> pipeline.stack_call("HSET", "key", "field", "value") >>> pipeline.stack_call("PING") >>> pipeline.stack_call("INCR", "key2") """
    self.pipelined_args.append(args)
    self.number_of_stacked_calls += 1
<SYSTEM_TASK:> Disconnects the object. <END_TASK> <USER_TASK:> Description: def disconnect(self): """Disconnects the object. Safe method (no exception, even if it's already disconnected or if there are some connection errors). """
if not self.is_connected() and not self.is_connecting(): return LOG.debug("disconnecting from %s...", self._redis_server()) self.__periodic_callback.stop() try: self._ioloop.remove_handler(self.__socket_fileno) self._listened_events = 0 except Exception: pass self.__socket_fileno = -1 try: self.__socket.close() except Exception: pass self._state.set_disconnected() self._close_callback() LOG.debug("disconnected from %s", self._redis_server())
<SYSTEM_TASK:> Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only. <END_TASK> <USER_TASK:> Description: def surrogate_escape(error): """ Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only. """
    chars = error.object[error.start:error.end]
    assert len(chars) == 1
    val = ord(chars)
    val += 0xdc00
    return __builtin__.unichr(val), error.end
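A hedged usage sketch (Python 2 only, since ``__builtin__``/``unichr`` do not exist on Python 3): such a handler is typically registered once with the ``codecs`` error-handler registry under an arbitrary name and then referenced when decoding:

import codecs

codecs.register_error("surrogateescape_py2", surrogate_escape)  # name is arbitrary
decoded = "\xff".decode("ascii", "surrogateescape_py2")
# decoded is u'\udcff' -- the undecodable byte is mapped into the surrogate range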
<SYSTEM_TASK:> Connects the client object to redis. <END_TASK> <USER_TASK:> Description: def connect(self): """Connects the client object to redis. It's safe to use this method even if you are already connected. Note: this method is useless with autoconnect mode (default). Returns: a Future object with True as result if the connection was ok. """
if self.is_connected(): raise tornado.gen.Return(True) cb1 = self._read_callback cb2 = self._close_callback self.__callback_queue = collections.deque() self._reply_list = [] self.__reader = hiredis.Reader(replyError=ClientError) kwargs = self.connection_kwargs self.__connection = Connection(cb1, cb2, **kwargs) connection_status = yield self.__connection.connect() if connection_status is not True: # nothing left to do here, return raise tornado.gen.Return(False) if self.password is not None: authentication_status = yield self._call('AUTH', self.password) if authentication_status != b'OK': # incorrect password, return back the result LOG.warning("impossible to connect: bad password") self.__connection.disconnect() raise tornado.gen.Return(False) if self.db != 0: db_status = yield self._call('SELECT', self.db) if db_status != b'OK': LOG.warning("can't select db %s", self.db) raise tornado.gen.Return(False) raise tornado.gen.Return(True)
<SYSTEM_TASK:> Callback called when redis closed the connection. <END_TASK> <USER_TASK:> Description: def _close_callback(self): """Callback called when redis closed the connection. The callback queue is emptied and we call each callback found with None or with an exception object to wake up blocked client. """
while True: try: callback = self.__callback_queue.popleft() callback(ConnectionError("closed connection")) except IndexError: break if self.subscribed: # pubsub clients self._reply_list.append(ConnectionError("closed connection")) self._condition.notify_all()
<SYSTEM_TASK:> Callback called when some data are read on the socket. <END_TASK> <USER_TASK:> Description: def _read_callback(self, data=None): """Callback called when some data are read on the socket. The buffer is given to the hiredis parser. If a reply is complete, we put the decoded reply on the reply queue. Args: data (str): string (buffer) read on the socket. """
    try:
        if data is not None:
            self.__reader.feed(data)
            while True:
                reply = self.__reader.gets()
                if reply is not False:
                    try:
                        callback = self.__callback_queue.popleft()
                        # normal client (1 reply = 1 callback)
                        callback(reply)
                    except IndexError:
                        # pubsub clients
                        self._reply_list.append(reply)
                        self._condition.notify_all()
                else:
                    break
    except hiredis.ProtocolError:
        # something nasty occurred (corrupt stream => no way to recover)
        LOG.warning("corrupted stream => disconnect")
        self.disconnect()
<SYSTEM_TASK:> Calls a redis command and returns a Future of the reply. <END_TASK> <USER_TASK:> Description: def call(self, *args, **kwargs): """Calls a redis command and returns a Future of the reply. Args: *args: full redis command as variable length argument list or a Pipeline object (as a single argument). **kwargs: internal private options (do not use). Returns: a Future with the decoded redis reply as result (when available) or a ConnectionError object in case of connection error. Raises: ClientError: your Pipeline object is empty. Examples: >>> @tornado.gen.coroutine def foobar(): client = Client() result = yield client.call("HSET", "key", "field", "val") """
    if not self.is_connected():
        if self.autoconnect:
            # We use this method only when we are not connected
            # to avoid the performance penalty of the gen.coroutine decorator
            return self._call_with_autoconnect(*args, **kwargs)
        else:
            error = ConnectionError("you are not connected and "
                                    "autoconnect=False")
            return tornado.gen.maybe_future(error)
    return self._call(*args, **kwargs)
<SYSTEM_TASK:> Calls a redis command, waits for the reply and call a callback. <END_TASK> <USER_TASK:> Description: def async_call(self, *args, **kwargs): """Calls a redis command, waits for the reply and call a callback. Following options are available (not part of the redis command itself): - callback Function called (with the result as argument) when the result is available. If not set, the reply is silently discarded. In case of errors, the callback is called with a TornadisException object as argument. Args: *args: full redis command as variable length argument list or a Pipeline object (as a single argument). **kwargs: options as keyword parameters. Examples: >>> def cb(result): pass >>> client.async_call("HSET", "key", "field", "val", callback=cb) """
def after_autoconnect_callback(future): if self.is_connected(): self._call(*args, **kwargs) else: # FIXME pass if 'callback' not in kwargs: kwargs['callback'] = discard_reply_cb if not self.is_connected(): if self.autoconnect: connect_future = self.connect() cb = after_autoconnect_callback self.__connection._ioloop.add_future(connect_future, cb) else: error = ConnectionError("you are not connected and " "autoconnect=False") kwargs['callback'](error) else: self._call(*args, **kwargs)
<SYSTEM_TASK:> Formats arguments into redis protocol... <END_TASK> <USER_TASK:> Description: def format_args_in_redis_protocol(*args): """Formats arguments into redis protocol... This function makes and returns a string/buffer corresponding to given arguments formated with the redis protocol. integer, text, string or binary types are automatically converted (using utf8 if necessary). More informations about the protocol: http://redis.io/topics/protocol Args: *args: full redis command as variable length argument list Returns: binary string (arguments in redis protocol) Examples: >>> format_args_in_redis_protocol("HSET", "key", "field", "value") '*4\r\n$4\r\nHSET\r\n$3\r\nkey\r\n$5\r\nfield\r\n$5\r\nvalue\r\n' """
buf = WriteBuffer() l = "*%d\r\n" % len(args) # noqa: E741 if six.PY2: buf.append(l) else: # pragma: no cover buf.append(l.encode('utf-8')) for arg in args: if isinstance(arg, six.text_type): # it's a unicode string in Python2 or a standard (unicode) # string in Python3, let's encode it in utf-8 to get raw bytes arg = arg.encode('utf-8') elif isinstance(arg, six.string_types): # it's a basestring in Python2 => nothing to do pass elif isinstance(arg, six.binary_type): # pragma: no cover # it's a raw bytes string in Python3 => nothing to do pass elif isinstance(arg, six.integer_types): tmp = "%d" % arg if six.PY2: arg = tmp else: # pragma: no cover arg = tmp.encode('utf-8') elif isinstance(arg, WriteBuffer): # it's a WriteBuffer object => nothing to do pass else: raise Exception("don't know what to do with %s" % type(arg)) l = "$%d\r\n" % len(arg) # noqa: E741 if six.PY2: buf.append(l) else: # pragma: no cover buf.append(l.encode('utf-8')) buf.append(arg) buf.append(b"\r\n") return buf
<SYSTEM_TASK:> Internal "done callback" to set the result of the object. <END_TASK> <USER_TASK:> Description: def _done_callback(self, wrapped): """Internal "done callback" to set the result of the object. The result of the object if forced by the wrapped future. So this internal callback must be called when the wrapped future is ready. Args: wrapped (Future): the wrapped Future object """
    if wrapped.exception():
        self.set_exception(wrapped.exception())
    else:
        self.set_result(wrapped.result())
<SYSTEM_TASK:> The result method which returns a context manager <END_TASK> <USER_TASK:> Description: def result(self): """The result method which returns a context manager Returns: ContextManager: The corresponding context manager """
if self.exception(): raise self.exception() # Otherwise return a context manager that cleans up after the block. @contextlib.contextmanager def f(): try: yield self._wrapped.result() finally: self._exit_callback() return f()
<SYSTEM_TASK:> Create the URL in the LyricWikia format <END_TASK> <USER_TASK:> Description: def create_url(artist, song): """Create the URL in the LyricWikia format"""
return (__BASE_URL__ + '/wiki/{artist}:{song}'.format(artist=urlize(artist), song=urlize(song)))
<SYSTEM_TASK:> Retrieve the lyrics of the song and return the first one in case <END_TASK> <USER_TASK:> Description: def get_lyrics(artist, song, linesep='\n', timeout=None): """Retrieve the lyrics of the song and return the first one in case multiple versions are available."""
return get_all_lyrics(artist, song, linesep, timeout)[0]
<SYSTEM_TASK:> Retrieve a list of all the lyrics versions of a song. <END_TASK> <USER_TASK:> Description: def get_all_lyrics(artist, song, linesep='\n', timeout=None): """Retrieve a list of all the lyrics versions of a song."""
url = create_url(artist, song) response = _requests.get(url, timeout=timeout) soup = _BeautifulSoup(response.content, "html.parser") lyricboxes = soup.findAll('div', {'class': 'lyricbox'}) if not lyricboxes: raise LyricsNotFound('Cannot download lyrics') for lyricbox in lyricboxes: for br in lyricbox.findAll('br'): br.replace_with(linesep) return [lyricbox.text.strip() for lyricbox in lyricboxes]
<SYSTEM_TASK:> Open an ARF file, creating as necessary. <END_TASK> <USER_TASK:> Description: def open_file(name, mode=None, driver=None, libver=None, userblock_size=None, **kwargs): """Open an ARF file, creating as necessary. Use this instead of h5py.File to ensure that root-level attributes and group creation property lists are set correctly. """
import sys import os from h5py import h5p from h5py._hl import files try: # If the byte string doesn't match the default # encoding, just pass it on as-is. Note Unicode # objects can always be encoded. name = name.encode(sys.getfilesystemencoding()) except (UnicodeError, LookupError): pass exists = os.path.exists(name) try: fcpl = h5p.create(h5p.FILE_CREATE) fcpl.set_link_creation_order( h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) except AttributeError: # older version of h5py fp = files.File(name, mode=mode, driver=driver, libver=libver, **kwargs) else: fapl = files.make_fapl(driver, libver, **kwargs) fp = files.File(files.make_fid(name, mode, userblock_size, fapl, fcpl)) if not exists and fp.mode == 'r+': set_attributes(fp, arf_library='python', arf_library_version=__version__, arf_version=spec_version) return fp
<SYSTEM_TASK:> Create a new ARF entry under group, setting required attributes. <END_TASK> <USER_TASK:> Description: def create_entry(group, name, timestamp, **attributes): """Create a new ARF entry under group, setting required attributes. An entry is an abstract collection of data which all refer to the same time frame. Data can include physiological recordings, sound recordings, and derived data such as spike times and labels. See add_data() for information on how data are stored. name -- the name of the new entry. any valid python string. timestamp -- timestamp of entry (datetime object, or seconds since January 1, 1970). Can be an integer, a float, or a tuple of integers (seconds, microsceconds) Additional keyword arguments are set as attributes on created entry. Returns: newly created entry object """
# create group using low-level interface to store creation order from h5py import h5p, h5g, _hl try: gcpl = h5p.create(h5p.GROUP_CREATE) gcpl.set_link_creation_order( h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED) except AttributeError: grp = group.create_group(name) else: name, lcpl = group._e(name, lcpl=True) grp = _hl.group.Group(h5g.create(group.id, name, lcpl=lcpl, gcpl=gcpl)) set_uuid(grp, attributes.pop("uuid", None)) set_attributes(grp, timestamp=convert_timestamp(timestamp), **attributes) return grp
<SYSTEM_TASK:> Create an ARF dataset under group, setting required attributes <END_TASK> <USER_TASK:> Description: def create_dataset(group, name, data, units='', datatype=DataTypes.UNDEFINED, chunks=True, maxshape=None, compression=None, **attributes): """Create an ARF dataset under group, setting required attributes Required arguments: name -- the name of dataset in which to store the data data -- the data to store Data can be of the following types: * sampled data: an N-D numerical array of measurements * "simple" event data: a 1-D array of times * "complex" event data: a 1-D array of records, with field 'start' required Optional arguments: datatype -- a code defining the nature of the data in the channel units -- channel units (optional for sampled data, otherwise required) sampling_rate -- required for sampled data and event data with units=='samples' Arguments passed to h5py: maxshape -- make the node resizable up to this shape. Use None for axes that need to be unlimited. chunks -- specify the chunk size. The optimal chunk size depends on the intended use of the data. For single-channel sampled data the auto-chunking (True) is probably best. compression -- compression strategy. Can be 'gzip', 'szip', 'lzf' or an integer in range(10) specifying gzip(N). Only gzip is really portable. Additional arguments are set as attributes on the created dataset Returns the created dataset """
from numpy import asarray srate = attributes.get('sampling_rate', None) # check data validity before doing anything if not hasattr(data, 'dtype'): data = asarray(data) if data.dtype.kind in ('S', 'O', 'U'): raise ValueError( "data must be in array with numeric or compound type") if data.dtype.kind == 'V': if 'start' not in data.dtype.names: raise ValueError("complex event data requires 'start' field") if not isinstance(units, (list, tuple)): raise ValueError("complex event data requires sequence of units") if not len(units) == len(data.dtype.names): raise ValueError("number of units doesn't match number of fields") if units == '': if srate is None or not srate > 0: raise ValueError( "unitless data assumed time series and requires sampling_rate attribute") elif units == 'samples': if srate is None or not srate > 0: raise ValueError( "data with units of 'samples' requires sampling_rate attribute") # NB: can't really catch case where sampled data has units but doesn't # have sampling_rate attribute dset = group.create_dataset( name, data=data, maxshape=maxshape, chunks=chunks, compression=compression) set_attributes(dset, units=units, datatype=datatype, **attributes) return dset
<SYSTEM_TASK:> Check the ARF version attribute of file for compatibility. <END_TASK> <USER_TASK:> Description: def check_file_version(file): """Check the ARF version attribute of file for compatibility. Raises DeprecationWarning for backwards-incompatible files, FutureWarning for (potentially) forwards-incompatible files, and UserWarning for files that may not have been created by an ARF library. Returns the version for the file """
from distutils.version import StrictVersion as Version try: ver = file.attrs.get('arf_version', None) if ver is None: ver = file.attrs['arf_library_version'] except KeyError: raise UserWarning( "Unable to determine ARF version for {0.filename};" "created by another program?".format(file)) try: # if the attribute is stored as a string, it's ascii-encoded ver = ver.decode("ascii") except (LookupError, AttributeError): pass # should be backwards compatible after 1.1 file_version = Version(ver) if file_version < Version('1.1'): raise DeprecationWarning( "ARF library {} may have trouble reading file " "version {} (< 1.1)".format(version, file_version)) elif file_version >= Version('3.0'): raise FutureWarning( "ARF library {} may be incompatible with file " "version {} (>= 3.0)".format(version, file_version)) return file_version
<SYSTEM_TASK:> Set multiple attributes on node. <END_TASK> <USER_TASK:> Description: def set_attributes(node, overwrite=True, **attributes): """Set multiple attributes on node. If overwrite is False, and the attribute already exists, does nothing. If the value for a key is None, the attribute is deleted. """
    aset = node.attrs
    for k, v in attributes.items():
        if not overwrite and k in aset:
            pass
        elif v is None:
            if k in aset:
                del aset[k]
        else:
            aset[k] = v
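A brief usage sketch, assuming ``fp`` is an open ARF/HDF5 file and ``"entry_00001"`` is a hypothetical group name:

node = fp["entry_00001"]
set_attributes(node, experimenter="smith", datatype=1)        # set/overwrite two attributes
set_attributes(node, obsolete_attr=None)                      # a value of None deletes the attribute
set_attributes(node, overwrite=False, experimenter="jones")   # ignored: attribute already exists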
<SYSTEM_TASK:> Returns a sequence of links in group in order of creation. <END_TASK> <USER_TASK:> Description: def keys_by_creation(group): """Returns a sequence of links in group in order of creation. Raises an error if the group was not set to track creation order. """
from h5py import h5 out = [] try: group._id.links.iterate( out.append, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC) except (AttributeError, RuntimeError): # pre 2.2 shim def f(name): if name.find(b'/', 1) == -1: out.append(name) group._id.links.visit( f, idx_type=h5.INDEX_CRT_ORDER, order=h5.ITER_INC) return map(group._d, out)
<SYSTEM_TASK:> Make an ARF timestamp from an object. <END_TASK> <USER_TASK:> Description: def convert_timestamp(obj): """Make an ARF timestamp from an object. Argument can be a datetime.datetime object, a time.struct_time, an integer, a float, or a tuple of integers. The returned value is a numpy array with the integer number of seconds since the Epoch and any additional microseconds. Note that because floating point values are approximate, the conversion between float and integer tuple may not be reversible. """
    import numbers
    from datetime import datetime
    from time import mktime, struct_time
    from numpy import zeros

    out = zeros(2, dtype='int64')
    if isinstance(obj, datetime):
        out[0] = mktime(obj.timetuple())
        out[1] = obj.microsecond
    elif isinstance(obj, struct_time):
        out[0] = mktime(obj)
    elif isinstance(obj, numbers.Integral):
        out[0] = obj
    elif isinstance(obj, numbers.Real):
        out[0] = obj
        out[1] = (obj - out[0]) * 1e6
    else:
        try:
            out[:2] = obj[:2]
        except Exception:
            # avoid a bare "except:" clause; re-raise as a TypeError
            raise TypeError("unable to convert %s to timestamp" % obj)
    return out
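A few illustrative calls (numeric inputs avoid the timezone dependence of ``datetime``/``struct_time`` inputs; return values are 2-element int64 numpy arrays):

convert_timestamp(1234567890)         # -> array([1234567890, 0])
convert_timestamp(1234567890.25)      # -> array([1234567890, 250000])
convert_timestamp((1234567890, 250))  # -> array([1234567890, 250])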
<SYSTEM_TASK:> Set the uuid attribute of an HDF5 object. Use this method to ensure correct dtype <END_TASK> <USER_TASK:> Description: def set_uuid(obj, uuid=None): """Set the uuid attribute of an HDF5 object. Use this method to ensure correct dtype """
    from uuid import uuid4, UUID

    if uuid is None:
        uuid = uuid4()
    elif isinstance(uuid, bytes):
        if len(uuid) == 16:
            uuid = UUID(bytes=uuid)
        else:
            uuid = UUID(hex=uuid)
    if "uuid" in obj.attrs:
        del obj.attrs["uuid"]
    obj.attrs.create("uuid", str(uuid).encode('ascii'), dtype="|S36")
<SYSTEM_TASK:> Return the uuid for obj, or null uuid if none is set <END_TASK> <USER_TASK:> Description: def get_uuid(obj): """Return the uuid for obj, or null uuid if none is set"""
# TODO: deprecate null uuid ret val from uuid import UUID try: uuid = obj.attrs['uuid'] except KeyError: return UUID(int=0) # convert to unicode for python 3 try: uuid = uuid.decode('ascii') except (LookupError, AttributeError): pass return UUID(uuid)
<SYSTEM_TASK:> Return the number of children of obj, optionally restricting by class <END_TASK> <USER_TASK:> Description: def count_children(obj, type=None): """Return the number of children of obj, optionally restricting by class"""
    if type is None:
        return len(obj)
    else:
        # there doesn't appear to be any hdf5 function for getting this
        # information without inspecting each child, which makes this somewhat
        # slow
        return sum(1 for x in obj if obj.get(x, getclass=True) is type)
<SYSTEM_TASK:> generate a dict keyed by value <END_TASK> <USER_TASK:> Description: def _todict(cls): """ generate a dict keyed by value """
return dict((getattr(cls, attr), attr) for attr in dir(cls) if not attr.startswith('_'))
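A tiny illustration with a hypothetical constants class (mirroring how a ``DataTypes``-style class would be inverted into a value-to-name mapping):

class ExampleTypes(object):          # hypothetical stand-in for DataTypes
    UNDEFINED = 0
    ACOUSTIC = 1

# _todict(ExampleTypes) -> {0: 'UNDEFINED', 1: 'ACOUSTIC'}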
<SYSTEM_TASK:> wrapper around the rsync command. <END_TASK> <USER_TASK:> Description: def rsync(*args, **kwargs): """ wrapper around the rsync command. the ssh connection arguments are set automatically. any args are just passed directly to rsync. you can use {host_string} in place of the server. the kwargs are passed on the 'local' fabric command. if not set, 'capture' is set to False. example usage: rsync('-pthrvz', "{host_string}:/some/src/directory", "some/destination/") """
kwargs.setdefault('capture', False) replacements = dict( host_string="{user}@{host}".format( user=env.instance.config.get('user', 'root'), host=env.instance.config.get( 'host', env.instance.config.get( 'ip', env.instance.uid)))) args = [x.format(**replacements) for x in args] ssh_info = env.instance.init_ssh_key() ssh_info.pop('host') ssh_info.pop('user') ssh_args = env.instance.ssh_args_from_info(ssh_info) cmd_parts = ['rsync'] cmd_parts.extend(['-e', "ssh %s" % shjoin(ssh_args)]) cmd_parts.extend(args) cmd = shjoin(cmd_parts) return local(cmd, **kwargs)
<SYSTEM_TASK:> computes the name of the disk devices that are suitable <END_TASK> <USER_TASK:> Description: def devices(self): """ computes the name of the disk devices that are suitable installation targets by subtracting CDROM- and USB devices from the list of total mounts. """
install_devices = self.install_devices if 'bootstrap-system-devices' in env.instance.config: devices = set(env.instance.config['bootstrap-system-devices'].split()) else: devices = set(self.sysctl_devices) for sysctl_device in self.sysctl_devices: for install_device in install_devices: if install_device.startswith(sysctl_device): devices.remove(sysctl_device) return devices
<SYSTEM_TASK:> download bootstrap assets to control host. <END_TASK> <USER_TASK:> Description: def fetch_assets(self): """ download bootstrap assets to control host. If present on the control host they will be uploaded to the target host during bootstrapping. """
# allow overwrites from the commandline packages = set( env.instance.config.get('bootstrap-packages', '').split()) packages.update(['python27']) cmd = env.instance.config.get('bootstrap-local-download-cmd', 'wget -c -O "{0.local}" "{0.url}"') items = sorted(self.bootstrap_files.items()) for filename, asset in items: if asset.url: if not exists(dirname(asset.local)): os.makedirs(dirname(asset.local)) local(cmd.format(asset)) if filename == 'packagesite.txz': # add packages to download items.extend(self._fetch_packages(asset.local, packages))
<SYSTEM_TASK:> Returns all the info extracted from a resource section of the apipie json <END_TASK> <USER_TASK:> Description: def parse_resource_definition(resource_name, resource_dct): """ Returns all the info extracted from a resource section of the apipie json :param resource_name: Name of the resource that is defined by the section :param resrouce_dict: Dictionary as generated by apipie of the resource definition """
new_dict = { '__module__': resource_dct.get('__module__', __name__), '__doc__': resource_dct['full_description'], '_resource_name': resource_name, '_own_methods': set(), '_conflicting_methods': [], } # methods in foreign_methods are meant for other resources, # that is, the url and the resource field do not match /api/{resource} foreign_methods = {} # as defined per apipie gem, each method can have more than one api, # for example, /api/hosts can have the GET /api/hosts api and the GET # /api/hosts/:id api or DELETE /api/hosts for method in resource_dct['methods']: # set the docstring if it only has one api if not new_dict['__doc__'] and len(method['apis']) == 1: new_dict['__doc__'] = \ method['apis'][0]['short_description'] for api in method['apis']: api = MethodAPIDescription(resource_name, method, api) if api.resource != resource_name: # this means that the json apipie passed says that an # endpoint in the form: /api/{resource}/* belongs to # {different_resource}, we just put it under {resource} # later, storing it under _foreign_methods for now as we # might not have parsed {resource} yet functions = foreign_methods.setdefault(api.resource, {}) if api.name in functions: old_api = functions.get(api.name).defs # show only in debug the repeated but identical definitions log_method = logger.warning if api.url == old_api.url: log_method = logger.debug log_method( "There is a conflict trying to redefine a method " "for a foreign resource (%s): \n" "\tresource:\n" "\tapipie_resource: %s\n" "\tnew_api: %s\n" "\tnew_url: %s\n" "\told_api: %s\n" "\told_url: %s", api.name, resource_name, pprint.pformat(api), api.url, pprint.pformat(old_api), old_api.url, ) new_dict['_conflicting_methods'].append(api) continue functions[api.name] = api.generate_func() else: # it's an own method, resource and url match if api.name in new_dict['_own_methods']: old_api = new_dict.get(api.name).defs log_method = logger.warning # show only in debug the repeated but identical definitions if api.url == old_api.url: log_method = logger.debug log_method( "There is a conflict trying to redefine method " "(%s): \n" "\tapipie_resource: %s\n" "\tnew_api: %s\n" "\tnew_url: %s\n" "\told_api: %s\n" "\told_url: %s", api.name, resource_name, pprint.pformat(api), api.url, pprint.pformat(old_api), old_api.url, ) new_dict['_conflicting_methods'].append(api) continue new_dict['_own_methods'].add(api.name) new_dict[api.name] = api.generate_func() return new_dict, foreign_methods
<SYSTEM_TASK:> Returns the appropriate resource name for the given URL. <END_TASK> <USER_TASK:> Description: def parse_resource_from_url(self, url): """ Returns the appropriate resource name for the given URL. :param url: API URL stub, like: '/api/hosts' :return: Resource name, like 'hosts', or None if not found """
# special case for the api root if url == '/api': return 'api' elif url == '/katello': return 'katello' match = self.resource_pattern.match(url) if match: return match.groupdict().get('resource', None)
<SYSTEM_TASK:> Generate function for specific method and using specific api <END_TASK> <USER_TASK:> Description: def generate_func(self, as_global=False): """ Generate function for specific method and using specific api :param as_global: if set, will use the global function name, instead of the class method (usually {resource}_{class_method}) when defining the function """
keywords = [] params_def = [] params_doc = "" original_names = {} params = dict( (param['name'], param) for param in self.params ) # parse the url required params, as sometimes they are skipped in the # parameters list of the definition for param in self.url_params: if param not in params: param = { 'name': param, 'required': True, 'description': '', 'validator': '', } params[param['name']] = param else: params[param]['required'] = True # split required and non-required params for the definition req_params = [] nonreq_params = [] for param in six.itervalues(params): if param['required']: req_params.append(param) else: nonreq_params.append(param) for param in req_params + nonreq_params: params_doc += self.create_param_doc(param) + "\n" local_name = param['name'] # some params collide with python keywords, that's why we do # this switch (and undo it inside the function we generate) if param['name'] == 'except': local_name = 'except_' original_names[local_name] = param['name'] keywords.append(local_name) if param['required']: params_def.append("%s" % local_name) else: params_def.append("%s=None" % local_name) func_head = 'def {0}(self, {1}):'.format( as_global and self.get_global_method_name() or self.name, ', '.join(params_def) ) code_body = ( ' _vars_ = locals()\n' ' _url = self._fill_url("{url}", _vars_, {url_params})\n' ' _original_names = {original_names}\n' ' _kwargs = dict((_original_names[k], _vars_[k])\n' ' for k in {keywords} if _vars_[k])\n' ' return self._foreman.do_{http_method}(_url, _kwargs)') code_body = code_body.format( http_method=self.http_method.lower(), url=self.url, url_params=self.url_params, keywords=keywords, original_names=original_names, ) code = [ func_head, ' """', self.short_desc, '', params_doc, ' """', code_body, ] code = '\n'.join(code) six.exec_(code) function = locals()[self.name] # to ease debugging, all the funcs have the definitions attached setattr(function, 'defs', self) return function
<SYSTEM_TASK:> This function parses one of the elements of the definitions dict for a <END_TASK> <USER_TASK:> Description: def convert_plugin_def(http_method, funcs): """ This function parses one of the elements of the definitions dict for a plugin and extracts the relevant information :param http_method: HTTP method that uses (GET, POST, DELETE, ...) :param funcs: functions related to that HTTP method """
methods = [] if http_method not in ('GET', 'PUT', 'POST', 'DELETE'): logger.error( 'Plugin load failure, HTTP method %s unsupported.', http_method, ) return methods for fname, params in six.iteritems(funcs): method = { 'apis': [{'short_description': 'no-doc'}], 'params': [], } method['apis'][0]['http_method'] = http_method method['apis'][0]['api_url'] = '/api/' + fname method['name'] = fname for pname, pdef in six.iteritems(params): param = { 'name': pname, 'validator': "Must be %s" % pdef['ptype'], 'description': '', 'required': pdef['required'], } method['params'].append(param) methods.append(method) return methods
<SYSTEM_TASK:> Given a repo and optionally a base revision to start from, will return <END_TASK> <USER_TASK:> Description: def get_authors(repo_path, from_commit): """ Given a repo and optionally a base revision to start from, will return the list of authors. """
repo = dulwich.repo.Repo(repo_path) refs = get_refs(repo) start_including = False authors = set() if from_commit is None: start_including = True for commit_sha, children in reversed( get_children_per_first_parent(repo_path).items() ): commit = get_repo_object(repo, commit_sha) if ( start_including or commit_sha.startswith(from_commit) or fuzzy_matches_refs(from_commit, refs.get(commit_sha, [])) ): authors.add(commit.author.decode()) for child in children: authors.add(child.author.decode()) start_including = True return '\n'.join(sorted(authors))
<SYSTEM_TASK:> Emit a spout Tuple message. <END_TASK> <USER_TASK:> Description: def emit( self, tup, tup_id=None, stream=None, direct_task=None, need_task_ids=False ): """Emit a spout Tuple message. :param tup: the Tuple to send to Storm, should contain only JSON-serializable data. :type tup: list or tuple :param tup_id: the ID for the Tuple. Leave this blank for an unreliable emit. :type tup_id: str :param stream: ID of the stream this Tuple should be emitted to. Leave empty to emit to the default stream. :type stream: str :param direct_task: the task to send the Tuple to if performing a direct emit. :type direct_task: int :param need_task_ids: indicate whether or not you'd like the task IDs the Tuple was emitted (default: ``False``). :type need_task_ids: bool :returns: ``None``, unless ``need_task_ids=True``, in which case it will be a ``list`` of task IDs that the Tuple was sent to if. Note that when specifying direct_task, this will be equal to ``[direct_task]``. """
return super(Spout, self).emit( tup, tup_id=tup_id, stream=stream, direct_task=direct_task, need_task_ids=need_task_ids, )
<SYSTEM_TASK:> Called when a bolt acknowledges a Tuple in the topology. <END_TASK> <USER_TASK:> Description: def ack(self, tup_id): """Called when a bolt acknowledges a Tuple in the topology. :param tup_id: the ID of the Tuple that has been fully acknowledged in the topology. :type tup_id: str """
    self.failed_tuples.pop(tup_id, None)
    try:
        del self.unacked_tuples[tup_id]
    except KeyError:
        self.logger.error("Received ack for unknown tuple ID: %r", tup_id)
<SYSTEM_TASK:> Called when a Tuple fails in the topology <END_TASK> <USER_TASK:> Description: def fail(self, tup_id): """Called when a Tuple fails in the topology A reliable spout will replay a failed tuple up to ``max_fails`` times. :param tup_id: the ID of the Tuple that has failed in the topology either due to a bolt calling ``fail()`` or a Tuple timing out. :type tup_id: str """
saved_args = self.unacked_tuples.get(tup_id) if saved_args is None: self.logger.error("Received fail for unknown tuple ID: %r", tup_id) return tup, stream, direct_task, need_task_ids = saved_args if self.failed_tuples[tup_id] < self.max_fails: self.emit( tup, tup_id=tup_id, stream=stream, direct_task=direct_task, need_task_ids=need_task_ids, ) self.failed_tuples[tup_id] += 1 else: # Just pretend we got an ack when we exceed retry limit self.logger.info( "Acking tuple ID %r after it exceeded retry limit " "(%r)", tup_id, self.max_fails, ) self.ack(tup_id)
<SYSTEM_TASK:> Emit a spout Tuple & add metadata about it to `unacked_tuples`. <END_TASK> <USER_TASK:> Description: def emit( self, tup, tup_id=None, stream=None, direct_task=None, need_task_ids=False ): """Emit a spout Tuple & add metadata about it to `unacked_tuples`. In order for this to work, `tup_id` is a required parameter. See :meth:`Bolt.emit`. """
if tup_id is None: raise ValueError( "You must provide a tuple ID when emitting with a " "ReliableSpout in order for the tuple to be " "tracked." ) args = (tup, stream, direct_task, need_task_ids) self.unacked_tuples[tup_id] = args return super(ReliableSpout, self).emit( tup, tup_id=tup_id, stream=stream, direct_task=direct_task, need_task_ids=need_task_ids, )
<SYSTEM_TASK:> Handler to drop us into a remote debugger upon receiving SIGUSR1 <END_TASK> <USER_TASK:> Description: def remote_pdb_handler(signum, frame): """ Handler to drop us into a remote debugger upon receiving SIGUSR1 """
    try:
        from remote_pdb import RemotePdb
        rdb = RemotePdb(host="127.0.0.1", port=0)
        rdb.set_trace(frame=frame)
    except ImportError:
        log.warning(
            "remote_pdb unavailable. Please install remote_pdb to "
            "allow remote debugging."
        )
    # Restore signal handler for later
    signal.signal(signum, remote_pdb_handler)
<SYSTEM_TASK:> Add helpful instance variables to component after initial handshake <END_TASK> <USER_TASK:> Description: def _setup_component(self, storm_conf, context): """Add helpful instance variables to component after initial handshake with Storm. Also configure logging. """
self.topology_name = storm_conf.get("topology.name", "") self.task_id = context.get("taskid", "") self.component_name = context.get("componentid") # If using Storm before 0.10.0 componentid is not available if self.component_name is None: self.component_name = context.get("task->component", {}).get( str(self.task_id), "" ) self.debug = storm_conf.get("topology.debug", False) self.storm_conf = storm_conf self.context = context # Set up logging self.logger = logging.getLogger(".".join((__name__, self.component_name))) log_path = self.storm_conf.get("pystorm.log.path") log_file_name = self.storm_conf.get( "pystorm.log.file", "pystorm_{topology_name}" "_{component_name}" "_{task_id}" "_{pid}.log", ) root_log = logging.getLogger() log_level = self.storm_conf.get("pystorm.log.level", "info") if log_path: max_bytes = self.storm_conf.get("pystorm.log.max_bytes", 1000000) # 1 MB backup_count = self.storm_conf.get("pystorm.log.backup_count", 10) log_file = join( log_path, ( log_file_name.format( topology_name=self.topology_name, component_name=self.component_name, task_id=self.task_id, pid=self.pid, ) ), ) handler = RotatingFileHandler( log_file, maxBytes=max_bytes, backupCount=backup_count ) log_format = self.storm_conf.get( "pystorm.log.format", "%(asctime)s - %(name)s - " "%(levelname)s - %(message)s", ) else: self.log( "pystorm StormHandler logging enabled, so all messages at " 'levels greater than "pystorm.log.level" ({}) will be sent' " to Storm.".format(log_level) ) handler = StormHandler(self.serializer) log_format = self.storm_conf.get( "pystorm.log.format", "%(asctime)s - %(name)s - " "%(message)s" ) formatter = logging.Formatter(log_format) log_level = _PYTHON_LOG_LEVELS.get(log_level, logging.INFO) if self.debug: # potentially override logging that was provided if # topology.debug was set to true log_level = logging.DEBUG handler.setLevel(log_level) handler.setFormatter(formatter) root_log.addHandler(handler) self.logger.setLevel(log_level) logging.getLogger("pystorm").setLevel(log_level) # Redirect stdout to ensure that print statements/functions # won't disrupt the multilang protocol if self.serializer.output_stream == sys.stdout: sys.stdout = LogStream(logging.getLogger("pystorm.stdout"))
<SYSTEM_TASK:> Read and process an initial handshake message from Storm. <END_TASK> <USER_TASK:> Description: def read_handshake(self): """Read and process an initial handshake message from Storm."""
    msg = self.read_message()
    pid_dir, _conf, _context = msg["pidDir"], msg["conf"], msg["context"]

    # Write a blank PID file out to the pidDir
    open(join(pid_dir, str(self.pid)), "w").close()
    self.send_message({"pid": self.pid})

    return _conf, _context
<SYSTEM_TASK:> Send a message to Storm via stdout. <END_TASK> <USER_TASK:> Description: def send_message(self, message): """Send a message to Storm via stdout."""
if not isinstance(message, dict): logger = self.logger if self.logger else log logger.error( "%s.%d attempted to send a non dict message to Storm: " "%r", self.component_name, self.pid, message, ) return self.serializer.send_message(message)
<SYSTEM_TASK:> Report an exception back to Storm via logging. <END_TASK> <USER_TASK:> Description: def raise_exception(self, exception, tup=None): """Report an exception back to Storm via logging. :param exception: a Python exception. :param tup: a :class:`Tuple` object. """
if tup: message = ( "Python {exception_name} raised while processing Tuple " "{tup!r}\n{traceback}" ) else: message = "Python {exception_name} raised\n{traceback}" message = message.format( exception_name=exception.__class__.__name__, tup=tup, traceback=format_exc() ) self.send_message({"command": "error", "msg": str(message)}) self.send_message({"command": "sync"})
<SYSTEM_TASK:> Log a message to Storm optionally providing a logging level. <END_TASK> <USER_TASK:> Description: def log(self, message, level=None): """Log a message to Storm optionally providing a logging level. :param message: the log message to send to Storm. :type message: str :param level: the logging level that Storm should use when writing the ``message``. Can be one of: trace, debug, info, warn, or error (default: ``info``). :type level: str .. warning:: This will send your message to Storm regardless of what level you specify. In almost all cases, you are better of using ``Component.logger`` and not setting ``pystorm.log.path``, because that will use a :class:`pystorm.component.StormHandler` to do the filtering on the Python side (instead of on the Java side after taking the time to serialize your message and send it to Storm). """
    level = _STORM_LOG_LEVELS.get(level, _STORM_LOG_INFO)
    self.send_message({"command": "log", "msg": str(message), "level": level})
<SYSTEM_TASK:> Main run loop for all components. <END_TASK> <USER_TASK:> Description: def run(self): """Main run loop for all components. Performs initial handshake with Storm and reads Tuples handing them off to subclasses. Any exceptions are caught and logged back to Storm prior to the Python process exiting. .. warning:: Subclasses should **not** override this method. """
storm_conf, context = self.read_handshake() self._setup_component(storm_conf, context) self.initialize(storm_conf, context) while True: try: self._run() except StormWentAwayError: log.info("Exiting because parent Storm process went away.") self._exit(2) except Exception as e: log_msg = "Exception in {}.run()".format(self.__class__.__name__) exc_info = sys.exc_info() try: self.logger.error(log_msg, exc_info=True) self._handle_run_exception(e) except StormWentAwayError: log.error(log_msg, exc_info=exc_info) log.info("Exiting because parent Storm process went away.") self._exit(2) except: log.error(log_msg, exc_info=exc_info) log.error( "While trying to handle previous exception...", exc_info=sys.exc_info(), ) if self.exit_on_exception: self._exit(1)
<SYSTEM_TASK:> Properly kill Python process including zombie threads. <END_TASK> <USER_TASK:> Description: def _exit(self, status_code): """Properly kill Python process including zombie threads."""
    # If there are active threads still running infinite loops, sys.exit
    # won't kill them but os._exit will. os._exit skips calling cleanup
    # handlers, flushing stdio buffers, etc.
    exit_func = os._exit if threading.active_count() > 1 else sys.exit
    exit_func(status_code)
<SYSTEM_TASK:> The Storm multilang protocol consists of JSON messages followed by <END_TASK> <USER_TASK:> Description: def read_message(self): """The Storm multilang protocol consists of JSON messages followed by a newline and "end\n". All of Storm's messages (for either bolts or spouts) should be of the form:: '<command or task_id form prior emit>\\nend\\n' Command example, an incoming Tuple to a bolt:: '{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n' Command example for a spout to emit its next Tuple:: '{"command": "next"}\\nend\\n' Example, the task IDs a prior emit was sent to:: '[12, 22, 24]\\nend\\n' The edge case of where we read ``''`` from ``input_stream`` indicating EOF, usually means that communication with the supervisor has been severed. """
msg = "" num_blank_lines = 0 while True: # readline will return trailing \n so that output is unambigious, we # should only have line == '' if we're at EOF with self._reader_lock: line = self.input_stream.readline() if line == "end\n": break elif line == "": raise StormWentAwayError() elif line == "\n": num_blank_lines += 1 if num_blank_lines % 1000 == 0: log.warn( "While trying to read a command or pending task " "ID, Storm has instead sent %s '\\n' messages.", num_blank_lines, ) continue msg = "{}{}\n".format(msg, line[0:-1]) try: return json.loads(msg) except Exception: log.error("JSON decode error for message: %r", msg, exc_info=True) raise
<SYSTEM_TASK:> Read a tuple from the pipe to Storm. <END_TASK> <USER_TASK:> Description: def read_tuple(self): """Read a tuple from the pipe to Storm."""
cmd = self.read_command() source = cmd["comp"] stream = cmd["stream"] values = cmd["tuple"] val_type = self._source_tuple_types[source].get(stream) return Tuple( cmd["id"], source, stream, cmd["task"], tuple(values) if val_type is None else val_type(*values), )
<SYSTEM_TASK:> Indicate that processing of a Tuple has succeeded. <END_TASK> <USER_TASK:> Description: def ack(self, tup): """Indicate that processing of a Tuple has succeeded. :param tup: the Tuple to acknowledge. :type tup: :class:`str` or :class:`pystorm.component.Tuple` """
    tup_id = tup.id if isinstance(tup, Tuple) else tup
    self.send_message({"command": "ack", "id": tup_id})
<SYSTEM_TASK:> Indicate that processing of a Tuple has failed. <END_TASK> <USER_TASK:> Description: def fail(self, tup): """Indicate that processing of a Tuple has failed. :param tup: the Tuple to fail (its ``id`` if ``str``). :type tup: :class:`str` or :class:`pystorm.component.Tuple` """
    tup_id = tup.id if isinstance(tup, Tuple) else tup
    self.send_message({"command": "fail", "id": tup_id})
<SYSTEM_TASK:> Modified emit that will not return task IDs after emitting. <END_TASK> <USER_TASK:> Description: def emit(self, tup, **kwargs): """Modified emit that will not return task IDs after emitting. See :class:`pystorm.component.Bolt` for more information. :returns: ``None``. """
kwargs["need_task_ids"] = False return super(BatchingBolt, self).emit(tup, **kwargs)
<SYSTEM_TASK:> Increment tick counter, and call ``process_batch`` for all current <END_TASK> <USER_TASK:> Description: def process_tick(self, tick_tup): """Increment tick counter, and call ``process_batch`` for all current batches if tick counter exceeds ``ticks_between_batches``. See :class:`pystorm.component.Bolt` for more information. .. warning:: This method should **not** be overriden. If you want to tweak how Tuples are grouped into batches, override ``group_key``. """
    self._tick_counter += 1
    # ACK tick Tuple immediately, since it's just responsible for counter
    self.ack(tick_tup)
    if self._tick_counter > self.ticks_between_batches and self._batches:
        self.process_batches()
        self._tick_counter = 0
<SYSTEM_TASK:> Iterate through all batches, call process_batch on them, and ack. <END_TASK> <USER_TASK:> Description: def process_batches(self): """Iterate through all batches, call process_batch on them, and ack. Separated out for the rare instances when we want to subclass BatchingBolt and customize what mechanism causes batches to be processed. """
for key, batch in iteritems(self._batches): self._current_tups = batch self._current_key = key self.process_batch(key, batch) if self.auto_ack: for tup in batch: self.ack(tup) # Set current batch to [] so that we know it was acked if a # later batch raises an exception self._current_key = None self._batches[key] = [] self._batches = defaultdict(list)
<SYSTEM_TASK:> Group non-tick Tuples into batches by ``group_key``. <END_TASK> <USER_TASK:> Description: def process(self, tup): """Group non-tick Tuples into batches by ``group_key``. .. warning:: This method should **not** be overriden. If you want to tweak how Tuples are grouped into batches, override ``group_key``. """
    # Append latest Tuple to batches
    group_key = self.group_key(tup)
    self._batches[group_key].append(tup)
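A hedged sketch of the customization point mentioned above: a hypothetical subclass overrides ``group_key`` so Tuples are batched by their first value (the ``process_batch``/``emit`` signatures are assumed from the surrounding code):

class UserBatchingBolt(BatchingBolt):
    def group_key(self, tup):
        # Batch Tuples per their first field (e.g. a user id) instead of
        # lumping everything into one batch.
        return tup.values[0]

    def process_batch(self, key, tups):
        # Called with each group key and the Tuples collected for it.
        self.emit([key, len(tups)])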
<SYSTEM_TASK:> The inside of ``_batch_entry``'s infinite loop. <END_TASK> <USER_TASK:> Description: def _batch_entry_run(self): """The inside of ``_batch_entry``'s infinite loop. Separated out so it can be properly unit tested. """
    time.sleep(self.secs_between_batches)
    with self._batch_lock:
        self.process_batches()
<SYSTEM_TASK:> Serialize a message dictionary and write it to the output stream. <END_TASK> <USER_TASK:> Description: def send_message(self, msg_dict): """Serialize a message dictionary and write it to the output stream."""
with self._writer_lock: try: self.output_stream.flush() self.output_stream.write(self.serialize_dict(msg_dict)) self.output_stream.flush() except IOError: raise StormWentAwayError() except: log.exception("Failed to send message: %r", msg_dict)
<SYSTEM_TASK:> Convert the FFI result to Python data structures <END_TASK> <USER_TASK:> Description: def _void_array_to_list(restuple, _func, _args): """ Convert the FFI result to Python data structures """
shape = (restuple.e.len, 1) array_size = np.prod(shape) mem_size = 8 * array_size array_str_e = string_at(restuple.e.data, mem_size) array_str_n = string_at(restuple.n.data, mem_size) ls_e = np.frombuffer(array_str_e, float, array_size).tolist() ls_n = np.frombuffer(array_str_n, float, array_size).tolist() return ls_e, ls_n
<SYSTEM_TASK:> Load a data file and return it as a list of lines. <END_TASK> <USER_TASK:> Description: def load_data_file(filename, encoding='utf-8'): """Load a data file and return it as a list of lines. Parameters: filename: The name of the file (no directories included). encoding: The file encoding. Defaults to utf-8. """
    data = pkgutil.get_data(PACKAGE_NAME, os.path.join(DATA_DIR, filename))
    return data.decode(encoding).splitlines()
<SYSTEM_TASK:> Load the word and character mapping data into a dictionary. <END_TASK> <USER_TASK:> Description: def _load_data(): """Load the word and character mapping data into a dictionary. In the data files, each line is formatted like this: HANZI PINYIN_READING/PINYIN_READING So, lines need to be split by '\t' and then the Pinyin readings need to be split by '/'. """
data = {} for name, file_name in (('words', 'hanzi_pinyin_words.tsv'), ('characters', 'hanzi_pinyin_characters.tsv')): # Split the lines by tabs: [[hanzi, pinyin]...]. lines = [line.split('\t') for line in dragonmapper.data.load_data_file(file_name)] # Make a dictionary: {hanzi: [pinyin, pinyin]...}. data[name] = {hanzi: pinyin.split('/') for hanzi, pinyin in lines} return data
<SYSTEM_TASK:> Return the Pinyin reading for a Chinese word. <END_TASK> <USER_TASK:> Description: def _hanzi_to_pinyin(hanzi): """Return the Pinyin reading for a Chinese word. If the given string *hanzi* matches a CC-CEDICT word, the return value is formatted like this: [WORD_READING1, WORD_READING2, ...] If the given string *hanzi* doesn't match a CC-CEDICT word, the return value is formatted like this: [[CHAR_READING1, CHAR_READING2 ...], ...] When returning character readings, if a character wasn't recognized, the original character is returned, e.g. [[CHAR_READING1, ...], CHAR, ...] """
    try:
        return _HANZI_PINYIN_MAP['words'][hanzi]
    except KeyError:
        return [_CHARACTERS.get(character, character) for character in hanzi]
<SYSTEM_TASK:> Convert a string's Chinese characters to Pinyin readings. <END_TASK> <USER_TASK:> Description: def to_pinyin(s, delimiter=' ', all_readings=False, container='[]', accented=True): """Convert a string's Chinese characters to Pinyin readings. *s* is a string containing Chinese characters. *accented* is a boolean value indicating whether to return accented or numbered Pinyin readings. *delimiter* is the character used to indicate word boundaries in *s*. This is used to differentiate between words and characters so that a more accurate reading can be returned. *all_readings* is a boolean value indicating whether or not to return all possible readings in the case of words/characters that have multiple readings. *container* is a two character string that is used to enclose words/characters if *all_readings* is ``True``. The default ``'[]'`` is used like this: ``'[READING1/READING2]'``. Characters not recognized as Chinese are left untouched. """
hanzi = s pinyin = '' # Process the given string. while hanzi: # Get the next match in the given string. match = re.search('[^%s%s]+' % (delimiter, zhon.hanzi.punctuation), hanzi) # There are no more matches, but the string isn't finished yet. if match is None and hanzi: pinyin += hanzi break match_start, match_end = match.span() # Process the punctuation marks that occur before the match. if match_start > 0: pinyin += hanzi[0:match_start] # Get the Chinese word/character readings. readings = _hanzi_to_pinyin(match.group()) # Process the returned word readings. if match.group() in _WORDS: if all_readings: reading = _enclose_readings(container, _READING_SEPARATOR.join(readings)) else: reading = readings[0] pinyin += reading # Process the returned character readings. else: # Process each character individually. for character in readings: # Don't touch unrecognized characters. if isinstance(character, str): pinyin += character # Format multiple readings. elif isinstance(character, list) and all_readings: pinyin += _enclose_readings( container, _READING_SEPARATOR.join(character)) # Select and format the most common reading. elif isinstance(character, list) and not all_readings: # Add an apostrophe to separate syllables. if (pinyin and character[0][0] in zhon.pinyin.vowels and pinyin[-1] in zhon.pinyin.lowercase): pinyin += "'" pinyin += character[0] # Move ahead in the given string. hanzi = hanzi[match_end:] if accented: return pinyin else: return accented_to_numbered(pinyin)
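A short, hedged doctest-style example (the exact readings come from the bundled CC-CEDICT-derived data, so they are illustrative rather than guaranteed):

>>> to_pinyin('你好')
'nǐhǎo'
>>> to_pinyin('你好', accented=False)
'ni3hao3'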
<SYSTEM_TASK:> Convert a string's Chinese characters to Zhuyin readings. <END_TASK> <USER_TASK:> Description: def to_zhuyin(s, delimiter=' ', all_readings=False, container='[]'): """Convert a string's Chinese characters to Zhuyin readings. *s* is a string containing Chinese characters. *delimiter* is the character used to indicate word boundaries in *s*. This is used to differentiate between words and characters so that a more accurate reading can be returned. *all_readings* is a boolean value indicating whether or not to return all possible readings in the case of words/characters that have multiple readings. *container* is a two character string that is used to enclose words/characters if *all_readings* is ``True``. The default ``'[]'`` is used like this: ``'[READING1/READING2]'``. Characters not recognized as Chinese are left untouched. """
    numbered_pinyin = to_pinyin(s, delimiter, all_readings, container, False)
    zhuyin = pinyin_to_zhuyin(numbered_pinyin)
    return zhuyin
<SYSTEM_TASK:> Convert a string's Chinese characters to IPA. <END_TASK> <USER_TASK:> Description: def to_ipa(s, delimiter=' ', all_readings=False, container='[]'): """Convert a string's Chinese characters to IPA. *s* is a string containing Chinese characters. *delimiter* is the character used to indicate word boundaries in *s*. This is used to differentiate between words and characters so that a more accurate reading can be returned. *all_readings* is a boolean value indicating whether or not to return all possible readings in the case of words/characters that have multiple readings. *container* is a two character string that is used to enclose words/characters if *all_readings* is ``True``. The default ``'[]'`` is used like this: ``'[READING1/READING2]'``. Characters not recognized as Chinese are left untouched. """
    numbered_pinyin = to_pinyin(s, delimiter, all_readings, container, False)
    ipa = pinyin_to_ipa(numbered_pinyin)
    return ipa
<SYSTEM_TASK:> Load the transcription mapping data into a dictionary. <END_TASK> <USER_TASK:> Description: def _load_data(): """Load the transcription mapping data into a dictionary."""
lines = dragonmapper.data.load_data_file('transcriptions.csv')
pinyin_map, zhuyin_map, ipa_map = {}, {}, {}
for line in lines:
    p, z, i = line.split(',')
    pinyin_map[p] = {'Zhuyin': z, 'IPA': i}
    zhuyin_map[z] = {'Pinyin': p, 'IPA': i}
    ipa_map[i] = {'Pinyin': p, 'Zhuyin': z}
return pinyin_map, zhuyin_map, ipa_map
<SYSTEM_TASK:> Convert a numbered Pinyin vowel to an accented Pinyin vowel. <END_TASK> <USER_TASK:> Description: def _numbered_vowel_to_accented(vowel, tone): """Convert a numbered Pinyin vowel to an accented Pinyin vowel."""
if isinstance(tone, int):
    tone = str(tone)
return _PINYIN_TONES[vowel + tone]
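For reference, a small sketch of the lookup this helper performs. The ``'a3'`` -> ``'ǎ'`` and ``'u1'`` -> ``'ū'`` pairings follow standard Pinyin tone marking; the exact contents of the ``_PINYIN_TONES`` table are assumed rather than shown here:

    >>> _numbered_vowel_to_accented('a', 3)   # an int tone is coerced to a string first
    'ǎ'
    >>> _numbered_vowel_to_accented('u', '1')
    'ū'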
<SYSTEM_TASK:> Convert an accented Pinyin vowel to a numbered Pinyin vowel. <END_TASK> <USER_TASK:> Description: def _accented_vowel_to_numbered(vowel): """Convert an accented Pinyin vowel to a numbered Pinyin vowel."""
for numbered_vowel, accented_vowel in _PINYIN_TONES.items():
    if vowel == accented_vowel:
        return tuple(numbered_vowel)
<SYSTEM_TASK:> Return the syllable and tone of a numbered Pinyin syllable. <END_TASK> <USER_TASK:> Description: def _parse_numbered_syllable(unparsed_syllable): """Return the syllable and tone of a numbered Pinyin syllable."""
tone_number = unparsed_syllable[-1]
if not tone_number.isdigit():
    syllable, tone = unparsed_syllable, '5'
elif tone_number == '0':
    syllable, tone = unparsed_syllable[:-1], '5'
elif tone_number in '12345':
    syllable, tone = unparsed_syllable[:-1], tone_number
else:
    raise ValueError("Invalid syllable: %s" % unparsed_syllable)
return syllable, tone
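The behaviour follows directly from the branches above; a neutral tone can be written with a trailing ``'0'``, ``'5'``, or no digit at all:

    >>> _parse_numbered_syllable('ma3')
    ('ma', '3')
    >>> _parse_numbered_syllable('ma0')
    ('ma', '5')
    >>> _parse_numbered_syllable('ma')
    ('ma', '5')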
<SYSTEM_TASK:> Return the syllable and tone of an accented Pinyin syllable. <END_TASK> <USER_TASK:> Description: def _parse_accented_syllable(unparsed_syllable): """Return the syllable and tone of an accented Pinyin syllable. Any accented vowels are returned without their accents. Implements the following algorithm: 1. If the syllable has an accent mark, convert that vowel to a regular vowel and add the tone to the end of the syllable. 2. Otherwise, assume the syllable is tone 5 (no accent marks). """
if unparsed_syllable[0] == '\u00B7':  # Special case for middle dot tone mark.
    return unparsed_syllable[1:], '5'
for character in unparsed_syllable:
    if character in _ACCENTED_VOWELS:
        vowel, tone = _accented_vowel_to_numbered(character)
        return unparsed_syllable.replace(character, vowel), tone
return unparsed_syllable, '5'
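A short sketch of the same function on accented input. It assumes ``'á'`` is listed in ``_ACCENTED_VOWELS`` and maps back to ``('a', '2')``, which matches standard Pinyin tone marks:

    >>> _parse_accented_syllable('má')
    ('ma', '2')
    >>> _parse_accented_syllable('ma')   # no accent mark -> neutral tone
    ('ma', '5')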
<SYSTEM_TASK:> Return the syllable and tone of a Zhuyin syllable. <END_TASK> <USER_TASK:> Description: def _parse_zhuyin_syllable(unparsed_syllable): """Return the syllable and tone of a Zhuyin syllable."""
zhuyin_tone = unparsed_syllable[-1]
if zhuyin_tone in zhon.zhuyin.characters:
    syllable, tone = unparsed_syllable, '1'
elif zhuyin_tone in zhon.zhuyin.marks:
    for tone_number, tone_mark in _ZHUYIN_TONES.items():
        if zhuyin_tone == tone_mark:
            syllable, tone = unparsed_syllable[:-1], tone_number
else:
    raise ValueError("Invalid syllable: %s" % unparsed_syllable)
return syllable, tone
<SYSTEM_TASK:> Return the syllable and tone of an IPA syllable. <END_TASK> <USER_TASK:> Description: def _parse_ipa_syllable(unparsed_syllable): """Return the syllable and tone of an IPA syllable."""
ipa_tone = re.search('[%(marks)s]+' % {'marks': _IPA_MARKS},
                     unparsed_syllable)
if not ipa_tone:
    syllable, tone = unparsed_syllable, '5'
else:
    for tone_number, tone_mark in _IPA_TONES.items():
        if ipa_tone.group() == tone_mark:
            tone = tone_number
            break
    syllable = unparsed_syllable[0:ipa_tone.start()]
return syllable, tone
<SYSTEM_TASK:> Restore a lowercase string's characters to their original case. <END_TASK> <USER_TASK:> Description: def _restore_case(s, memory): """Restore a lowercase string's characters to their original case."""
cased_s = []
for i, c in enumerate(s):
    if i + 1 > len(memory):
        break
    cased_s.append(c if memory[i] else c.upper())
return ''.join(cased_s)
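The ``memory`` list records, per character, whether the original character was lowercase; ``True`` keeps the character lowercase and ``False`` upper-cases it:

    >>> _restore_case('hello', [False, True, True, True, True])
    'Hello'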
<SYSTEM_TASK:> Convert a string's syllables to a different transcription system. <END_TASK> <USER_TASK:> Description: def _convert(s, re_pattern, syllable_function, add_apostrophes=False, remove_apostrophes=False, separate_syllables=False): """Convert a string's syllables to a different transcription system."""
original = s new = '' while original: match = re.search(re_pattern, original, re.IGNORECASE | re.UNICODE) if match is None and original: # There are no more matches, but the given string isn't fully # processed yet. new += original break match_start, match_end = match.span() if match_start > 0: # Handle extra characters before matched syllable. if (new and remove_apostrophes and match_start == 1 and original[0] == "'"): pass # Remove the apostrophe between Pinyin syllables. if separate_syllables: # Separate syllables by a space. new += ' ' else: new += original[0:match_start] else: # Matched syllable starts immediately. if new and separate_syllables: # Separate syllables by a space. new += ' ' elif (new and add_apostrophes and match.group()[0].lower() in _UNACCENTED_VOWELS): new += "'" # Convert the matched syllable. new += syllable_function(match.group()) original = original[match_end:] return new
<SYSTEM_TASK:> Check if a re pattern expression matches an entire string. <END_TASK> <USER_TASK:> Description: def _is_pattern_match(re_pattern, s): """Check if a re pattern expression matches an entire string."""
match = re.match(re_pattern, s, re.I)
return match.group() == s if match else False
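Because the span matched from the start of the string is compared against the whole string, partial matches do not count:

    >>> _is_pattern_match('[a-z]+', 'hello')
    True
    >>> _is_pattern_match('[a-z]+', 'hello2')
    False
    >>> _is_pattern_match('[a-z]+', '2hello')
    False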
<SYSTEM_TASK:> Identify a given string's transcription system. <END_TASK> <USER_TASK:> Description: def identify(s): """Identify a given string's transcription system. *s* is the string to identify. The string is checked to see if its contents are valid Pinyin, Zhuyin, or IPA. The :data:`PINYIN`, :data:`ZHUYIN`, and :data:`IPA` constants are returned to indicate the string's identity. If *s* is not a valid transcription system, then :data:`UNKNOWN` is returned. When checking for valid Pinyin or Zhuyin, testing is done on a syllable level, not a character level. For example, just because a string is composed of characters used in Pinyin, doesn't mean that it will identify as Pinyin; it must actually consist of valid Pinyin syllables. The same applies for Zhuyin. When checking for IPA, testing is only done on a character level. In other words, a string just needs to consist of Chinese IPA characters in order to identify as IPA. """
if is_pinyin(s):
    return PINYIN
elif is_zhuyin(s):
    return ZHUYIN
elif is_ipa(s):
    return IPA
else:
    return UNKNOWN
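A hedged usage sketch, assuming ``identify`` and the constants are importable from ``dragonmapper.transcriptions`` as in the upstream package:

    from dragonmapper import transcriptions

    transcriptions.identify('nǐ hǎo') == transcriptions.PINYIN       # expected: True
    transcriptions.identify('ㄋㄧˇ ㄏㄠˇ') == transcriptions.ZHUYIN   # expected: True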
<SYSTEM_TASK:> Accept an objective function for optimization. <END_TASK> <USER_TASK:> Description: def prepare(self, f): """Accept an objective function for optimization."""
self.g = autograd.grad(f)
self.h = autograd.hessian(f)
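``prepare`` only caches the gradient and Hessian callables; a minimal sketch of what those callables return for a simple quadratic objective, using the ``autograd`` package the snippet relies on:

    import autograd
    import autograd.numpy as np

    def f(x):
        return np.sum(x ** 2)

    g = autograd.grad(f)
    h = autograd.hessian(f)

    g(np.array([1.0, 2.0]))   # -> array([2., 4.])
    h(np.array([1.0, 2.0]))   # -> 2 * identity matrix, shape (2, 2)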
<SYSTEM_TASK:> Calculate a position of the end-effector and return it. <END_TASK> <USER_TASK:> Description: def solve(self, angles): """Calculate a position of the end-effector and return it."""
return reduce(
    lambda a, m: np.dot(m, a),
    reversed(self._matrices(angles)),
    np.array([0., 0., 0., 1.])
)[:3]
<SYSTEM_TASK:> Return translation matrix in homogeneous coordinates. <END_TASK> <USER_TASK:> Description: def matrix(self, _): """Return translation matrix in homogeneous coordinates."""
x, y, z = self.coord
return np.array([
    [1., 0., 0., x],
    [0., 1., 0., y],
    [0., 0., 1., z],
    [0., 0., 0., 1.]
])
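In homogeneous coordinates this matrix simply shifts a point by ``self.coord``; a small self-contained check with a hypothetical offset of (2, 0, -1):

    import numpy as np

    t = np.array([
        [1., 0., 0., 2.],
        [0., 1., 0., 0.],
        [0., 0., 1., -1.],
        [0., 0., 0., 1.]
    ])
    point = np.array([1., 1., 1., 1.])   # (x, y, z, 1) in homogeneous form
    np.dot(t, point)                     # -> array([3., 1., 0., 1.])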
<SYSTEM_TASK:> Return rotation matrix in homogeneous coordinates. <END_TASK> <USER_TASK:> Description: def matrix(self, angle): """Return rotation matrix in homogeneous coordinates."""
_rot_mat = {
    'x': self._x_rot,
    'y': self._y_rot,
    'z': self._z_rot
}
return _rot_mat[self.axis](angle)
<SYSTEM_TASK:> Return the version number of the Lending Club Investor tool <END_TASK> <USER_TASK:> Description: def version(self): """ Return the version number of the Lending Club Investor tool Returns ------- string The version number string """
this_path = os.path.dirname(os.path.realpath(__file__))
version_file = os.path.join(this_path, 'VERSION')
return open(version_file).read().strip()
<SYSTEM_TASK:> Attempt to authenticate the user. <END_TASK> <USER_TASK:> Description: def authenticate(self, email=None, password=None): """ Attempt to authenticate the user. Parameters ---------- email : string The email of a user on Lending Club password : string The user's password, for authentication. Returns ------- boolean True if the user authenticated or raises an exception if not Raises ------ session.AuthenticationError If authentication failed session.NetworkError If a network error occurred """
if self.session.authenticate(email, password):
    return True
<SYSTEM_TASK:> Returns the account cash balance available for investing <END_TASK> <USER_TASK:> Description: def get_cash_balance(self): """ Returns the account cash balance available for investing Returns ------- float The cash balance in your account. """
cash = False try: response = self.session.get('/browse/cashBalanceAj.action') json_response = response.json() if self.session.json_success(json_response): self.__log('Cash available: {0}'.format(json_response['cashBalance'])) cash_value = json_response['cashBalance'] # Convert currency to float value # Match values like $1,000.12 or 1,0000$ cash_match = re.search('^[^0-9]?([0-9\.,]+)[^0-9]?', cash_value) if cash_match: cash_str = cash_match.group(1) cash_str = cash_str.replace(',', '') cash = float(cash_str) else: self.__log('Could not get cash balance: {0}'.format(response.text)) except Exception as e: self.__log('Could not get the cash balance on the account: Error: {0}\nJSON: {1}'.format(str(e), response.text)) raise e return cash
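The currency parsing relies on a small regular expression; a standalone sketch of just that step, with the session call omitted and a made-up balance string:

    import re

    cash_value = '$1,000.12'
    cash_match = re.search('^[^0-9]?([0-9\.,]+)[^0-9]?', cash_value)
    if cash_match:
        cash = float(cash_match.group(1).replace(',', ''))
    # cash == 1000.12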
<SYSTEM_TASK:> Return all the loan notes you've already invested in. By default it'll return 100 results at a time. <END_TASK> <USER_TASK:> Description: def my_notes(self, start_index=0, limit=100, get_all=False, sort_by='loanId', sort_dir='asc'): """ Return all the loan notes you've already invested in. By default it'll return 100 results at a time. Parameters ---------- start_index : int, optional The result index to start on. By default only 100 records will be returned at a time, so use this to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200. (default is 0) limit : int, optional The number of results to return per request. (default is 100) get_all : boolean, optional Return all results in one request, instead of 100 per request. sort_by : string, optional What key to sort on sort_dir : {'asc', 'desc'}, optional Which direction to sort Returns ------- dict A dictionary with a list of matching notes on the `loans` key """
index = start_index notes = { 'loans': [], 'total': 0, 'result': 'success' } while True: payload = { 'sortBy': sort_by, 'dir': sort_dir, 'startindex': index, 'pagesize': limit, 'namespace': '/account' } response = self.session.post('/account/loansAj.action', data=payload) json_response = response.json() # Notes returned if self.session.json_success(json_response): notes['loans'] += json_response['searchresult']['loans'] notes['total'] = json_response['searchresult']['totalRecords'] # Error else: notes['result'] = json_response['result'] break # Load more if get_all is True and len(notes['loans']) < notes['total']: index += limit # End else: break return notes
<SYSTEM_TASK:> Get a loan note that you've invested in by ID <END_TASK> <USER_TASK:> Description: def get_note(self, note_id): """ Get a loan note that you've invested in by ID Parameters ---------- note_id : int The note ID Returns ------- dict A dictionary representing the matching note or False Examples -------- >>> from lendingclub import LendingClub >>> lc = LendingClub(email='[email protected]', password='secret123') >>> lc.authenticate() True >>> notes = lc.my_notes() # Get the first 100 loan notes >>> len(notes['loans']) 100 >>> notes['total'] # See the total number of loan notes you have 630 >>> notes = lc.my_notes(start_index=100) # Get the next 100 loan notes >>> len(notes['loans']) 100 >>> notes = lc.my_notes(get_all=True) # Get all notes in one request (may be slow) >>> len(notes['loans']) 630 """
index = 0
while True:
    notes = self.my_notes(start_index=index, sort_by='noteId')

    if notes['result'] != 'success':
        break

    # If the first note has a higher ID, we've passed it
    if notes['loans'][0]['noteId'] > note_id:
        break

    # If the last note has a higher ID, it could be in this record set
    if notes['loans'][-1]['noteId'] >= note_id:
        for note in notes['loans']:
            if note['noteId'] == note_id:
                return note

    index += 100

return False
<SYSTEM_TASK:> Add a loan and amount you want to invest, to your order. <END_TASK> <USER_TASK:> Description: def add(self, loan_id, amount): """ Add a loan and amount you want to invest, to your order. If this loan is already in your order, it's amount will be replaced with the this new amount Parameters ---------- loan_id : int or dict The ID of the loan you want to add or a dictionary containing a `loan_id` value amount : int % 25 The dollar amount you want to invest in this loan, as a multiple of 25. """
assert amount > 0 and amount % 25 == 0, 'Amount must be a multiple of 25'
assert type(amount) in (float, int), 'Amount must be a number'

if type(loan_id) is dict:
    loan = loan_id
    assert 'loan_id' in loan and type(loan['loan_id']) is int, 'loan_id must be a number or dictionary containing a loan_id value'
    loan_id = loan['loan_id']

assert type(loan_id) in [str, unicode, int], 'Loan ID must be an integer number or a string'
self.loans[loan_id] = amount
<SYSTEM_TASK:> Add a batch of loans to your order. <END_TASK> <USER_TASK:> Description: def add_batch(self, loans, batch_amount=None): """ Add a batch of loans to your order. Parameters ---------- loans : list A list of dictionary objects representing each loan and the amount you want to invest in it (see examples below). batch_amount : int, optional The dollar amount you want to set on ALL loans in this batch. **NOTE:** This will override the invest_amount value for each loan. Examples -------- Each item in the loans list can either be a loan ID OR a dictionary object containing `loan_id` and `invest_amount` values. The invest_amount value is the dollar amount you wish to invest in this loan. **List of IDs**:: # Invest $50 in 3 loans order.add_batch([1234, 2345, 3456], 50) **List of Dictionaries**:: # Invest different amounts in each loans order.add_batch([ {'loan_id': 1234, invest_amount: 50}, {'loan_id': 2345, invest_amount: 25}, {'loan_id': 3456, invest_amount: 150} ]) """
assert batch_amount is None or batch_amount % 25 == 0, 'batch_amount must be a multiple of 25' # Add each loan assert type(loans) is list, 'The loans property must be a list. (not {0})'.format(type(loans)) for loan in loans: loan_id = loan amount = batch_amount # Extract ID and amount from loan dict if type(loan) is dict: assert 'loan_id' in loan, 'Each loan dict must have a loan_id value' assert batch_amount or 'invest_amount' in loan, 'Could not determine how much to invest in loan {0}'.format(loan['loan_id']) loan_id = loan['loan_id'] if amount is None and 'invest_amount' in loan: amount = loan['invest_amount'] assert amount is not None, 'Could not determine how much to invest in loan {0}'.format(loan_id) assert amount % 25 == 0, 'Amount to invest must be a multiple of 25 (loan_id: {0})'.format(loan_id) self.add(loan_id, amount)
<SYSTEM_TASK:> Place the order with LendingClub <END_TASK> <USER_TASK:> Description: def execute(self, portfolio_name=None): """ Place the order with LendingClub Parameters ---------- portfolio_name : string The name of the portfolio to add the invested loan notes to. This can be a new or existing portfolio name. Raises ------ LendingClubError Returns ------- int The completed order ID """
assert self.order_id == 0, 'This order has already been placed. Start a new order.'
assert len(self.loans) > 0, 'There aren\'t any loans in your order'

# Place the order
self.__stage_order()
token = self.__get_strut_token()
self.order_id = self.__place_order(token)

self.__log('Order #{0} was successfully submitted'.format(self.order_id))

# Assign to portfolio
if portfolio_name:
    return self.assign_to_portfolio(portfolio_name)

return self.order_id
<SYSTEM_TASK:> Assign all the notes in this order to a portfolio <END_TASK> <USER_TASK:> Description: def assign_to_portfolio(self, portfolio_name=None): """ Assign all the notes in this order to a portfolio Parameters ---------- portfolio_name -- The name of the portfolio to assign it to (new or existing) Raises ------ LendingClubError Returns ------- boolean True on success """
assert self.order_id > 0, 'You need to execute this order before you can assign to a portfolio.'

# Get loan IDs as a list
loan_ids = self.loans.keys()

# Make a list of 1 order ID per loan
order_ids = [self.order_id] * len(loan_ids)

return self.lc.assign_to_portfolio(portfolio_name, loan_ids, order_ids)
<SYSTEM_TASK:> Add all the loans to the LC order session <END_TASK> <USER_TASK:> Description: def __stage_order(self): """ Add all the loans to the LC order session """
# Skip staging...probably not a good idea...you've been warned
if self.__already_staged is True and self.__i_know_what_im_doing is True:
    self.__log('Not staging the order...I hope you know what you\'re doing...')
    return

self.__log('Staging order for {0} loan notes...'.format(len(self.loans)))

# Create a fresh order session
self.lc.session.clear_session_order()

#
# Stage all the loans to the order
#
loan_ids = self.loans.keys()
self.__log('Staging loans {0}'.format(loan_ids))

# LendingClub requires you to search for the loans before you can stage them
f = FilterByLoanID(loan_ids)
results = self.lc.search(f, limit=len(self.loans))
if len(results['loans']) == 0 or results['totalRecords'] != len(self.loans):
    raise LendingClubError('Could not stage the loans. The number of loans in your batch does not match totalRecords. {0} != {1}'.format(len(self.loans), results['totalRecords']), results)

# Stage each loan
for loan_id, amount in self.loans.iteritems():
    payload = {
        'method': 'addToPortfolio',
        'loan_id': loan_id,
        'loan_amount': amount,
        'remove': 'false'
    }
    response = self.lc.session.get('/data/portfolio', query=payload)
    json_response = response.json()

    # Ensure it was successful before moving on
    if not self.lc.session.json_success(json_response):
        raise LendingClubError('Could not stage loan {0} on the order: {1}'.format(loan_id, response.text), response)

#
# Add all staged loans to the order
#
payload = {
    'method': 'addToPortfolioNew'
}
response = self.lc.session.get('/data/portfolio', query=payload)
json_response = response.json()

if self.lc.session.json_success(json_response):
    self.__log(json_response['message'])
    return True
else:
    self.__log('Could not add loans to the order: {0}'.format(response.text))
    raise LendingClubError('Could not add loans to the order', response.text)
<SYSTEM_TASK:> Use the struts token to place the order. <END_TASK> <USER_TASK:> Description: def __place_order(self, token): """ Use the struts token to place the order. Parameters ---------- token : string The struts token received from the place order page Returns ------- int The completed order ID. """
order_id = 0 response = None if not token or token['value'] == '': raise LendingClubError('The token parameter is False, None or unknown.') # Process order confirmation page try: # Place the order payload = {} if token: payload['struts.token.name'] = token['name'] payload[token['name']] = token['value'] response = self.lc.session.post('/portfolio/orderConfirmed.action', data=payload) # Process HTML for the order ID html = response.text soup = BeautifulSoup(html, 'html5lib') # Order num order_field = soup.find(id='order_id') if order_field: order_id = int(order_field['value']) # Did not find an ID if order_id == 0: self.__log('An investment order was submitted, but a confirmation ID could not be determined') raise LendingClubError('No order ID was found when placing the order.', response) else: return order_id except Exception as e: raise LendingClubError('Could not place the order: {0}'.format(str(e)), response)
<SYSTEM_TASK:> Check if the time since the last HTTP request is under the <END_TASK> <USER_TASK:> Description: def __continue_session(self): """ Check if the time since the last HTTP request is under the session timeout limit. If it's been too long since the last request attempt to authenticate again. """
now = time.time()
diff = abs(now - self.last_request_time)

timeout_sec = self.session_timeout * 60  # convert minutes to seconds
if diff >= timeout_sec:
    self.__log('Session timed out, attempting to authenticate')
    self.authenticate()
<SYSTEM_TASK:> Authenticate with LendingClub and preserve the user session for future requests. <END_TASK> <USER_TASK:> Description: def authenticate(self, email=None, password=None): """ Authenticate with LendingClub and preserve the user session for future requests. This will raise an exception if the login appears to have failed, otherwise it returns True. Since Lending Club doesn't seem to have a login API, the code has to try to decide if the login worked or not by looking at the URL redirect and parsing the returned HTML for errors. Parameters ---------- email : string The email of a user on Lending Club password : string The user's password, for authentication. Returns ------- boolean True on success or throws an exception on failure. Raises ------ session.AuthenticationError If authentication failed session.NetworkError If a network error occurred """
# Get email and password if email is None: email = self.email else: self.email = email if password is None: password = self.__pass else: self.__pass = password # Get them from the user if email is None: email = raw_input('Email:') self.email = email if password is None: password = getpass.getpass() self.__pass = password self.__log('Attempting to authenticate: {0}'.format(self.email)) # Start session self.__session = requests.Session() self.__session.headers = { 'Referer': 'https://www.lendingclub.com/', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31' } # Set last request time to now self.last_request_time = time.time() # Send login request to LC payload = { 'login_email': email, 'login_password': password } response = self.post('/account/login.action', data=payload, redirects=False) # Get URL redirect URL and save the last part of the path as the endpoint response_url = response.url if response.status_code == 302: response_url = response.headers['location'] endpoint = response_url.split('/')[-1] # Debugging self.__log('Status code: {0}'.format(response.status_code)) self.__log('Redirected to: {0}'.format(response_url)) self.__log('Cookies: {0}'.format(str(response.cookies.keys()))) # Show query and data that the server received if 'x-echo-query' in response.headers: self.__log('Query: {0}'.format(response.headers['x-echo-query'])) if 'x-echo-data' in response.headers: self.__log('Data: {0}'.format(response.headers['x-echo-data'])) # Parse any errors from the HTML soup = BeautifulSoup(response.text, "html5lib") errors = soup.find(id='master_error-list') if errors: errors = errors.text.strip() # Remove extra spaces and newlines from error message errors = re.sub('\t+', '', errors) errors = re.sub('\s*\n+\s*', ' * ', errors) if errors == '': errors = None # Raise error if errors is not None: raise AuthenticationError(errors) # Redirected back to the login page...must be an error if endpoint == 'login.action': raise AuthenticationError('Unknown! Redirected back to the login page without an error message') return True
<SYSTEM_TASK:> Sends HTTP request to LendingClub. <END_TASK> <USER_TASK:> Description: def request(self, method, path, query=None, data=None, redirects=True): """ Sends HTTP request to LendingClub. Parameters ---------- method : {GET, POST, HEAD, DELETE} The HTTP method to use: GET, POST, HEAD or DELETE path : string The path that will be appended to the domain defined in :attr:`base_url`. query : dict A dictionary of query string parameters data : dict A dictionary of POST data values redirects : boolean True to follow redirects, False to return the original response from the server. Returns ------- requests.Response A `requests.Response <http://docs.python-requests.org/en/latest/api/#requests.Response>`_ object """
# Check session time
self.__continue_session()

try:
    url = self.build_url(path)
    method = method.upper()

    self.__log('{0} request to: {1}'.format(method, url))

    if method == 'POST':
        request = self.__session.post(url, params=query, data=data, allow_redirects=redirects)
    elif method == 'GET':
        request = self.__session.get(url, params=query, data=data, allow_redirects=redirects)
    elif method == 'HEAD':
        request = self.__session.head(url, params=query, data=data, allow_redirects=redirects)
    elif method == 'DELETE':
        request = self.__session.delete(url, params=query, data=data, allow_redirects=redirects)
    else:
        raise SessionError('{0} is not a supported HTTP method'.format(method))

    self.last_response = request

    self.__log('Status code: {0}'.format(request.status_code))

    # Update session time
    self.last_request_time = time.time()

except (RequestException, ConnectionError, TooManyRedirects, HTTPError) as e:
    raise NetworkError('{0} failed to: {1}'.format(method, url), e)
except Timeout as e:
    raise NetworkError('{0} request timed out: {1}'.format(method, url), e)

return request
<SYSTEM_TASK:> Check the JSON response object for the success flag <END_TASK> <USER_TASK:> Description: def json_success(self, json): """ Check the JSON response object for the success flag Parameters ---------- json : dict A dictionary representing a JSON object from lendingclub.com """
if type(json) is dict and 'result' in json and json['result'] == 'success':
    return True
return False
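Assuming ``session`` is an instance of the surrounding session class, the check is deliberately strict: the value must be a dict whose ``'result'`` key equals ``'success'``:

    >>> session.json_success({'result': 'success'})
    True
    >>> session.json_success({'result': 'error'})
    False
    >>> session.json_success('success')
    False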
<SYSTEM_TASK:> Merge dictionary objects recursively, by only updating keys existing in to_dict <END_TASK> <USER_TASK:> Description: def __merge_values(self, from_dict, to_dict): """ Merge dictionary objects recursively, by only updating keys existing in to_dict """
for key, value in from_dict.iteritems():

    # Only if the key already exists
    if key in to_dict:

        # Make sure the values are the same datatype
        assert type(to_dict[key]) is type(from_dict[key]), 'Data type for {0} is incorrect: {1}, should be {2}'.format(key, type(from_dict[key]), type(to_dict[key]))

        # Recursively dive into the next dictionary
        if type(to_dict[key]) is dict:
            to_dict[key] = self.__merge_values(from_dict[key], to_dict[key])

        # Replace value
        else:
            to_dict[key] = from_dict[key]

return to_dict
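A sketch of the merge semantics, rewritten as a free function for illustration (the type assertion is omitted): only keys already present in ``to_dict`` are updated, and nested dictionaries are merged recursively.

    def merge_values(from_dict, to_dict):
        for key, value in from_dict.items():
            if key in to_dict:
                if type(to_dict[key]) is dict:
                    to_dict[key] = merge_values(from_dict[key], to_dict[key])
                else:
                    to_dict[key] = from_dict[key]
        return to_dict

    defaults = {'filters': {'term': 36, 'grade': 'B'}, 'max_per_note': 25}
    saved = {'filters': {'term': 60}, 'unknown_key': True}
    merge_values(saved, defaults)
    # -> {'filters': {'term': 60, 'grade': 'B'}, 'max_per_note': 25}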
<SYSTEM_TASK:> Adjust the grades list. <END_TASK> <USER_TASK:> Description: def __normalize_grades(self): """ Adjust the grades list. If a grade has been set, set All to false """
if 'grades' in self and self['grades']['All'] is True:
    for grade in self['grades']:
        if grade != 'All' and self['grades'][grade] is True:
            self['grades']['All'] = False
            break
<SYSTEM_TASK:> Adjust the funding progress filter to be a factor of 10 <END_TASK> <USER_TASK:> Description: def __normalize_progress(self): """ Adjust the funding progress filter to be a factor of 10 """
progress = self['funding_progress']
if progress % 10 != 0:
    progress = round(float(progress) / 10)
    progress = int(progress) * 10
self['funding_progress'] = progress
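The rounding keeps ``funding_progress`` on a 10% grid, for example:

    >>> int(round(73 / 10.0)) * 10
    70
    >>> int(round(78 / 10.0)) * 10
    80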
<SYSTEM_TASK:> Adjusts the values of the filters to be correct. <END_TASK> <USER_TASK:> Description: def __normalize(self): """ Adjusts the values of the filters to be correct. For example, if you set grade 'B' to True, then 'All' should be set to False """
# Don't normalize if we're already normalizing or initializing
if self.__normalizing is True or self.__initialized is False:
    return

self.__normalizing = True
self.__normalize_grades()
self.__normalize_progress()
self.__normalizing = False
<SYSTEM_TASK:> Validate a single loan result record against the filters <END_TASK> <USER_TASK:> Description: def validate_one(self, loan): """ Validate a single loan result record against the filters Parameters ---------- loan : dict A single loan note record Returns ------- boolean True or raises FilterValidationError Raises ------ FilterValidationError If the loan does not match the filter criteria """
assert type(loan) is dict, 'loan parameter must be a dictionary object' # Map the loan value keys to the filter keys req = { 'loanGUID': 'loan_id', 'loanGrade': 'grade', 'loanLength': 'term', 'loanUnfundedAmount': 'progress', 'loanAmountRequested': 'progress', 'alreadyInvestedIn': 'exclude_existing', 'purpose': 'loan_purpose', } # Throw an error if the loan does not contain one of the criteria keys that this filter has for key, criteria in req.iteritems(): if criteria in self and key not in loan: raise FilterValidationError('Loan does not have a "{0}" value.'.format(key), loan, criteria) # Loan ID if 'loan_id' in self: loan_ids = str(self['loan_id']).split(',') if str(loan['loanGUID']) not in loan_ids: raise FilterValidationError('Did not meet filter criteria for loan ID. {0} does not match {1}'.format(loan['loanGUID'], self['loan_id']), loan=loan, criteria='loan ID') # Grade grade = loan['loanGrade'][0] # Extract the letter portion of the loan if 'grades' in self and self['grades']['All'] is not True: if grade not in self['grades']: raise FilterValidationError('Loan grade "{0}" is unknown'.format(grade), loan, 'grade') elif self['grades'][grade] is False: raise FilterValidationError(loan=loan, criteria='grade') # Term if 'term' in self and self['term'] is not None: if loan['loanLength'] == 36 and self['term']['Year3'] is False: raise FilterValidationError(loan=loan, criteria='loan term') elif loan['loanLength'] == 60 and self['term']['Year5'] is False: raise FilterValidationError(loan=loan, criteria='loan term') # Progress if 'funding_progress' in self: loan_progress = (1 - (loan['loanUnfundedAmount'] / loan['loanAmountRequested'])) * 100 if self['funding_progress'] > loan_progress: raise FilterValidationError(loan=loan, criteria='funding progress') # Exclude existing if 'exclude_existing' in self: if self['exclude_existing'] is True and loan['alreadyInvestedIn'] is True: raise FilterValidationError(loan=loan, criteria='exclude loans you are invested in') # Loan purpose (either an array or single value) if 'loan_purpose' in self and loan['purpose'] is not False: purpose = self['loan_purpose'] if type(purpose) is not dict: purpose = {purpose: True} if 'All' not in purpose or purpose['All'] is False: if loan['purpose'] not in purpose: raise FilterValidationError(loan=loan, criteria='loan purpose') return True