<SYSTEM_TASK:> Return weights of name components based on whether or not they were <END_TASK> <USER_TASK:>
Description:
def _determine_weights(self, other, settings):
    """
    Return weights of name components based on whether or not they were
    omitted
    """
    # TODO: Reduce weight for matches by prefix or initials
    first_is_used = settings['first']['required'] or \
        self.first and other.first
    first_weight = settings['first']['weight'] if first_is_used else 0

    middle_is_used = settings['middle']['required'] or \
        self.middle and other.middle
    middle_weight = settings['middle']['weight'] if middle_is_used else 0

    last_is_used = settings['last']['required'] or \
        self.last and other.last
    last_weight = settings['last']['weight'] if last_is_used else 0

    return first_weight, middle_weight, last_weight
<SYSTEM_TASK:> decorator to be used for rate limiting individual routes. <END_TASK> <USER_TASK:>
Description:
def limit(self, limit_value, key_func=None, per_method=False,
          methods=None, error_message=None, exempt_when=None):
    """
    decorator to be used for rate limiting individual routes.

    :param limit_value: rate limit string or a callable that returns a
     string. :ref:`ratelimit-string` for more details.
    :param function key_func: function/lambda to extract the unique
     identifier for the rate limit. defaults to remote address of the
     request.
    :param bool per_method: whether the limit is sub categorized into the
     http method of the request.
    :param list methods: if specified, only the methods in this list will
     be rate limited (default: None).
    :param error_message: string (or callable that returns one) to override
     the error message used in the response.
    :return:
    """
    return self.__limit_decorator(limit_value, key_func,
                                  per_method=per_method,
                                  methods=methods,
                                  error_message=error_message,
                                  exempt_when=exempt_when)
<SYSTEM_TASK:> decorator to be applied to multiple routes sharing the same rate limit. <END_TASK> <USER_TASK:>
Description:
def shared_limit(self, limit_value, scope, key_func=None,
                 error_message=None, exempt_when=None):
    """
    decorator to be applied to multiple routes sharing the same rate limit.

    :param limit_value: rate limit string or a callable that returns a
     string. :ref:`ratelimit-string` for more details.
    :param scope: a string or callable that returns a string for defining
     the rate limiting scope.
    :param function key_func: function/lambda to extract the unique
     identifier for the rate limit. defaults to remote address of the
     request.
    :param error_message: string (or callable that returns one) to override
     the error message used in the response.
    """
    return self.__limit_decorator(
        limit_value, key_func, True, scope,
        error_message=error_message,
        exempt_when=exempt_when
    )
<SYSTEM_TASK:> resets the storage if it supports being reset <END_TASK> <USER_TASK:>
Description:
def reset(self):
    """
    resets the storage if it supports being reset
    """
    try:
        self._storage.reset()
        self.logger.info("Storage has been reset and all limits cleared")
    except NotImplementedError:
        self.logger.warning("This storage type does not support being reset")
<SYSTEM_TASK:> Returns a bs4 object of the page requested <END_TASK> <USER_TASK:>
Description:
def get_soup(page=''):
    """
    Returns a bs4 object of the page requested
    """
    content = requests.get('%s/%s' % (BASE_URL, page)).text
    return BeautifulSoup(content)
<SYSTEM_TASK:> Takes two names and returns true if they describe the same person. <END_TASK> <USER_TASK:>
Description:
def match(fullname1, fullname2, strictness='default', options=None):
    """
    Takes two names and returns true if they describe the same person.

    :param string fullname1: first human name
    :param string fullname2: second human name
    :param string strictness: strictness settings to use
    :param dict options: custom strictness settings updates

    :return bool: the names match
    """
    if options is not None:
        settings = deepcopy(SETTINGS[strictness])
        deep_update_dict(settings, options)
    else:
        settings = SETTINGS[strictness]

    name1 = Name(fullname1)
    name2 = Name(fullname2)

    return name1.deep_compare(name2, settings)
<SYSTEM_TASK:> Takes two names and returns the degree to which they describe the same person. <END_TASK> <USER_TASK:>
Description:
def ratio(fullname1, fullname2, strictness='default', options=None):
    """
    Takes two names and returns the degree to which they describe the
    same person. Uses difflib's sequence matching on a per-field basis
    for names.

    :param string fullname1: first human name
    :param string fullname2: second human name
    :param string strictness: strictness settings to use
    :param dict options: custom strictness settings updates

    :return int: sequence ratio match (out of 100)
    """
    if options is not None:
        settings = deepcopy(SETTINGS[strictness])
        deep_update_dict(settings, options)
    else:
        settings = SETTINGS[strictness]

    name1 = Name(fullname1)
    name2 = Name(fullname2)

    return name1.ratio_deep_compare(name2, settings)
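For illustration, here is how the two entry points above are typically called. The import path is an assumption (use whatever module exposes these helpers), and the options keys mirror the per-component settings dict seen in _determine_weights:

# Hypothetical import path, for illustration only.
from name_matching import match, ratio

match('John F. Kennedy', 'John Fitzgerald Kennedy')   # True: initials equate
ratio('Jon Smith', 'John Smyth')                      # an int in [0, 100]

# Relax a single component via the options dict (deep-merged into SETTINGS):
match('J. Smith', 'John Smith', options={'first': {'required': False}})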
<SYSTEM_TASK:> Returns all 'tr' tag rows as a list of tuples. Each tuple is for <END_TASK> <USER_TASK:>
Description:
def _get_zipped_rows(self, soup):
    """
    Returns all 'tr' tag rows as a list of tuples. Each tuple is for
    a single story.
    """
    # the table with all submissions
    table = soup.findChildren('table')[2]
    # get all rows but last 2
    rows = table.findChildren(['tr'])[:-2]
    # remove the spacing rows
    # indices of spacing tr's
    spacing = range(2, len(rows), 3)
    rows = [row for (i, row) in enumerate(rows) if (i not in spacing)]
    # rank, title, domain
    info = [row for (i, row) in enumerate(rows) if (i % 2 == 0)]
    # points, submitter, comments
    detail = [row for (i, row) in enumerate(rows) if (i % 2 != 0)]
    # build a list of tuples, one per post
    return zip(info, detail)
<SYSTEM_TASK:> For the story, builds and returns a list of Comment objects. <END_TASK> <USER_TASK:>
Description:
def _build_comments(self, soup):
    """
    For the story, builds and returns a list of Comment objects.
    """
    comments = []
    current_page = 1

    while True:
        # Get the table holding all comments:
        if current_page == 1:
            table = soup.findChildren('table')[3]
        elif current_page > 1:
            table = soup.findChildren('table')[2]

        # get all rows (each comment is duplicated twice)
        rows = table.findChildren(['tr'])
        # last row is more, second last is spacing
        rows = rows[:len(rows) - 2]
        # now we have unique comments only
        rows = [row for i, row in enumerate(rows) if (i % 2 == 0)]

        if len(rows) > 1:
            for row in rows:
                # skip an empty td
                if not row.findChildren('td'):
                    continue

                # Builds a flat list of comments
                # level of comment, starting with 0
                level = int(row.findChildren('td')[1].find('img').get(
                    'width')) // 40
                spans = row.findChildren('td')[3].findAll('span')
                # span[0] = submitter details
                # [<a href="user?id=jonknee">jonknee</a>, u' 1 hour ago | ', <a href="item?id=6910978">link</a>]
                # span[1] = actual comment
                if str(spans[0]) != '<span class="comhead"></span>':
                    # user who submitted the comment
                    user = spans[0].contents[0].string
                    # relative time of comment
                    time_ago = spans[0].contents[1].string.strip(
                        ).rstrip(' |')
                    try:
                        comment_id = int(re.match(r'item\?id=(.*)',
                                                  spans[0].contents[2].get(
                                                      'href')).groups()[0])
                    except AttributeError:
                        comment_id = int(re.match(r'%s/item\?id=(.*)' %
                                                  BASE_URL,
                                                  spans[0].contents[2].get(
                                                      'href')).groups()[0])
                    # text representation of comment (unformatted)
                    body = spans[1].text
                    if body[-2:] == '--':
                        body = body[:-5]
                    # html of comment, may not be valid
                    try:
                        pat = re.compile(
                            r'<span class="comment"><font color=".*">(.*)</font></span>')
                        body_html = re.match(pat, str(spans[1]).replace(
                            '\n', '')).groups()[0]
                    except AttributeError:
                        pat = re.compile(
                            r'<span class="comment"><font color=".*">(.*)</font></p><p><font size="1">')
                        body_html = re.match(pat, str(spans[1]).replace(
                            '\n', '')).groups()[0]
                else:
                    # comment deleted
                    user = ''
                    time_ago = ''
                    comment_id = -1
                    body = '[deleted]'
                    body_html = '[deleted]'

                comment = Comment(comment_id, level, user, time_ago,
                                  body, body_html)
                comments.append(comment)

        # Move on to the next page of comments, or exit the loop if there
        # is no next page.
        next_page_url = self._get_next_page(soup, current_page)
        if not next_page_url:
            break

        soup = get_soup(page=next_page_url)
        current_page += 1

    previous_comment = None
    # for comment in comments:
    #     if comment.level == 0:
    #         previous_comment = comment
    #     else:
    #         level_difference = comment.level - previous_comment.level
    #         previous_comment.body_html += '\n' + '\t' * level_difference \
    #             + comment.body_html
    #         previous_comment.body += '\n' + '\t' * level_difference + \
    #             comment.body
    return comments
<SYSTEM_TASK:> Initializes an instance of Story for given item_id. <END_TASK> <USER_TASK:>
Description:
def fromid(self, item_id):
    """
    Initializes an instance of Story for given item_id.

    It is assumed that the story referenced by item_id is valid
    and does not raise any HTTP errors.
    item_id is an int.
    """
    if not item_id:
        raise Exception('Need an item_id for a story')
    # get details about a particular story
    soup = get_item_soup(item_id)

    # this post has not been scraped, so we explicitly get all info
    story_id = item_id
    rank = -1

    # to extract meta information about the post
    info_table = soup.findChildren('table')[2]
    # [0] = title, domain, [1] = points, user, time, comments
    info_rows = info_table.findChildren('tr')

    # title, domain
    title_row = info_rows[0].findChildren('td')[1]
    title = title_row.find('a').text
    try:
        domain = title_row.find('span').string[2:-2]
        # domain found
        is_self = False
        link = title_row.find('a').get('href')
    except AttributeError:
        # self post
        domain = BASE_URL
        is_self = True
        link = '%s/item?id=%s' % (BASE_URL, item_id)

    # points, user, time, comments
    meta_row = info_rows[1].findChildren('td')[1].contents
    # [<span id="score_7024626">789 points</span>, u' by ',
    #  <a href="user?id=endianswap">endianswap</a>, u' 8 hours ago  | ',
    #  <a href="item?id=7024626">238 comments</a>]
    points = int(re.match(r'^(\d+)\spoint.*',
                          meta_row[0].text).groups()[0])
    submitter = meta_row[2].text
    submitter_profile = '%s/%s' % (BASE_URL, meta_row[2].get('href'))
    published_time = ' '.join(meta_row[3].strip().split()[:3])
    comments_link = '%s/item?id=%s' % (BASE_URL, item_id)
    try:
        num_comments = int(re.match(r'(\d+)\s.*',
                                    meta_row[4].text).groups()[0])
    except AttributeError:
        num_comments = 0

    story = Story(rank, story_id, title, link, domain, points,
                  submitter, published_time, submitter_profile,
                  num_comments, comments_link, is_self)
    return story
<SYSTEM_TASK:> Compare a list of names from a name component based on settings <END_TASK> <USER_TASK:>
Description:
def compare_name_component(list1, list2, settings, use_ratio=False):
    """
    Compare a list of names from a name component based on settings
    """
    if not list1[0] or not list2[0]:
        not_required = not settings['required']
        return not_required * 100 if use_ratio else not_required
    if len(list1) != len(list2):
        return False

    compare_func = _ratio_compare if use_ratio else _normal_compare
    return compare_func(list1, list2, settings)
<SYSTEM_TASK:> Evaluates whether names match, or one name is the initial of the other <END_TASK> <USER_TASK:>
Description:
def equate_initial(name1, name2):
    """
    Evaluates whether names match, or one name is the initial of the other
    """
    if len(name1) == 0 or len(name2) == 0:
        return False

    if len(name1) == 1 or len(name2) == 1:
        return name1[0] == name2[0]

    return name1 == name2
<SYSTEM_TASK:> Evaluates whether names match, or one name prefixes another <END_TASK> <USER_TASK:>
Description:
def equate_prefix(name1, name2):
    """
    Evaluates whether names match, or one name prefixes another
    """
    if len(name1) == 0 or len(name2) == 0:
        return False

    return name1.startswith(name2) or name2.startswith(name1)
<SYSTEM_TASK:> Evaluates whether names match based on common nickname patterns <END_TASK> <USER_TASK:>
Description:
def equate_nickname(name1, name2):
    """
    Evaluates whether names match based on common nickname patterns

    This is not currently used in any name comparison
    """
    # Convert '-ie' and '-y' to the root name
    nickname_regex = r'(.)\1(y|ie)$'
    root_regex = r'\1'

    name1 = re.sub(nickname_regex, root_regex, name1)
    name2 = re.sub(nickname_regex, root_regex, name2)

    if equate_prefix(name1, name2):
        return True

    return False
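A few concrete calls for the three comparison helpers above; the inputs are hypothetical test values, and the results follow directly from the definitions:

equate_initial('j', 'john')      # True: a single letter matches the initial
equate_initial('jon', 'john')    # False: two full names that differ
equate_prefix('rob', 'robert')   # True: one name prefixes the other
equate_nickname('jenny', 'jen')  # True: 'nny' collapses to 'n', then prefix match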
<SYSTEM_TASK:> Converts unicode-specific characters to their equivalent ascii <END_TASK> <USER_TASK:>
Description:
def make_ascii(word):
    """
    Converts unicode-specific characters to their equivalent ascii
    """
    if sys.version_info < (3, 0, 0):
        word = unicode(word)
    else:
        word = str(word)

    normalized = unicodedata.normalize('NFKD', word)
    return normalized.encode('ascii', 'ignore').decode('utf-8')
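Two examples of the normalization, which also show its limits (this is standard NFKD behavior):

make_ascii(u'Renée')   # 'Renee': NFKD splits the accent off, encode() drops it
make_ascii(u'Łukasz')  # 'ukasz': 'Ł' has no NFKD decomposition, so it is lost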
<SYSTEM_TASK:> Returns sequence match ratio for two words <END_TASK> <USER_TASK:>
Description:
def seq_ratio(word1, word2):
    """
    Returns sequence match ratio for two words
    """
    raw_ratio = SequenceMatcher(None, word1, word2).ratio()
    return int(round(100 * raw_ratio))
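SequenceMatcher.ratio() returns 2*M/T, where M is the number of matching characters and T is the combined length of both words, so for example:

seq_ratio('smith', 'smyth')  # 'sm' and 'th' match: 2 * 4 / 10 = 0.8 -> 80
seq_ratio('anne', 'anne')    # identical strings -> 100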
<SYSTEM_TASK:> Updates the values in a nested dict, while unspecified values will remain <END_TASK> <USER_TASK:>
Description:
def deep_update_dict(default, options):
    """
    Updates the values in a nested dict, while unspecified values will
    remain unchanged
    """
    for key in options.keys():
        default_setting = default.get(key)
        new_setting = options.get(key)
        if isinstance(default_setting, dict):
            deep_update_dict(default_setting, new_setting)
        else:
            default[key] = new_setting
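A small demonstration of the merge semantics: only the leaves named in options change, and sibling keys survive:

settings = {'first': {'required': True, 'weight': 30}}
deep_update_dict(settings, {'first': {'required': False}})
assert settings == {'first': {'required': False, 'weight': 30}}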
<SYSTEM_TASK:> Get base output path for a list of songs for download. <END_TASK> <USER_TASK:>
Description:
def template_to_base_path(template, google_songs):
    """Get base output path for a list of songs for download."""
    if template == os.getcwd() or template == '%suggested%':
        base_path = os.getcwd()
    else:
        template = os.path.abspath(template)
        song_paths = [template_to_filepath(template, song)
                      for song in google_songs]
        base_path = os.path.dirname(os.path.commonprefix(song_paths))

    return base_path
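Note that os.path.commonprefix compares character-by-character rather than by path component, which is why the result is passed through os.path.dirname; illustrative paths:

paths = ['/music/Artist/Album1/01.mp3', '/music/Artist/Album2/01.mp3']
os.path.commonprefix(paths)                   # '/music/Artist/Album' -- not a real dir
os.path.dirname(os.path.commonprefix(paths))  # '/music/Artist'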
<SYSTEM_TASK:> Returns the number of bits set to True in the bit string. <END_TASK> <USER_TASK:>
Description:
def count(self):
    """Returns the number of bits set to True in the bit string.

    Usage:
        assert BitString('00110').count() == 2

    Arguments: None
    Return:
        An int, the number of bits with value 1.
    """
    result = 0
    bits = self._bits
    while bits:
        result += bits % 2
        bits >>= 1
    return result
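The same peel-off-the-low-bit loop, checked on a plain int against Python's bin():

bits = 0b00110
total = 0
while bits:
    total += bits % 2  # add the lowest bit
    bits >>= 1         # then shift it away
assert total == bin(0b00110).count('1') == 2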
<SYSTEM_TASK:> Wait for an event on any channel. <END_TASK> <USER_TASK:>
Description:
def drain_events(self, allowed_methods=None, timeout=None):
    """Wait for an event on any channel."""
    return self.wait_multi(self.channels.values(), timeout=timeout)
<SYSTEM_TASK:> Check if a queue has been declared. <END_TASK> <USER_TASK:>
Description:
def queue_exists(self, queue):
    """Check if a queue has been declared.

    :rtype bool:
    """
    try:
        self.channel.queue_declare(queue=queue, passive=True)
    except AMQPChannelException, e:
        if e.amqp_reply_code == 404:
            return False
        raise e
    else:
        return True
<SYSTEM_TASK:> Declare a named exchange. <END_TASK> <USER_TASK:>
Description:
def exchange_declare(self, exchange, type, durable, auto_delete):
    """Declare a named exchange."""
    return self.channel.exchange_declare(exchange=exchange,
                                         type=type,
                                         durable=durable,
                                         auto_delete=auto_delete)
<SYSTEM_TASK:> Bind queue to an exchange using a routing key. <END_TASK> <USER_TASK:>
Description:
def queue_bind(self, queue, exchange, routing_key, arguments=None):
    """Bind queue to an exchange using a routing key."""
    return self.channel.queue_bind(queue=queue, exchange=exchange,
                                   routing_key=routing_key,
                                   arguments=arguments)
<SYSTEM_TASK:> Receive a message from a declared queue by name. <END_TASK> <USER_TASK:>
Description:
def get(self, queue, no_ack=False):
    """Receive a message from a declared queue by name.

    :returns: A :class:`Message` object if a message was received,
        ``None`` otherwise. If ``None`` was returned, it probably means
        there was no messages waiting on the queue.
    """
    raw_message = self.channel.basic_get(queue, no_ack=no_ack)
    if not raw_message:
        return None
    return self.message_to_python(raw_message)
<SYSTEM_TASK:> If no channel exists, a new one is requested. <END_TASK> <USER_TASK:>
Description:
def channel(self):
    """If no channel exists, a new one is requested."""
    if not self._channel:
        self._channel_ref = weakref.ref(self.connection.get_channel())
    return self._channel
<SYSTEM_TASK:> The fastest serialization method, but restricts <END_TASK> <USER_TASK:>
Description:
def register_pickle():
    """The fastest serialization method, but restricts
    you to python clients."""
    import cPickle
    registry.register('pickle', cPickle.dumps, cPickle.loads,
                      content_type='application/x-python-serialize',
                      content_encoding='binary')
<SYSTEM_TASK:> Set the default serialization method used by this library. <END_TASK> <USER_TASK:>
Description:
def _set_default_serializer(self, name):
    """
    Set the default serialization method used by this library.

    :param name: The name of the registered serialization method.
        For example, ``json`` (default), ``pickle``, ``yaml``, or
        any custom methods registered using :meth:`register`.

    :raises SerializerNotInstalled: If the serialization method
        requested is not available.
    """
    try:
        (self._default_content_type, self._default_content_encoding,
         self._default_encode) = self._encoders[name]
    except KeyError:
        raise SerializerNotInstalled(
            "No encoder installed for %s" % name)
<SYSTEM_TASK:> Serialize a data structure into a string suitable for sending <END_TASK> <USER_TASK:>
Description:
def encode(self, data, serializer=None):
    """
    Serialize a data structure into a string suitable for sending
    as an AMQP message body.

    :param data: The message data to send. Can be a list,
        dictionary or a string.

    :keyword serializer: An optional string representing
        the serialization method you want the data marshalled
        into. (For example, ``json``, ``raw``, or ``pickle``).

        If ``None`` (default), then `JSON`_ will be used, unless
        ``data`` is a ``str`` or ``unicode`` object. In this
        latter case, no serialization occurs as it would be
        unnecessary.

        Note that if ``serializer`` is specified, then that
        serialization method will be used even if a ``str`` or
        ``unicode`` object is passed in.

    :returns: A three-item tuple containing the content type
        (e.g., ``application/json``), content encoding, (e.g.,
        ``utf-8``) and a string containing the serialized data.

    :raises SerializerNotInstalled: If the serialization method
        requested is not available.
    """
    if serializer == "raw":
        return raw_encode(data)
    if serializer and not self._encoders.get(serializer):
        raise SerializerNotInstalled(
            "No encoder installed for %s" % serializer)

    # If a raw string was sent, assume binary encoding
    # (it's likely either ASCII or a raw binary file, but 'binary'
    # charset will encompass both, even if not ideal.)
    if not serializer and isinstance(data, str):
        # In Python 3+, this would be "bytes"; allow binary data to be
        # sent as a message without getting encoder errors
        return "application/data", "binary", data

    # For unicode objects, force it into a string
    if not serializer and isinstance(data, unicode):
        payload = data.encode("utf-8")
        return "text/plain", "utf-8", payload

    if serializer:
        content_type, content_encoding, encoder = \
            self._encoders[serializer]
    else:
        encoder = self._default_encode
        content_type = self._default_content_type
        content_encoding = self._default_content_encoding

    payload = encoder(data)
    return content_type, content_encoding, payload
<SYSTEM_TASK:> Create a new bit condition that matches the provided bit string, <END_TASK> <USER_TASK:>
Description:
def cover(cls, bits, wildcard_probability):
    """Create a new bit condition that matches the provided bit string,
    with the indicated per-index wildcard probability.

    Usage:
        condition = BitCondition.cover(bitstring, .33)
        assert condition(bitstring)

    Arguments:
        bits: A BitString which the resulting condition must match.
        wildcard_probability: A float in the range [0, 1] which
            indicates the likelihood of any given bit position
            containing a wildcard.
    Return:
        A randomly generated BitCondition which matches the given bits.
    """
    if not isinstance(bits, BitString):
        bits = BitString(bits)

    mask = BitString([
        random.random() > wildcard_probability
        for _ in range(len(bits))
    ])

    return cls(bits, mask)
<SYSTEM_TASK:> Perform 2-point crossover on this bit condition and another of <END_TASK> <USER_TASK:>
Description:
def crossover_with(self, other, points=2):
    """Perform 2-point crossover on this bit condition and another of
    the same length, returning the two resulting children.

    Usage:
        offspring1, offspring2 = condition1.crossover_with(condition2)

    Arguments:
        other: A second BitCondition of the same length as this one.
        points: An int, the number of crossover points of the
            crossover operation.
    Return:
        A tuple (condition1, condition2) of BitConditions, where the
        value at each position of this BitCondition and the other is
        preserved in one or the other of the two resulting conditions.
    """
    assert isinstance(other, BitCondition)
    assert len(self) == len(other)

    template = BitString.crossover_template(len(self), points)
    inv_template = ~template

    bits1 = (self._bits & template) | (other._bits & inv_template)
    mask1 = (self._mask & template) | (other._mask & inv_template)

    bits2 = (self._bits & inv_template) | (other._bits & template)
    mask2 = (self._mask & inv_template) | (other._mask & template)

    # Convert the modified sequences back into BitConditions
    return type(self)(bits1, mask1), type(self)(bits2, mask2)
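For intuition, here is one 2-point crossover worked out by hand on 8 bits; the template is random in practice, this one is made up:

# parent1 bits: 10101010      template: 00111100
# parent2 bits: 01010101     ~template: 11000011
# child1 = (p1 & t) | (p2 & ~t)
#        = 00101000 | 01000001 = 01101001
# child2 gets the complementary segments, so no bit is lost or duplicated.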
<SYSTEM_TASK:> Get the currently used backend class. <END_TASK> <USER_TASK:>
Description:
def get_backend_cls(self):
    """Get the currently used backend class."""
    backend_cls = self.backend_cls
    if not backend_cls or isinstance(backend_cls, basestring):
        backend_cls = get_backend_cls(backend_cls)
    return backend_cls
<SYSTEM_TASK:> Ensure we have a connection to the server. <END_TASK> <USER_TASK:>
Description:
def ensure_connection(self, errback=None, max_retries=None,
                      interval_start=2, interval_step=2, interval_max=30):
    """Ensure we have a connection to the server.

    If not retry establishing the connection with the settings
    specified.

    :keyword errback: Optional callback called each time the connection
        can't be established. Arguments provided are the exception
        raised and the interval that will be slept ``(exc, interval)``.

    :keyword max_retries: Maximum number of times to retry.
        If this limit is exceeded the connection error will be
        re-raised.

    :keyword interval_start: The number of seconds we start sleeping
        for.
    :keyword interval_step: How many seconds added to the interval
        for each retry.
    :keyword interval_max: Maximum number of seconds to sleep between
        each retry.
    """
    retry_over_time(self.connect, self.connection_errors, (), {},
                    errback, max_retries,
                    interval_start, interval_step, interval_max)
    return self
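A usage sketch with an error callback; conn stands in for any configured connection object exposing this method:

def log_retry(exc, interval):
    print("Broker unreachable (%r), retrying in %ss..." % (exc, interval))

# With the defaults above this sleeps 2s, 4s, 6s, ... capped at 30s
# between attempts, giving up after max_retries.
conn.ensure_connection(errback=log_retry, max_retries=10)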
<SYSTEM_TASK:> Close the currently open connection. <END_TASK> <USER_TASK:>
Description:
def close(self):
    """Close the currently open connection."""
    try:
        if self._connection:
            backend = self.create_backend()
            backend.close_connection(self._connection)
    except socket.error:
        pass
    self._closed = True
<SYSTEM_TASK:> Get connection info. <END_TASK> <USER_TASK:>
Description:
def info(self):
    """Get connection info."""
    backend_cls = self.backend_cls or "amqplib"
    port = self.port or self.create_backend().default_port
    return {"hostname": self.hostname,
            "userid": self.userid,
            "password": self.password,
            "virtual_host": self.virtual_host,
            "port": port,
            "insist": self.insist,
            "ssl": self.ssl,
            "transport_cls": backend_cls,
            "backend_cls": backend_cls,
            "connect_timeout": self.connect_timeout}
<SYSTEM_TASK:> Deserialize the message body, returning the original <END_TASK> <USER_TASK:>
Description:
def decode(self):
    """Deserialize the message body, returning the original
    python structure sent by the publisher."""
    return serialization.decode(self.body, self.content_type,
                                self.content_encoding)
<SYSTEM_TASK:> Reject this message. <END_TASK> <USER_TASK:>
Description:
def reject(self):
    """Reject this message.

    The message will be discarded by the server.

    :raises MessageStateError: If the message has already been
        acknowledged/requeued/rejected.
    """
    if self.acknowledged:
        raise self.MessageStateError(
            "Message already acknowledged with state: %s" % self._state)
    self.backend.reject(self.delivery_tag)
    self._state = "REJECTED"
<SYSTEM_TASK:> Reject this message and put it back on the queue. <END_TASK> <USER_TASK:>
Description:
def requeue(self):
    """Reject this message and put it back on the queue.

    You must not use this method as a means of selecting messages
    to process.

    :raises MessageStateError: If the message has already been
        acknowledged/requeued/rejected.
    """
    if self.acknowledged:
        raise self.MessageStateError(
            "Message already acknowledged with state: %s" % self._state)
    self.backend.requeue(self.delivery_tag)
    self._state = "REQUEUED"
<SYSTEM_TASK:> Generate a unique id, having - hopefully - a very small chance of <END_TASK> <USER_TASK:>
Description:
def gen_unique_id():
    """Generate a unique id, having - hopefully - a very small chance of
    collision.

    For now this is provided by :func:`uuid.uuid4`.
    """
    # Workaround for http://bugs.python.org/issue4607
    if ctypes and _uuid_generate_random:
        buffer = ctypes.create_string_buffer(16)
        _uuid_generate_random(buffer)
        return str(UUID(bytes=buffer.raw))
    return str(uuid4())
<SYSTEM_TASK:> Get the next waiting message from the queue. <END_TASK> <USER_TASK:>
Description:
def get(self, *args, **kwargs):
    """Get the next waiting message from the queue.

    :returns: A :class:`Message` instance, or ``None`` if there is
        no messages waiting.
    """
    if not mqueue.qsize():
        return None
    message_data, content_type, content_encoding = mqueue.get()
    return self.Message(backend=self, body=message_data,
                        content_type=content_type,
                        content_encoding=content_encoding)
<SYSTEM_TASK:> Discard all messages in the queue. <END_TASK> <USER_TASK:>
Description:
def queue_purge(self, queue, **kwargs):
    """Discard all messages in the queue."""
    qsize = mqueue.qsize()
    mqueue.queue.clear()
    return qsize
<SYSTEM_TASK:> Prepare message for sending. <END_TASK> <USER_TASK:>
Description:
def prepare_message(self, message_data, delivery_mode, content_type,
                    content_encoding, **kwargs):
    """Prepare message for sending."""
    return (message_data, content_type, content_encoding)
<SYSTEM_TASK:> Return a numerical value representing the expected future payoff <END_TASK> <USER_TASK:>
Description:
def get_future_expectation(self, match_set):
    """Return a numerical value representing the expected future payoff
    of the previously selected action, given only the current match
    set. The match_set argument is a MatchSet instance representing the
    current match set.

    Usage:
        match_set = model.match(situation)
        expectation = model.algorithm.get_future_expectation(match_set)
        payoff = previous_reward + discount_factor * expectation
        previous_match_set.payoff = payoff

    Arguments:
        match_set: A MatchSet instance.
    Return:
        A float, the estimate of the expected near-future payoff for
        the situation for which match_set was generated, based on the
        contents of match_set.
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.algorithm is self

    return self.discount_factor * (
        self.idealization_factor * match_set.best_prediction +
        (1 - self.idealization_factor) * match_set.prediction
    )
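Making the arithmetic concrete with made-up values:

# Hypothetical parameter values, purely to illustrate the weighting:
discount_factor, idealization_factor = .71, .5
best_prediction, prediction = 10.0, 6.0
expectation = discount_factor * (
    idealization_factor * best_prediction +
    (1 - idealization_factor) * prediction
)   # 0.71 * (5.0 + 3.0) == 5.68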
<SYSTEM_TASK:> Return a Boolean indicating whether covering is required for the <END_TASK> <USER_TASK:>
Description:
def covering_is_required(self, match_set):
    """Return a Boolean indicating whether covering is required for the
    current match set. The match_set argument is a MatchSet instance
    representing the current match set before covering is applied.

    Usage:
        match_set = model.match(situation)
        if model.algorithm.covering_is_required(match_set):
            new_rule = model.algorithm.cover(match_set)
            assert new_rule.condition(situation)
            model.add(new_rule)
            match_set = model.match(situation)

    Arguments:
        match_set: A MatchSet instance.
    Return:
        A bool indicating whether match_set contains too few matching
        classifier rules and therefore needs to be augmented with a
        new one.
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.algorithm is self

    if self.minimum_actions is None:
        return len(match_set) < len(match_set.model.possible_actions)
    else:
        return len(match_set) < self.minimum_actions
<SYSTEM_TASK:> Return a new classifier rule that can be added to the match set, <END_TASK> <USER_TASK:>
Description:
def cover(self, match_set):
    """Return a new classifier rule that can be added to the match set,
    with a condition that matches the situation of the match set and an
    action selected to avoid duplication of the actions already
    contained therein. The match_set argument is a MatchSet instance
    representing the match set to which the returned rule may be added.

    Usage:
        match_set = model.match(situation)
        if model.algorithm.covering_is_required(match_set):
            new_rule = model.algorithm.cover(match_set)
            assert new_rule.condition(situation)
            model.add(new_rule)
            match_set = model.match(situation)

    Arguments:
        match_set: A MatchSet instance.
    Return:
        A new ClassifierRule instance, appropriate for the addition to
        match_set and to the classifier set from which match_set was
        drawn.
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.model.algorithm is self

    # Create a new condition that matches the situation.
    condition = bitstrings.BitCondition.cover(
        match_set.situation,
        self.wildcard_probability
    )

    # Pick a random action that (preferably) isn't already suggested by
    # some other rule for this situation.
    action_candidates = (
        frozenset(match_set.model.possible_actions) -
        frozenset(match_set)
    )
    if not action_candidates:
        action_candidates = match_set.model.possible_actions
    action = random.choice(list(action_candidates))

    # Create the new rule.
    return XCSClassifierRule(
        condition,
        action,
        self,
        match_set.time_stamp
    )
<SYSTEM_TASK:> Distribute the payoff received in response to the selected <END_TASK> <USER_TASK:>
Description:
def distribute_payoff(self, match_set):
    """Distribute the payoff received in response to the selected
    action of the given match set among the rules in the action set
    which deserve credit for recommending the action. The match_set
    argument is the MatchSet instance which suggested the selected
    action and earned the payoff.

    Usage:
        match_set = model.match(situation)
        match_set.select_action()
        match_set.payoff = reward
        model.algorithm.distribute_payoff(match_set)

    Arguments:
        match_set: A MatchSet instance for which the accumulated payoff
            needs to be distributed among its classifier rules.
    Return: None
    """
    assert isinstance(match_set, MatchSet)
    assert match_set.algorithm is self
    assert match_set.selected_action is not None

    payoff = float(match_set.payoff)

    action_set = match_set[match_set.selected_action]
    action_set_size = sum(rule.numerosity for rule in action_set)

    # Update the average reward, error, and action set size of each
    # rule participating in the action set.
    for rule in action_set:
        rule.experience += 1

        update_rate = max(self.learning_rate, 1 / rule.experience)

        rule.average_reward += (
            (payoff - rule.average_reward) *
            update_rate
        )
        rule.error += (
            (abs(payoff - rule.average_reward) - rule.error) *
            update_rate
        )
        rule.action_set_size += (
            (action_set_size - rule.action_set_size) *
            update_rate
        )

    # Update the fitness of the rules.
    self._update_fitness(action_set)

    # If the parameters so indicate, perform action set subsumption.
    if self.do_action_set_subsumption:
        self._action_set_subsumption(action_set)
<SYSTEM_TASK:> Update the fitness values of the rules belonging to this action <END_TASK> <USER_TASK:>
Description:
def _update_fitness(self, action_set):
    """Update the fitness values of the rules belonging to this action
    set."""
    # Compute the accuracy of each rule. Accuracy is inversely
    # proportional to error. Below a certain error threshold, accuracy
    # becomes constant. Accuracy values range over (0, 1].
    total_accuracy = 0
    accuracies = {}
    for rule in action_set:
        if rule.error < self.error_threshold:
            accuracy = 1
        else:
            accuracy = (
                self.accuracy_coefficient *
                (rule.error / self.error_threshold) **
                -self.accuracy_power
            )
        accuracies[rule] = accuracy
        total_accuracy += accuracy * rule.numerosity

    # On rare occasions we have zero total accuracy. This avoids a div
    # by zero
    total_accuracy = total_accuracy or 1

    # Use the relative accuracies of the rules to update their fitness
    for rule in action_set:
        accuracy = accuracies[rule]
        rule.fitness += (
            self.learning_rate *
            (accuracy * rule.numerosity / total_accuracy -
             rule.fitness)
        )
<SYSTEM_TASK:> Return the average time stamp for the rules in this action <END_TASK> <USER_TASK:>
Description:
def _get_average_time_stamp(action_set):
    """Return the average time stamp for the rules in this action
    set."""
    # This is the average value of the iteration counter upon the most
    # recent update of each rule in this action set.
    total_time_stamps = sum(rule.time_stamp * rule.numerosity
                            for rule in action_set)
    total_numerosity = sum(rule.numerosity for rule in action_set)
    return total_time_stamps / (total_numerosity or 1)
<SYSTEM_TASK:> Select a rule from this action set, with probability <END_TASK> <USER_TASK:>
Description:
def _select_parent(action_set):
    """Select a rule from this action set, with probability
    proportionate to its fitness, to act as a parent for a new rule in
    the classifier set. Return the selected rule."""
    total_fitness = sum(rule.fitness for rule in action_set)
    selector = random.uniform(0, total_fitness)
    for rule in action_set:
        selector -= rule.fitness
        if selector <= 0:
            return rule
    # If for some reason a case slips through the above loop, perhaps
    # due to floating point error, we fall back on uniform selection.
    return random.choice(list(action_set))
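Traced on a toy action set with fitnesses 0.2, 0.3 and 0.5 (made-up numbers):

# total_fitness = 1.0; suppose selector = 0.6
# 0.6 - 0.2 = 0.4   (first rule not chosen)
# 0.4 - 0.3 = 0.1   (second rule not chosen)
# 0.1 - 0.5 <= 0    -> third rule selected
# Each rule therefore wins with probability proportional to its fitness.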
<SYSTEM_TASK:> Create a new condition from the given one by probabilistically <END_TASK> <USER_TASK:>
Description:
def _mutate(self, condition, situation):
    """Create a new condition from the given one by probabilistically
    applying point-wise mutations. Bits that were originally wildcarded
    in the parent condition acquire their values from the provided
    situation, to ensure the child condition continues to match it."""
    # Go through each position in the condition, randomly flipping
    # whether the position is a value (0 or 1) or a wildcard (#). We do
    # this in a new list because the original condition's mask is
    # immutable.
    mutation_points = bitstrings.BitString.random(
        len(condition.mask),
        self.mutation_probability
    )
    mask = condition.mask ^ mutation_points

    # The bits that aren't wildcards always have the same value as the
    # situation, which ensures that the mutated condition still matches
    # the situation.
    if isinstance(situation, bitstrings.BitCondition):
        mask &= situation.mask
        return bitstrings.BitCondition(situation.bits, mask)
    return bitstrings.BitCondition(situation, mask)
<SYSTEM_TASK:> Discard all messages in the queue. This will delete the messages <END_TASK> <USER_TASK:>
Description:
def queue_purge(self, queue, **kwargs):
    """Discard all messages in the queue. This will delete the messages
    and results in an empty queue."""
    return self.channel.queue_purge(queue=queue).message_count
<SYSTEM_TASK:> Publish a message to a named exchange. <END_TASK> <USER_TASK:>
Description:
def publish(self, message, exchange, routing_key, mandatory=None,
            immediate=None, headers=None):
    """Publish a message to a named exchange."""
    body, properties = message

    if headers:
        properties.headers = headers

    ret = self.channel.basic_publish(body=body,
                                     properties=properties,
                                     exchange=exchange,
                                     routing_key=routing_key,
                                     mandatory=mandatory,
                                     immediate=immediate)
    if mandatory or immediate:
        self.close()
<SYSTEM_TASK:> Generate a unique consumer tag. <END_TASK> <USER_TASK:>
Description:
def _generate_consumer_tag(self):
    """Generate a unique consumer tag.

    :rtype string:
    """
    return "%s.%s%s" % (
        self.__class__.__module__,
        self.__class__.__name__,
        self._next_consumer_tag())
<SYSTEM_TASK:> Declares the queue, the exchange and binds the queue to <END_TASK> <USER_TASK:>
Description:
def declare(self):
    """Declares the queue, the exchange and binds the queue to
    the exchange."""
    arguments = None
    routing_key = self.routing_key
    if self.exchange_type == "headers":
        arguments, routing_key = routing_key, ""

    if self.queue:
        self.backend.queue_declare(queue=self.queue, durable=self.durable,
                                   exclusive=self.exclusive,
                                   auto_delete=self.auto_delete,
                                   arguments=self.queue_arguments,
                                   warn_if_exists=self.warn_if_exists)
    if self.exchange:
        self.backend.exchange_declare(exchange=self.exchange,
                                      type=self.exchange_type,
                                      durable=self.durable,
                                      auto_delete=self.auto_delete)
    if self.queue:
        self.backend.queue_bind(queue=self.queue,
                                exchange=self.exchange,
                                routing_key=routing_key,
                                arguments=arguments)
    self._closed = False
    return self
<SYSTEM_TASK:> Receive the next message waiting on the queue. <END_TASK> <USER_TASK:>
Description:
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
    """Receive the next message waiting on the queue.

    :returns: A :class:`carrot.backends.base.BaseMessage` instance,
        or ``None`` if there's no messages to be received.

    :keyword enable_callbacks: Enable callbacks. The message will be
        processed with all registered callbacks. Default is disabled.
    :keyword auto_ack: Override the default :attr:`auto_ack` setting.
    :keyword no_ack: Override the default :attr:`no_ack` setting.
    """
    no_ack = no_ack or self.no_ack
    auto_ack = auto_ack or self.auto_ack
    message = self.backend.get(self.queue, no_ack=no_ack)
    if message:
        if auto_ack and not message.acknowledged:
            message.ack()
        if enable_callbacks:
            self.receive(message.payload, message)
    return message
<SYSTEM_TASK:> Discard all waiting messages. <END_TASK> <USER_TASK:>
Description:
def discard_all(self, filterfunc=None):
    """Discard all waiting messages.

    :param filterfunc: A filter function to only discard the messages
        this filter returns.

    :returns: the number of messages discarded.

    *WARNING*: All incoming messages will be ignored and not processed.

    Example using filter:

        >>> def waiting_feeds_only(message):
        ...     try:
        ...         message_data = message.decode()
        ...     except:  # Should probably be more specific.
        ...         pass
        ...
        ...     if message_data.get("type") == "feed":
        ...         return True
        ...     else:
        ...         return False
    """
    if not filterfunc:
        return self.backend.queue_purge(self.queue)

    if self.no_ack or self.auto_ack:
        raise Exception("discard_all: Can't use filter with auto/no-ack.")

    discarded_count = 0
    while True:
        message = self.fetch()
        if message is None:
            return discarded_count

        if filterfunc(message):
            message.ack()
            discarded_count += 1
<SYSTEM_TASK:> Go into consume mode. <END_TASK> <USER_TASK:>
Description:
def wait(self, limit=None):
    """Go into consume mode.

    Mostly for testing purposes and simple programs, you probably
    want :meth:`iterconsume` or :meth:`iterqueue` instead.

    This runs an infinite loop, processing all incoming messages
    using :meth:`receive` to apply the message to all registered
    callbacks.
    """
    it = self.iterconsume(limit)
    while True:
        it.next()
<SYSTEM_TASK:> Close the channel to the queue. <END_TASK> <USER_TASK:>
Description:
def close(self):
    """Close the channel to the queue."""
    self.cancel()
    self.backend.close()
    self._closed = True
<SYSTEM_TASK:> With any data, serialize it and encapsulate it in a AMQP <END_TASK> <USER_TASK:>
Description:
def create_message(self, message_data, delivery_mode=None, priority=None,
                   content_type=None, content_encoding=None,
                   serializer=None):
    """With any data, serialize it and encapsulate it in a AMQP
    message with the proper headers set."""
    delivery_mode = delivery_mode or self.delivery_mode

    # No content_type? Then we're serializing the data internally.
    if not content_type:
        serializer = serializer or self.serializer
        (content_type, content_encoding,
         message_data) = serialization.encode(message_data,
                                              serializer=serializer)
    else:
        # If the programmer doesn't want us to serialize,
        # make sure content_encoding is set.
        if isinstance(message_data, unicode):
            if not content_encoding:
                content_encoding = 'utf-8'
            message_data = message_data.encode(content_encoding)

        # If they passed in a string, we can't know anything
        # about it. So assume it's binary data.
        elif not content_encoding:
            content_encoding = 'binary'

    return self.backend.prepare_message(message_data, delivery_mode,
                                        priority=priority,
                                        content_type=content_type,
                                        content_encoding=content_encoding)
<SYSTEM_TASK:> Add another consumer from dictionary configuration. <END_TASK> <USER_TASK:>
Description:
def add_consumer_from_dict(self, queue, **options):
    """Add another consumer from dictionary configuration."""
    options.setdefault("routing_key", options.pop("binding_key", None))
    consumer = Consumer(self.connection, queue=queue,
                        backend=self.backend, **options)
    self.consumers.append(consumer)
    return consumer
<SYSTEM_TASK:> Cycle between all consumers in consume mode. <END_TASK> <USER_TASK:>
Description:
def iterconsume(self, limit=None):
    """Cycle between all consumers in consume mode.

    See :meth:`Consumer.iterconsume`.
    """
    self.consume()
    return self.backend.consume(limit=limit)
<SYSTEM_TASK:> Call the conversion routine on README.md to generate README.rst. <END_TASK> <USER_TASK:>
Description:
def build_readme(base_path=None):
    """Call the conversion routine on README.md to generate README.rst.

    Why do all this? Because pypi requires reStructuredText, but markdown
    is friendlier to work with and is nicer for GitHub."""
    if base_path:
        path = os.path.join(base_path, 'README.md')
    else:
        path = 'README.md'
    convert_md_to_rst(path)
    print("Successfully converted README.md to README.rst")
<SYSTEM_TASK:> Reset the scenario, starting it over for a new run. <END_TASK> <USER_TASK:>
Description:
def reset(self):
    """Reset the scenario, starting it over for a new run.

    Usage:
        if not scenario.more():
            scenario.reset()

    Arguments: None
    Return: None
    """
    self.remaining_cycles = self.initial_training_cycles
    self.needle_index = random.randrange(self.input_size)
<SYSTEM_TASK:> Return a sequence containing the possible actions that can be <END_TASK> <USER_TASK:>
Description:
def get_possible_actions(self):
    """Return a sequence containing the possible actions that can be
    executed within the environment.

    Usage:
        possible_actions = scenario.get_possible_actions()

    Arguments: None
    Return:
        A sequence containing the possible actions which can be
        executed within this scenario.
    """
    possible_actions = self.wrapped.get_possible_actions()

    if len(possible_actions) <= 20:
        # Try to ensure that the possible actions are unique. Also, put
        # them into a list so we can iterate over them safely before
        # returning them; this avoids accidentally exhausting an
        # iterator, if the wrapped class happens to return one.
        try:
            possible_actions = list(set(possible_actions))
        except TypeError:
            possible_actions = list(possible_actions)

        try:
            possible_actions.sort()
        except TypeError:
            pass

        self.logger.info('Possible actions:')
        for action in possible_actions:
            self.logger.info('    %s', action)
    else:
        self.logger.info("%d possible actions.", len(possible_actions))

    return possible_actions
<SYSTEM_TASK:> Return a Boolean indicating whether additional actions may be <END_TASK> <USER_TASK:>
Description:
def more(self):
    """Return a Boolean indicating whether additional actions may be
    executed, per the reward program.

    Usage:
        while scenario.more():
            situation = scenario.sense()
            selected_action = choice(possible_actions)
            reward = scenario.execute(selected_action)

    Arguments: None
    Return:
        A bool indicating whether additional situations remain in the
        current run.
    """
    more = self.wrapped.more()

    if not self.steps % 100:
        self.logger.info('Steps completed: %d', self.steps)
        self.logger.info('Average reward per step: %.5f',
                         self.total_reward / (self.steps or 1))
    if not more:
        self.logger.info('Run completed.')
        self.logger.info('Total steps: %d', self.steps)
        self.logger.info('Total reward received: %.5f',
                         self.total_reward)
        self.logger.info('Average reward per step: %.5f',
                         self.total_reward / (self.steps or 1))

    return more
<SYSTEM_TASK:> Return the classifications made by the algorithm for this <END_TASK> <USER_TASK:>
Description:
def get_classifications(self):
    """Return the classifications made by the algorithm for this
    scenario.

    Usage:
        model.run(scenario, learn=False)
        classifications = scenario.get_classifications()

    Arguments: None
    Return:
        An indexable sequence containing the classifications made by
        the model for each situation, in the same order as the
        original situations themselves appear.
    """
    if bitstrings.using_numpy():
        return numpy.array(self.classifications)
    else:
        return self.classifications
<SYSTEM_TASK:> Create and return a new classifier set initialized for handling <END_TASK> <USER_TASK:>
Description:
def new_model(self, scenario):
    """Create and return a new classifier set initialized for handling
    the given scenario.

    Usage:
        scenario = MUXProblem()
        model = algorithm.new_model(scenario)
        model.run(scenario, learn=True)

    Arguments:
        scenario: A Scenario instance.
    Return:
        A new, untrained classifier set, suited for the given scenario.
    """
    assert isinstance(scenario, scenarios.Scenario)
    return ClassifierSet(self, scenario.get_possible_actions())
<SYSTEM_TASK:> Compute the combined prediction and prediction weight for this <END_TASK> <USER_TASK:>
Description:
def _compute_prediction(self):
    """Compute the combined prediction and prediction weight for this
    action set. The combined prediction is the weighted average of the
    individual predictions of the classifiers. The combined prediction
    weight is the sum of the individual prediction weights of the
    classifiers.

    Usage:
        Do not call this method directly. Use the prediction and/or
        prediction_weight properties instead.

    Arguments: None
    Return: None
    """
    total_weight = 0
    total_prediction = 0
    for rule in self._rules.values():
        total_weight += rule.prediction_weight
        total_prediction += (rule.prediction *
                             rule.prediction_weight)
    self._prediction = total_prediction / (total_weight or 1)
    self._prediction_weight = total_weight
<SYSTEM_TASK:> The highest value from among the predictions made by the action <END_TASK> <USER_TASK:>
Description:
def best_prediction(self):
    """The highest value from among the predictions made by the action
    sets in this match set."""
    if self._best_prediction is None and self._action_sets:
        self._best_prediction = max(
            action_set.prediction
            for action_set in self._action_sets.values()
        )
    return self._best_prediction
<SYSTEM_TASK:> A tuple containing the actions whose action sets have the best <END_TASK> <USER_TASK:>
Description:
def best_actions(self):
    """A tuple containing the actions whose action sets have the best
    prediction."""
    if self._best_actions is None:
        best_prediction = self.best_prediction
        self._best_actions = tuple(
            action
            for action, action_set in self._action_sets.items()
            if action_set.prediction == best_prediction
        )
    return self._best_actions
<SYSTEM_TASK:> Select an action according to the action selection strategy of <END_TASK> <USER_TASK:>
Description:
def select_action(self):
    """Select an action according to the action selection strategy of
    the associated algorithm. If an action has already been selected,
    raise a ValueError instead.

    Usage:
        if match_set.selected_action is None:
            match_set.select_action()

    Arguments: None
    Return:
        The action that was selected by the action selection strategy.
    """
    if self._selected_action is not None:
        raise ValueError("The action has already been selected.")
    strategy = self._algorithm.action_selection_strategy
    self._selected_action = strategy(self)
    return self._selected_action
<SYSTEM_TASK:> Setter method for the selected_action property. <END_TASK> <USER_TASK:>
Description:
def _set_selected_action(self, action):
    """Setter method for the selected_action property."""
    assert action in self._action_sets

    if self._selected_action is not None:
        raise ValueError("The action has already been selected.")
    self._selected_action = action
<SYSTEM_TASK:> Setter method for the payoff property. <END_TASK> <USER_TASK:>
Description:
def _set_payoff(self, payoff):
    """Setter method for the payoff property."""
    if self._selected_action is None:
        raise ValueError("The action has not been selected yet.")
    if self._closed:
        raise ValueError("The payoff for this match set has already "
                         "been applied.")
    self._payoff = float(payoff)
<SYSTEM_TASK:> If the predecessor is not None, gives the appropriate amount of <END_TASK> <USER_TASK:>
Description:
def pay(self, predecessor):
    """If the predecessor is not None, gives the appropriate amount of
    payoff to the predecessor in payment for its contribution to this
    match set's expected future payoff. The predecessor argument should
    be either None or a MatchSet instance whose selected action led
    directly to this match set's situation.

    Usage:
        match_set = model.match(situation)
        match_set.pay(previous_match_set)

    Arguments:
        predecessor: The MatchSet instance which was produced by the
            same classifier set in response to the immediately
            preceding situation, or None if this is the first situation
            in the scenario.
    Return: None
    """
    assert predecessor is None or isinstance(predecessor, MatchSet)

    if predecessor is not None:
        expectation = self._algorithm.get_future_expectation(self)
        predecessor.payoff += expectation
<SYSTEM_TASK:> Add a new classifier rule to the classifier set. Return a list <END_TASK> <USER_TASK:>
Description:
def add(self, rule):
    """Add a new classifier rule to the classifier set. Return a list
    containing zero or more rules that were deleted from the classifier
    by the algorithm in order to make room for the new rule. The rule
    argument should be a ClassifierRule instance. The behavior of this
    method depends on whether the rule already exists in the classifier
    set. When a rule is already present, the rule's numerosity is added
    to that of the version of the rule already present in the
    population. Otherwise, the new rule is captured. Note that this
    means that for rules already present in the classifier set, the
    metadata of the existing rule is not overwritten by that of the one
    passed in as an argument.

    Usage:
        displaced_rules = model.add(rule)

    Arguments:
        rule: A ClassifierRule instance which is to be added to this
            classifier set.
    Return:
        A possibly empty list of ClassifierRule instances which were
        removed altogether from the classifier set (as opposed to
        simply having their numerosities decremented) in order to make
        room for the newly added rule.
    """
    assert isinstance(rule, ClassifierRule)

    condition = rule.condition
    action = rule.action

    # If the rule already exists in the population, then we virtually
    # add the rule by incrementing the existing rule's numerosity. This
    # prevents redundancy in the rule set. Otherwise we capture the
    # new rule.
    if condition not in self._population:
        self._population[condition] = {}

    if action in self._population[condition]:
        existing_rule = self._population[condition][action]
        existing_rule.numerosity += rule.numerosity
    else:
        self._population[condition][action] = rule

    # Any time we add a rule, we need to call this to keep the
    # population size under control.
    return self._algorithm.prune(self)
<SYSTEM_TASK:> Return the existing version of the given rule. If the rule is <END_TASK> <USER_TASK:>
Description:
def get(self, rule, default=None):
    """Return the existing version of the given rule. If the rule is
    not present in the classifier set, return the default. If no
    default was given, use None. This is useful for eliminating
    duplicate copies of rules.

    Usage:
        unique_rule = model.get(possible_duplicate, possible_duplicate)

    Arguments:
        rule: The ClassifierRule instance which may be a duplicate of
            another already contained in the classifier set.
        default: The value returned if the rule is not a duplicate of
            another already contained in the classifier set.
    Return:
        If the rule is a duplicate of another already contained in the
        classifier set, the existing one is returned. Otherwise, the
        value of default is returned.
    """
    assert isinstance(rule, ClassifierRule)

    if (rule.condition not in self._population or
            rule.action not in self._population[rule.condition]):
        return default
    return self._population[rule.condition][rule.action]
<SYSTEM_TASK:> Ping the QPU by submitting a single-qubit problem. <END_TASK> <USER_TASK:>
Description:
def ping(config_file, profile, solver_def, json_output,
         request_timeout, polling_timeout):
    """Ping the QPU by submitting a single-qubit problem."""
    now = utcnow()
    info = dict(datetime=now.isoformat(),
                timestamp=datetime_to_timestamp(now),
                code=0)

    def output(fmt, **kwargs):
        info.update(kwargs)
        if not json_output:
            click.echo(fmt.format(**kwargs))

    def flush():
        if json_output:
            click.echo(json.dumps(info))

    try:
        _ping(config_file, profile, solver_def,
              request_timeout, polling_timeout, output)
    except CLIError as error:
        output("Error: {error} (code: {code})",
               error=str(error), code=error.code)
        sys.exit(error.code)
    except Exception as error:
        output("Unhandled error: {error}", error=str(error))
        sys.exit(127)
    finally:
        flush()
<SYSTEM_TASK:> Get solver details. <END_TASK> <USER_TASK:>
Description:
def solvers(config_file, profile, solver_def, list_solvers):
    """Get solver details.

    Unless solver name/id specified, fetch and display details for
    all online solvers available on the configured endpoint.
    """
    with Client.from_config(
            config_file=config_file, profile=profile,
            solver=solver_def) as client:

        try:
            solvers = client.get_solvers(**client.default_solver)
        except SolverNotFoundError:
            click.echo("Solver(s) {} not found.".format(solver_def))
            return 1

        if list_solvers:
            for solver in solvers:
                click.echo(solver.id)
            return

        # ~YAML output
        for solver in solvers:
            click.echo("Solver: {}".format(solver.id))
            click.echo("  Parameters:")
            for name, val in sorted(solver.parameters.items()):
                click.echo("    {}: {}".format(
                    name, strtrunc(val) if val else '?'))
            solver.properties.pop('parameters', None)
            click.echo("  Properties:")
            for name, val in sorted(solver.properties.items()):
                click.echo("    {}: {}".format(name, strtrunc(val)))
            click.echo("  Derived properties:")
            for name in sorted(solver.derived_properties):
                click.echo("    {}: {}".format(
                    name, strtrunc(getattr(solver, name))))
            click.echo()
<SYSTEM_TASK:> Return a function that produces samples of a sine. <END_TASK> <USER_TASK:>
Description:
def get_input_callback(samplerate, params, num_samples=256):
    """Return a function that produces samples of a sine.

    Parameters
    ----------
    samplerate : float
        The sample rate.
    params : dict
        Parameters for FM generation.
    num_samples : int, optional
        Number of samples to be generated on each call.
    """
    amplitude = params['mod_amplitude']
    frequency = params['mod_frequency']

    def producer():
        """Generate samples.

        Yields
        ------
        samples : ndarray
            A number of samples (`num_samples`) of the sine.
        """
        start_time = 0
        while True:
            time = start_time + np.arange(num_samples) / samplerate
            start_time += num_samples / samplerate
            output = amplitude * np.cos(2 * np.pi * frequency * time)
            yield output

    return lambda p=producer(): next(p)
<SYSTEM_TASK:> Return a sound playback callback. <END_TASK> <USER_TASK:>
Description:
def get_playback_callback(resampler, samplerate, params):
    """Return a sound playback callback.

    Parameters
    ----------
    resampler
        The resampler from which samples are read.
    samplerate : float
        The sample rate.
    params : dict
        Parameters for FM generation.
    """
    def callback(outdata, frames, time, _):
        """Playback callback.

        Read samples from the resampler and modulate them onto a
        carrier frequency.
        """
        last_fmphase = getattr(callback, 'last_fmphase', 0)
        df = params['fm_gain'] * resampler.read(frames)
        df = np.pad(df, (0, frames - len(df)), mode='constant')
        t = time.outputBufferDacTime + np.arange(frames) / samplerate
        phase = 2 * np.pi * params['carrier_frequency'] * t
        fmphase = last_fmphase + 2 * np.pi * np.cumsum(df) / samplerate
        outdata[:, 0] = params['output_volume'] * np.cos(phase + fmphase)
        callback.last_fmphase = fmphase[-1]

    return callback
<SYSTEM_TASK:> Setup the resampling and audio output callbacks and start playback. <END_TASK> <USER_TASK:>
Description:
def main(source_samplerate, target_samplerate, params, converter_type):
    """Setup the resampling and audio output callbacks and start
    playback."""
    from time import sleep

    ratio = target_samplerate / source_samplerate

    with sr.CallbackResampler(get_input_callback(source_samplerate, params),
                              ratio, converter_type) as resampler, \
            sd.OutputStream(channels=1, samplerate=target_samplerate,
                            callback=get_playback_callback(
                                resampler, target_samplerate, params)):
        print("Playing back...  Ctrl+C to stop.")
        try:
            while True:
                sleep(1)
        except KeyboardInterrupt:
            print("Aborting.")
<SYSTEM_TASK:> Returns the maximum number of reads for the given solver parameters. <END_TASK> <USER_TASK:>
Description:
def max_num_reads(self, **params):
    """Returns the maximum number of reads for the given solver
    parameters.

    Args:
        **params:
            Parameters for the sampling method. Relevant to num_reads:

            - annealing_time
            - readout_thermalization
            - num_reads
            - programming_thermalization

    Returns:
        int: The maximum number of reads.
    """
    # dev note: in the future it would be good to have a way of doing
    # this server-side, as we are duplicating logic here.

    properties = self.properties

    if self.software or not params:
        # software solvers don't use any of the above parameters
        return properties['num_reads_range'][1]

    # qpu
    _, duration = properties['problem_run_duration_range']

    annealing_time = params.get('annealing_time',
                                properties['default_annealing_time'])

    readout_thermalization = params.get(
        'readout_thermalization',
        properties['default_readout_thermalization'])

    programming_thermalization = params.get(
        'programming_thermalization',
        properties['default_programming_thermalization'])

    return min(properties['num_reads_range'][1],
               int((duration - programming_thermalization) /
                   (annealing_time + readout_thermalization)))
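With made-up QPU properties the arithmetic reads:

# Hypothetical: duration = 1000000 us, annealing_time = 20 us,
# readout_thermalization = 0 us, programming_thermalization = 1000 us
# -> int((1000000 - 1000) / (20 + 0)) = 49950 reads,
#    further capped by properties['num_reads_range'][1]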
<SYSTEM_TASK:> Internal method for both sample_ising and sample_qubo. <END_TASK> <USER_TASK:> Description: def _sample(self, type_, linear, quadratic, params): """Internal method for both sample_ising and sample_qubo. Args: linear (list/dict): Linear terms of the model. quadratic (dict of (int, int):float): Quadratic terms of the model. params (dict): Parameters for the sampling method, specified per solver. Returns: :obj:`Future` """
# Check the problem if not self.check_problem(linear, quadratic): raise ValueError("Problem graph incompatible with solver.") # Mix the new parameters with the default parameters combined_params = dict(self._params) combined_params.update(params) # Check the parameters before submitting for key in combined_params: if key not in self.parameters and not key.startswith('x_'): raise KeyError("{} is not a parameter of this solver.".format(key)) # transform some of the parameters in-place self._format_params(type_, combined_params) body = json.dumps({ 'solver': self.id, 'data': encode_bqm_as_qp(self, linear, quadratic), 'type': type_, 'params': combined_params }) _LOGGER.trace("Encoded sample request: %s", body) future = Future(solver=self, id_=None, return_matrix=self.return_matrix, submission_data=(type_, linear, quadratic, params)) _LOGGER.debug("Submitting new problem to: %s", self.id) self.client._submit(body, future) return future
<SYSTEM_TASK:> Reformat some of the parameters for sapi. <END_TASK> <USER_TASK:> Description: def _format_params(self, type_, params): """Reformat some of the parameters for sapi."""
if 'initial_state' in params: # NB: at this moment the error raised when initial_state does not match lin/quad (in # active qubits) is not very informative, but there is also no clean way to check here # that they match because lin can be either a list or a dict. In the future it would be # good to check. initial_state = params['initial_state'] if isinstance(initial_state, Mapping): initial_state_list = [3]*self.properties['num_qubits'] low = -1 if type_ == 'ising' else 0 for v, val in initial_state.items(): if val == 3: continue if val <= 0: initial_state_list[v] = low else: initial_state_list[v] = 1 params['initial_state'] = initial_state_list
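For illustration only, the densification above restated as a standalone helper (hypothetical name; `3` marks an unset qubit, and nonpositive spins map to the problem type's low value):
from collections.abc import Mapping

def densify_initial_state(initial_state, num_qubits, type_):
    """Turn a {qubit: value} mapping into a dense per-qubit list."""
    if not isinstance(initial_state, Mapping):
        return initial_state
    low = -1 if type_ == 'ising' else 0
    dense = [3] * num_qubits  # 3 == unset
    for qubit, val in initial_state.items():
        if val != 3:
            dense[qubit] = low if val <= 0 else 1
    return dense

print(densify_initial_state({0: 1, 2: -1}, 4, 'ising'))  # [1, 3, -1, 3]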
<SYSTEM_TASK:> Test if an Ising model matches the graph provided by the solver. <END_TASK> <USER_TASK:> Description: def check_problem(self, linear, quadratic): """Test if an Ising model matches the graph provided by the solver. Args: linear (list/dict): Linear terms of the model (h). quadratic (dict of (int, int):float): Quadratic terms of the model (J). Returns: boolean Examples: This example creates a client using the local system's default D-Wave Cloud Client configuration file, which is configured to access a D-Wave 2000Q QPU, and tests a simple :term:`Ising` model for two target embeddings (that is, representations of the model's graph by coupled qubits on the QPU's sparsely connected graph), where only the second is valid. >>> from dwave.cloud import Client >>> with Client.from_config() as client: # doctest: +SKIP ... solver = client.get_solver() ... print((0, 1) in solver.edges) ... print((0, 4) in solver.edges) ... print(solver.check_problem({0: -1, 1: 1}, {(0, 1): 0.5})) ... print(solver.check_problem({0: -1, 4: 1}, {(0, 4): 0.5})) ... False True False True """
for key, value in uniform_iterator(linear): if value != 0 and key not in self.nodes: return False for key, value in uniform_iterator(quadratic): if value != 0 and tuple(key) not in self.edges: return False return True
<SYSTEM_TASK:> Resume polling for a problem previously submitted. <END_TASK> <USER_TASK:> Description: def _retrieve_problem(self, id_): """Resume polling for a problem previously submitted. Args: id_: Identification of the query. Returns: :obj:`Future` """
future = Future(self, id_, self.return_matrix, None) self.client._poll(future) return future
<SYSTEM_TASK:> Return the converter type for `identifier`. <END_TASK> <USER_TASK:> Description: def _get_converter_type(identifier): """Return the converter type for `identifier`."""
if isinstance(identifier, str): return ConverterType[identifier] if isinstance(identifier, ConverterType): return identifier return ConverterType(identifier)
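All three accepted identifier forms resolve to the same member (sketch; assumes `ConverterType` is the package's enum with `sinc_best` as member 0):
assert (_get_converter_type('sinc_best')
        is _get_converter_type(ConverterType.sinc_best)
        is _get_converter_type(0))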
<SYSTEM_TASK:> Resample the signal in `input_data` at once. <END_TASK> <USER_TASK:> Description: def resample(input_data, ratio, converter_type='sinc_best', verbose=False): """Resample the signal in `input_data` at once. Parameters ---------- input_data : ndarray Input data. A single channel is provided as a 1D array of `num_frames` length. Input data with several channels is represented as a 2D array of shape (`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data` is converted to 32-bit float and C (row-major) memory order. ratio : float Conversion ratio = output sample rate / input sample rate. converter_type : ConverterType, str, or int Sample rate converter. verbose : bool If `True`, print additional information about the conversion. Returns ------- output_data : ndarray Resampled input data. Note ---- If samples are to be processed in chunks, `Resampler` and `CallbackResampler` will provide better results and allow for variable conversion ratios. """
from samplerate.lowlevel import src_simple from samplerate.exceptions import ResamplingError input_data = np.require(input_data, requirements='C', dtype=np.float32) if input_data.ndim == 2: num_frames, channels = input_data.shape output_shape = (int(num_frames * ratio), channels) elif input_data.ndim == 1: num_frames, channels = input_data.size, 1 output_shape = (int(num_frames * ratio), ) else: raise ValueError('rank > 2 not supported') output_data = np.empty(output_shape, dtype=np.float32) converter_type = _get_converter_type(converter_type) (error, input_frames_used, output_frames_gen) \ = src_simple(input_data, output_data, ratio, converter_type.value, channels) if error != 0: raise ResamplingError(error) if verbose: info = ('samplerate info:\n' '{} input frames used\n' '{} output frames generated\n' .format(input_frames_used, output_frames_gen)) print(info) return (output_data[:output_frames_gen, :] if channels > 1 else output_data[:output_frames_gen])
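A minimal usage sketch: upsampling one second of a 440 Hz sine from 8 kHz to 44.1 kHz.
import numpy as np

t = np.arange(8000) / 8000.0
sine = np.sin(2 * np.pi * 440.0 * t).astype(np.float32)
out = resample(sine, 44100 / 8000, 'sinc_best', verbose=True)
print(len(sine), '->', len(out))  # roughly 8000 -> 44100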
<SYSTEM_TASK:> Set a new conversion ratio immediately. <END_TASK> <USER_TASK:> Description: def set_ratio(self, new_ratio): """Set a new conversion ratio immediately."""
from samplerate.lowlevel import src_set_ratio return src_set_ratio(self._state, new_ratio)
<SYSTEM_TASK:> Resample the signal in `input_data`. <END_TASK> <USER_TASK:> Description: def process(self, input_data, ratio, end_of_input=False, verbose=False): """Resample the signal in `input_data`. Parameters ---------- input_data : ndarray Input data. A single channel is provided as a 1D array of `num_frames` length. Input data with several channels is represented as a 2D array of shape (`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data` is converted to 32-bit float and C (row-major) memory order. ratio : float Conversion ratio = output sample rate / input sample rate. end_of_input : bool Set to `True` if no more data is available, or to `False` otherwise. verbose : bool If `True`, print additional information about the conversion. Returns ------- output_data : ndarray Resampled input data. """
from samplerate.lowlevel import src_process from samplerate.exceptions import ResamplingError input_data = np.require(input_data, requirements='C', dtype=np.float32) if input_data.ndim == 2: num_frames, channels = input_data.shape output_shape = (int(num_frames * ratio), channels) elif input_data.ndim == 1: num_frames, channels = input_data.size, 1 output_shape = (int(num_frames * ratio), ) else: raise ValueError('rank > 2 not supported') if channels != self._channels: raise ValueError('Invalid number of channels in input data.') output_data = np.empty(output_shape, dtype=np.float32) (error, input_frames_used, output_frames_gen) = src_process( self._state, input_data, output_data, ratio, end_of_input) if error != 0: raise ResamplingError(error) if verbose: info = ('samplerate info:\n' '{} input frames used\n' '{} output frames generated\n' .format(input_frames_used, output_frames_gen)) print(info) return (output_data[:output_frames_gen, :] if channels > 1 else output_data[:output_frames_gen])
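A chunked-processing sketch, assuming a single-channel `Resampler` instance (e.g. `resampler = Resampler('sinc_best', channels=1)`); `end_of_input` is raised on the final chunk so the converter can flush:
import numpy as np

signal = np.random.randn(10000).astype(np.float32)
chunks = np.array_split(signal, 10)
pieces = [resampler.process(chunk, ratio=2.0,
                            end_of_input=(i == len(chunks) - 1))
          for i, chunk in enumerate(chunks)]
resampled = np.concatenate(pieces)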
<SYSTEM_TASK:> Set the starting conversion ratio for the next `read` call. <END_TASK> <USER_TASK:> Description: def set_starting_ratio(self, ratio): """ Set the starting conversion ratio for the next `read` call. """
from samplerate.lowlevel import src_set_ratio if self._state is None: self._create() src_set_ratio(self._state, ratio) self.ratio = ratio
<SYSTEM_TASK:> Read a number of frames from the resampler. <END_TASK> <USER_TASK:> Description: def read(self, num_frames): """Read a number of frames from the resampler. Parameters ---------- num_frames : int Number of frames to read. Returns ------- output_data : ndarray Resampled frames as a (`num_output_frames`, `num_channels`) or (`num_output_frames`,) array. Note that this may return fewer frames than requested, for example when no more input is available. """
from samplerate.lowlevel import src_callback_read, src_error from samplerate.exceptions import ResamplingError if self._state is None: self._create() if self._channels > 1: output_shape = (num_frames, self._channels) elif self._channels == 1: output_shape = (num_frames, ) output_data = np.empty(output_shape, dtype=np.float32) ret = src_callback_read(self._state, self._ratio, num_frames, output_data) if ret == 0: error = src_error(self._state) if error: raise ResamplingError(error) return (output_data[:ret, :] if self._channels > 1 else output_data[:ret])
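A pull-model sketch (constructor signature assumed to be `CallbackResampler(callback, ratio, converter_type='sinc_best', channels=1)`, consistent with the methods above):
import numpy as np

def source():
    """Produce the next block of input frames on demand."""
    return np.random.randn(256).astype(np.float32)

with CallbackResampler(source, ratio=2.0,
                       converter_type='sinc_fastest') as resampler:
    block = resampler.read(512)        # may be shorter than 512 frames
    resampler.set_starting_ratio(1.5)  # applies to subsequent reads
    more = resampler.read(512)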
<SYSTEM_TASK:> Scales each number in the sequence so that the sum of all numbers equals 1. <END_TASK> <USER_TASK:> Description: def normalize(seq): """ Scales each number in the sequence so that the sum of all numbers equals 1. """
s = float(sum(seq)) return [v/s for v in seq]
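A quick check of the scaling; note that an all-zero sequence would raise ZeroDivisionError.
print(normalize([1, 2, 5]))  # [0.125, 0.25, 0.625]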
<SYSTEM_TASK:> Calculates the variance of a continuous class attribute, to be used as an <END_TASK> <USER_TASK:> Description: def entropy_variance(data, class_attr=None, method=DEFAULT_CONTINUOUS_METRIC): """ Calculates the variance of a continuous class attribute, to be used as an entropy metric. """
assert method in CONTINUOUS_METRICS, "Unknown entropy variance metric: %s" % (method,) assert (class_attr is None and isinstance(data, dict)) \ or (class_attr is not None and isinstance(data, list)) if isinstance(data, dict): lst = data else: lst = [record.get(class_attr) for record in data] return get_variance(lst)
<SYSTEM_TASK:> Creates a list of all values in the target attribute for each record <END_TASK> <USER_TASK:> Description: def majority_value(data, class_attr): """ Creates a list of all values in the target attribute for each record in the data list object, and returns the value that appears most frequently in this list. """
if is_continuous(data[0][class_attr]): return CDist(seq=[record[class_attr] for record in data]) else: return most_frequent([record[class_attr] for record in data])
<SYSTEM_TASK:> Returns the item that appears most frequently in the given list. <END_TASK> <USER_TASK:> Description: def most_frequent(lst): """ Returns the item that appears most frequently in the given list. """
lst = lst[:] highest_freq = 0 most_freq = None for val in unique(lst): # Count each candidate value once instead of twice. freq = lst.count(val) if freq > highest_freq: most_freq = val highest_freq = freq return most_freq
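Ties resolve to whichever value `unique` yields first, i.e. first-appearance order:
print(most_frequent(['a', 'b', 'a', 'b']))  # 'a' -- tie, 'a' appears first
print(most_frequent([1, 2, 2, 3]))          # 2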
<SYSTEM_TASK:> Returns a list made up of the unique values found in lst; i.e., it <END_TASK> <USER_TASK:> Description: def unique(lst): """ Returns a list made up of the unique values found in lst; i.e., it removes the redundant values in lst. """
lst = lst[:] unique_lst = [] # Cycle through the list and add each value to the unique list only once. for item in lst: if item not in unique_lst: unique_lst.append(item) # Return the list with all redundant values removed. return unique_lst
<SYSTEM_TASK:> Returns a new decision tree based on the examples given. <END_TASK> <USER_TASK:> Description: def create_decision_tree(data, attributes, class_attr, fitness_func, wrapper, **kwargs): """ Returns a new decision tree based on the examples given. """
split_attr = kwargs.get('split_attr', None) split_val = kwargs.get('split_val', None) assert class_attr not in attributes node = None data = list(data) if isinstance(data, Data) else data if wrapper.is_continuous_class: stop_value = CDist(seq=[r[class_attr] for r in data]) # For a continuous class case, stop if all the remaining records have # a variance below the given threshold. stop = wrapper.leaf_threshold is not None \ and stop_value.variance <= wrapper.leaf_threshold else: stop_value = DDist(seq=[r[class_attr] for r in data]) # For a discrete class, stop if all remaining records have the same # classification. stop = len(stop_value.counts) <= 1 if not data or len(attributes) <= 0: # If the dataset is empty or the attributes list is empty, return the # default value. The target attribute is not in the attributes list, so # we need not subtract 1 to account for the target attribute. if wrapper: wrapper.leaf_count += 1 return stop_value elif stop: # If all the records in the dataset have the same classification, # return that classification. if wrapper: wrapper.leaf_count += 1 return stop_value else: # Choose the next best attribute to best classify our data best = choose_attribute( data, attributes, class_attr, fitness_func, method=wrapper.metric) # Create a new decision tree/node with the best attribute and an empty # dictionary object--we'll fill that up next. # tree = {best:{}} node = Node(tree=wrapper, attr_name=best) node.n += len(data) # Create a new decision tree/sub-node for each of the values in the # best attribute field for val in get_values(data, best): # Create a subtree for the current value under the "best" field subtree = create_decision_tree( [r for r in data if r[best] == val], [attr for attr in attributes if attr != best], class_attr, fitness_func, split_attr=best, split_val=val, wrapper=wrapper) # Add the new subtree to the empty dictionary object in our new # tree/node we just created. if isinstance(subtree, Node): node._branches[val] = subtree elif isinstance(subtree, (CDist, DDist)): node.set_leaf_dist(attr_value=val, dist=subtree) else: raise Exception("Unknown subtree type: %s" % (type(subtree),)) return node
<SYSTEM_TASK:> Increments the count for the given element. <END_TASK> <USER_TASK:> Description: def add(self, k, count=1): """ Increments the count for the given element. """
self.counts[k] += count self.total += count
<SYSTEM_TASK:> Returns the element with the highest probability. <END_TASK> <USER_TASK:> Description: def best(self): """ Returns the element with the highest probability. """
b = (float('-inf'), None) for k, c in iteritems(self.counts): b = max(b, (c, k)) return b[1]
<SYSTEM_TASK:> Adds the given distribution's counts to the current distribution. <END_TASK> <USER_TASK:> Description: def update(self, dist): """ Adds the given distribution's counts to the current distribution. """
assert isinstance(dist, DDist) for k, c in iteritems(dist.counts): self.counts[k] += c self.total += dist.total
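A merge sketch (hedged: assumes `DDist()` starts with empty counts backed by a defaultdict, as the `add` method above implies; whether `best` is a plain method or wrapped as a property is not shown here):
d1, d2 = DDist(), DDist()
d1.add('spam'); d1.add('spam'); d1.add('eggs')
d2.add('eggs', count=3)
d1.update(d2)
# counts are now {'spam': 2, 'eggs': 4}, total == 6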