<SYSTEM_TASK:>
Returns the probability of a random variable being less than the
<END_TASK>
<USER_TASK:>
Description:
def probability_lt(self, x):
"""
Returns the probability of a random variable being less than the
given value.
""" |
if self.mean is None:
return
return normdist(x=x, mu=self.mean, sigma=self.standard_deviation) |
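The probability_lt/probability_in/probability_gt methods above all delegate to a normdist helper that is not shown in this section. A minimal sketch, assuming normdist is the cumulative distribution function of a normal distribution (the name and keyword arguments come from the calls above; the implementation is an assumption):
import math

def normdist(x, mu=0.0, sigma=1.0):
    # Assumed stand-in: CDF of N(mu, sigma**2) evaluated at x.
    return 0.5 * (1.0 + math.erf((x - mu) / (sigma * math.sqrt(2.0))))

# Example: P(X < 1) for X ~ N(0, 1) is about 0.841.
print(round(normdist(1.0, mu=0.0, sigma=1.0), 3))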
<SYSTEM_TASK:>
Returns the probability of a random variable falling between the given
<END_TASK>
<USER_TASK:>
Description:
def probability_in(self, a, b):
"""
Returns the probability of a random variable falling between the given
values.
""" |
if self.mean is None:
return
p1 = normdist(x=a, mu=self.mean, sigma=self.standard_deviation)
p2 = normdist(x=b, mu=self.mean, sigma=self.standard_deviation)
return abs(p1 - p2) |
<SYSTEM_TASK:>
Returns the probability of a random variable being greater than the
<END_TASK>
<USER_TASK:>
Description:
def probability_gt(self, x):
"""
Returns the probability of a random variable being greater than the
given value.
""" |
if self.mean is None:
return
p = normdist(x=x, mu=self.mean, sigma=self.standard_deviation)
return 1-p |
<SYSTEM_TASK:>
Returns a copy of the object without any data.
<END_TASK>
<USER_TASK:>
Description:
def copy_no_data(self):
"""
Returns a copy of the object without any data.
""" |
return type(self)(
[],
order=list(self.header_modes),
types=self.header_types.copy(),
modes=self.header_modes.copy()) |
<SYSTEM_TASK:>
Returns true if the given value matches the type for the given name
<END_TASK>
<USER_TASK:>
Description:
def is_valid(self, name, value):
"""
Returns true if the given value matches the type for the given name
according to the schema.
Returns false otherwise.
""" |
if name not in self.header_types:
return False
t = self.header_types[name]
if t == ATTR_TYPE_DISCRETE:
return isinstance(value, int)
elif t == ATTR_TYPE_CONTINUOUS:
return isinstance(value, (float, Decimal))
return True |
<SYSTEM_TASK:>
When a CSV file is given, extracts header information from the file.
<END_TASK>
<USER_TASK:>
Description:
def _read_header(self):
"""
When a CSV file is given, extracts header information from the file.
Otherwise, this header data must be explicitly given when the object
is instantiated.
""" |
if not self.filename or self.header_types:
return
rows = csv.reader(open(self.filename))
#header = rows.next()
header = next(rows)
self.header_types = {} # {attr_name:type}
self._class_attr_name = None
self.header_order = [] # [attr_name,...]
for el in header:
matches = ATTR_HEADER_PATTERN.findall(el)
assert matches, "Invalid header element: %s" % (el,)
el_name, el_type, el_mode = matches[0]
el_name = el_name.strip()
self.header_order.append(el_name)
self.header_types[el_name] = el_type
if el_mode == ATTR_MODE_CLASS:
assert self._class_attr_name is None, \
"Multiple class attributes are not supported."
self._class_attr_name = el_name
else:
assert self.header_types[el_name] != ATTR_TYPE_CONTINUOUS, \
"Non-class continuous attributes are not supported."
assert self._class_attr_name, "A class attribute must be specified." |
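For illustration, a hedged sketch of the header-parsing step above. The real ATTR_HEADER_PATTERN is not shown here; the pattern below assumes a name:type[:mode] element syntax, and the type/mode names are assumptions:
import re

# Hypothetical stand-in for ATTR_HEADER_PATTERN; the actual syntax may differ.
ATTR_HEADER_PATTERN = re.compile(r'^([^:]+):(\w+)(?::(\w+))?$')

for el in ['temperature:discrete', 'label:nominal:class']:
    el_name, el_type, el_mode = ATTR_HEADER_PATTERN.findall(el)[0]
    print(el_name, el_type, el_mode or '(plain attribute)')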
<SYSTEM_TASK:>
Ensure each element in the row matches the schema.
<END_TASK>
<USER_TASK:>
Description:
def validate_row(self, row):
"""
Ensure each element in the row matches the schema.
""" |
clean_row = {}
if isinstance(row, (tuple, list)):
assert self.header_order, "No attribute order specified."
assert len(row) == len(self.header_order), \
"Row length does not match header length."
itr = zip(self.header_order, row)
else:
assert isinstance(row, dict)
itr = iteritems(row)
for el_name, el_value in itr:
if self.header_types[el_name] == ATTR_TYPE_DISCRETE:
clean_row[el_name] = int(el_value)
elif self.header_types[el_name] == ATTR_TYPE_CONTINUOUS:
clean_row[el_name] = float(el_value)
else:
clean_row[el_name] = el_value
return clean_row |
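A hedged usage sketch for validate_row(). The constructor arguments mirror copy_no_data() above, but the exact signature and the ATTR_TYPE_NOMINAL constant are assumptions:
data = Data([], order=['age', 'color'],
            types={'age': ATTR_TYPE_DISCRETE, 'color': ATTR_TYPE_NOMINAL})
# Both call styles are accepted: a positional row matched against header_order,
# or a dict keyed by attribute name.
print(data.validate_row(['42', 'red']))                  # -> {'age': 42, 'color': 'red'}
print(data.validate_row({'age': '7', 'color': 'blue'}))  # -> {'age': 7, 'color': 'blue'}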
<SYSTEM_TASK:>
Returns two Data instances, containing the data randomly split between
<END_TASK>
<USER_TASK:>
Description:
def split(self, ratio=0.5, leave_one_out=False):
"""
Returns two Data instances, containing the data randomly split between
the two according to the given ratio.
The first instance will contain the ratio of data specified.
The second instance will contain the remaining ratio of data.
If leave_one_out is True, the ratio will be ignored and the first
instance will contain exactly one record for each class label, and
the second instance will contain all remaining data.
""" |
a_labels = set()
a = self.copy_no_data()
b = self.copy_no_data()
for row in self:
if leave_one_out and not self.is_continuous_class:
label = row[self.class_attribute_name]
if label not in a_labels:
a_labels.add(label)
a.data.append(row)
else:
b.data.append(row)
elif not a:
a.data.append(row)
elif not b:
b.data.append(row)
elif random.random() <= ratio:
a.data.append(row)
else:
b.data.append(row)
return a, b |
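A short usage sketch for split(); the CSV path and the filename-based constructor are assumptions:
data = Data('dataset.csv')                    # hypothetical CSV path
train, test = data.split(ratio=0.7)           # roughly 70% of rows go to `train`
seed, rest = data.split(leave_one_out=True)   # one record per class label in `seed`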
<SYSTEM_TASK:>
Gets the closest value for the current node's attribute matching the
<END_TASK>
<USER_TASK:>
Description:
def _get_attribute_value_for_node(self, record):
"""
Gets the closest value for the current node's attribute matching the
given record.
""" |
# Abort if this node has not yet split on an attribute.
if self.attr_name is None:
return
# Otherwise, lookup the attribute value for this node in the
# given record.
attr = self.attr_name
attr_value = record[attr]
attr_values = self.get_values(attr)
if attr_value in attr_values:
return attr_value
else:
# The value of the attribute in the given record does not directly
# map to any previously known values, so apply a missing value
# policy.
policy = self.tree.missing_value_policy.get(attr)
assert policy, \
("No missing value policy specified for attribute %s.") \
% (attr,)
if policy == USE_NEAREST:
# Use the value that the tree has seen that also has the
# smallest Euclidean distance to the actual value.
assert self.tree.data.header_types[attr] \
in (ATTR_TYPE_DISCRETE, ATTR_TYPE_CONTINUOUS), \
"The use-nearest policy is invalid for nominal types."
nearest = (1e999999, None)
for _value in attr_values:
nearest = min(
nearest,
(abs(_value - attr_value), _value))
_, nearest_value = nearest
return nearest_value
else:
raise Exception("Unknown missing value policy: %s" % (policy,)) |
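The use-nearest branch above relies on Python tuple comparison to track the closest previously seen value; the same idea in isolation:
# Pick the seen value with the smallest absolute distance to the unseen
# query value (only meaningful for numeric attributes).
seen_values = [1, 4, 10]
query = 6
nearest = min((abs(v - query), v) for v in seen_values)[1]
print(nearest)  # 4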
<SYSTEM_TASK:>
Retrieves the unique set of values seen for the given attribute
<END_TASK>
<USER_TASK:>
Description:
def get_values(self, attr_name):
"""
Retrieves the unique set of values seen for the given attribute
at this node.
""" |
ret = list(self._attr_value_cdist[attr_name].keys()) \
+ list(self._attr_value_counts[attr_name].keys()) \
+ list(self._branches.keys())
ret = set(ret)
return ret |
<SYSTEM_TASK:>
Returns the name of the attribute with the highest gain.
<END_TASK>
<USER_TASK:>
Description:
def get_best_splitting_attr(self):
"""
Returns the name of the attribute with the highest gain.
""" |
best = (-1e999999, None)
for attr in self.attributes:
best = max(best, (self.get_gain(attr), attr))
best_gain, best_attr = best
return best_attr |
<SYSTEM_TASK:>
Calculates the information gain from splitting on the given attribute.
<END_TASK>
<USER_TASK:>
Description:
def get_gain(self, attr_name):
"""
Calculates the information gain from splitting on the given attribute.
""" |
subset_entropy = 0.0
for value in iterkeys(self._attr_value_counts[attr_name]):
value_prob = self.get_value_prob(attr_name, value)
e = self.get_entropy(attr_name, value)
subset_entropy += value_prob * e
return (self.main_entropy - subset_entropy) |
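get_gain() accumulates the probability-weighted entropy of each attribute-value subset and subtracts it from the parent entropy. A self-contained illustration of the same quantity on a toy split, independent of the class internals above:
import math
from collections import Counter

def entropy(labels):
    n = float(len(labels))
    return -sum((c / n) * math.log(c / n, 2) for c in Counter(labels).values())

labels = ['y', 'y', 'y', 'n', 'n', 'n']            # class labels at the parent node
subsets = [['y', 'y', 'n'], ['y', 'n', 'n']]       # split on a binary attribute
subset_entropy = sum(len(s) / float(len(labels)) * entropy(s) for s in subsets)
print(round(entropy(labels) - subset_entropy, 3))  # information gain, about 0.082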
<SYSTEM_TASK:>
Returns the class value probability distribution of the given
<END_TASK>
<USER_TASK:>
Description:
def get_value_ddist(self, attr_name, attr_value):
"""
Returns the class value probability distribution of the given
attribute value.
""" |
assert not self.tree.data.is_continuous_class, \
"Discrete distributions are only maintained for " + \
"discrete class types."
ddist = DDist()
cls_counts = self._attr_class_value_counts[attr_name][attr_value]
for cls_value, cls_count in iteritems(cls_counts):
ddist.add(cls_value, count=cls_count)
return ddist |
<SYSTEM_TASK:>
Returns the value probability of the given attribute at this node.
<END_TASK>
<USER_TASK:>
Description:
def get_value_prob(self, attr_name, value):
"""
Returns the value probability of the given attribute at this node.
""" |
if attr_name not in self._attr_value_count_totals:
return
n = self._attr_value_counts[attr_name][value]
d = self._attr_value_count_totals[attr_name]
return n/float(d) |
<SYSTEM_TASK:>
Returns the estimated value of the class attribute for the given
<END_TASK>
<USER_TASK:>
Description:
def predict(self, record, depth=0):
"""
Returns the estimated value of the class attribute for the given
record.
""" |
# Check if we're ready to predict.
if not self.ready_to_predict:
raise NodeNotReadyToPredict
# Lookup attribute value.
attr_value = self._get_attribute_value_for_node(record)
# Propagate decision to leaf node.
if self.attr_name:
if attr_value in self._branches:
try:
return self._branches[attr_value].predict(record, depth=depth+1)
except NodeNotReadyToPredict:
#TODO:allow re-raise if user doesn't want an intermediate prediction?
pass
# Otherwise make decision at current node.
if self.attr_name:
if self._tree.data.is_continuous_class:
return self._attr_value_cdist[self.attr_name][attr_value].copy()
else:
# return self._class_ddist.copy()
return self.get_value_ddist(self.attr_name, attr_value)
elif self._tree.data.is_continuous_class:
# Make decision at current node, which may be a true leaf node
# or an incomplete branch in a tree currently being built.
assert self._class_cdist is not None
return self._class_cdist.copy()
else:
return self._class_ddist.copy() |
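A hedged usage sketch for predict() on an already-trained node with a discrete class; the query record is hypothetical, and the returned DDist exposes at least the best_prob accessor referenced elsewhere in this section:
result = node.predict({'age': 42, 'color': 'red'})
print(result.best_prob)   # probability of the most likely class value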
<SYSTEM_TASK:>
Returns true if this node is ready to branch off additional nodes.
<END_TASK>
<USER_TASK:>
Description:
def ready_to_split(self):
"""
Returns true if this node is ready to branch off additional nodes.
Returns false otherwise.
""" |
# Never split if we're a leaf that predicts adequately.
threshold = self._tree.leaf_threshold
if self._tree.data.is_continuous_class:
var = self._class_cdist.variance
if var is not None and threshold is not None \
and var <= threshold:
return False
else:
best_prob = self._class_ddist.best_prob
if best_prob is not None and threshold is not None \
and best_prob >= threshold:
return False
return self._tree.auto_grow \
and not self.attr_name \
and self.n >= self._tree.splitting_n |
<SYSTEM_TASK:>
Sets the probability distribution at a leaf node.
<END_TASK>
<USER_TASK:>
Description:
def set_leaf_dist(self, attr_value, dist):
"""
Sets the probability distribution at a leaf node.
""" |
assert self.attr_name
assert self.tree.data.is_valid(self.attr_name, attr_value), \
"Value %s is invalid for attribute %s." \
% (attr_value, self.attr_name)
if self.is_continuous_class:
assert isinstance(dist, CDist)
assert self.attr_name
self._attr_value_cdist[self.attr_name][attr_value] = dist.copy()
# self.n += dist.count
else:
assert isinstance(dist, DDist)
# {attr_name:{attr_value:count}}
self._attr_value_counts[self.attr_name][attr_value] += 1
# {attr_name:total}
self._attr_value_count_totals[self.attr_name] += 1
# {attr_name:{attr_value:{class_value:count}}}
for cls_value, cls_count in iteritems(dist.counts):
self._attr_class_value_counts[self.attr_name][attr_value] \
[cls_value] += cls_count |
<SYSTEM_TASK:>
Incrementally update the statistics at this node.
<END_TASK>
<USER_TASK:>
Description:
def train(self, record):
"""
Incrementally update the statistics at this node.
""" |
self.n += 1
class_attr = self.tree.data.class_attribute_name
class_value = record[class_attr]
# Update class statistics.
is_con = self.tree.data.is_continuous_class
if is_con:
# For a continuous class.
self._class_cdist += class_value
else:
# For a discrete class.
self._class_ddist.add(class_value)
# Update attribute statistics.
for an, av in iteritems(record):
if an == class_attr:
continue
self._attr_value_counts[an][av] += 1
self._attr_value_count_totals[an] += 1
if is_con:
self._attr_value_cdist[an][av] += class_value
else:
self._attr_class_value_counts[an][av][class_value] += 1
# Decide if branch should split on an attribute.
if self.ready_to_split:
self.attr_name = self.get_best_splitting_attr()
self.tree.leaf_count -= 1
for av in self._attr_value_counts[self.attr_name]:
self._branches[av] = Node(tree=self.tree)
self.tree.leaf_count += 1
# If we've split, then propagate the update to appropriate sub-branch.
if self.attr_name:
key = record[self.attr_name]
del record[self.attr_name]
self._branches[key].train(record) |
<SYSTEM_TASK:>
Constructs a classification or regression tree in a single batch by
<END_TASK>
<USER_TASK:>
Description:
def build(cls, data, *args, **kwargs):
"""
Constructs a classification or regression tree in a single batch by
analyzing the given data.
""" |
assert isinstance(data, Data)
if data.is_continuous_class:
fitness_func = gain_variance
else:
fitness_func = get_gain
t = cls(data=data, *args, **kwargs)
t._data = data
t.sample_count = len(data)
t._tree = create_decision_tree(
data=data,
attributes=data.attribute_names,
class_attr=data.class_attribute_name,
fitness_func=fitness_func,
wrapper=t,
)
return t |
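A hedged sketch of batch construction with build(), assuming the wrapper class is named Tree and the CSV path is hypothetical:
data = Data('training.csv')   # file whose header marks one attribute as the class
tree = Tree.build(data)
print(tree.sample_count)      # number of records the tree was built from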
<SYSTEM_TASK:>
Returns the mean absolute error for predictions on the out-of-bag
<END_TASK>
<USER_TASK:>
Description:
def out_of_bag_mae(self):
"""
Returns the mean absolute error for predictions on the out-of-bag
samples.
""" |
if not self._out_of_bag_mae_clean:
try:
self._out_of_bag_mae = self.test(self.out_of_bag_samples)
self._out_of_bag_mae_clean = True
except NodeNotReadyToPredict:
return
return self._out_of_bag_mae.copy() |
<SYSTEM_TASK:>
Returns the out-of-bag samples list, inside a wrapper to keep track
<END_TASK>
<USER_TASK:>
Description:
def out_of_bag_samples(self):
"""
Returns the out-of-bag samples list, inside a wrapper to keep track
of modifications.
""" |
#TODO: replace with a more generic pass-through wrapper?
class O(object):
def __init__(self, tree):
self.tree = tree
def __len__(self):
return len(self.tree._out_of_bag_samples)
def append(self, v):
self.tree._out_of_bag_mae_clean = False
return self.tree._out_of_bag_samples.append(v)
def pop(self, v):
self.tree._out_of_bag_mae_clean = False
return self.tree._out_of_bag_samples.pop(v)
def __iter__(self):
for _ in self.tree._out_of_bag_samples:
yield _
return O(self) |
<SYSTEM_TASK:>
Sets the behavior to use for one or all attributes when traversal of the
<END_TASK>
<USER_TASK:>
Description:
def set_missing_value_policy(self, policy, target_attr_name=None):
"""
Sets the behavior to use for one or all attributes when traversal of the
tree with a query vector encounters a branch that does not exist.
""" |
assert policy in MISSING_VALUE_POLICIES, \
"Unknown policy: %s" % (policy,)
for attr_name in self.data.attribute_names:
if target_attr_name is not None and target_attr_name != attr_name:
continue
self.missing_value_policy[attr_name] = policy |
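A short usage sketch; USE_NEAREST is the policy referenced in _get_attribute_value_for_node above, and the tree object is assumed to be an existing instance:
tree.set_missing_value_policy(USE_NEAREST)           # apply to every attribute
tree.set_missing_value_policy(USE_NEAREST, 'age')    # or to a single attribute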
<SYSTEM_TASK:>
Incrementally updates the tree with the given sample record.
<END_TASK>
<USER_TASK:>
Description:
def train(self, record):
"""
Incrementally updates the tree with the given sample record.
""" |
assert self.data.class_attribute_name in record, \
"The class attribute must be present in the record."
record = record.copy()
self.sample_count += 1
self.tree.train(record) |
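A hedged sketch of incremental use, assuming the wrapper class is named Tree and supports the auto_grow flag referenced above; train() copies each record before the node-level update mutates it:
tree = Tree(data=data, auto_grow=True)   # constructor arguments are assumptions
for row in data:
    tree.train(row)                      # statistics, and eventually splits, update online
print(tree.leaf_count)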
<SYSTEM_TASK:>
Removes trees from the forest according to the specified fell method.
<END_TASK>
<USER_TASK:>
Description:
def _fell_trees(self):
"""
Removes trees from the forest according to the specified fell method.
""" |
if callable(self.fell_method):
for tree in self.fell_method(list(self.trees)):
self.trees.remove(tree) |
<SYSTEM_TASK:>
Gets the prediction from the tree with the lowest mean absolute error.
<END_TASK>
<USER_TASK:>
Description:
def _get_best_prediction(self, record, train=True):
"""
Gets the prediction from the tree with the lowest mean absolute error.
""" |
if not self.trees:
return
best = (+1e999999, None)
for tree in self.trees:
best = min(best, (tree.mae.mean, tree))
_, best_tree = best
prediction, tree_mae = best_tree.predict(record, train=train)
return prediction.mean |
<SYSTEM_TASK:>
Returns weights so that the tree with smallest out-of-bag mean absolute error
<END_TASK>
<USER_TASK:>
Description:
def best_oob_mae_weight(trees):
"""
Returns weights so that the tree with smallest out-of-bag mean absolute error
receives all of the weight.
""" |
best = (+1e999999, None)
for tree in trees:
oob_mae = tree.out_of_bag_mae
if oob_mae is None or oob_mae.mean is None:
continue
best = min(best, (oob_mae.mean, tree))
best_mae, best_tree = best
if best_tree is None:
return
return [(1.0, best_tree)] |
<SYSTEM_TASK:>
Returns weights proportional to the out-of-bag mean absolute error for each tree.
<END_TASK>
<USER_TASK:>
Description:
def mean_oob_mae_weight(trees):
"""
Returns weights proportional to the out-of-bag mean absolute error for each tree.
""" |
weights = []
active_trees = []
for tree in trees:
oob_mae = tree.out_of_bag_mae
if oob_mae is None or oob_mae.mean is None:
continue
weights.append(oob_mae.mean)
active_trees.append(tree)
if not active_trees:
return
weights = normalize(weights)
return zip(weights, active_trees) |
<SYSTEM_TASK:>
Adds new trees to the forest according to the specified growth method.
<END_TASK>
<USER_TASK:>
Description:
def _grow_trees(self):
"""
Adds new trees to the forest according to the specified growth method.
""" |
if self.grow_method == GROW_AUTO_INCREMENTAL:
self.tree_kwargs['auto_grow'] = True
while len(self.trees) < self.size:
self.trees.append(Tree(data=self.data, **self.tree_kwargs)) |
<SYSTEM_TASK:>
Updates the trees with the given training record.
<END_TASK>
<USER_TASK:>
Description:
def train(self, record):
"""
Updates the trees with the given training record.
""" |
self._fell_trees()
self._grow_trees()
for tree in self.trees:
if random.random() < self.sample_ratio:
tree.train(record)
else:
tree.out_of_bag_samples.append(record)
while len(tree.out_of_bag_samples) > self.max_out_of_bag_samples:
tree.out_of_bag_samples.pop(0) |
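A hedged sketch of online forest training, assuming the enclosing class is named Forest and takes the arguments referenced above; each record either updates a tree or is appended to that tree's out-of-bag sample list:
forest = Forest(data=data, size=10, sample_ratio=0.9)   # assumed constructor arguments
for row in data:
    forest.train(row)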
<SYSTEM_TASK:>
Return a list of local configuration file paths.
<END_TASK>
<USER_TASK:>
Description:
def get_configfile_paths(system=True, user=True, local=True, only_existing=True):
"""Return a list of local configuration file paths.
Search paths for configuration files on the local system
are based on homebase_ and depend on operating system; for example, for Linux systems
these might include ``dwave.conf`` in the current working directory (CWD),
user-local ``.config/dwave/``, and system-wide ``/etc/dwave/``.
.. _homebase: https://github.com/dwavesystems/homebase
Args:
system (boolean, default=True):
Search for system-wide configuration files.
user (boolean, default=True):
Search for user-local configuration files.
local (boolean, default=True):
Search for local configuration files (in CWD).
only_existing (boolean, default=True):
Return only paths for files that exist on the local system.
Returns:
list[str]:
List of configuration file paths.
Examples:
This example displays all paths to configuration files on a Windows system
running Python 2.7 and then finds the single existing configuration file.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
[u'C:\\ProgramData\\dwavesystem\\dwave\\dwave.conf',
u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf',
'.\\dwave.conf']
>>> # Find existing files
>>> dc.config.get_configfile_paths() # doctest: +SKIP
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
""" |
candidates = []
# system-wide has the lowest priority, `/etc/dwave/dwave.conf`
if system:
candidates.extend(homebase.site_config_dir_list(
app_author=CONF_AUTHOR, app_name=CONF_APP,
use_virtualenv=False, create=False))
# user-local will override it, `~/.config/dwave/dwave.conf`
if user:
candidates.append(homebase.user_config_dir(
app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
use_virtualenv=False, create=False))
# highest priority (overrides all): `./dwave.conf`
if local:
candidates.append(".")
paths = [os.path.join(base, CONF_FILENAME) for base in candidates]
if only_existing:
paths = list(filter(os.path.exists, paths))
return paths |
<SYSTEM_TASK:>
Load D-Wave Cloud Client configuration from a list of files.
<END_TASK>
<USER_TASK:>
Description:
def load_config_from_files(filenames=None):
"""Load D-Wave Cloud Client configuration from a list of files.
.. note:: This method is not standardly used to set up D-Wave Cloud Client configuration.
It is recommended you use :meth:`.Client.from_config` or
:meth:`.config.load_config` instead.
Configuration files comply with standard Windows INI-like format,
parsable with Python's :mod:`configparser`. A section called
``defaults`` contains default values inherited by other sections.
Each filename in the list (each configuration file loaded) progressively upgrades
the final configuration, on a key by key basis, per each section.
Args:
filenames (list[str], default=None):
D-Wave Cloud Client configuration files (paths and names).
If ``None``, searches for a configuration file named ``dwave.conf``
in all system-wide configuration directories, in the user-local
configuration directory, and in the current working directory,
following the user/system configuration paths of :func:`get_configfile_paths`.
Returns:
:obj:`~configparser.ConfigParser`:
:class:`dict`-like mapping of configuration sections (profiles) to
mapping of per-profile keys holding values.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
This example loads configurations from two files. One contains a default
section with key/values that are overwritten by any profile section that
contains that key/value; for example, profile dw2000b in file dwave_b.conf
overwrites the default URL and client type, which profile dw2000a inherits
from the defaults section, while profile dw2000a overwrites the API token that
profile dw2000b inherits.
The files, which are located in the current working directory, are
(1) dwave_a.conf::
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
solver = EXAMPLE_2000Q_SYSTEM
token = DEF-987654321987654321987654321
and (2) dwave_b.conf::
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = sw
solver = EXAMPLE_2000Q_SYSTEM
The following example code loads configuration from both these files, with
the defined overrides and inheritance.
.. code:: python
>>> import dwave.cloud as dc
>>> import sys
>>> configuration = dc.config.load_config_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
>>> configuration.write(sys.stdout) # doctest: +SKIP
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
solver = EXAMPLE_2000Q_SYSTEM
token = DEF-987654321987654321987654321
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = sw
solver = EXAMPLE_2000Q_SYSTEM
""" |
if filenames is None:
filenames = get_configfile_paths()
config = configparser.ConfigParser(default_section="defaults")
for filename in filenames:
try:
with open(filename, 'r') as f:
config.read_file(f, filename)
except (IOError, OSError):
raise ConfigFileReadError("Failed to read {!r}".format(filename))
except configparser.Error:
raise ConfigFileParseError("Failed to parse {!r}".format(filename))
return config |
<SYSTEM_TASK:>
Load a profile from a list of D-Wave Cloud Client configuration files.
<END_TASK>
<USER_TASK:>
Description:
def load_profile_from_files(filenames=None, profile=None):
"""Load a profile from a list of D-Wave Cloud Client configuration files.
.. note:: This method is not standardly used to set up D-Wave Cloud Client configuration.
It is recommended you use :meth:`.Client.from_config` or
:meth:`.config.load_config` instead.
Configuration files comply with standard Windows INI-like format,
parsable with Python's :mod:`configparser`.
Each file in the list is progressively searched until the first profile is found.
This function does not input profile information from environment variables.
Args:
filenames (list[str], default=None):
D-Wave cloud client configuration files (path and name). If ``None``,
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
profile (str, default=None):
Name of profile to return from reading the configuration from the specified
configuration file(s). If ``None``, progressively falls back in the
following order:
(1) ``profile`` key following ``[defaults]`` section.
(2) First non-``[defaults]`` section.
(3) ``[defaults]`` section.
Returns:
dict:
Mapping of configuration keys to values. If no valid config/profile
is found, returns an empty dict.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
:exc:`ValueError`:
Profile name not found.
Examples:
This example loads a profile based on configurations from two files. It
finds the first profile, dw2000a, in the first file, dwave_a.conf, and adds to
the values of the defaults section, overwriting the existing client value,
while ignoring the profile in the second file, dwave_b.conf.
The files, which are located in the current working directory, are
(1) dwave_a.conf::
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
client = sw
solver = EXAMPLE_2000Q_SYSTEM_A
token = DEF-987654321987654321987654321
and (2) dwave_b.conf::
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = qpu
solver = EXAMPLE_2000Q_SYSTEM_B
The following example code loads profile values from parsing both these files,
by default loading the first profile encountered or an explicitly specified profile.
>>> import dwave.cloud as dc
>>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
{'client': u'sw',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'solver': u'EXAMPLE_2000Q_SYSTEM_A',
'token': u'DEF-987654321987654321987654321'}
>>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"],
... profile='dw2000b') # doctest: +SKIP
{'client': u'qpu',
'endpoint': u'https://url.of.some.other.dwavesystem.com/sapi',
'solver': u'EXAMPLE_2000Q_SYSTEM_B',
'token': u'ABC-123456789123456789123456789'}
""" |
# progressively build config from a file, or a list of auto-detected files
# raises ConfigFileReadError/ConfigFileParseError on error
config = load_config_from_files(filenames)
# determine profile name fallback:
# (1) profile key under [defaults],
# (2) first non-[defaults] section
# (3) [defaults] section
first_section = next(iter(config.sections() + [None]))
config_defaults = config.defaults()
if not profile:
profile = config_defaults.get('profile', first_section)
if profile:
try:
section = dict(config[profile])
except KeyError:
raise ValueError("Config profile {!r} not found".format(profile))
else:
# as the very last resort (unspecified profile name and
# no profiles defined in config), try to use [defaults]
if config_defaults:
section = config_defaults
else:
section = {}
return section |
<SYSTEM_TASK:>
Load D-Wave Cloud Client configuration based on a configuration file.
<END_TASK>
<USER_TASK:>
Description:
def load_config(config_file=None, profile=None, client=None,
endpoint=None, token=None, solver=None, proxy=None):
"""Load D-Wave Cloud Client configuration based on a configuration file.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`load_config()`. These values replace
values read from a configuration file, and therefore must be **strings**, including float
values for timeouts, boolean flags (tested for "truthiness"), and solver feature
constraints (a dictionary encoded as JSON).
2. Values specified as environment variables.
3. Values specified in the configuration file.
Configuration-file format is described in :mod:`dwave.cloud.config`.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file(s).
If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If `False`, loading from file(s) is skipped; if `True`, forces auto-detection
(regardless of the `DWAVE_CONFIG_FILE` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from `DWAVE_PROFILE` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides `[defaults]`, the defaults
section is promoted and selected.
client (str, default=None):
Client type used for accessing the API. Supported values are `qpu`
for :class:`dwave.cloud.qpu.Client` and `sw` for
:class:`dwave.cloud.sw.Client`.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (str, default=None):
:term:`solver` features, as a JSON-encoded dictionary of feature constraints,
the client should use. See :meth:`~dwave.cloud.client.Client.get_solvers` for
semantics of supported feature constraints.
If undefined, the client uses a solver definition from environment variables,
a configuration file, or falls back to the first available online solver.
For backward compatibility, solver name in string format is accepted and
converted to ``{"name": <solver name>}``.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
Returns:
dict:
Mapping of configuration keys to values for the profile
(section), as read from the configuration file and optionally overridden by
environment values and specified keyword arguments.
Always contains the `client`, `endpoint`, `token`, `solver`, and `proxy`
keys.
Raises:
:exc:`ValueError`:
Invalid (non-existing) profile name.
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
This example loads the configuration from an auto-detected configuration file
in the home directory of a Windows system user.
>>> import dwave.cloud as dc
>>> dc.config.load_config()
{'client': u'qpu',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
'solver': u'EXAMPLE_2000Q_SYSTEM_A',
'token': u'DEF-987654321987654321987654321'}
>>> # See which configuration file was loaded
>>> dc.config.get_configfile_paths()
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
Additional examples are given in :mod:`dwave.cloud.config`.
""" |
if profile is None:
profile = os.getenv("DWAVE_PROFILE")
if config_file == False:
# skip loading from file altogether
section = {}
elif config_file == True:
# force auto-detection, disregarding DWAVE_CONFIG_FILE
section = load_profile_from_files(None, profile)
else:
# auto-detect if not specified with arg or env
if config_file is None:
# note: both empty and undefined DWAVE_CONFIG_FILE treated as None
config_file = os.getenv("DWAVE_CONFIG_FILE")
# handle ''/None/str/[str] for `config_file` (after env)
filenames = None
if config_file:
if isinstance(config_file, six.string_types):
filenames = [config_file]
else:
filenames = config_file
section = load_profile_from_files(filenames, profile)
# override a selected subset of values via env or kwargs,
# pass-through the rest unmodified
section['client'] = client or os.getenv("DWAVE_API_CLIENT", section.get('client'))
section['endpoint'] = endpoint or os.getenv("DWAVE_API_ENDPOINT", section.get('endpoint'))
section['token'] = token or os.getenv("DWAVE_API_TOKEN", section.get('token'))
section['solver'] = solver or os.getenv("DWAVE_API_SOLVER", section.get('solver'))
section['proxy'] = proxy or os.getenv("DWAVE_API_PROXY", section.get('proxy'))
return section |
<SYSTEM_TASK:>
Load configured URLs and token for the SAPI server.
<END_TASK>
<USER_TASK:>
Description:
def legacy_load_config(profile=None, endpoint=None, token=None, solver=None,
proxy=None, **kwargs):
"""Load configured URLs and token for the SAPI server.
.. warning:: Included only for backward compatibility. Please use
:func:`load_config` or the client factory
:meth:`~dwave.cloud.client.Client.from_config` instead.
This method tries to load a legacy configuration file from ``~/.dwrc``, select a
specified `profile` (or, if not specified, the first profile), and override
individual keys with values read from environment variables or
specified explicitly as key values in the function.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`legacy_load_config()`
2. Values specified as environment variables
3. Values specified in the legacy ``~/.dwrc`` configuration file
Environment variables searched for are:
- ``DW_INTERNAL__HTTPLINK``
- ``DW_INTERNAL__TOKEN``
- ``DW_INTERNAL__HTTPPROXY``
- ``DW_INTERNAL__SOLVER``
Legacy configuration file format is a modified CSV where the first comma is
replaced with a bar character (``|``). Each line encodes a single profile. Its
columns are::
profile_name|endpoint_url,authentication_token,proxy_url,default_solver_name
All its fields after ``authentication_token`` are optional.
When there are multiple connections in a file, the first one is
the default. Any commas in the URLs must be percent-encoded.
Args:
profile (str):
Profile name in the legacy configuration file.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (str, default=None):
Default solver to use in :meth:`~dwave.cloud.client.Client.get_solver`.
If undefined, all calls to :meth:`~dwave.cloud.client.Client.get_solver`
must explicitly specify the solver name/id.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client uses a
system-level proxy, if defined, or connects directly to the API.
Returns:
Dictionary with keys: endpoint, token, solver, and proxy.
Examples:
This example creates a client using the :meth:`~dwave.cloud.client.Client.from_config`
method, which falls back on the legacy file by default when it fails to
find a D-Wave Cloud Client configuration file (setting its `legacy_config_fallback`
parameter to False precludes this fall-back operation). For this example,
no D-Wave Cloud Client configuration file is present on the local system;
instead the following ``.dwrc`` legacy configuration file is present in the
user's home directory::
profile-a|https://one.com,token-one
profile-b|https://two.com,token-two
The following example code creates a client without explicitly specifying
key values, therefore auto-detection searches for existing (non-legacy) configuration
files in the standard directories of :func:`get_configfile_paths` and, failing to
find one, falls back on the existing legacy configuration file above.
>>> import dwave.cloud
>>> client = dwave.cloud.Client.from_config() # doctest: +SKIP
>>> client.endpoint # doctest: +SKIP
'https://one.com'
>>> client.token # doctest: +SKIP
'token-one'
The following examples specify a profile and/or token.
>>> # Explicitly specify a profile
>>> client = dwave.cloud.Client.from_config(profile='profile-b')
>>> # Will try to connect with the url `https://two.com` and the token `token-two`.
>>> client = dwave.cloud.Client.from_config(profile='profile-b', token='new-token')
>>> # Will try to connect with the url `https://two.com` and the token `new-token`.
""" |
def _parse_config(fp, filename):
fields = ('endpoint', 'token', 'proxy', 'solver')
config = OrderedDict()
for line in fp:
# strip whitespace, skip blank and comment lines
line = line.strip()
if not line or line.startswith('#'):
continue
# parse each record, store in dict with label as key
try:
label, data = line.split('|', 1)
values = [v.strip() or None for v in data.split(',')]
config[label] = dict(zip(fields, values))
except:
raise ConfigFileParseError(
"Failed to parse {!r}, line {!r}".format(filename, line))
return config
def _read_config(filename):
try:
with open(filename, 'r') as f:
return _parse_config(f, filename)
except (IOError, OSError):
raise ConfigFileReadError("Failed to read {!r}".format(filename))
config = {}
filename = os.path.expanduser('~/.dwrc')
if os.path.exists(filename):
config = _read_config(filename)
# load profile if specified, or first one in file
if profile:
try:
section = config[profile]
except KeyError:
raise ValueError("Config profile {!r} not found".format(profile))
else:
try:
_, section = next(iter(config.items()))
except StopIteration:
section = {}
# override config variables (if any) with environment and then with arguments
section['endpoint'] = endpoint or os.getenv("DW_INTERNAL__HTTPLINK", section.get('endpoint'))
section['token'] = token or os.getenv("DW_INTERNAL__TOKEN", section.get('token'))
section['proxy'] = proxy or os.getenv("DW_INTERNAL__HTTPPROXY", section.get('proxy'))
section['solver'] = solver or os.getenv("DW_INTERNAL__SOLVER", section.get('solver'))
section.update(kwargs)
return section |
<SYSTEM_TASK:>
Perform a single conversion from an input buffer to an output buffer.
<END_TASK>
<USER_TASK:>
Description:
def src_simple(input_data, output_data, ratio, converter_type, channels):
"""Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from an input buffer to
an output buffer at a fixed conversion ratio. The simple interface does not
require initialisation, as it can only operate on a single buffer's worth of
audio.
""" |
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen |
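A hedged usage sketch for src_simple, assuming NumPy float32 buffers shaped (frames, channels) are accepted by the _check_data helper and that converter type 0 selects the best-quality sinc converter:
import numpy as np

input_data = np.random.randn(44100, 1).astype(np.float32)   # 1 s of mono audio at 44.1 kHz
output_data = np.empty((48000, 1), dtype=np.float32)         # room for the 48 kHz result
error, used, generated = src_simple(input_data, output_data,
                                    ratio=48000 / 44100.0,
                                    converter_type=0, channels=1)
print(error, used, generated)   # 0 on success, frames consumed, frames produced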
<SYSTEM_TASK:>
Initialise a new sample rate converter.
<END_TASK>
<USER_TASK:>
Description:
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
""" |
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0] |
<SYSTEM_TASK:>
Internal callback function to be used with the callback API.
<END_TASK>
<USER_TASK:>
Description:
def _src_input_callback(cb_data, data):
"""Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
""" |
cb_data = ffi.from_handle(cb_data)
ret = cb_data['callback']()
if ret is None:
cb_data['last_input'] = None
return 0 # No frames supplied
input_data = _np.require(ret, requirements='C', dtype=_np.float32)
input_frames, channels = _check_data(input_data)
# Check whether the correct number of channels is supplied by user.
if cb_data['channels'] != channels:
raise ValueError('Invalid number of channels in callback.')
# Store a reference of the input data to ensure it is still alive when
# accessed by libsamplerate.
cb_data['last_input'] = input_data
data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
return input_frames |
<SYSTEM_TASK:>
Initialisation for the callback based API.
<END_TASK>
<USER_TASK:>
Description:
def src_callback_new(callback, converter_type, channels):
"""Initialisation for the callback based API.
Parameters
----------
callback : function
Called whenever new frames are to be read. Must return a NumPy array
of shape (num_frames, channels).
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
handle
A CFFI handle to the callback data.
error : int
Error code.
""" |
cb_data = {'callback': callback, 'channels': channels}
handle = ffi.new_handle(cb_data)
error = ffi.new('int*')
state = _lib.src_callback_new(_src_input_callback, converter_type,
channels, error, handle)
if state == ffi.NULL:
return None, handle, error[0]
return state, handle, error[0] |
<SYSTEM_TASK:>
Read up to `frames` worth of data using the callback API.
<END_TASK>
<USER_TASK:>
Description:
def src_callback_read(state, ratio, frames, data):
"""Read up to `frames` worth of data using the callback API.
Returns
-------
frames : int
Number of frames read or -1 on error.
""" |
data_ptr = ffi.cast('float*', ffi.from_buffer(data))
return _lib.src_callback_read(state, ratio, frames, data_ptr) |
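A hedged end-to-end sketch of the callback API defined above: the callback supplies input frames on demand, and src_callback_read pulls converted frames into a preallocated output buffer. The converter-type constant is an assumption, and the returned handle must be kept alive while the converter is in use.
import numpy as np

def producer():
    # Return the next block of input frames, shaped (frames, channels).
    return np.random.randn(1024, 1).astype(np.float32)

state, handle, error = src_callback_new(producer, converter_type=0, channels=1)
out = np.empty((2048, 1), dtype=np.float32)
frames_read = src_callback_read(state, ratio=2.0, frames=2048, data=out)
print(frames_read)   # number of frames written to `out`, or -1 on error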
<SYSTEM_TASK:>
Client factory method to instantiate a client instance from configuration.
<END_TASK>
<USER_TASK:>
Description:
def from_config(cls, config_file=None, profile=None, client=None,
endpoint=None, token=None, solver=None, proxy=None,
legacy_config_fallback=False, **kwargs):
"""Client factory method to instantiate a client instance from configuration.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`from_config()`
2. Values specified as environment variables
3. Values specified in the configuration file
Configuration-file format is described in :mod:`dwave.cloud.config`.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file.
If ``None``, the value is taken from ``DWAVE_CONFIG_FILE`` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If ``False``, loading from file is skipped; if ``True``, forces auto-detection
(regardless of the ``DWAVE_CONFIG_FILE`` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from ``DWAVE_PROFILE`` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides ``[defaults]``, the defaults
section is promoted and selected.
client (str, default=None):
Client type used for accessing the API. Supported values are ``qpu``
for :class:`dwave.cloud.qpu.Client` and ``sw`` for
:class:`dwave.cloud.sw.Client`.
endpoint (str, default=None):
API endpoint URL.
token (str, default=None):
API authorization token.
solver (dict/str, default=None):
Default :term:`solver` features to use in :meth:`~dwave.cloud.client.Client.get_solver`.
Defined via dictionary of solver feature constraints
(see :meth:`~dwave.cloud.client.Client.get_solvers`).
For backward compatibility, a solver name, as a string,
is also accepted and converted to ``{"name": <solver name>}``.
If undefined, :meth:`~dwave.cloud.client.Client.get_solver` uses a
solver definition from environment variables, a configuration file, or
falls back to the first available online solver.
proxy (str, default=None):
URL for proxy to use in connections to D-Wave API. Can include
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
legacy_config_fallback (bool, default=False):
If True and loading from a standard D-Wave Cloud Client configuration
file (``dwave.conf``) fails, tries loading a legacy configuration file (``~/.dwrc``).
Other Parameters:
Unrecognized keys (str):
All unrecognized keys are passed through to the appropriate client class constructor
as string keyword arguments.
An explicit key value overrides an identical user-defined key value loaded from a
configuration file.
Returns:
:class:`~dwave.cloud.client.Client` (:class:`dwave.cloud.qpu.Client` or :class:`dwave.cloud.sw.Client`, default=:class:`dwave.cloud.qpu.Client`):
Appropriate instance of a QPU or software client.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
A variety of examples are given in :mod:`dwave.cloud.config`.
This example initializes :class:`~dwave.cloud.client.Client` from an
explicitly specified configuration file, "~/jane/my_path_to_config/my_cloud_conf.conf"::
>>> from dwave.cloud import Client
>>> client = Client.from_config(config_file='~/jane/my_path_to_config/my_cloud_conf.conf') # doctest: +SKIP
>>> # code that uses client
>>> client.close()
""" |
# try loading configuration from a preferred new config subsystem
# (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc)
config = load_config(
config_file=config_file, profile=profile, client=client,
endpoint=endpoint, token=token, solver=solver, proxy=proxy)
_LOGGER.debug("Config loaded: %r", config)
# fallback to legacy `.dwrc` if key variables missing
if legacy_config_fallback:
warnings.warn("'legacy_config_fallback' is deprecated, please convert "
"your legacy .dwrc file to the new config format.", DeprecationWarning)
if not config.get('token'):
config = legacy_load_config(
profile=profile, client=client,
endpoint=endpoint, token=token, solver=solver, proxy=proxy)
_LOGGER.debug("Legacy config loaded: %r", config)
# manual override of other (client-custom) arguments
config.update(kwargs)
from dwave.cloud import qpu, sw
_clients = {'qpu': qpu.Client, 'sw': sw.Client, 'base': cls}
_client = config.pop('client', None) or 'base'
_LOGGER.debug("Final config used for %s.Client(): %r", _client, config)
return _clients[_client](**config) |
<SYSTEM_TASK:>
Perform a clean shutdown.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Perform a clean shutdown.
Waits for all the currently scheduled work to finish, kills the workers,
and closes the connection pool.
.. note:: Ensure your code does not submit new work while the connection is closing.
Where possible, it is recommended you use a context manager (a :code:`with Client.from_config(...) as`
construct) to ensure your code properly closes all resources.
Examples:
This example creates a client (based on an auto-detected configuration file), executes
some code (represented by a placeholder comment), and then closes the client.
>>> from dwave.cloud import Client
>>> client = Client.from_config()
>>> # code that uses client
>>> client.close()
""" |
# Finish all the work that requires the connection
_LOGGER.debug("Joining submission queue")
self._submission_queue.join()
_LOGGER.debug("Joining cancel queue")
self._cancel_queue.join()
_LOGGER.debug("Joining poll queue")
self._poll_queue.join()
_LOGGER.debug("Joining load queue")
self._load_queue.join()
# Send kill-task to all worker threads
# Note: threads can't be 'killed' in Python, they have to die by
# natural causes
for _ in self._submission_workers:
self._submission_queue.put(None)
for _ in self._cancel_workers:
self._cancel_queue.put(None)
for _ in self._poll_workers:
self._poll_queue.put((-1, None))
for _ in self._load_workers:
self._load_queue.put(None)
# Wait for threads to die
for worker in chain(self._submission_workers, self._cancel_workers,
self._poll_workers, self._load_workers):
worker.join()
# Close the requests session
self.session.close() |
<SYSTEM_TASK:>
Load the configuration for a single solver.
<END_TASK>
<USER_TASK:>
Description:
def get_solver(self, name=None, refresh=False, **filters):
"""Load the configuration for a single solver.
Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
is a URL configured for the client, and returns a :class:`.Solver` instance
that can be used to submit sampling problems to the D-Wave API and retrieve results.
Args:
name (str):
ID of the requested solver. ``None`` returns the default solver.
If default solver is not configured, ``None`` returns the first available
solver in ``Client``'s class (QPU/software/base).
**filters (keyword arguments, optional):
Dictionary of filters over features this solver has to have. For a list of
feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.
order_by (callable/str, default='id'):
Solver sorting key function (or :class:`Solver` attribute name).
By default, solvers are sorted by ID/name.
refresh (bool):
Return solver from cache (if cached with ``get_solvers()``),
unless set to ``True``.
Returns:
:class:`.Solver`
Examples:
This example creates two solvers for a client instantiated from
a local system's auto-detected default configuration file, which configures
a connection to a D-Wave resource that provides two solvers. The first
uses the default solver, the second explicitly selects another solver.
>>> from dwave.cloud import Client
>>> client = Client.from_config()
>>> client.get_solvers() # doctest: +SKIP
[Solver(id='2000Q_ONLINE_SOLVER1'), Solver(id='2000Q_ONLINE_SOLVER2')]
>>> solver1 = client.get_solver() # doctest: +SKIP
>>> solver2 = client.get_solver(name='2000Q_ONLINE_SOLVER2') # doctest: +SKIP
>>> solver1.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER1'
>>> solver2.id # doctest: +SKIP
'2000Q_ONLINE_SOLVER2'
>>> # code that uses client
>>> client.close() # doctest: +SKIP
""" |
_LOGGER.debug("Requested a solver that best matches feature filters=%r", filters)
# backward compatibility: name as the first feature
if name is not None:
filters.setdefault('name', name)
# in absence of other filters, config/env solver filters/name are used
if not filters and self.default_solver:
filters = self.default_solver
# get the first solver that satisfies all filters
try:
_LOGGER.debug("Fetching solvers according to filters=%r", filters)
return self.get_solvers(refresh=refresh, **filters)[0]
except IndexError:
raise SolverNotFoundError("Solver with the requested features not available") |
<SYSTEM_TASK:>
Enqueue a problem for submission to the server.
<END_TASK>
<USER_TASK:>
Description:
def _submit(self, body, future):
"""Enqueue a problem for submission to the server.
This method is thread safe.
""" |
self._submission_queue.put(self._submit.Message(body, future)) |
<SYSTEM_TASK:>
Pull problems from the submission queue and submit them.
<END_TASK>
<USER_TASK:>
Description:
def _do_submit_problems(self):
"""Pull problems from the submission queue and submit them.
Note:
This method is always run inside of a daemon thread.
""" |
try:
while True:
# Pull as many problems as we can, block on the first one,
# but once we have one problem, switch to non-blocking then
# submit without blocking again.
# `None` task is used to signal thread termination
item = self._submission_queue.get()
if item is None:
break
ready_problems = [item]
while len(ready_problems) < self._SUBMIT_BATCH_SIZE:
try:
ready_problems.append(self._submission_queue.get_nowait())
except queue.Empty:
break
# Submit the problems
_LOGGER.debug("Submitting %d problems", len(ready_problems))
body = '[' + ','.join(mess.body for mess in ready_problems) + ']'
try:
try:
response = self.session.post(posixpath.join(self.endpoint, 'problems/'), body)
localtime_of_response = epochnow()
except requests.exceptions.Timeout:
raise RequestTimeout
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
_LOGGER.debug("Finished submitting %d problems", len(ready_problems))
except BaseException as exception:
_LOGGER.debug("Submit failed for %d problems", len(ready_problems))
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for mess in ready_problems:
mess.future._set_error(exception, sys.exc_info())
self._submission_queue.task_done()
continue
# Pass on the information
for submission, res in zip(ready_problems, message):
submission.future._set_clock_diff(response, localtime_of_response)
self._handle_problem_status(res, submission.future)
self._submission_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except BaseException as err:
_LOGGER.exception(err) |
<SYSTEM_TASK:>
Handle the results of a problem submission or results request.
<END_TASK>
<USER_TASK:>
Description:
def _handle_problem_status(self, message, future):
"""Handle the results of a problem submission or results request.
This method checks the status of the problem and puts it in the correct queue.
Args:
message (dict): Update message from the SAPI server wrt. this problem.
future `Future`: future corresponding to the problem
Note:
This method is always run inside of a daemon thread.
""" |
try:
_LOGGER.trace("Handling response: %r", message)
_LOGGER.debug("Handling response for %s with status %s", message.get('id'), message.get('status'))
# Handle errors in batch mode
if 'error_code' in message and 'error_msg' in message:
raise SolverFailureError(message['error_msg'])
if 'status' not in message:
raise InvalidAPIResponseError("'status' missing in problem description response")
if 'id' not in message:
raise InvalidAPIResponseError("'id' missing in problem description response")
future.id = message['id']
future.remote_status = status = message['status']
# The future may not have the ID set yet
with future._single_cancel_lock:
# This handles the case where cancel has been called on a future
# before that future received the problem id
if future._cancel_requested:
if not future._cancel_sent and status == self.STATUS_PENDING:
# The problem has been canceled but the status says its still in queue
# try to cancel it
self._cancel(message['id'], future)
# If a cancel request could meaningfully be sent it has been now
future._cancel_sent = True
if not future.time_received and message.get('submitted_on'):
future.time_received = parse_datetime(message['submitted_on'])
if not future.time_solved and message.get('solved_on'):
future.time_solved = parse_datetime(message['solved_on'])
if not future.eta_min and message.get('earliest_estimated_completion'):
future.eta_min = parse_datetime(message['earliest_estimated_completion'])
if not future.eta_max and message.get('latest_estimated_completion'):
future.eta_max = parse_datetime(message['latest_estimated_completion'])
if status == self.STATUS_COMPLETE:
# TODO: find a better way to differentiate between
# `completed-on-submit` and `completed-on-poll`.
# Loading should happen only once, not every time when response
# doesn't contain 'answer'.
# If the message is complete, forward it to the future object
if 'answer' in message:
future._set_message(message)
# If the problem is complete, but we don't have the result data
# put the problem in the queue for loading results.
else:
self._load(future)
elif status in self.ANY_STATUS_ONGOING:
# If the response is pending add it to the queue.
self._poll(future)
elif status == self.STATUS_CANCELLED:
# If canceled return error
raise CanceledFutureError()
else:
# Return an error to the future object
errmsg = message.get('error_message', 'An unknown error has occurred.')
if 'solver is offline' in errmsg.lower():
raise SolverOfflineError(errmsg)
else:
raise SolverFailureError(errmsg)
except Exception as error:
# If there were any unhandled errors we need to release the
# lock in the future, otherwise deadlock occurs.
future._set_error(error, sys.exc_info()) |
<SYSTEM_TASK:>
Pull ids from the cancel queue and submit them.
<END_TASK>
<USER_TASK:>
Description:
def _do_cancel_problems(self):
"""Pull ids from the cancel queue and submit them.
Note:
This method is always run inside of a daemon thread.
""" |
try:
while True:
# Pull as many problems as we can, block when none are available.
# `None` task is used to signal thread termination
item = self._cancel_queue.get()
if item is None:
break
item_list = [item]
while True:
try:
item_list.append(self._cancel_queue.get_nowait())
except queue.Empty:
break
# Submit the problems, attach the ids as a json list in the
# body of the delete query.
try:
body = [item[0] for item in item_list]
try:
self.session.delete(posixpath.join(self.endpoint, 'problems/'), json=body)
except requests.exceptions.Timeout:
raise RequestTimeout
except Exception as err:
for _, future in item_list:
if future is not None:
future._set_error(err, sys.exc_info())
# Mark all the ids as processed regardless of success or failure.
                for _ in item_list:
                    self._cancel_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
_LOGGER.exception(err) |
<SYSTEM_TASK:>
Enqueue a problem to poll the server for status.
<END_TASK>
<USER_TASK:>
Description:
def _poll(self, future):
"""Enqueue a problem to poll the server for status.""" |
if future._poll_backoff is None:
# on first poll, start with minimal back-off
future._poll_backoff = self._POLL_BACKOFF_MIN
# if we have ETA of results, schedule the first poll for then
if future.eta_min and self._is_clock_diff_acceptable(future):
at = datetime_to_timestamp(future.eta_min)
_LOGGER.debug("Response ETA indicated and local clock reliable. "
"Scheduling first polling at +%.2f sec", at - epochnow())
else:
at = time.time() + future._poll_backoff
_LOGGER.debug("Response ETA not indicated, or local clock unreliable. "
"Scheduling first polling at +%.2f sec", at - epochnow())
else:
# update exponential poll back-off, clipped to a range
future._poll_backoff = \
max(self._POLL_BACKOFF_MIN,
min(future._poll_backoff * 2, self._POLL_BACKOFF_MAX))
# for poll priority we use timestamp of next scheduled poll
at = time.time() + future._poll_backoff
now = utcnow()
future_age = (now - future.time_created).total_seconds()
_LOGGER.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)",
at, future._poll_backoff, future.id, future_age)
# don't enqueue for next poll if polling_timeout is exceeded by then
future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now))
if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout:
_LOGGER.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!",
future_age_on_next_poll, self.polling_timeout)
raise PollingTimeout
self._poll_queue.put((at, future)) |
<SYSTEM_TASK:>
Poll the server for the status of a set of problems.
<END_TASK>
<USER_TASK:>
Description:
def _do_poll_problems(self):
"""Poll the server for the status of a set of problems.
Note:
This method is always run inside of a daemon thread.
""" |
try:
# grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME)
frame_futures = {}
def task_done():
self._poll_queue.task_done()
def add(future):
# add future to query frame_futures
# returns: worker lives on?
# `None` task signifies thread termination
if future is None:
task_done()
return False
if future.id not in frame_futures and not future.done():
frame_futures[future.id] = future
else:
task_done()
return True
while True:
frame_futures.clear()
# blocking add first scheduled
frame_earliest, future = self._poll_queue.get()
if not add(future):
return
# try grouping if scheduled within grouping timeframe
while len(frame_futures) < self._STATUS_QUERY_SIZE:
try:
task = self._poll_queue.get_nowait()
except queue.Empty:
break
at, future = task
if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME:
if not add(future):
return
else:
task_done()
self._poll_queue.put(task)
break
# build a query string with ids of all futures in this frame
ids = [future.id for future in frame_futures.values()]
_LOGGER.debug("Polling for status of futures: %s", ids)
query_string = 'problems/?id=' + ','.join(ids)
# if futures were cancelled while `add`ing, skip empty frame
if not ids:
continue
# wait until `frame_earliest` before polling
delay = frame_earliest - time.time()
if delay > 0:
_LOGGER.debug("Pausing polling %.2f sec for futures: %s", delay, ids)
time.sleep(delay)
else:
_LOGGER.trace("Skipping non-positive delay of %.2f sec", delay)
try:
_LOGGER.trace("Executing poll API request")
try:
response = self.session.get(posixpath.join(self.endpoint, query_string))
except requests.exceptions.Timeout:
raise RequestTimeout
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
statuses = response.json()
for status in statuses:
self._handle_problem_status(status, frame_futures[status['id']])
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for id_ in frame_futures.keys():
                        frame_futures[id_]._set_error(exception, sys.exc_info())
for id_ in frame_futures.keys():
task_done()
time.sleep(0)
except Exception as err:
_LOGGER.exception(err) |
<SYSTEM_TASK:>
Submit a query asking for the results for a particular problem.
<END_TASK>
<USER_TASK:>
Description:
def _do_load_results(self):
"""Submit a query asking for the results for a particular problem.
To request the results of a problem: ``GET /problems/{problem_id}/``
Note:
This method is always run inside of a daemon thread.
""" |
try:
while True:
# Select a problem
future = self._load_queue.get()
# `None` task signifies thread termination
if future is None:
break
_LOGGER.debug("Loading results of: %s", future.id)
# Submit the query
query_string = 'problems/{}/'.format(future.id)
try:
try:
response = self.session.get(posixpath.join(self.endpoint, query_string))
except requests.exceptions.Timeout:
raise RequestTimeout
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
message = response.json()
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
                    future._set_error(exception, sys.exc_info())
continue
# Dispatch the results, mark the task complete
self._handle_problem_status(message, future)
self._load_queue.task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
except Exception as err:
_LOGGER.error('Load result error: ' + str(err)) |
<SYSTEM_TASK:>
Encode the binary quadratic problem for submission to a given solver,
<END_TASK>
<USER_TASK:>
Description:
def encode_bqm_as_qp(solver, linear, quadratic):
"""Encode the binary quadratic problem for submission to a given solver,
using the `qp` format for data.
Args:
solver (:class:`dwave.cloud.solver.Solver`):
The solver used.
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
encoded submission dictionary
""" |
active = active_qubits(linear, quadratic)
# Encode linear terms. The coefficients of the linear terms of the objective
# are encoded as an array of little endian 64 bit doubles.
# This array is then base64 encoded into a string safe for json.
# The order of the terms is determined by the _encoding_qubits property
# specified by the server.
# Note: only active qubits are coded with double, inactive with NaN
nan = float('nan')
lin = [uniform_get(linear, qubit, 0 if qubit in active else nan)
for qubit in solver._encoding_qubits]
lin = base64.b64encode(struct.pack('<' + ('d' * len(lin)), *lin))
# Encode the coefficients of the quadratic terms of the objective
# in the same manner as the linear terms, in the order given by the
    # _encoding_couplers property, discarding couplers whose qubits are inactive
quad = [quadratic.get((q1,q2), 0) + quadratic.get((q2,q1), 0)
for (q1,q2) in solver._encoding_couplers
if q1 in active and q2 in active]
quad = base64.b64encode(struct.pack('<' + ('d' * len(quad)), *quad))
# The name for this encoding is 'qp' and is explicitly included in the
# message for easier extension in the future.
return {
'format': 'qp',
'lin': lin.decode('utf-8'),
'quad': quad.decode('utf-8')
} |
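For illustration, the linear-term packing described above can be reproduced with just the standard library; the bias values and qubit order below are hypothetical, and NaN stands in for an inactive qubit as noted in the comments.
import base64
import struct
# Hypothetical biases in solver qubit order; the inactive qubit is encoded as NaN.
biases = [1.5, float('nan'), -0.5]
packed = struct.pack('<' + 'd' * len(biases), *biases)
lin = base64.b64encode(packed).decode('utf-8')
print(lin)  # base64 text that is safe to embed in the JSON submission body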
<SYSTEM_TASK:>
Decode SAPI response that uses `qp` format, without numpy.
<END_TASK>
<USER_TASK:>
Description:
def decode_qp(msg):
"""Decode SAPI response that uses `qp` format, without numpy.
The 'qp' format is the current encoding used for problems and samples.
In this encoding the reply is generally json, but the samples, energy,
and histogram data (the occurrence count of each solution), are all
base64 encoded arrays.
""" |
# Decode the simple buffers
result = msg['answer']
result['active_variables'] = _decode_ints(result['active_variables'])
active_variables = result['active_variables']
if 'num_occurrences' in result:
result['num_occurrences'] = _decode_ints(result['num_occurrences'])
result['energies'] = _decode_doubles(result['energies'])
# Measure out the size of the binary solution data
num_solutions = len(result['energies'])
num_variables = len(result['active_variables'])
solution_bytes = -(-num_variables // 8) # equivalent to int(math.ceil(num_variables / 8.))
total_variables = result['num_variables']
# Figure out the null value for output
default = 3 if msg['type'] == 'qubo' else 0
# Decode the solutions, which will be byte aligned in binary format
binary = base64.b64decode(result['solutions'])
solutions = []
for solution_index in range(num_solutions):
        # Grab the section of the buffer related to the current solution
buffer_index = solution_index * solution_bytes
solution_buffer = binary[buffer_index:buffer_index + solution_bytes]
bytes = struct.unpack('B' * solution_bytes, solution_buffer)
# Assume None values
solution = [default] * total_variables
index = 0
for byte in bytes:
            # Parse each byte and read however many bits remain for this solution
values = _decode_byte(byte)
for _ in range(min(8, len(active_variables) - index)):
i = active_variables[index]
index += 1
solution[i] = values.pop()
# Switch to the right variable space
if msg['type'] == 'ising':
values = {0: -1, 1: 1}
solution = [values.get(v, default) for v in solution]
solutions.append(solution)
result['solutions'] = solutions
return result |
<SYSTEM_TASK:>
Helper for decode_qp, turns a single byte into a list of bits.
<END_TASK>
<USER_TASK:>
Description:
def _decode_byte(byte):
"""Helper for decode_qp, turns a single byte into a list of bits.
Args:
byte: byte to be decoded
Returns:
list of bits corresponding to byte
""" |
bits = []
for _ in range(8):
bits.append(byte & 1)
byte >>= 1
return bits |
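A quick sanity check, assuming _decode_byte above is in scope; the least significant bit comes first in the returned list.
bits = _decode_byte(0b10110001)  # assumes _decode_byte from the entry above
assert bits == [1, 0, 0, 0, 1, 1, 0, 1]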
<SYSTEM_TASK:>
Helper for decode_qp, decodes an int array.
<END_TASK>
<USER_TASK:>
Description:
def _decode_ints(message):
"""Helper for decode_qp, decodes an int array.
The int array is stored as little endian 32 bit integers.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
""" |
binary = base64.b64decode(message)
return struct.unpack('<' + ('i' * (len(binary) // 4)), binary) |
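Because decoding simply reverses the packing, a round trip built from the standard library makes the format concrete (assuming _decode_ints above is in scope).
import base64
import struct
values = [1, 2, 300]
# Pack as little-endian 32-bit ints and base64 encode, exactly what _decode_ints reverses.
message = base64.b64encode(struct.pack('<' + 'i' * len(values), *values))
assert list(_decode_ints(message)) == values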
<SYSTEM_TASK:>
Helper for decode_qp, decodes a double array.
<END_TASK>
<USER_TASK:>
Description:
def _decode_doubles(message):
"""Helper for decode_qp, decodes a double array.
The double array is stored as little endian 64 bit doubles.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
Args:
message: the double array
Returns:
decoded double array
""" |
binary = base64.b64decode(message)
return struct.unpack('<' + ('d' * (len(binary) // 8)), binary) |
<SYSTEM_TASK:>
Decode SAPI response, results in a `qp` format, explicitly using numpy.
<END_TASK>
<USER_TASK:>
Description:
def decode_qp_numpy(msg, return_matrix=True):
"""Decode SAPI response, results in a `qp` format, explicitly using numpy.
If numpy is not installed, the method will fail.
    To use numpy for decoding, but return the results as lists (instead of
numpy matrices), set `return_matrix=False`.
""" |
import numpy as np
result = msg['answer']
# Build some little endian type encodings
double_type = np.dtype(np.double)
double_type = double_type.newbyteorder('<')
int_type = np.dtype(np.int32)
int_type = int_type.newbyteorder('<')
# Decode the simple buffers
result['energies'] = np.frombuffer(base64.b64decode(result['energies']),
dtype=double_type)
if 'num_occurrences' in result:
result['num_occurrences'] = \
np.frombuffer(base64.b64decode(result['num_occurrences']),
dtype=int_type)
result['active_variables'] = \
np.frombuffer(base64.b64decode(result['active_variables']),
dtype=int_type)
# Measure out the binary data size
num_solutions = len(result['energies'])
active_variables = result['active_variables']
num_variables = len(active_variables)
total_variables = result['num_variables']
# Decode the solutions, which will be a continuous run of bits
byte_type = np.dtype(np.uint8)
byte_type = byte_type.newbyteorder('<')
bits = np.unpackbits(np.frombuffer(base64.b64decode(result['solutions']),
dtype=byte_type))
# Clip off the extra bits from encoding
if num_solutions:
bits = np.reshape(bits, (num_solutions, bits.size // num_solutions))
bits = np.delete(bits, range(num_variables, bits.shape[1]), 1)
# Switch from bits to spins
default = 3
if msg['type'] == 'ising':
bits = bits.astype(np.int8)
bits *= 2
bits -= 1
default = 0
# Fill in the missing variables
solutions = np.full((num_solutions, total_variables), default, dtype=np.int8)
solutions[:, active_variables] = bits
result['solutions'] = solutions
# If the final result shouldn't be numpy formats switch back to python objects
if not return_matrix:
result['energies'] = result['energies'].tolist()
if 'num_occurrences' in result:
result['num_occurrences'] = result['num_occurrences'].tolist()
result['active_variables'] = result['active_variables'].tolist()
result['solutions'] = result['solutions'].tolist()
return result |
<SYSTEM_TASK:>
Calculate the energy of a state given the Hamiltonian.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_ising(linear, quad, state):
"""Calculate the energy of a state given the Hamiltonian.
Args:
linear: Linear Hamiltonian terms.
quad: Quadratic Hamiltonian terms.
state: Vector of spins describing the system state.
Returns:
Energy of the state evaluated by the given energy function.
""" |
# If we were given a numpy array cast to list
if _numpy and isinstance(state, np.ndarray):
return evaluate_ising(linear, quad, state.tolist())
# Accumulate the linear and quadratic values
energy = 0.0
for index, value in uniform_iterator(linear):
energy += state[index] * value
for (index_a, index_b), value in six.iteritems(quad):
energy += value * state[index_a] * state[index_b]
return energy |
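A worked example, assuming evaluate_ising above is in scope and that uniform_iterator yields (index, bias) pairs for a plain list of linear terms.
linear = [1.0, -0.5]   # h0 = 1.0, h1 = -0.5
quad = {(0, 1): 2.0}   # J01 = 2.0
state = [1, -1]
# E = 1.0*1 + (-0.5)*(-1) + 2.0*1*(-1) = 1.0 + 0.5 - 2.0 = -0.5
assert evaluate_ising(linear, quad, state) == -0.5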
<SYSTEM_TASK:>
Calculate a set of all active qubits. Qubit is "active" if it has
<END_TASK>
<USER_TASK:>
Description:
def active_qubits(linear, quadratic):
"""Calculate a set of all active qubits. Qubit is "active" if it has
bias or coupling attached.
Args:
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the model.
Returns:
set:
Active qubits' indices.
""" |
active = {idx for idx,bias in uniform_iterator(linear)}
for edge, _ in six.iteritems(quadratic):
active.update(edge)
return active |
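A short illustration, assuming active_qubits above is in scope; a qubit is included if it carries a linear bias or appears in any coupler.
linear = {0: 0.5, 3: -1.0}
quadratic = {(0, 2): 1.0}
assert active_qubits(linear, quadratic) == {0, 2, 3}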
<SYSTEM_TASK:>
Strips elements of `values` from the beginning of `sequence`.
<END_TASK>
<USER_TASK:>
Description:
def strip_head(sequence, values):
"""Strips elements of `values` from the beginning of `sequence`.""" |
values = set(values)
return list(itertools.dropwhile(lambda x: x in values, sequence)) |
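For example, assuming strip_head above is in scope, only the leading matches are removed:
assert strip_head([0, 0, 1, 2, 0, 3], {0}) == [1, 2, 0, 3]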
<SYSTEM_TASK:>
Adds `next` to the context.
<END_TASK>
<USER_TASK:>
Description:
def get_context_data(self, **kwargs):
"""
Adds `next` to the context.
        This makes sure that the `next` parameter doesn't get lost if the
        form was submitted with invalid data.
""" |
ctx = super(UserMediaImageViewMixin, self).get_context_data(**kwargs)
ctx.update({
'action': self.action,
'next': self.next,
})
return ctx |
<SYSTEM_TASK:>
Returns the success URL.
<END_TASK>
<USER_TASK:>
Description:
def get_success_url(self):
"""
Returns the success URL.
This is either the given `next` URL parameter or the content object's
`get_absolute_url` method's return value.
""" |
if self.next:
return self.next
if self.object and self.object.content_object:
return self.object.content_object.get_absolute_url()
raise Exception(
'No content object given. Please provide ``next`` in your POST'
' data') |
<SYSTEM_TASK:>
Adds useful objects to the class and performs security checks.
<END_TASK>
<USER_TASK:>
Description:
def dispatch(self, request, *args, **kwargs):
"""Adds useful objects to the class and performs security checks.""" |
self._add_next_and_user(request)
self.content_object = None
self.content_type = None
self.object_id = kwargs.get('object_id', None)
if kwargs.get('content_type'):
            # Check if the user forged the URL and posted a nonexistent
            # content type
try:
self.content_type = ContentType.objects.get(
model=kwargs.get('content_type'))
except ContentType.DoesNotExist:
raise Http404
if self.content_type:
# Check if the user forged the URL and tries to append the image to
# an object that does not exist
try:
self.content_object = \
self.content_type.get_object_for_this_type(
pk=self.object_id)
except ObjectDoesNotExist:
raise Http404
if self.content_object and hasattr(self.content_object, 'user'):
# Check if the user forged the URL and tries to append the image to
# an object that does not belong to him
if not self.content_object.user == self.user:
raise Http404
return super(CreateImageView, self).dispatch(request, *args, **kwargs) |
<SYSTEM_TASK:>
Making sure that a user can only delete his own images.
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self):
"""
Making sure that a user can only delete his own images.
Even when he forges the request URL.
""" |
queryset = super(DeleteImageView, self).get_queryset()
queryset = queryset.filter(user=self.user)
return queryset |
<SYSTEM_TASK:>
Making sure that a user can only edit his own images.
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self):
"""
Making sure that a user can only edit his own images.
Even when he forges the request URL.
""" |
queryset = super(UpdateImageView, self).get_queryset()
queryset = queryset.filter(user=self.user)
return queryset |
<SYSTEM_TASK:>
Deletes all user media images of the given instance.
<END_TASK>
<USER_TASK:>
Description:
def _delete_images(self, instance):
"""Deletes all user media images of the given instance.""" |
UserMediaImage.objects.filter(
content_type=ContentType.objects.get_for_model(instance),
object_id=instance.pk,
user=instance.user,
).delete() |
<SYSTEM_TASK:>
It seems like in Django 1.5 something has changed.
<END_TASK>
<USER_TASK:>
Description:
def clean_image(self):
"""
It seems like in Django 1.5 something has changed.
When Django tries to validate the form, it checks if the generated
        filename fits into the max_length. But at this point, self.instance.user
is not yet set so our filename generation function cannot create
the new file path because it needs the user id. Setting
self.instance.user at this point seems to work as a workaround.
""" |
self.instance.user = self.user
data = self.cleaned_data.get('image')
return data |
<SYSTEM_TASK:>
Makes sure that an image is also deleted from the media directory.
<END_TASK>
<USER_TASK:>
Description:
def image_post_delete_handler(sender, instance, **kwargs):
"""
    Makes sure that an image is also deleted from the media directory.
This should prevent a load of "dead" image files on disc.
""" |
for f in glob.glob('{}/{}*'.format(instance.image.storage.location,
instance.image.name)):
if not os.path.isdir(f):
instance.image.storage.delete(f) |
<SYSTEM_TASK:>
Uses box coordinates to crop an image without resizing it first.
<END_TASK>
<USER_TASK:>
Description:
def crop_box(im, box=False, **kwargs):
"""Uses box coordinates to crop an image without resizing it first.""" |
if box:
im = im.crop(box)
return im |
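A minimal sketch, assuming Pillow is installed and crop_box above is in scope; the box is the usual (left, upper, right, lower) tuple.
from PIL import Image
im = Image.new('RGB', (100, 100))
assert crop_box(im, box=(10, 10, 60, 60)).size == (50, 50)
assert crop_box(im).size == (100, 100)  # without a box the image is returned unchanged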
<SYSTEM_TASK:>
Assign a value to this Value.
<END_TASK>
<USER_TASK:>
Description:
def AssignVar(self, value):
"""Assign a value to this Value.""" |
self.value = value
# Call OnAssignVar on options.
    for option in self.options:
      option.OnAssignVar() |
<SYSTEM_TASK:>
Passes the line through each rule until a match is made.
<END_TASK>
<USER_TASK:>
Description:
def _CheckLine(self, line):
"""Passes the line through each rule until a match is made.
Args:
line: A string, the current input line.
""" |
for rule in self._cur_state:
matched = self._CheckRule(rule, line)
if matched:
for value in matched.groupdict():
self._AssignVar(matched, value)
if self._Operations(rule):
# Not a Continue so check for state transition.
if rule.new_state:
if rule.new_state not in ('End', 'EOF'):
self._cur_state = self.states[rule.new_state]
self._cur_state_name = rule.new_state
break |
<SYSTEM_TASK:>
Does not yield the input class
<END_TASK>
<USER_TASK:>
Description:
def walk_subclasses(root):
"""Does not yield the input class""" |
classes = [root]
visited = set()
while classes:
cls = classes.pop()
if cls is type or cls in visited:
continue
classes.extend(cls.__subclasses__())
visited.add(cls)
if cls is not root:
yield cls |
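A small check, assuming walk_subclasses above is in scope; the root class is never yielded, but all transitive subclasses are.
class A: pass
class B(A): pass
class C(B): pass
assert set(walk_subclasses(A)) == {B, C}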
<SYSTEM_TASK:>
Mark the object as having been persisted at least once.
<END_TASK>
<USER_TASK:>
Description:
def sync(obj, engine):
"""Mark the object as having been persisted at least once.
Store the latest snapshot of all marked values.""" |
snapshot = Condition()
# Only expect values (or lack of a value) for columns that have been explicitly set
for column in sorted(_obj_tracking[obj]["marked"], key=lambda col: col.dynamo_name):
value = getattr(obj, column.name, None)
value = engine._dump(column.typedef, value)
condition = column == value
# The renderer shouldn't try to dump the value again.
# We're dumping immediately in case the value is mutable,
# such as a set or (many) custom data types.
condition.dumped = True
snapshot &= condition
_obj_tracking[obj]["snapshot"] = snapshot |
<SYSTEM_TASK:>
Provided for debug output when rendering conditions.
<END_TASK>
<USER_TASK:>
Description:
def printable_name(column, path=None):
"""Provided for debug output when rendering conditions.
User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
""" |
pieces = [column.name]
path = path or path_of(column)
for segment in path:
if isinstance(segment, str):
pieces.append(segment)
else:
pieces[-1] += "[{}]".format(segment)
return ".".join(pieces) |
<SYSTEM_TASK:>
Yield all conditions within the given condition.
<END_TASK>
<USER_TASK:>
Description:
def iter_conditions(condition):
"""Yield all conditions within the given condition.
If the root condition is and/or/not, it is not yielded (unless a cyclic reference to it is found).""" |
conditions = list()
visited = set()
# Has to be split out, since we don't want to visit the root (for cyclic conditions)
# but we don't want to yield it (if it's non-cyclic) because this only yields inner conditions
if condition.operation in {"and", "or"}:
conditions.extend(reversed(condition.values))
elif condition.operation == "not":
conditions.append(condition.values[0])
else:
conditions.append(condition)
while conditions:
condition = conditions.pop()
if condition in visited:
continue
visited.add(condition)
yield condition
if condition.operation in {"and", "or", "not"}:
conditions.extend(reversed(condition.values)) |
<SYSTEM_TASK:>
Yield all columns in the condition or its inner conditions.
<END_TASK>
<USER_TASK:>
Description:
def iter_columns(condition):
"""
Yield all columns in the condition or its inner conditions.
Unwraps proxies when the condition's column (or any of its values) include paths.
""" |
# Like iter_conditions, this can't live in each condition without going possibly infinite on the
# recursion, or passing the visited set through every call. That makes the signature ugly, so we
# take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the
# actual columns.
visited = set()
for condition in iter_conditions(condition):
if condition.operation in ("and", "or", "not"):
continue
# Non-meta conditions always have a column, and each of values has the potential to be a column.
# Comparison will only have a list of len 1, but it's simpler to just iterate values and check each
# unwrap proxies created for paths
column = proxied(condition.column)
# special case for None
# this could also have skipped on isinstance(condition, Condition)
# but this is slightly more flexible for users to create their own None-sentinel Conditions
if column is None:
continue
if column not in visited:
visited.add(column)
yield column
for value in condition.values:
if isinstance(value, ComparisonMixin):
if value not in visited:
visited.add(value)
yield value |
<SYSTEM_TASK:>
inner=True uses column.typedef.inner_typedef instead of column.typedef
<END_TASK>
<USER_TASK:>
Description:
def _value_ref(self, column, value, *, dumped=False, inner=False):
"""inner=True uses column.typedef.inner_type instead of column.typedef""" |
ref = ":v{}".format(self.next_index)
# Need to dump this value
if not dumped:
typedef = column.typedef
for segment in path_of(column):
typedef = typedef[segment]
if inner:
typedef = typedef.inner_typedef
value = self.engine._dump(typedef, value)
self.attr_values[ref] = value
self.counts[ref] += 1
return ref, value |
<SYSTEM_TASK:>
Decrement the usage of each ref by 1.
<END_TASK>
<USER_TASK:>
Description:
def pop_refs(self, *refs):
"""Decrement the usage of each ref by 1.
If this was the last use of a ref, remove it from attr_names or attr_values.
""" |
for ref in refs:
name = ref.name
count = self.counts[name]
# Not tracking this ref
if count < 1:
continue
# Someone else is using this ref
elif count > 1:
self.counts[name] -= 1
# Last reference
else:
logger.debug("popping last usage of {}".format(ref))
self.counts[name] -= 1
if ref.type == "value":
del self.attr_values[name]
else:
# Clean up both name indexes
path_segment = self.attr_names[name]
del self.attr_names[name]
del self.name_attr_index[path_segment] |
<SYSTEM_TASK:>
Main entry point for rendering multiple expressions. All parameters are optional, except obj when
<END_TASK>
<USER_TASK:>
Description:
def render(self, obj=None, condition=None, atomic=False, update=False, filter=None, projection=None, key=None):
"""Main entry point for rendering multiple expressions. All parameters are optional, except obj when
atomic or update are True.
:param obj: *(Optional)* An object to render an atomic condition or update expression for. Required if
            update or atomic are true. Default is None.
:param condition: *(Optional)* Rendered as a "ConditionExpression" for a conditional operation.
If atomic is True, the two are rendered in an AND condition. Default is None.
:type condition: :class:`~bloop.conditions.BaseCondition`
:param bool atomic: *(Optional)* True if an atomic condition should be created for ``obj`` and rendered as
a "ConditionExpression". Default is False.
:param bool update: *(Optional)* True if an "UpdateExpression" should be rendered for ``obj``.
Default is False.
:param filter: *(Optional)* A filter condition for a query or scan, rendered as a "FilterExpression".
Default is None.
:type filter: :class:`~bloop.conditions.BaseCondition`
        :param projection: *(Optional)* A set of Columns to include in a query or scan, rendered as a
"ProjectionExpression". Default is None.
:type projection: set :class:`~bloop.models.Column`
:param key: *(Optional)* A key condition for queries, rendered as a "KeyConditionExpression". Default is None.
:type key: :class:`~bloop.conditions.BaseCondition`
""" |
if (atomic or update) and not obj:
raise InvalidCondition("An object is required to render atomic conditions or updates without an object.")
if filter:
self.render_filter_expression(filter)
if projection:
self.render_projection_expression(projection)
if key:
self.render_key_expression(key)
# Condition requires a bit of work, because either one can be empty/false
condition = (condition or Condition()) & (get_snapshot(obj) if atomic else Condition())
if condition:
self.render_condition_expression(condition)
if update:
self.render_update_expression(obj) |
<SYSTEM_TASK:>
Replaces the attr dict at the given key with an instance of a Model
<END_TASK>
<USER_TASK:>
Description:
def _unpack(self, record, key, expected):
"""Replaces the attr dict at the given key with an instance of a Model""" |
attrs = record.get(key)
if attrs is None:
return
obj = unpack_from_dynamodb(
attrs=attrs,
expected=expected,
model=self.model,
engine=self.engine
)
object_loaded.send(self.engine, engine=self.engine, obj=obj)
record[key] = obj |
<SYSTEM_TASK:>
Repack a record into a cleaner structure for consumption.
<END_TASK>
<USER_TASK:>
Description:
def reformat_record(record):
"""Repack a record into a cleaner structure for consumption.""" |
return {
"key": record["dynamodb"].get("Keys", None),
"new": record["dynamodb"].get("NewImage", None),
"old": record["dynamodb"].get("OldImage", None),
"meta": {
"created_at": record["dynamodb"]["ApproximateCreationDateTime"],
"event": {
"id": record["eventID"],
"type": record["eventName"].lower(),
"version": record["eventVersion"]
},
"sequence_number": record["dynamodb"]["SequenceNumber"],
}
} |
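A hedged example, assuming reformat_record above is in scope; the input below is a hypothetical record shaped like the DynamoDB Streams events this function expects.
raw = {
    "eventID": "1",
    "eventName": "INSERT",
    "eventVersion": "1.1",
    "dynamodb": {
        "ApproximateCreationDateTime": 1490000000,
        "SequenceNumber": "700000000000000000000",
        "Keys": {"id": {"S": "user-1"}},
        "NewImage": {"id": {"S": "user-1"}, "name": {"S": "alice"}},
    },
}
clean = reformat_record(raw)
assert clean["meta"]["event"]["type"] == "insert"
assert clean["old"] is None and clean["new"] is not None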
<SYSTEM_TASK:>
JSON-serializable representation of the current Shard state.
<END_TASK>
<USER_TASK:>
Description:
def token(self):
"""JSON-serializable representation of the current Shard state.
The token is enough to rebuild the Shard as part of rebuilding a Stream.
:returns: Shard state as a json-friendly dict
:rtype: dict
""" |
if self.iterator_type in RELATIVE_ITERATORS:
logger.warning("creating shard token at non-exact location \"{}\"".format(self.iterator_type))
token = {
"stream_arn": self.stream_arn,
"shard_id": self.shard_id,
"iterator_type": self.iterator_type,
"sequence_number": self.sequence_number,
}
if self.parent:
token["parent"] = self.parent.shard_id
if not self.iterator_type:
del token["iterator_type"]
if not self.sequence_number:
del token["sequence_number"]
return token |
<SYSTEM_TASK:>
Move to a new position in the shard using the standard parameters to GetShardIterator.
<END_TASK>
<USER_TASK:>
Description:
def jump_to(self, *, iterator_type, sequence_number=None):
"""Move to a new position in the shard using the standard parameters to GetShardIterator.
:param str iterator_type: "trim_horizon", "at_sequence", "after_sequence", "latest"
:param str sequence_number: *(Optional)* Sequence number to use with at/after sequence. Default is None.
""" |
# Just a simple wrapper; let the caller handle RecordsExpired
self.iterator_id = self.session.get_shard_iterator(
stream_arn=self.stream_arn,
shard_id=self.shard_id,
iterator_type=iterator_type,
sequence_number=sequence_number)
self.iterator_type = iterator_type
self.sequence_number = sequence_number
self.empty_responses = 0 |
<SYSTEM_TASK:>
If the Shard doesn't have any children, tries to find some from DescribeStream.
<END_TASK>
<USER_TASK:>
Description:
def load_children(self):
"""If the Shard doesn't have any children, tries to find some from DescribeStream.
If the Shard is open this won't find any children, so an empty response doesn't
mean the Shard will **never** have children.
""" |
# Child count is fixed the first time any of the following happen:
# 0 :: stream closed or throughput decreased
# 1 :: shard was open for ~4 hours
# 2 :: throughput increased
if self.children:
return self.children
# ParentShardId -> [Shard, ...]
by_parent = collections.defaultdict(list)
# ShardId -> Shard
by_id = {}
for shard in self.session.describe_stream(
stream_arn=self.stream_arn,
first_shard=self.shard_id)["Shards"]:
parent_list = by_parent[shard.get("ParentShardId")]
shard = Shard(
stream_arn=self.stream_arn,
shard_id=shard["ShardId"],
parent=shard.get("ParentShardId"),
session=self.session)
parent_list.append(shard)
by_id[shard.shard_id] = shard
# Find this shard when looking up shards by ParentShardId
by_id[self.shard_id] = self
# Insert this shard's children, then handle its child's descendants etc.
to_insert = collections.deque(by_parent[self.shard_id])
while to_insert:
shard = to_insert.popleft()
# ParentShardId -> Shard
shard.parent = by_id[shard.parent]
shard.parent.children.append(shard)
# Continue for any shards that have this shard as their parent
to_insert.extend(by_parent[shard.shard_id])
return self.children |
<SYSTEM_TASK:>
Get the next set of records in this shard. An empty list doesn't guarantee the shard is exhausted.
<END_TASK>
<USER_TASK:>
Description:
def get_records(self):
"""Get the next set of records in this shard. An empty list doesn't guarantee the shard is exhausted.
:returns: A list of reformatted records. May be empty.
""" |
# Won't be able to find new records.
if self.exhausted:
return []
# Already caught up, just the one call please.
if self.empty_responses >= CALLS_TO_REACH_HEAD:
return self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
# Up to 5 calls to try and find a result
while self.empty_responses < CALLS_TO_REACH_HEAD and not self.exhausted:
records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
if records:
return records
return [] |
<SYSTEM_TASK:>
Create backing tables for a model and its non-abstract subclasses.
<END_TASK>
<USER_TASK:>
Description:
def bind(self, model, *, skip_table_setup=False):
"""Create backing tables for a model and its non-abstract subclasses.
:param model: Base model to bind. Can be abstract.
:param skip_table_setup: Don't create or verify the table in DynamoDB. Default is False.
:raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`.
""" |
# Make sure we're looking at models
validate_is_model(model)
concrete = set(filter(lambda m: not m.Meta.abstract, walk_subclasses(model)))
if not model.Meta.abstract:
concrete.add(model)
logger.debug("binding non-abstract models {}".format(
sorted(c.__name__ for c in concrete)
))
# create_table doesn't block until ACTIVE or validate.
# It also doesn't throw when the table already exists, making it safe
# to call multiple times for the same unbound model.
if skip_table_setup:
logger.info("skip_table_setup is True; not trying to create tables or validate models during bind")
else:
self.session.clear_cache()
is_creating = {}
for model in concrete:
table_name = self._compute_table_name(model)
before_create_table.send(self, engine=self, model=model)
if not skip_table_setup:
if table_name in is_creating:
continue
creating = self.session.create_table(table_name, model)
is_creating[table_name] = creating
for model in concrete:
if not skip_table_setup:
table_name = self._compute_table_name(model)
if is_creating[table_name]:
# polls until table is active
self.session.describe_table(table_name)
if model.Meta.ttl:
self.session.enable_ttl(table_name, model)
if model.Meta.backups and model.Meta.backups["enabled"]:
self.session.enable_backups(table_name, model)
self.session.validate_table(table_name, model)
model_validated.send(self, engine=self, model=model)
model_bound.send(self, engine=self, model=model)
logger.info("successfully bound {} models to the engine".format(len(concrete))) |
<SYSTEM_TASK:>
Delete one or more objects.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, *objs, condition=None, atomic=False):
"""Delete one or more objects.
:param objs: objects to delete.
:param condition: only perform each delete if this condition holds.
:param bool atomic: only perform each delete if the local and DynamoDB versions of the object match.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
""" |
objs = set(objs)
validate_not_abstract(*objs)
for obj in objs:
self.session.delete_item({
"TableName": self._compute_table_name(obj.__class__),
"Key": dump_key(self, obj),
**render(self, obj=obj, atomic=atomic, condition=condition)
})
object_deleted.send(self, engine=self, obj=obj)
logger.info("successfully deleted {} objects".format(len(objs))) |
<SYSTEM_TASK:>
Populate objects from DynamoDB.
<END_TASK>
<USER_TASK:>
Description:
def load(self, *objs, consistent=False):
"""Populate objects from DynamoDB.
        :param objs: objects to load.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column.
:raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
""" |
get_table_name = self._compute_table_name
objs = set(objs)
validate_not_abstract(*objs)
table_index, object_index, request = {}, {}, {}
for obj in objs:
table_name = get_table_name(obj.__class__)
key = dump_key(self, obj)
index = index_for(key)
if table_name not in object_index:
table_index[table_name] = list(sorted(key.keys()))
object_index[table_name] = {}
request[table_name] = {"Keys": [], "ConsistentRead": consistent}
if index not in object_index[table_name]:
request[table_name]["Keys"].append(key)
object_index[table_name][index] = set()
object_index[table_name][index].add(obj)
response = self.session.load_items(request)
for table_name, list_of_attrs in response.items():
for attrs in list_of_attrs:
key_shape = table_index[table_name]
key = extract_key(key_shape, attrs)
index = index_for(key)
for obj in object_index[table_name].pop(index):
unpack_from_dynamodb(
attrs=attrs, expected=obj.Meta.columns, engine=self, obj=obj)
object_loaded.send(self, engine=self, obj=obj)
if not object_index[table_name]:
object_index.pop(table_name)
if object_index:
not_loaded = set()
for index in object_index.values():
for index_set in index.values():
not_loaded.update(index_set)
logger.info("loaded {} of {} objects".format(len(objs) - len(not_loaded), len(objs)))
raise MissingObjects("Failed to load some objects.", objects=not_loaded)
logger.info("successfully loaded {} objects".format(len(objs))) |
<SYSTEM_TASK:>
Save one or more objects.
<END_TASK>
<USER_TASK:>
Description:
def save(self, *objs, condition=None, atomic=False):
"""Save one or more objects.
:param objs: objects to save.
:param condition: only perform each save if this condition holds.
:param bool atomic: only perform each save if the local and DynamoDB versions of the object match.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
""" |
objs = set(objs)
validate_not_abstract(*objs)
for obj in objs:
self.session.save_item({
"TableName": self._compute_table_name(obj.__class__),
"Key": dump_key(self, obj),
**render(self, obj=obj, atomic=atomic, condition=condition, update=True)
})
object_saved.send(self, engine=self, obj=obj)
logger.info("successfully saved {} objects".format(len(objs))) |
<SYSTEM_TASK:>
Returns the DynamoDB backing type for a given python value's type
<END_TASK>
<USER_TASK:>
Description:
def backing_type_for(value):
"""Returns the DynamoDB backing type for a given python value's type
::
4 -> 'N'
['x', 3] -> 'L'
        {2, 4} -> 'NS'
""" |
if isinstance(value, str):
vtype = "S"
elif isinstance(value, bytes):
vtype = "B"
# NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number)
elif isinstance(value, bool):
vtype = "BOOL"
elif isinstance(value, numbers.Number):
vtype = "N"
elif isinstance(value, dict):
vtype = "M"
elif isinstance(value, list):
vtype = "L"
elif isinstance(value, set):
if not value:
vtype = "SS" # doesn't matter, Set(x) should dump an empty set the same for all x
else:
inner = next(iter(value))
if isinstance(inner, str):
vtype = "SS"
elif isinstance(inner, bytes):
vtype = "BS"
elif isinstance(inner, numbers.Number):
vtype = "NS"
else:
raise ValueError(f"Unknown set type for inner value {inner!r}")
else:
raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}")
return vtype |
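A few spot checks, assuming backing_type_for above is in scope:
assert backing_type_for("hello") == "S"
assert backing_type_for(True) == "BOOL"   # bool is checked before numbers.Number
assert backing_type_for(4) == "N"
assert backing_type_for({2, 4}) == "NS"
assert backing_type_for(["x", 3]) == "L"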
<SYSTEM_TASK:>
Monitor changes in approximately real-time and replicate them
<END_TASK>
<USER_TASK:>
Description:
def stream_replicate():
"""Monitor changes in approximately real-time and replicate them""" |
stream = primary.stream(SomeDataBlob, "trim_horizon")
next_heartbeat = pendulum.now()
while True:
now = pendulum.now()
if now >= next_heartbeat:
stream.heartbeat()
next_heartbeat = now.add(minutes=10)
record = next(stream)
if record is None:
continue
if record["new"] is not None:
replica.save(record["new"])
else:
replica.delete(record["old"]) |
<SYSTEM_TASK:>
Move to the "trim_horizon" or "latest" of the entire stream.
<END_TASK>
<USER_TASK:>
Description:
def _move_stream_endpoint(coordinator, position):
"""Move to the "trim_horizon" or "latest" of the entire stream.""" |
# 0) Everything will be rebuilt from DescribeStream.
stream_arn = coordinator.stream_arn
coordinator.roots.clear()
coordinator.active.clear()
coordinator.buffer.clear()
# 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call
current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"]
current_shards = unpack_shards(current_shards, stream_arn, coordinator.session)
# 2) Roots are any shards without parents.
coordinator.roots.extend(shard for shard in current_shards.values() if not shard.parent)
# 3.0) Stream trim_horizon is the combined trim_horizon of all roots.
if position == "trim_horizon":
for shard in coordinator.roots:
shard.jump_to(iterator_type="trim_horizon")
coordinator.active.extend(coordinator.roots)
# 3.1) Stream latest is the combined latest of all shards without children.
else:
for root in coordinator.roots:
for shard in root.walk_tree():
if not shard.children:
shard.jump_to(iterator_type="latest")
coordinator.active.append(shard) |
<SYSTEM_TASK:>
Move to the Stream position described by the token.
<END_TASK>
<USER_TASK:>
Description:
def _move_stream_token(coordinator, token):
"""Move to the Stream position described by the token.
The following rules are applied when interpolation is required:
- If a shard does not exist (past the trim_horizon) it is ignored. If that
shard had children, its children are also checked against the existing shards.
- If none of the shards in the token exist, then InvalidStream is raised.
- If a Shard expects its iterator to point to a SequenceNumber that is now past
that Shard's trim_horizon, the Shard instead points to trim_horizon.
""" |
stream_arn = coordinator.stream_arn = token["stream_arn"]
# 0) Everything will be rebuilt from the DescribeStream masked by the token.
coordinator.roots.clear()
coordinator.active.clear()
coordinator.closed.clear()
coordinator.buffer.clear()
# Injecting the token gives us access to the standard shard management functions
token_shards = unpack_shards(token["shards"], stream_arn, coordinator.session)
coordinator.roots = [shard for shard in token_shards.values() if not shard.parent]
coordinator.active.extend(token_shards[shard_id] for shard_id in token["active"])
# 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call
current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"]
current_shards = unpack_shards(current_shards, stream_arn, coordinator.session)
# 2) Trying to find an intersection with the actual Stream by walking each root shard's tree.
# Prune any Shard with no children that's not part of the actual Stream.
# Raise InvalidStream if the entire token is pruned.
unverified = collections.deque(coordinator.roots)
while unverified:
shard = unverified.popleft()
if shard.shard_id not in current_shards:
logger.info("Unknown or expired shard \"{}\" - pruning from stream token".format(shard.shard_id))
coordinator.remove_shard(shard, drop_buffered_records=True)
unverified.extend(shard.children)
# 3) Everything was pruned, so the token describes an unknown stream.
if not coordinator.roots:
raise InvalidStream("This token has no relation to the actual Stream.")
# 4) Now that everything's verified, grab new iterators for the coordinator's active Shards.
for shard in coordinator.active:
try:
if shard.iterator_type is None:
# Descendant of an unknown shard
shard.iterator_type = "trim_horizon"
# Move back to the token's specified position
shard.jump_to(iterator_type=shard.iterator_type, sequence_number=shard.sequence_number)
except RecordsExpired:
# This token shard's sequence_number is beyond the trim_horizon.
# The next closest record is at trim_horizon.
msg = "SequenceNumber \"{}\" in shard \"{}\" beyond trim horizon: jumping to trim_horizon"
logger.info(msg.format(shard.sequence_number, shard.shard_id))
shard.jump_to(iterator_type="trim_horizon") |
<SYSTEM_TASK:>
Poll active shards for records and insert them into the buffer. Rotate exhausted shards.
<END_TASK>
<USER_TASK:>
Description:
def advance_shards(self):
"""Poll active shards for records and insert them into the buffer. Rotate exhausted shards.
Returns immediately if the buffer isn't empty.
""" |
# Don't poll shards when there are pending records.
if self.buffer:
return
# 0) Collect new records from all active shards.
record_shard_pairs = []
for shard in self.active:
records = next(shard)
if records:
record_shard_pairs.extend((record, shard) for record in records)
self.buffer.push_all(record_shard_pairs)
self.migrate_closed_shards() |
<SYSTEM_TASK:>
Keep active shards with "trim_horizon", "latest" iterators alive by advancing their iterators.
<END_TASK>
<USER_TASK:>
Description:
def heartbeat(self):
"""Keep active shards with "trim_horizon", "latest" iterators alive by advancing their iterators.""" |
for shard in self.active:
if shard.sequence_number is None:
records = next(shard)
# Success! This shard now has an ``at_sequence`` iterator
if records:
self.buffer.push_all((record, shard) for record in records)
self.migrate_closed_shards() |
<SYSTEM_TASK:>
JSON-serializable representation of the current Stream state.
<END_TASK>
<USER_TASK:>
Description:
def token(self):
"""JSON-serializable representation of the current Stream state.
Use :func:`Engine.stream(YourModel, token) <bloop.engine.Engine.stream>` to create an identical stream,
or :func:`stream.move_to(token) <bloop.stream.Stream.move_to>` to move an existing stream to this position.
:returns: Stream state as a json-friendly dict
:rtype: dict
""" |
# 0) Trace roots and active shards
active_ids = []
shard_tokens = []
for root in self.roots:
for shard in root.walk_tree():
shard_tokens.append(shard.token)
# dedupe, stream_arn will be in the root token
shard_tokens[-1].pop("stream_arn")
active_ids.extend((shard.shard_id for shard in self.active))
# 1) Inject closed shards
for shard in self.closed.keys():
active_ids.append(shard.shard_id)
shard_tokens.append(shard.token)
shard_tokens[-1].pop("stream_arn")
return {
"stream_arn": self.stream_arn,
"active": active_ids,
"shards": shard_tokens
} |
<SYSTEM_TASK:>
Remove a Shard from the Coordinator. Drops all buffered records from the Shard.
<END_TASK>
<USER_TASK:>
Description:
def remove_shard(self, shard, drop_buffered_records=False):
"""Remove a Shard from the Coordinator. Drops all buffered records from the Shard.
If the Shard is active or a root, it is removed and any children promoted to those roles.
:param shard: The shard to remove
:type shard: :class:`~bloop.stream.shard.Shard`
:param bool drop_buffered_records:
Whether records from this shard should be removed.
Default is False.
""" |
try:
self.roots.remove(shard)
except ValueError:
# Wasn't a root Shard
pass
else:
self.roots.extend(shard.children)
try:
self.active.remove(shard)
except ValueError:
# Wasn't an active Shard
pass
else:
self.active.extend(shard.children)
if drop_buffered_records:
# TODO can this be improved? Gets expensive for high-volume streams with large buffers
heap = self.buffer.heap
# Clear buffered records from the shard. Each record is (ordering, record, shard)
to_remove = [x for x in heap if x[2] is shard]
for x in to_remove:
heap.remove(x) |
<SYSTEM_TASK:>
Set the Coordinator to a specific endpoint or time, or load state from a token.
<END_TASK>
<USER_TASK:>
Description:
def move_to(self, position):
"""Set the Coordinator to a specific endpoint or time, or load state from a token.
:param position: "trim_horizon", "latest", :class:`~datetime.datetime`, or a
:attr:`Coordinator.token <bloop.stream.coordinator.Coordinator.token>`
""" |
if isinstance(position, collections.abc.Mapping):
move = _move_stream_token
elif hasattr(position, "timestamp") and callable(position.timestamp):
move = _move_stream_time
elif isinstance(position, str) and position.lower() in ["latest", "trim_horizon"]:
move = _move_stream_endpoint
else:
raise InvalidPosition("Don't know how to move to position {!r}".format(position))
move(self, position) |
<SYSTEM_TASK:>
Push a new record into the buffer
<END_TASK>
<USER_TASK:>
Description:
def push(self, record, shard):
"""Push a new record into the buffer
:param dict record: new record
:param shard: Shard the record came from
:type shard: :class:`~bloop.stream.shard.Shard`
""" |
heapq.heappush(self.heap, heap_item(self.clock, record, shard)) |
<SYSTEM_TASK:>
Set an object's field to default if it doesn't have a value
<END_TASK>
<USER_TASK:>
Description:
def setdefault(obj, field, default):
"""Set an object's field to default if it doesn't have a value""" |
setattr(obj, field, getattr(obj, field, default)) |
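For example, assuming setdefault above is in scope, an existing attribute value is preserved:
class Config:
    pass
cfg = Config()
setdefault(cfg, "retries", 3)
assert cfg.retries == 3
setdefault(cfg, "retries", 10)
assert cfg.retries == 3  # the existing value wins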
<SYSTEM_TASK:>
Bind a column to the model with the given name.
<END_TASK>
<USER_TASK:>
Description:
def bind_column(model, name, column, force=False, recursive=False, copy=False) -> Column:
"""Bind a column to the model with the given name.
This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
attach a new column to an existing model:
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
bound = bloop.models.bind_column(User, "email", email)
assert bound is email
# rebind with force, and use a copy
bound = bloop.models.bind_column(User, "email", email, force=True, copy=True)
assert bound is not email
If an existing index refers to this column, it will be updated to point to the new column
using :meth:`~bloop.models.refresh_index`, including recalculating the index projection.
Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary.
If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises
:exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive
calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`.
:param model:
The model to bind the column to.
:param name:
The name to bind the column as. In effect, used for ``setattr(model, name, column)``
:param column:
The column to bind to the model.
:param force:
Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
:param recursive:
Bind to each subclass of this model. Default is False.
:param copy:
Use a copy of the column instead of the column directly. Default is False.
:return:
The bound column. This is a new column when ``copy`` is True, otherwise the input column.
""" |
if not subclassof(model, BaseModel):
raise InvalidModel(f"{model} is not a subclass of BaseModel")
meta = model.Meta
if copy:
column = copyfn(column)
# TODO elif column.model is not None: logger.warning(f"Trying to rebind column bound to {column.model}")
column._name = name
safe_repr = unbound_repr(column)
# Guard against name, dynamo_name collisions; if force=True, unbind any matches
same_dynamo_name = (
util.index(meta.columns, "dynamo_name").get(column.dynamo_name) or
util.index(meta.indexes, "dynamo_name").get(column.dynamo_name)
)
same_name = (
meta.columns_by_name.get(column.name) or
util.index(meta.indexes, "name").get(column.name)
)
if column.hash_key and column.range_key:
raise InvalidModel(f"Tried to bind {safe_repr} as both a hash and range key.")
if force:
if same_name:
unbind(meta, name=column.name)
if same_dynamo_name:
unbind(meta, dynamo_name=column.dynamo_name)
else:
if same_name:
raise InvalidModel(
f"The column {safe_repr} has the same name as an existing column "
f"or index {same_name}. Did you mean to bind with force=True?")
if same_dynamo_name:
raise InvalidModel(
f"The column {safe_repr} has the same dynamo_name as an existing "
f"column or index {same_name}. Did you mean to bind with force=True?")
if column.hash_key and meta.hash_key:
raise InvalidModel(
f"Tried to bind {safe_repr} but {meta.model} "
f"already has a different hash_key: {meta.hash_key}")
if column.range_key and meta.range_key:
raise InvalidModel(
f"Tried to bind {safe_repr} but {meta.model} "
f"already has a different range_key: {meta.range_key}")
# success!
# --------------------------------
column.model = meta.model
meta.columns.add(column)
meta.columns_by_name[name] = column
setattr(meta.model, name, column)
if column.hash_key:
meta.hash_key = column
meta.keys.add(column)
if column.range_key:
meta.range_key = column
meta.keys.add(column)
try:
for index in meta.indexes:
refresh_index(meta, index)
except KeyError as e:
raise InvalidModel(
f"Binding column {column} removed a required column for index {unbound_repr(index)}") from e
if recursive:
for subclass in util.walk_subclasses(meta.model):
try:
bind_column(subclass, name, column, force=False, recursive=False, copy=True)
except InvalidModel:
pass
return column |
<SYSTEM_TASK:>
Bind an index to the model with the given name.
<END_TASK>
<USER_TASK:>
Description:
def bind_index(model, name, index, force=False, recursive=True, copy=False) -> Index:
"""Bind an index to the model with the given name.
This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
attach a new index to an existing model:
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
by_email = GlobalSecondaryIndex(projection="keys", hash_key="email")
bound = bloop.models.bind_index(User, "by_email", by_email)
assert bound is by_email
# rebind with force, and use a copy
bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True)
assert bound is not by_email
If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises
:exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive
calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`.
:param model:
The model to bind the index to.
:param name:
The name to bind the index as. In effect, used for ``setattr(model, name, index)``
:param index:
The index to bind to the model.
:param force:
Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
:param recursive:
        Bind to each subclass of this model. Default is True.
:param copy:
Use a copy of the index instead of the index directly. Default is False.
:return:
        The bound index. This is a new index when ``copy`` is True, otherwise the input index.
""" |
if not subclassof(model, BaseModel):
raise InvalidModel(f"{model} is not a subclass of BaseModel")
meta = model.Meta
if copy:
index = copyfn(index)
# TODO elif index.model is not None: logger.warning(f"Trying to rebind index bound to {index.model}")
index._name = name
safe_repr = unbound_repr(index)
# Guard against name, dynamo_name collisions; if force=True, unbind any matches
same_dynamo_name = (
util.index(meta.columns, "dynamo_name").get(index.dynamo_name) or
util.index(meta.indexes, "dynamo_name").get(index.dynamo_name)
)
same_name = (
meta.columns_by_name.get(index.name) or
util.index(meta.indexes, "name").get(index.name)
)
if isinstance(index, LocalSecondaryIndex) and not meta.range_key:
raise InvalidModel("An LSI requires the Model to have a range key.")
if force:
if same_name:
unbind(meta, name=index.name)
if same_dynamo_name:
unbind(meta, dynamo_name=index.dynamo_name)
else:
if same_name:
raise InvalidModel(
f"The index {safe_repr} has the same name as an existing index "
f"or column {same_name}. Did you mean to bind with force=True?")
if same_dynamo_name:
raise InvalidModel(
f"The index {safe_repr} has the same dynamo_name as an existing "
f"index or column {same_name}. Did you mean to bind with force=True?")
# success!
# --------------------------------
index.model = meta.model
meta.indexes.add(index)
setattr(meta.model, name, index)
if isinstance(index, LocalSecondaryIndex):
meta.lsis.add(index)
if isinstance(index, GlobalSecondaryIndex):
meta.gsis.add(index)
try:
refresh_index(meta, index)
except KeyError as e:
raise InvalidModel("Index expected a hash or range key that does not exist") from e
if recursive:
for subclass in util.walk_subclasses(meta.model):
try:
bind_index(subclass, name, index, force=False, recursive=False, copy=True)
except InvalidModel:
pass
return index |