text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Returns the JSON string that LendingClub expects for its search
<END_TASK>
<USER_TASK:>
Description:
def search_string(self):
""""
Returns the JSON string that LendingClub expects for it's search
""" |
self.__normalize()
# Get the template
tmpl_source = unicode(open(self.tmpl_file).read())
# Process template
compiler = Compiler()
template = compiler.compile(tmpl_source)
out = template(self)
if not out:
return False
out = ''.join(out)
#
# Cleanup output and remove all extra space
#
# remove extra spaces
out = re.sub('\n', '', out)
out = re.sub('\s{3,}', ' ', out)
# Remove hanging commas i.e: [1, 2,]
out = re.sub(',\s*([}\\]])', '\\1', out)
# Space between brackets i.e: ], [
out = re.sub('([{\\[}\\]])(,?)\s*([{\\[}\\]])', '\\1\\2\\3', out)
# Cleanup spaces around [, {, }, ], : and , characters
out = re.sub('\s*([{\\[\\]}:,])\s*', '\\1', out)
return out |
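For reference, a small standalone sketch (using a made-up pretty-printed string, not real template output) of what the cleanup substitutions above do:

import re

out = '{\n   "term": [36, 60,],\n   "grades": ["A", "B",]\n}'  # hypothetical input
out = re.sub(r'\n', '', out)
out = re.sub(r'\s{3,}', ' ', out)
out = re.sub(r',\s*([}\]])', r'\1', out)
out = re.sub(r'([{\[}\]])(,?)\s*([{\[}\]])', r'\1\2\3', out)
out = re.sub(r'\s*([{\[\]}:,])\s*', r'\1', out)
print(out)  # {"term":[36,60],"grades":["A","B"]}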
<SYSTEM_TASK:>
Get a list of all your saved filters
<END_TASK>
<USER_TASK:>
Description:
def all_filters(lc):
"""
Get a list of all your saved filters
Parameters
----------
lc : :py:class:`lendingclub.LendingClub`
An instance of the authenticated LendingClub class
Returns
-------
list
A list of lendingclub.filters.SavedFilter objects
""" |
filters = []
response = lc.session.get('/browse/getSavedFiltersAj.action')
json_response = response.json()
# Load all filters
if lc.session.json_success(json_response):
for saved in json_response['filters']:
filters.append(SavedFilter(lc, saved['id']))
return filters |
<SYSTEM_TASK:>
Load the filter from the server
<END_TASK>
<USER_TASK:>
Description:
def load(self):
"""
Load the filter from the server
""" |
# Attempt to load the saved filter
payload = {
'id': self.id
}
response = self.lc.session.get('/browse/getSavedFilterAj.action', query=payload)
self.response = response
json_response = response.json()
if self.lc.session.json_success(json_response) and json_response['filterName'] != 'No filters':
self.name = json_response['filterName']
#
# Parse out the filter JSON string manually from the response JSON.
# If the filter JSON is modified at all, or any value is out of order,
# LendingClub will reject the filter and perform a wildcard search instead,
without any error. So we need to retain the filter JSON value exactly as it is given to us.
#
text = response.text
# Cut off everything before "filter": [...]
text = re.sub('\n', '', text)
text = re.sub('^.*?,\s*["\']filter["\']:\s*\[(.*)', '[\\1', text)
# Now loop through the string until we find the end of the filter block
# This is a simple parser that keeps track of block elements, quotes and
# escape characters
blockTracker = []
blockChars = {
'[': ']',
'{': '}'
}
inQuote = False
lastChar = None
json_text = ""
for char in text:
json_text += char
# Escape char
if char == '\\':
if lastChar == '\\':
lastChar = ''
else:
lastChar = char
continue
# Quotes
if char == "'" or char == '"':
if inQuote is False: # Starting a quote block
inQuote = char
elif inQuote == char: # Ending a quote block
inQuote = False
lastChar = char
continue
# Start of a block
if char in blockChars.keys():
blockTracker.insert(0, blockChars[char])
# End of a block, remove from block path
elif len(blockTracker) > 0 and char == blockTracker[0]:
blockTracker.pop(0)
# No more blocks in the tracker which means we're at the end of the filter block
if len(blockTracker) == 0 and lastChar is not None:
break
lastChar = char
# Verify valid JSON
try:
if json_text.strip() == '':
raise SavedFilterError('A saved filter could not be found for ID {0}'.format(self.id), response)
json_test = json.loads(json_text)
# Make sure it looks right
assert type(json_test) is list, 'Expecting a list, instead received a {0}'.format(type(json_test))
assert 'm_id' in json_test[0], 'Expecting a \'m_id\' property in each filter'
assert 'm_value' in json_test[0], 'Expecting a \'m_value\' property in each filter'
self.json = json_test
except Exception as e:
raise SavedFilterError('Could not parse filter from the JSON response: {0}'.format(str(e)))
self.json_text = json_text
self.__analyze()
else:
raise SavedFilterError('A saved filter could not be found for ID {0}'.format(self.id), response) |
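A simplified, standalone sketch of the block-tracking loop above; the real parser also honours quotes and backslash escapes, which are omitted here for brevity:

def first_balanced_block(text):
    pairs = {'[': ']', '{': '}'}
    stack = []
    out = []
    for char in text:
        out.append(char)
        if char in pairs:
            # Start of a block: remember which closing character ends it
            stack.append(pairs[char])
        elif stack and char == stack[-1]:
            # End of the innermost block
            stack.pop()
            if not stack:
                break
    return ''.join(out)

print(first_balanced_block('[{"m_id": 10}, {"m_id": 39}], "startindex": 0'))
# [{"m_id": 10}, {"m_id": 39}]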
<SYSTEM_TASK:>
Analyze the filter JSON and attempt to parse out the individual filters.
<END_TASK>
<USER_TASK:>
Description:
def __analyze(self):
"""
Analyze the filter JSON and attempt to parse out the individual filters.
""" |
filter_values = {}
# ID to filter name mapping
name_map = {
10: 'grades',
11: 'loan_purpose',
13: 'approved',
15: 'funding_progress',
38: 'exclude_existing',
39: 'term',
43: 'keyword'
}
if self.json is not None:
filters = self.json
for f in filters:
if 'm_id' in f:
name = f['m_id']
# Get the name to represent this filter
if f['m_id'] in name_map:
name = name_map[f['m_id']]
# Get values
if 'm_value' in f:
raw_values = f['m_value']
value = {}
# No value, skip it
if raw_values is None:
continue
# Loop through multiple values
if type(raw_values) is list:
# A single non-string value is THE value
if len(raw_values) == 1 and type(raw_values[0]['value']) not in [str, unicode]:
value = raw_values[0]['value']
# Create a dict of values: name = True
for val in raw_values:
if type(val['value']) in [str, unicode]:
value[val['value']] = True
# A single value
else:
value = raw_values
# Normalize grades array
if name == 'grades':
if 'All' not in value:
value['All'] = False
# Add filter value
filter_values[name] = value
dict.__setitem__(self, name, value)
return filter_values |
<SYSTEM_TASK:>
Copy origin to out and return it.
<END_TASK>
<USER_TASK:>
Description:
def _float_copy_to_out(out, origin):
"""
Copy origin to out and return it.
If ``out`` is None, a new copy (cast to floating point) is used. If
``out`` and ``origin`` are the same, we simply return it. Otherwise we
copy the values.
""" |
if out is None:
out = origin / 1 # The division forces cast to a floating point type
elif out is not origin:
np.copyto(out, origin)
return out |
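A small usage sketch (made-up arrays) covering the three branches:

import numpy as np

origin = np.array([1, 2, 3])
a = _float_copy_to_out(None, origin)
print(a.dtype)                                        # float64: new floating-point copy
b = np.empty(3, dtype=float)
assert _float_copy_to_out(b, origin) is b             # values copied into the provided array
assert _float_copy_to_out(origin, origin) is origin   # same object is returned untouched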
<SYSTEM_TASK:>
Compute a centered distance matrix given a matrix.
<END_TASK>
<USER_TASK:>
Description:
def _distance_matrix_generic(x, centering, exponent=1):
"""Compute a centered distance matrix given a matrix.""" |
_check_valid_dcov_exponent(exponent)
x = _transform_to_2d(x)
# Calculate distance matrices
a = distances.pairwise_distances(x, exponent=exponent)
# Double centering
a = centering(a, out=a)
return a |
<SYSTEM_TASK:>
Scale a random vector for use with the affinely invariant measures
<END_TASK>
<USER_TASK:>
Description:
def _af_inv_scaled(x):
"""Scale a random vector for using the affinely invariant measures""" |
x = _transform_to_2d(x)
cov_matrix = np.atleast_2d(np.cov(x, rowvar=False))
cov_matrix_power = _mat_sqrt_inv(cov_matrix)
return x.dot(cov_matrix_power) |
<SYSTEM_TASK:>
Partial distance covariance estimator.
<END_TASK>
<USER_TASK:>
Description:
def partial_distance_covariance(x, y, z):
"""
Partial distance covariance estimator.
Compute the estimator for the partial distance covariance of the
random vectors corresponding to :math:`x` and :math:`y` with respect
to the random variable corresponding to :math:`z`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Random vector with respect to which the partial distance covariance
is computed. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the partial distance covariance.
See Also
--------
partial_distance_correlation
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> c = np.array([[1, 3, 4],
... [5, 7, 8],
... [9, 11, 15],
... [13, 15, 16]])
>>> dcor.partial_distance_covariance(a, a, c) # doctest: +ELLIPSIS
0.0024298...
>>> dcor.partial_distance_covariance(a, b, c)
0.0347030...
>>> dcor.partial_distance_covariance(b, b, c)
0.4956241...
""" |
a = _u_distance_matrix(x)
b = _u_distance_matrix(y)
c = _u_distance_matrix(z)
proj = u_complementary_projection(c)
return u_product(proj(a), proj(b)) |
<SYSTEM_TASK:>
Partial distance correlation estimator.
<END_TASK>
<USER_TASK:>
Description:
def partial_distance_correlation(x, y, z): # pylint:disable=too-many-locals
"""
Partial distance correlation estimator.
Compute the estimator for the partial distance correlation of the
random vectors corresponding to :math:`x` and :math:`y` with respect
to the random variable corresponding to :math:`z`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Random vector with respect to which the partial distance correlation
is computed. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the partial distance correlation.
See Also
--------
partial_distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1], [1], [2], [2], [3]])
>>> b = np.array([[1], [2], [1], [2], [1]])
>>> c = np.array([[1], [2], [2], [1], [2]])
>>> dcor.partial_distance_correlation(a, a, c)
1.0
>>> dcor.partial_distance_correlation(a, b, c) # doctest: +ELLIPSIS
-0.5...
>>> dcor.partial_distance_correlation(b, b, c)
1.0
>>> dcor.partial_distance_correlation(a, c, c)
0.0
""" |
a = _u_distance_matrix(x)
b = _u_distance_matrix(y)
c = _u_distance_matrix(z)
aa = u_product(a, a)
bb = u_product(b, b)
cc = u_product(c, c)
ab = u_product(a, b)
ac = u_product(a, c)
bc = u_product(b, c)
denom_sqr = aa * bb
r_xy = ab / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_xy = np.clip(r_xy, -1, 1)
denom_sqr = aa * cc
r_xz = ac / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_xz = np.clip(r_xz, -1, 1)
denom_sqr = bb * cc
r_yz = bc / _sqrt(denom_sqr) if denom_sqr != 0 else denom_sqr
r_yz = np.clip(r_yz, -1, 1)
denom = _sqrt(1 - r_xz ** 2) * _sqrt(1 - r_yz ** 2)
return (r_xy - r_xz * r_yz) / denom if denom != 0 else denom |
<SYSTEM_TASK:>
Compute energy distance with precalculated distance matrices.
<END_TASK>
<USER_TASK:>
Description:
def _energy_distance_from_distance_matrices(
distance_xx, distance_yy, distance_xy):
"""Compute energy distance with precalculated distance matrices.""" |
return (2 * np.mean(distance_xy) - np.mean(distance_xx) -
np.mean(distance_yy)) |
<SYSTEM_TASK:>
Naive biased estimator for distance covariance.
<END_TASK>
<USER_TASK:>
Description:
def _distance_covariance_sqr_naive(x, y, exponent=1):
"""
Naive biased estimator for distance covariance.
Computes the biased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm.
""" |
a = _distance_matrix(x, exponent=exponent)
b = _distance_matrix(y, exponent=exponent)
return mean_product(a, b) |
<SYSTEM_TASK:>
Naive unbiased estimator for distance covariance.
<END_TASK>
<USER_TASK:>
Description:
def _u_distance_covariance_sqr_naive(x, y, exponent=1):
"""
Naive unbiased estimator for distance covariance.
Computes the unbiased estimator for distance covariance between two
matrices, using an :math:`O(N^2)` algorithm.
""" |
a = _u_distance_matrix(x, exponent=exponent)
b = _u_distance_matrix(y, exponent=exponent)
return u_product(a, b) |
<SYSTEM_TASK:>
Biased distance correlation estimator between two matrices.
<END_TASK>
<USER_TASK:>
Description:
def _distance_correlation_sqr_naive(x, y, exponent=1):
"""Biased distance correlation estimator between two matrices.""" |
return _distance_sqr_stats_naive_generic(
x, y,
matrix_centered=_distance_matrix,
product=mean_product,
exponent=exponent).correlation_xy |
<SYSTEM_TASK:>
Bias-corrected distance correlation estimator between two matrices.
<END_TASK>
<USER_TASK:>
Description:
def _u_distance_correlation_sqr_naive(x, y, exponent=1):
"""Bias-corrected distance correlation estimator between two matrices.""" |
return _distance_sqr_stats_naive_generic(
x, y,
matrix_centered=_u_distance_matrix,
product=u_product,
exponent=exponent).correlation_xy |
<SYSTEM_TASK:>
Check if the fast algorithm for distance stats can be used.
<END_TASK>
<USER_TASK:>
Description:
def _can_use_fast_algorithm(x, y, exponent=1):
"""
Check if the fast algorithm for distance stats can be used.
The fast algorithm has complexity :math:`O(N \log N)`, better than the
complexity of the naive algorithm (:math:`O(N^2)`).
The algorithm can only be used for random variables (not vectors) where
the number of instances is greater than 3. Also, the exponent must be 1.
""" |
return (_is_random_variable(x) and _is_random_variable(y) and
x.shape[0] > 3 and y.shape[0] > 3 and exponent == 1) |
<SYSTEM_TASK:>
Inner function of the fast distance covariance.
<END_TASK>
<USER_TASK:>
Description:
def _dyad_update(y, c): # pylint:disable=too-many-locals
# This function has many locals so it can be compared
# with the original algorithm.
"""
Inner function of the fast distance covariance.
This function is compiled because otherwise it would become
a bottleneck.
""" |
n = y.shape[0]
gamma = np.zeros(n, dtype=c.dtype)
# Step 1: get the smallest l such that n <= 2^l
l_max = int(math.ceil(np.log2(n)))
# Step 2: assign s(l, k) = 0
s_len = 2 ** (l_max + 1)
s = np.zeros(s_len, dtype=c.dtype)
pos_sums = np.arange(l_max)
pos_sums[:] = 2 ** (l_max - pos_sums)
pos_sums = np.cumsum(pos_sums)
# Step 3: iteration
for i in range(1, n):
# Step 3.a: update s(l, k)
for l in range(l_max):
k = int(math.ceil(y[i - 1] / 2 ** l))
pos = k - 1
if l > 0:
pos += pos_sums[l - 1]
s[pos] += c[i - 1]
# Steps 3.b and 3.c
for l in range(l_max):
k = int(math.floor((y[i] - 1) / 2 ** l))
if k / 2 > math.floor(k / 2):
pos = k - 1
if l > 0:
pos += pos_sums[l - 1]
gamma[i] = gamma[i] + s[pos]
return gamma |
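Reading the loop above, ``gamma[i]`` appears to accumulate the partial sums of ``c`` over earlier positions with smaller ``y`` (with ``y`` holding integer ranks in ``1..n``, as in the fast algorithm). Under that reading, a naive O(n^2) reference sketch is:

import numpy as np

def _dyad_update_naive(y, c):
    # gamma[i] = sum of c[j] over all j < i with y[j] < y[i]
    n = y.shape[0]
    gamma = np.zeros(n, dtype=c.dtype)
    for i in range(n):
        for j in range(i):
            if y[j] < y[i]:
                gamma[i] += c[j]
    return gamma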
<SYSTEM_TASK:>
Fast algorithm for the squared distance covariance.
<END_TASK>
<USER_TASK:>
Description:
def _distance_covariance_sqr_fast_generic(
x, y, unbiased=False): # pylint:disable=too-many-locals
# This function has many locals so it can be compared
# with the original algorithm.
"""Fast algorithm for the squared distance covariance.""" |
x = np.asarray(x)
y = np.asarray(y)
x = np.ravel(x)
y = np.ravel(y)
n = x.shape[0]
assert n > 3
assert n == y.shape[0]
temp = range(n)
# Step 1
ix0 = np.argsort(x)
vx = x[ix0]
ix = np.zeros(n, dtype=int)
ix[ix0] = temp
iy0 = np.argsort(y)
vy = y[iy0]
iy = np.zeros(n, dtype=int)
iy[iy0] = temp
# Step 2
sx = np.cumsum(vx)
sy = np.cumsum(vy)
# Step 3
alpha_x = ix
alpha_y = iy
beta_x = sx[ix] - vx[ix]
beta_y = sy[iy] - vy[iy]
# Step 4
x_dot = np.sum(x)
y_dot = np.sum(y)
# Step 5
a_i_dot = x_dot + (2 * alpha_x - n) * x - 2 * beta_x
b_i_dot = y_dot + (2 * alpha_y - n) * y - 2 * beta_y
sum_ab = np.sum(a_i_dot * b_i_dot)
# Step 6
a_dot_dot = 2 * np.sum(alpha_x * x) - 2 * np.sum(beta_x)
b_dot_dot = 2 * np.sum(alpha_y * y) - 2 * np.sum(beta_y)
# Step 7
gamma_1 = _partial_sum_2d(x, y, np.ones(n, dtype=x.dtype))
gamma_x = _partial_sum_2d(x, y, x)
gamma_y = _partial_sum_2d(x, y, y)
gamma_xy = _partial_sum_2d(x, y, x * y)
# Step 8
aijbij = np.sum(x * y * gamma_1 + gamma_xy - x * gamma_y - y * gamma_x)
if unbiased:
d3 = (n - 3)
d2 = (n - 2)
d1 = (n - 1)
else:
d3 = d2 = d1 = n
# Step 9
d_cov = (aijbij / n / d3 - 2 * sum_ab / n / d2 / d3 +
a_dot_dot / n * b_dot_dot / d1 / d2 / d3)
return d_cov |
<SYSTEM_TASK:>
Compute the distance stats using the fast algorithm.
<END_TASK>
<USER_TASK:>
Description:
def _distance_stats_sqr_fast_generic(x, y, dcov_function):
"""Compute the distance stats using the fast algorithm.""" |
covariance_xy_sqr = dcov_function(x, y)
variance_x_sqr = dcov_function(x, x)
variance_y_sqr = dcov_function(y, y)
denominator_sqr_signed = variance_x_sqr * variance_y_sqr
denominator_sqr = np.absolute(denominator_sqr_signed)
denominator = _sqrt(denominator_sqr)
# Comparisons using a tolerance can change results if the
# covariance has a similar order of magnitude
if denominator == 0.0:
correlation_xy_sqr = denominator.dtype.type(0)
else:
correlation_xy_sqr = covariance_xy_sqr / denominator
return Stats(covariance_xy=covariance_xy_sqr,
correlation_xy=correlation_xy_sqr,
variance_x=variance_x_sqr,
variance_y=variance_y_sqr) |
<SYSTEM_TASK:>
Square of the affinely invariant distance correlation.
<END_TASK>
<USER_TASK:>
Description:
def distance_correlation_af_inv_sqr(x, y):
"""
Square of the affinely invariant distance correlation.
Computes the estimator for the square of the affinely invariant distance
correlation between two random vectors.
.. warning:: The return value of this function is undefined when the
covariance matrix of :math:`x` or :math:`y` is singular.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
Value of the estimator of the squared affinely invariant
distance correlation.
See Also
--------
distance_correlation
u_distance_correlation
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 3, 2, 5],
... [5, 7, 6, 8],
... [9, 10, 11, 12],
... [13, 15, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_correlation_af_inv_sqr(a, a)
1.0
>>> dcor.distance_correlation_af_inv_sqr(a, b) # doctest: +ELLIPSIS
0.5773502...
>>> dcor.distance_correlation_af_inv_sqr(b, b)
1.0
""" |
x = _af_inv_scaled(x)
y = _af_inv_scaled(y)
correlation = distance_correlation_sqr(x, y)
return 0 if np.isnan(correlation) else correlation |
<SYSTEM_TASK:>
Compile a function using a jit compiler.
<END_TASK>
<USER_TASK:>
Description:
def _jit(function):
"""
Compile a function using a jit compiler.
The function is always compiled to check errors, but is only used outside
tests, so that code coverage analysis can be performed in jitted functions.
The tests set sys._called_from_test in conftest.py.
""" |
import sys
compiled = numba.jit(function)
if hasattr(sys, '_called_from_test'):
return function
else: # pragma: no cover
return compiled |
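A usage sketch with a hypothetical helper: in normal runs the numba-compiled version is used, while under the test suite the plain Python function is returned so coverage can trace it.

@_jit
def _cumulative_sum(a):
    total = 0.0
    for i in range(a.shape[0]):
        total += a[i]
    return total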
<SYSTEM_TASK:>
Return square root of an ndarray.
<END_TASK>
<USER_TASK:>
Description:
def _sqrt(x):
"""
Return square root of an ndarray.
This sqrt function for ndarrays tries to use the exponentiation operator
if the objects stored do not supply a sqrt method.
""" |
x = np.clip(x, a_min=0, a_max=None)
try:
return np.sqrt(x)
except AttributeError:
exponent = 0.5
try:
exponent = np.take(x, 0).from_float(exponent)
except AttributeError:
pass
return x ** exponent |
<SYSTEM_TASK:>
Convert vectors to column matrices, to always have a 2d shape.
<END_TASK>
<USER_TASK:>
Description:
def _transform_to_2d(t):
"""Convert vectors to column matrices, to always have a 2d shape.""" |
t = np.asarray(t)
dim = len(t.shape)
assert dim <= 2
if dim < 2:
t = np.atleast_2d(t).T
return t |
<SYSTEM_TASK:>
Return whether the array can be safely converted to double.
<END_TASK>
<USER_TASK:>
Description:
def _can_be_double(x):
"""
Return whether the array can be safely converted to double.
That happens when the dtype is a float with the same size as
a double or narrower, or when it is an integer that can be safely
converted to double (if the roundtrip conversion works).
""" |
return ((np.issubdtype(x.dtype, np.floating) and
x.dtype.itemsize <= np.dtype(float).itemsize) or
(np.issubdtype(x.dtype, np.signedinteger) and
np.can_cast(x, float))) |
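A few illustrative cases (sketch):

import numpy as np

assert _can_be_double(np.array([1.0, 2.0], dtype=np.float32))   # narrow float: fine
assert _can_be_double(np.array([1, 2, 3], dtype=np.int32))      # small signed int: fine
assert not _can_be_double(np.array([1], dtype=np.uint64))       # unsigned ints are rejected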
<SYSTEM_TASK:>
Pairwise distance between the points in two sets.
<END_TASK>
<USER_TASK:>
Description:
def _cdist_scipy(x, y, exponent=1):
"""Pairwise distance between the points in two sets.""" |
metric = 'euclidean'
if exponent != 1:
metric = 'sqeuclidean'
distances = _spatial.distance.cdist(x, y, metric=metric)
if exponent != 1:
distances **= exponent / 2
return distances |
<SYSTEM_TASK:>
Pairwise distance between points in two sets.
<END_TASK>
<USER_TASK:>
Description:
def _cdist(x, y, exponent=1):
"""
Pairwise distance between points in two sets.
As Scipy converts every value to double, this wrapper uses
a less efficient implementation if the original dtype
cannot be converted to double.
""" |
if _can_be_double(x) and _can_be_double(y):
return _cdist_scipy(x, y, exponent)
else:
return _cdist_naive(x, y, exponent) |
<SYSTEM_TASK:>
Respond to the request.
<END_TASK>
<USER_TASK:>
Description:
def respond(self,
content=EmptyValue,
content_type=EmptyValue,
always_hash_content=True,
ext=None):
"""
Respond to the request.
This generates the :attr:`mohawk.Receiver.response_header`
attribute.
:param content=EmptyValue: Byte string of response body that will be sent.
:type content=EmptyValue: str
:param content_type=EmptyValue: content-type header value for response.
:type content_type=EmptyValue: str
:param always_hash_content=True:
When True, ``content`` and ``content_type`` must be provided.
Read :ref:`skipping-content-checks` to learn more.
:type always_hash_content=True: bool
:param ext=None:
An external `Hawk`_ string. If not None, this value will be
signed so that the sender can trust it.
:type ext=None: str
.. _`Hawk`: https://github.com/hueniverse/hawk
""" |
log.debug('generating response header')
resource = Resource(url=self.resource.url,
credentials=self.resource.credentials,
ext=ext,
app=self.parsed_header.get('app', None),
dlg=self.parsed_header.get('dlg', None),
method=self.resource.method,
content=content,
content_type=content_type,
always_hash_content=always_hash_content,
nonce=self.parsed_header['nonce'],
timestamp=self.parsed_header['ts'])
mac = calculate_mac('response', resource, resource.gen_content_hash())
self.response_header = self._make_header(resource, mac,
additional_keys=['ext'])
return self.response_header |
<SYSTEM_TASK:>
Calculates a hash for a given payload.
<END_TASK>
<USER_TASK:>
Description:
def calculate_payload_hash(payload, algorithm, content_type):
"""Calculates a hash for a given payload.""" |
p_hash = hashlib.new(algorithm)
parts = []
parts.append('hawk.' + str(HAWK_VER) + '.payload\n')
parts.append(parse_content_type(content_type) + '\n')
parts.append(payload or '')
parts.append('\n')
for i, p in enumerate(parts):
# Make sure we are about to hash binary strings.
if not isinstance(p, six.binary_type):
p = p.encode('utf8')
p_hash.update(p)
parts[i] = p
log.debug('calculating payload hash from:\n{parts}'
.format(parts=pprint.pformat(parts)))
return b64encode(p_hash.digest()) |
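An equivalent standalone computation for a small example (a sketch assuming HAWK_VER == 1, the sha256 algorithm, and a content type without parameters):

import hashlib
from base64 import b64encode

payload, content_type = '{"ok": true}', 'application/json'   # hypothetical request body
normalized = 'hawk.1.payload\n' + content_type + '\n' + payload + '\n'
print(b64encode(hashlib.sha256(normalized.encode('utf8')).digest()))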
<SYSTEM_TASK:>
Serializes mac_type and resource into a HAWK string.
<END_TASK>
<USER_TASK:>
Description:
def normalize_string(mac_type, resource, content_hash):
"""Serializes mac_type and resource into a HAWK string.""" |
normalized = [
'hawk.' + str(HAWK_VER) + '.' + mac_type,
normalize_header_attr(resource.timestamp),
normalize_header_attr(resource.nonce),
normalize_header_attr(resource.method or ''),
normalize_header_attr(resource.name or ''),
normalize_header_attr(resource.host),
normalize_header_attr(resource.port),
normalize_header_attr(content_hash or '')
]
# The blank lines are important. They follow what the Node Hawk lib does.
normalized.append(normalize_header_attr(resource.ext or ''))
if resource.app:
normalized.append(normalize_header_attr(resource.app))
normalized.append(normalize_header_attr(resource.dlg or ''))
# Add trailing new line.
normalized.append('')
normalized = '\n'.join(normalized)
return normalized |
<SYSTEM_TASK:>
Returns a bewit identifier for the resource as a string.
<END_TASK>
<USER_TASK:>
Description:
def get_bewit(resource):
"""
Returns a bewit identifier for the resource as a string.
:param resource:
Resource to generate a bewit for
:type resource: `mohawk.base.Resource`
""" |
if resource.method != 'GET':
raise ValueError('bewits can only be generated for GET requests')
if resource.nonce != '':
raise ValueError('bewits must use an empty nonce')
mac = calculate_mac(
'bewit',
resource,
None,
)
if isinstance(mac, six.binary_type):
mac = mac.decode('ascii')
if resource.ext is None:
ext = ''
else:
validate_header_attr(resource.ext, name='ext')
ext = resource.ext
# b64encode works only with bytes in python3, but all of our parameters are
# in unicode, so we need to encode them. The cleanest way to do this that
# works in both python 2 and 3 is to use string formatting to get a
# unicode string, and then explicitly encode it to bytes.
inner_bewit = u"{id}\\{exp}\\{mac}\\{ext}".format(
id=resource.credentials['id'],
exp=resource.timestamp,
mac=mac,
ext=ext,
)
inner_bewit_bytes = inner_bewit.encode('ascii')
bewit_bytes = urlsafe_b64encode(inner_bewit_bytes)
# Now decode the resulting bytes back to a unicode string
return bewit_bytes.decode('ascii') |
<SYSTEM_TASK:>
Strips the bewit parameter out of a url.
<END_TASK>
<USER_TASK:>
Description:
def strip_bewit(url):
"""
Strips the bewit parameter out of a url.
Returns (encoded_bewit, stripped_url)
Raises InvalidBewit if no bewit found.
:param url:
The url containing a bewit parameter
:type url: str
""" |
m = re.search('[?&]bewit=([^&]+)', url)
if not m:
raise InvalidBewit('no bewit data found')
bewit = m.group(1)
stripped_url = url[:m.start()] + url[m.end():]
return bewit, stripped_url |
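A usage sketch with a made-up URL:

bewit, url = strip_bewit('https://example.com/res?a=1&bewit=ZXhhbXBsZQ')
print(bewit)   # 'ZXhhbXBsZQ'
print(url)     # 'https://example.com/res?a=1'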
<SYSTEM_TASK:>
Validates the given bewit.
<END_TASK>
<USER_TASK:>
Description:
def check_bewit(url, credential_lookup, now=None):
"""
Validates the given bewit.
Returns True if the resource has a valid bewit parameter attached,
or raises a subclass of HawkFail otherwise.
:param credential_lookup:
Callable to look up the credentials dict by sender ID.
The credentials dict must have the keys:
``id``, ``key``, and ``algorithm``.
See :ref:`receiving-request` for an example.
:type credential_lookup: callable
:param now=None:
Unix epoch time for the current time to determine if bewit has expired.
If None, then the current time as given by utc_now() is used.
:type now=None: integer
""" |
raw_bewit, stripped_url = strip_bewit(url)
bewit = parse_bewit(raw_bewit)
try:
credentials = credential_lookup(bewit.id)
except LookupError:
raise CredentialsLookupError('Could not find credentials for ID {0}'
.format(bewit.id))
res = Resource(url=stripped_url,
method='GET',
credentials=credentials,
timestamp=bewit.expiration,
nonce='',
ext=bewit.ext,
)
mac = calculate_mac('bewit', res, None)
mac = mac.decode('ascii')
if not strings_match(mac, bewit.mac):
raise MacMismatch('bewit with mac {bewit_mac} did not match expected mac {expected_mac}'
.format(bewit_mac=bewit.mac,
expected_mac=mac))
# Check that the timestamp isn't expired
if now is None:
# TODO: Add offset/skew
now = utc_now()
if int(bewit.expiration) < now:
# TODO: Refactor TokenExpired to handle this better
raise TokenExpired('bewit with UTC timestamp {ts} has expired; '
'it was compared to {now}'
.format(ts=bewit.expiration, now=now),
localtime_in_seconds=now,
www_authenticate=''
)
return True |
<SYSTEM_TASK:>
Accept a response to this request.
<END_TASK>
<USER_TASK:>
Description:
def accept_response(self,
response_header,
content=EmptyValue,
content_type=EmptyValue,
accept_untrusted_content=False,
localtime_offset_in_seconds=0,
timestamp_skew_in_seconds=default_ts_skew_in_seconds,
**auth_kw):
"""
Accept a response to this request.
:param response_header:
A `Hawk`_ ``Server-Authorization`` header
such as one created by :class:`mohawk.Receiver`.
:type response_header: str
:param content=EmptyValue: Byte string of the response body received.
:type content=EmptyValue: str
:param content_type=EmptyValue:
Content-Type header value of the response received.
:type content_type=EmptyValue: str
:param accept_untrusted_content=False:
When True, allow responses that do not hash their content.
Read :ref:`skipping-content-checks` to learn more.
:type accept_untrusted_content=False: bool
:param localtime_offset_in_seconds=0:
Seconds to add to local time in case it's out of sync.
:type localtime_offset_in_seconds=0: float
:param timestamp_skew_in_seconds=60:
Max seconds until a message expires. Upon expiry,
:class:`mohawk.exc.TokenExpired` is raised.
:type timestamp_skew_in_seconds=60: float
.. _`Hawk`: https://github.com/hueniverse/hawk
""" |
log.debug('accepting response {header}'
.format(header=response_header))
parsed_header = parse_authorization_header(response_header)
resource = Resource(ext=parsed_header.get('ext', None),
content=content,
content_type=content_type,
# The following response attributes are
# in reference to the original request,
# not to the response header:
timestamp=self.req_resource.timestamp,
nonce=self.req_resource.nonce,
url=self.req_resource.url,
method=self.req_resource.method,
app=self.req_resource.app,
dlg=self.req_resource.dlg,
credentials=self.credentials,
seen_nonce=self.seen_nonce)
self._authorize(
'response', parsed_header, resource,
# Per Node lib, a responder macs the *sender's* timestamp.
# It does not create its own timestamp.
# I suppose a slow response could time out here. Maybe only check
# mac failures, not timeouts?
their_timestamp=resource.timestamp,
timestamp_skew_in_seconds=timestamp_skew_in_seconds,
localtime_offset_in_seconds=localtime_offset_in_seconds,
accept_untrusted_content=accept_untrusted_content,
**auth_kw) |
<SYSTEM_TASK:>
Returns a ``field -> value`` dict of the current state of the instance.
<END_TASK>
<USER_TASK:>
Description:
def current_state(self):
"""
Returns a ``field -> value`` dict of the current state of the instance.
""" |
field_names = set()
[field_names.add(f.name) for f in self._meta.local_fields]
[field_names.add(f.attname) for f in self._meta.local_fields]
return dict([(field_name, getattr(self, field_name)) for field_name in field_names]) |
<SYSTEM_TASK:>
Remove trailing colons from the URI back to the first non-colon.
<END_TASK>
<USER_TASK:>
Description:
def _trim(cls, s):
"""
Remove trailing colons from the URI back to the first non-colon.
:param string s: input URI string
:returns: URI string with trailing colons removed
:rtype: string
TEST: trailing colons necessary
>>> s = '1:2::::'
>>> CPE._trim(s)
'1:2'
TEST: trailing colons not necessary
>>> s = '1:2:3:4:5:6'
>>> CPE._trim(s)
'1:2:3:4:5:6'
""" |
reverse = s[::-1]
idx = 0
for i in range(0, len(reverse)):
if reverse[i] == ":":
idx += 1
else:
break
# Return the substring after all trailing colons,
# reversed back to its original character order.
new_s = reverse[idx: len(reverse)]
return new_s[::-1] |
<SYSTEM_TASK:>
Returns the component list of input attribute.
<END_TASK>
<USER_TASK:>
Description:
def _get_attribute_components(self, att):
"""
Returns the component list of input attribute.
:param string att: Attribute name to get
:returns: List of Component objects of the attribute in CPE Name
:rtype: list
:exception: ValueError - invalid attribute name
""" |
lc = []
if not CPEComponent.is_valid_attribute(att):
errmsg = "Invalid attribute name '{0}' is not exist".format(att)
raise ValueError(errmsg)
for pk in CPE.CPE_PART_KEYS:
elements = self.get(pk)
for elem in elements:
lc.append(elem.get(att))
return lc |
<SYSTEM_TASK:>
Pack the values of the five arguments into the simple edition
<END_TASK>
<USER_TASK:>
Description:
def _pack_edition(self):
"""
Pack the values of the five arguments into the simple edition
component. If all the values are blank, just return a blank.
:returns: "edition", "sw_edition", "target_sw", "target_hw" and "other"
attributes packed in a only value
:rtype: string
:exception: TypeError - incompatible version with pack operation
""" |
COMP_KEYS = (CPEComponent.ATT_EDITION,
CPEComponent.ATT_SW_EDITION,
CPEComponent.ATT_TARGET_SW,
CPEComponent.ATT_TARGET_HW,
CPEComponent.ATT_OTHER)
separator = CPEComponent2_3_URI_edpacked.SEPARATOR_COMP
packed_ed = []
packed_ed.append(separator)
for ck in COMP_KEYS:
lc = self._get_attribute_components(ck)
if len(lc) > 1:
# Incompatible version 1.1, there are two or more elements
# in CPE Name
errmsg = "Incompatible version {0} with URI".format(
self.VERSION)
raise TypeError(errmsg)
comp = lc[0]
if (isinstance(comp, CPEComponentUndefined) or
isinstance(comp, CPEComponentEmpty) or
isinstance(comp, CPEComponentAnyValue)):
value = ""
elif (isinstance(comp, CPEComponentNotApplicable)):
value = CPEComponent2_3_URI.VALUE_NA
else:
# Component has some value; transform this original value
# in URI value
value = comp.as_uri_2_3()
# Save the value of edition attribute
if ck == CPEComponent.ATT_EDITION:
ed = value
# Pack the value of the component
packed_ed.append(value)
packed_ed.append(separator)
# Del the last separator
packed_ed_str = "".join(packed_ed[:-1])
only_ed = []
only_ed.append(separator)
only_ed.append(ed)
only_ed.append(separator)
only_ed.append(separator)
only_ed.append(separator)
only_ed.append(separator)
only_ed_str = "".join(only_ed)
if (packed_ed_str == only_ed_str):
# All the extended attributes are blank,
# so don't do any packing, just return ed
return ed
else:
# Otherwise, pack the five values into a simple string
# prefixed and internally delimited with the tilde
return packed_ed_str |
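A standalone sketch of the same packing rule, using plain strings in place of the component objects (values are hypothetical):

def pack_edition_sketch(ed, sw_ed, t_sw, t_hw, other, sep='~'):
    packed = sep + sep.join([ed, sw_ed, t_sw, t_hw, other])
    only_ed = sep + ed + sep * 4
    # If every extended attribute is blank, fall back to the bare edition value
    return ed if packed == only_ed else packed

print(pack_edition_sketch('', 'online', 'win2003', 'x64', ''))
# ~~online~win2003~x64~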
<SYSTEM_TASK:>
Returns the CPE Name as URI string of version 2.3.
<END_TASK>
<USER_TASK:>
Description:
def as_uri_2_3(self):
"""
Returns the CPE Name as URI string of version 2.3.
:returns: CPE Name as URI string of version 2.3
:rtype: string
:exception: TypeError - incompatible version
""" |
uri = []
uri.append("cpe:/")
ordered_comp_parts = {
0: CPEComponent.ATT_PART,
1: CPEComponent.ATT_VENDOR,
2: CPEComponent.ATT_PRODUCT,
3: CPEComponent.ATT_VERSION,
4: CPEComponent.ATT_UPDATE,
5: CPEComponent.ATT_EDITION,
6: CPEComponent.ATT_LANGUAGE}
# Indicates if the previous component must be set depending on the
# value of current component
set_prev_comp = False
prev_comp_list = []
for i in range(0, len(ordered_comp_parts)):
ck = ordered_comp_parts[i]
lc = self._get_attribute_components(ck)
if len(lc) > 1:
# Incompatible version 1.1, there are two or more elements
# in CPE Name
errmsg = "Incompatible version {0} with URI".format(
self.VERSION)
raise TypeError(errmsg)
if ck == CPEComponent.ATT_EDITION:
# Call the pack() helper function to compute the proper
# binding for the edition element
v = self._pack_edition()
if not v:
set_prev_comp = True
prev_comp_list.append(CPEComponent2_3_URI.VALUE_ANY)
continue
else:
comp = lc[0]
if (isinstance(comp, CPEComponentEmpty) or
isinstance(comp, CPEComponentAnyValue)):
# Logical value any
v = CPEComponent2_3_URI.VALUE_ANY
elif isinstance(comp, CPEComponentNotApplicable):
# Logical value not applicable
v = CPEComponent2_3_URI.VALUE_NA
elif isinstance(comp, CPEComponentUndefined):
set_prev_comp = True
prev_comp_list.append(CPEComponent2_3_URI.VALUE_ANY)
continue
else:
# Get the value of component encoded in URI
v = comp.as_uri_2_3()
# Append v to the URI and add a separator
uri.append(v)
uri.append(CPEComponent2_3_URI.SEPARATOR_COMP)
if set_prev_comp:
# Set the previous attribute as logical value any
v = CPEComponent2_3_URI.VALUE_ANY
pos_ini = max(len(uri) - len(prev_comp_list) - 1, 1)
increment = 2 # Count of inserted values
for p, val in enumerate(prev_comp_list):
pos = pos_ini + (p * increment)
uri.insert(pos, v)
uri.insert(pos + 1, CPEComponent2_3_URI.SEPARATOR_COMP)
set_prev_comp = False
prev_comp_list = []
# Return the URI string, with trailing separator trimmed
return CPE._trim("".join(uri[:-1])) |
<SYSTEM_TASK:>
Returns the CPE Name as Well-Formed Name string of version 2.3.
<END_TASK>
<USER_TASK:>
Description:
def as_wfn(self):
"""
Returns the CPE Name as Well-Formed Name string of version 2.3.
:return: CPE Name as WFN string
:rtype: string
:exception: TypeError - incompatible version
""" |
from .cpe2_3_wfn import CPE2_3_WFN
wfn = []
wfn.append(CPE2_3_WFN.CPE_PREFIX)
for i in range(0, len(CPEComponent.ordered_comp_parts)):
ck = CPEComponent.ordered_comp_parts[i]
lc = self._get_attribute_components(ck)
if len(lc) > 1:
# Incompatible version 1.1, there are two or more elements
# in CPE Name
errmsg = "Incompatible version {0} with WFN".format(
self.VERSION)
raise TypeError(errmsg)
else:
comp = lc[0]
v = []
v.append(ck)
v.append("=")
if isinstance(comp, CPEComponentAnyValue):
# Logical value any
v.append(CPEComponent2_3_WFN.VALUE_ANY)
elif isinstance(comp, CPEComponentNotApplicable):
# Logical value not applicable
v.append(CPEComponent2_3_WFN.VALUE_NA)
elif (isinstance(comp, CPEComponentUndefined) or
isinstance(comp, CPEComponentEmpty)):
# Do not set the attribute
continue
else:
# Get the simple value of WFN of component
v.append('"')
v.append(comp.as_wfn())
v.append('"')
# Append v to the WFN and add a separator
wfn.append("".join(v))
wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP)
# Del the last separator
wfn = wfn[:-1]
# Return the WFN string
wfn.append(CPE2_3_WFN.CPE_SUFFIX)
return "".join(wfn) |
<SYSTEM_TASK:>
Returns the CPE Name as formatted string of version 2.3.
<END_TASK>
<USER_TASK:>
Description:
def as_fs(self):
"""
Returns the CPE Name as formatted string of version 2.3.
:returns: CPE Name as formatted string
:rtype: string
:exception: TypeError - incompatible version
""" |
fs = []
fs.append("cpe:2.3:")
for i in range(0, len(CPEComponent.ordered_comp_parts)):
ck = CPEComponent.ordered_comp_parts[i]
lc = self._get_attribute_components(ck)
if len(lc) > 1:
# Incompatible version 1.1, there are two or more elements
# in CPE Name
errmsg = "Incompatible version {0} with formatted string".format(
self.VERSION)
raise TypeError(errmsg)
else:
comp = lc[0]
if (isinstance(comp, CPEComponentUndefined) or
isinstance(comp, CPEComponentEmpty) or
isinstance(comp, CPEComponentAnyValue)):
# Logical value any
v = CPEComponent2_3_FS.VALUE_ANY
elif isinstance(comp, CPEComponentNotApplicable):
# Logical value not applicable
v = CPEComponent2_3_FS.VALUE_NA
else:
# Get the value of component encoded in formatted string
v = comp.as_fs()
# Append v to the formatted string then add a separator.
fs.append(v)
fs.append(CPEComponent2_3_FS.SEPARATOR_COMP)
# Return the formatted string
return CPE._trim("".join(fs[:-1])) |
<SYSTEM_TASK:>
Returns True if c is an uppercase letter, a lowercase letter,
<END_TASK>
<USER_TASK:>
Description:
def _is_alphanum(cls, c):
"""
Returns True if c is an uppercase letter, a lowercase letter,
a digit or an underscore, otherwise False.
:param string c: Character to check
:returns: True if char is alphanumeric or an underscore,
False otherwise
:rtype: boolean
TEST: a wrong character
>>> c = "#"
>>> CPEComponentSimple._is_alphanum(c)
False
""" |
alphanum_rxc = re.compile(CPEComponentSimple._ALPHANUM_PATTERN)
return (alphanum_rxc.match(c) is not None) |
<SYSTEM_TASK:>
Check if the value of component is correct in the attribute "comp_att".
<END_TASK>
<USER_TASK:>
Description:
def _parse(self, comp_att):
"""
Check if the value of component is correct in the attribute "comp_att".
:param string comp_att: attribute associated with value of component
:returns: None
:exception: ValueError - incorrect value of component
""" |
errmsg = "Invalid attribute '{0}'".format(comp_att)
if not CPEComponent.is_valid_attribute(comp_att):
raise ValueError(errmsg)
comp_str = self._encoded_value
errmsg = "Invalid value of attribute '{0}': {1}".format(
comp_att, comp_str)
# Check part (system type) value
if comp_att == CPEComponentSimple.ATT_PART:
if not self._is_valid_part():
raise ValueError(errmsg)
# Check language value
elif comp_att == CPEComponentSimple.ATT_LANGUAGE:
if not self._is_valid_language():
raise ValueError(errmsg)
# Check edition value
elif comp_att == CPEComponentSimple.ATT_EDITION:
if not self._is_valid_edition():
raise ValueError(errmsg)
# Check other type of component value
elif not self._is_valid_value():
raise ValueError(errmsg) |
<SYSTEM_TASK:>
Returns the value of component encoded as formatted string.
<END_TASK>
<USER_TASK:>
Description:
def as_fs(self):
"""
Returns the value of component encoded as formatted string.
Inspect each character in value of component.
Certain nonalpha characters pass thru without escaping
into the result, but most retain escaping.
:returns: Formatted string associated with component
:rtype: string
""" |
s = self._standard_value
result = []
idx = 0
while (idx < len(s)):
c = s[idx] # get the idx'th character of s
if c != "\\":
# unquoted characters pass thru unharmed
result.append(c)
else:
# Escaped characters are examined
nextchr = s[idx + 1]
if (nextchr == ".") or (nextchr == "-") or (nextchr == "_"):
# the period, hyphen and underscore pass unharmed
result.append(nextchr)
idx += 1
else:
# all others retain escaping
result.append("\\")
result.append(nextchr)
idx += 2
continue
idx += 1
return "".join(result) |
<SYSTEM_TASK:>
Returns the value of component encoded as URI string.
<END_TASK>
<USER_TASK:>
Description:
def as_uri_2_3(self):
"""
Returns the value of component encoded as URI string.
Scans an input string s and applies the following transformations:
- Pass alphanumeric characters thru untouched
- Percent-encode quoted non-alphanumerics as needed
- Unquoted special characters are mapped to their special forms.
:returns: URI string associated with component
:rtype: string
""" |
s = self._standard_value
result = []
idx = 0
while (idx < len(s)):
thischar = s[idx] # get the idx'th character of s
# alphanumerics (incl. underscore) pass untouched
if (CPEComponentSimple._is_alphanum(thischar)):
result.append(thischar)
idx += 1
continue
# escape character
if (thischar == "\\"):
idx += 1
nxtchar = s[idx]
result.append(CPEComponentSimple._pct_encode_uri(nxtchar))
idx += 1
continue
# Bind the unquoted '?' special character to "%01".
if (thischar == "?"):
result.append("%01")
# Bind the unquoted '*' special character to "%02".
if (thischar == "*"):
result.append("%02")
idx += 1
return "".join(result) |
<SYSTEM_TASK:>
Return True if the value of component in generic attribute is valid,
<END_TASK>
<USER_TASK:>
Description:
def _is_valid_value(self):
"""
Return True if the value of component in generic attribute is valid,
and otherwise False.
:returns: True if value is valid, False otherwise
:rtype: boolean
""" |
comp_str = self._encoded_value
value_pattern = []
value_pattern.append("^((")
value_pattern.append("~[")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+")
value_pattern.append(")|(")
value_pattern.append("[")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+(![")
value_pattern.append(CPEComponent1_1._STRING)
value_pattern.append("]+)*")
value_pattern.append("))$")
value_rxc = re.compile("".join(value_pattern))
return value_rxc.match(comp_str) is not None |
<SYSTEM_TASK:>
Returns a component with value "value".
<END_TASK>
<USER_TASK:>
Description:
def _create_component(cls, att, value):
"""
Returns a component with value "value".
:param string att: Attribute name
:param string value: Attribute value
:returns: Component object created
:rtype: CPEComponent
:exception: ValueError - invalid value of attribute
""" |
if value == CPEComponent2_3_URI.VALUE_UNDEFINED:
comp = CPEComponentUndefined()
elif (value == CPEComponent2_3_URI.VALUE_ANY or
value == CPEComponent2_3_URI.VALUE_EMPTY):
comp = CPEComponentAnyValue()
elif (value == CPEComponent2_3_URI.VALUE_NA):
comp = CPEComponentNotApplicable()
else:
comp = CPEComponentNotApplicable()
try:
comp = CPEComponent2_3_URI(value, att)
except ValueError:
errmsg = "Invalid value of attribute '{0}': {1} ".format(att,
value)
raise ValueError(errmsg)
return comp |
<SYSTEM_TASK:>
Returns the CPE Name as Well-Formed Name string of version 2.3.
<END_TASK>
<USER_TASK:>
Description:
def as_wfn(self):
"""
Returns the CPE Name as Well-Formed Name string of version 2.3.
If edition component is not packed, only shows the first seven
components, otherwise shows all.
:return: CPE Name as WFN string
:rtype: string
:exception: TypeError - incompatible version
""" |
if self._str.find(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION) == -1:
# Edition unpacked, only show the first seven components
wfn = []
wfn.append(CPE2_3_WFN.CPE_PREFIX)
for ck in CPEComponent.CPE_COMP_KEYS:
lc = self._get_attribute_components(ck)
if len(lc) > 1:
# Incompatible version 1.1, there are two or more elements
# in CPE Name
errmsg = "Incompatible version {0} with WFN".format(
self.VERSION)
raise TypeError(errmsg)
else:
comp = lc[0]
v = []
v.append(ck)
v.append("=")
if (isinstance(comp, CPEComponentUndefined) or
isinstance(comp, CPEComponentEmpty)):
# Do not set the attribute
continue
elif isinstance(comp, CPEComponentAnyValue):
# Logical value any
v.append(CPEComponent2_3_WFN.VALUE_ANY)
elif isinstance(comp, CPEComponentNotApplicable):
# Logical value not applicable
v.append(CPEComponent2_3_WFN.VALUE_NA)
else:
# Get the value of WFN of component
v.append('"')
v.append(comp.as_wfn())
v.append('"')
# Append v to the WFN and add a separator
wfn.append("".join(v))
wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP)
# Del the last separator
wfn = wfn[:-1]
# Return the WFN string
wfn.append(CPE2_3_WFN.CPE_SUFFIX)
return "".join(wfn)
else:
# Shows all components
return super(CPE2_3_URI, self).as_wfn() |
<SYSTEM_TASK:>
Return True if the input value of attribute "edition" is valid,
<END_TASK>
<USER_TASK:>
Description:
def _is_valid_edition(self):
"""
Return True if the input value of attribute "edition" is valid,
and otherwise False.
:returns: True if value is valid, False otherwise
:rtype: boolean
""" |
comp_str = self._standard_value[0]
packed = []
packed.append("(")
packed.append(CPEComponent2_3_URI.SEPARATOR_PACKED_EDITION)
packed.append(CPEComponent2_3_URI._string)
packed.append("){5}")
value_pattern = []
value_pattern.append("^(")
value_pattern.append(CPEComponent2_3_URI._string)
value_pattern.append("|")
value_pattern.append("".join(packed))
value_pattern.append(")$")
value_rxc = re.compile("".join(value_pattern))
return value_rxc.match(comp_str) is not None |
<SYSTEM_TASK:>
Compares a source string to a target string,
<END_TASK>
<USER_TASK:>
Description:
def _compare_strings(cls, source, target):
"""
Compares a source string to a target string,
and addresses the condition in which the source string
includes unquoted special characters.
It performs a simple regular expression match,
with the assumption that (as required) unquoted special characters
appear only at the beginning and/or the end of the source string.
It also properly differentiates between unquoted and quoted
special characters.
:param string source: First string value
:param string target: Second string value
:returns: The comparison relation among input strings.
:rtype: int
""" |
start = 0
end = len(source)
begins = 0
ends = 0
# Reading of initial wildcard in source
if source.startswith(CPEComponent2_3_WFN.WILDCARD_MULTI):
# Source starts with "*"
start = 1
begins = -1
else:
while ((start < len(source)) and
source.startswith(CPEComponent2_3_WFN.WILDCARD_ONE,
start, start)):
# Source starts with one or more "?"
start += 1
begins += 1
# Reading of final wildcard in source
if (source.endswith(CPEComponent2_3_WFN.WILDCARD_MULTI) and
CPESet2_3._is_even_wildcards(source, end - 1)):
# Source ends in "*"
end -= 1
ends = -1
else:
while ((end > 0) and
source.endswith(CPEComponent2_3_WFN.WILDCARD_ONE, end - 1, end) and
CPESet2_3._is_even_wildcards(source, end - 1)):
# Source ends in "?"
end -= 1
ends += 1
source = source[start: end]
index = -1
leftover = len(target)
while (leftover > 0):
index = target.find(source, index + 1)
if (index == -1):
break
escapes = target.count("\\", 0, index)
if ((index > 0) and (begins != -1) and
(begins < (index - escapes))):
break
escapes = target.count("\\", index + 1, len(target))
leftover = len(target) - index - escapes - len(source)
if ((leftover > 0) and ((ends != -1) and (leftover > ends))):
continue
return CPESet2_3.LOGICAL_VALUE_SUPERSET
return CPESet2_3.LOGICAL_VALUE_DISJOINT |
<SYSTEM_TASK:>
Compares two WFNs and returns a generator of pairwise attribute-value
<END_TASK>
<USER_TASK:>
Description:
def compare_wfns(cls, source, target):
"""
Compares two WFNs and returns a generator of pairwise attribute-value
comparison results. It provides full access to the individual
comparison results to enable use-case specific implementations
of novel name-comparison algorithms.
Compare each attribute of the Source WFN to the Target WFN:
:param CPE2_3_WFN source: first WFN CPE Name
:param CPE2_3_WFN target: second WFN CPE Name
:returns: generator of pairwise attribute comparison results
:rtype: generator
""" |
# Compare results using the get() function in WFN
for att in CPEComponent.CPE_COMP_KEYS_EXTENDED:
value_src = source.get_attribute_values(att)[0]
if value_src.find('"') > -1:
# Not a logical value: del double quotes
value_src = value_src[1:-1]
value_tar = target.get_attribute_values(att)[0]
if value_tar.find('"') > -1:
# Not a logical value: del double quotes
value_tar = value_tar[1:-1]
yield (att, CPESet2_3._compare(value_src, value_tar)) |
<SYSTEM_TASK:>
Compares two WFNs and returns True if the set-theoretic relation
<END_TASK>
<USER_TASK:>
Description:
def cpe_disjoint(cls, source, target):
"""
Compares two WFNs and returns True if the set-theoretic relation
between the names is DISJOINT.
:param CPE2_3_WFN source: first WFN CPE Name
:param CPE2_3_WFN target: second WFN CPE Name
:returns: True if the set relation between source and target
is DISJOINT, otherwise False.
:rtype: boolean
""" |
# If any pairwise comparison returned DISJOINT then
# the overall name relationship is DISJOINT
for att, result in CPESet2_3.compare_wfns(source, target):
isDisjoint = result == CPESet2_3.LOGICAL_VALUE_DISJOINT
if isDisjoint:
return True
return False |
<SYSTEM_TASK:>
Compares two WFNs and returns True if the set-theoretic relation
<END_TASK>
<USER_TASK:>
Description:
def cpe_equal(cls, source, target):
"""
Compares two WFNs and returns True if the set-theoretic relation
between the names is EQUAL.
:param CPE2_3_WFN source: first WFN CPE Name
:param CPE2_3_WFN target: second WFN CPE Name
:returns: True if the set relation between source and target
is EQUAL, otherwise False.
:rtype: boolean
""" |
# If every pairwise comparison returned EQUAL then
# the overall name relationship is EQUAL
for att, result in CPESet2_3.compare_wfns(source, target):
isEqual = result == CPESet2_3.LOGICAL_VALUE_EQUAL
if not isEqual:
return False
return True |
<SYSTEM_TASK:>
Adds a CPE element to the set if not already present.
<END_TASK>
<USER_TASK:>
Description:
def append(self, cpe):
"""
Adds a CPE element to the set if not already present.
Only WFN CPE Names are valid, so this function converts the input CPE
object of version 2.3 to WFN style.
:param CPE cpe: CPE Name to store in set
:returns: None
:exception: ValueError - invalid version of CPE Name
""" |
if cpe.VERSION != CPE2_3.VERSION:
errmsg = "CPE Name version {0} not valid, version 2.3 expected".format(
cpe.VERSION)
raise ValueError(errmsg)
for k in self.K:
if cpe._str == k._str:
return None
if isinstance(cpe, CPE2_3_WFN):
self.K.append(cpe)
else:
# Convert the CPE Name to WFN
wfn = CPE2_3_WFN(cpe.as_wfn())
self.K.append(wfn) |
<SYSTEM_TASK:>
Accepts a set of CPE Names K and a candidate CPE Name X. It returns
<END_TASK>
<USER_TASK:>
Description:
def name_match(self, wfn):
"""
Accepts a set of CPE Names K and a candidate CPE Name X. It returns
'True' if X matches any member of K, and 'False' otherwise.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
:param CPE2_3_WFN wfn: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
""" |
for N in self.K:
if CPESet2_3.cpe_superset(wfn, N):
return True
return False |
<SYSTEM_TASK:>
Returns the CPE Name as WFN string of version 2.3.
<END_TASK>
<USER_TASK:>
Description:
def as_wfn(self):
"""
Returns the CPE Name as WFN string of version 2.3.
Only shows the first seven components.
:return: CPE Name as WFN string
:rtype: string
:exception: TypeError - incompatible version
""" |
wfn = []
wfn.append(CPE2_3_WFN.CPE_PREFIX)
for ck in CPEComponent.CPE_COMP_KEYS:
lc = self._get_attribute_components(ck)
comp = lc[0]
if (isinstance(comp, CPEComponentUndefined) or
isinstance(comp, CPEComponentEmpty)):
# Do not set the attribute
continue
else:
v = []
v.append(ck)
v.append("=")
# Get the value of WFN of component
v.append('"')
v.append(comp.as_wfn())
v.append('"')
# Append v to the WFN and add a separator
wfn.append("".join(v))
wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP)
# Del the last separator
wfn = wfn[:-1]
# Return the WFN string
wfn.append(CPE2_3_WFN.CPE_SUFFIX)
return "".join(wfn) |
<SYSTEM_TASK:>
Unbinds a bound form to a WFN.
<END_TASK>
<USER_TASK:>
Description:
def _unbind(cls, boundname):
"""
Unbinds a bound form to a WFN.
:param string boundname: CPE name
:returns: WFN object associated with boundname.
:rtype: CPE2_3_WFN
""" |
try:
fs = CPE2_3_FS(boundname)
except:
# CPE name is not formatted string
try:
uri = CPE2_3_URI(boundname)
except:
# CPE name is not URI but WFN
return CPE2_3_WFN(boundname)
else:
return CPE2_3_WFN(uri.as_wfn())
else:
return CPE2_3_WFN(fs.as_wfn()) |
<SYSTEM_TASK:>
Check if the set of ids form a single connected component
<END_TASK>
<USER_TASK:>
Description:
def is_component(w, ids):
"""Check if the set of ids form a single connected component
Parameters
----------
w : spatial weights object
ids : list
identifiers of units that are tested to be a single connected
component
Returns
-------
True : if the list of ids represents a single connected component
False : if the list of ids forms more than a single connected component
""" |
components = 0
marks = dict([(node, 0) for node in ids])
q = []
for node in ids:
if marks[node] == 0:
components += 1
q.append(node)
if components > 1:
return False
while q:
node = q.pop()
marks[node] = components
others = [neighbor for neighbor in w.neighbors[node]
if neighbor in ids]
for other in others:
if marks[other] == 0 and other not in q:
q.append(other)
return True |
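A usage sketch on a small lattice (requires libpysal, mirroring the doctest setup of check_contiguity below):

import libpysal as lps

w = lps.weights.lat2W(3, 3)          # 3x3 rook-contiguity lattice
print(is_component(w, [0, 1, 2]))    # True: the top row is one connected piece
print(is_component(w, [0, 8]))       # False: opposite corners are not connected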
<SYSTEM_TASK:>
Check if contiguity is maintained if leaver is removed from neighbors
<END_TASK>
<USER_TASK:>
Description:
def check_contiguity(w, neighbors, leaver):
"""Check if contiguity is maintained if leaver is removed from neighbors
Parameters
----------
w : spatial weights object
simple contiguity based weights
neighbors : list
nodes that are to be checked if they form a single
connected component
leaver : id
a member of neighbors to check for removal
Returns
-------
True : if removing leaver from neighbors does not break contiguity
of remaining set
in neighbors
False : if removing leaver from neighbors breaks contiguity
Example
-------
Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
>>> import libpysal as lps
>>> w = lps.weights.lat2W(5, 5)
Test removing various areas from a subset of the region's areas. In the
first case the subset is defined as observations 0, 1, 2, 3 and 4. The
test shows that observations 0, 1, 2 and 3 remain connected even if
observation 4 is removed.
>>> check_contiguity(w,[0,1,2,3,4],4)
True
>>> check_contiguity(w,[0,1,2,3,4],3)
False
>>> check_contiguity(w,[0,1,2,3,4],0)
True
>>> check_contiguity(w,[0,1,2,3,4],1)
False
>>>
""" |
ids = neighbors[:]
ids.remove(leaver)
return is_component(w, ids) |
<SYSTEM_TASK:>
Declare the view as a JSON API method
<END_TASK>
<USER_TASK:>
Description:
def jsonapi(f):
""" Declare the view as a JSON API method
This converts the view's return value into a :cls:JsonResponse.
The following return types are supported:
- tuple: a tuple of (response, status, headers)
- any other object is converted to JSON
""" |
@wraps(f)
def wrapper(*args, **kwargs):
rv = f(*args, **kwargs)
return make_json_response(rv)
return wrapper |
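A usage sketch assuming a Flask application (the route and payloads are hypothetical):

from flask import Flask

app = Flask(__name__)

@app.route('/api/items')
@jsonapi
def items():
    # tuple form: (response, status, headers); returning a bare object also works
    return {'items': []}, 201, {'X-Total-Count': '0'}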
<SYSTEM_TASK:>
Download + unpack given package into temp dir ``tmp``.
<END_TASK>
<USER_TASK:>
Description:
def _unpack(c, tmp, package, version, git_url=None):
"""
Download + unpack given package into temp dir ``tmp``.
Return ``(real_version, source)`` where ``real_version`` is the "actual"
version downloaded (e.g. if a Git master was indicated, it will be the SHA
of master HEAD) and ``source`` is the source directory (relative to
unpacked source) to import into ``<project>/vendor``.
""" |
real_version = version[:]
source = None
if git_url:
pass
# git clone into tempdir
# git checkout <version>
# set target to checkout
# if version does not look SHA-ish:
# in the checkout, obtain SHA from that branch
# set real_version to that value
else:
cwd = os.getcwd()
print("Moving into temp dir %s" % tmp)
os.chdir(tmp)
try:
# Nab from index. Skip wheels; we want to unpack an sdist.
flags = "--download=. --build=build --no-use-wheel"
cmd = "pip install %s %s==%s" % (flags, package, version)
c.run(cmd)
# Identify basename
# TODO: glob is bad here because pip install --download gets all
# dependencies too! ugh. Figure out best approach for that.
globs = []
globexpr = ""
for extension, opener in (
("zip", "unzip"),
("tgz", "tar xzvf"),
("tar.gz", "tar xzvf"),
):
globexpr = "*.{0}".format(extension)
globs = glob(globexpr)
if globs:
break
archive = os.path.basename(globs[0])
source, _, _ = archive.rpartition(".{0}".format(extension))
c.run("{0} {1}".format(opener, globexpr))
finally:
os.chdir(cwd)
return real_version, source |
<SYSTEM_TASK:>
Create a passworded sudo-capable user.
<END_TASK>
<USER_TASK:>
Description:
def make_sudouser(c):
"""
Create a passworded sudo-capable user.
Used by other tasks to execute the test suite so sudo tests work.
""" |
user = c.travis.sudo.user
password = c.travis.sudo.password
# --create-home because we need a place to put conf files, keys etc
# --groups travis because we must be in the Travis group to access the
# (created by Travis for us) virtualenv and other contents within
# /home/travis.
c.sudo("useradd {0} --create-home --groups travis".format(user))
# Password 'mypass' also arbitrary
c.run("echo {0}:{1} | sudo chpasswd".format(user, password))
# Set up new (glob-sourced) sudoers conf file for our user; easier than
# attempting to mutate or overwrite main sudoers conf.
conf = "/etc/sudoers.d/passworded"
cmd = "echo '{0} ALL=(ALL:ALL) PASSWD:ALL' > {1}".format(user, conf)
c.sudo('sh -c "{0}"'.format(cmd))
# Grant travis group write access to /home/travis as some integration tests
# may try writing conf files there. (TODO: shouldn't running the tests via
# 'sudo -H' mean that's no longer necessary?)
c.sudo("chmod g+w /home/travis") |
<SYSTEM_TASK:>
Set up passwordless SSH keypair & authorized_hosts access to localhost.
<END_TASK>
<USER_TASK:>
Description:
def make_sshable(c):
"""
Set up passwordless SSH keypair & authorized_hosts access to localhost.
""" |
user = c.travis.sudo.user
home = "~{0}".format(user)
# Run sudo() as the new sudo user; means less chown'ing, etc.
c.config.sudo.user = user
ssh_dir = "{0}/.ssh".format(home)
# TODO: worth wrapping in 'sh -c' and using '&&' instead of doing this?
for cmd in ("mkdir {0}", "chmod 0700 {0}"):
c.sudo(cmd.format(ssh_dir, user))
c.sudo('ssh-keygen -f {0}/id_rsa -N ""'.format(ssh_dir))
c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir)) |
<SYSTEM_TASK:>
Install and execute ``black`` under appropriate circumstances, with diffs.
<END_TASK>
<USER_TASK:>
Description:
def blacken(c):
"""
Install and execute ``black`` under appropriate circumstances, with diffs.
Installs and runs ``black`` under Python 3.6 (the first version it
supports). Since this sort of CI based task only needs to run once per
commit (formatting is not going to change between interpreters) this seems
like a worthwhile tradeoff.
This task uses black's ``--check`` and ``--fail`` flags, so not only will
the build fail if it does not conform, but contributors can see exactly
what they need to change. This is intended as a hedge against the fact that
not all contributors will be using Python 3.6+.
""" |
if not PYTHON.startswith("3.6"):
msg = "Not blackening, since Python {} != Python 3.6".format(PYTHON)
print(msg, file=sys.stderr)
return
# Install, allowing config override of hardcoded default version
config = c.config.get("travis", {}).get("black", {})
version = config.get("version", "18.5b0")
c.run("pip install black=={}".format(version))
# Execute our blacken task, with diff + check, which will both error
# and emit diffs.
checks.blacken(c, check=True, diff=True) |
<SYSTEM_TASK:>
Wrapper function to decorate a function
<END_TASK>
<USER_TASK:>
Description:
def decorator(self, func):
""" Wrapper function to decorate a function """ |
if inspect.isfunction(func):
func._methodview = self
elif inspect.ismethod(func):
func.__func__._methodview = self
else:
raise AssertionError('Can only decorate function and methods, {} given'.format(func))
return func |
<SYSTEM_TASK:>
Test if the method matches the provided set of arguments
<END_TASK>
<USER_TASK:>
Description:
def matches(self, verb, params):
""" Test if the method matches the provided set of arguments
:param verb: HTTP verb. Uppercase
:type verb: str
:param params: Existing route parameters
:type params: set
:returns: Whether this view matches
:rtype: bool
""" |
return (self.ifset is None or self.ifset <= params) and \
(self.ifnset is None or self.ifnset.isdisjoint(params)) and \
(self.methods is None or verb in self.methods) |
<SYSTEM_TASK:>
Detect a view matching the query
<END_TASK>
<USER_TASK:>
Description:
def _match_view(self, method, route_params):
""" Detect a view matching the query
:param method: HTTP method
:param route_params: Route parameters dict
:return: Method
:rtype: Callable|None
""" |
method = method.upper()
route_params = frozenset(k for k, v in route_params.items() if v is not None)
for view_name, info in self.methods_map[method].items():
if info.matches(method, route_params):
return getattr(self, view_name)
else:
return None |
<SYSTEM_TASK:>
Calculates the steady state probability vector for a regular Markov
<END_TASK>
<USER_TASK:>
Description:
def steady_state(P):
"""
Calculates the steady state probability vector for a regular Markov
transition matrix P.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, ), steady state distribution.
Examples
--------
Taken from :cite:`Kemeny1967`. Land of Oz example where the states are
    Rain, Nice and Snow, so there is a 25 percent chance that if it
rained in Oz today, it will snow tomorrow, while if it snowed today in
Oz there is a 50 percent chance of snow again tomorrow and a 25
percent chance of a nice day (nice, like when the witch with the monkeys
is melting).
>>> import numpy as np
>>> from giddy.ergodic import steady_state
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> steady_state(p)
array([0.4, 0.2, 0.4])
Thus, the long run distribution for Oz is to have 40 percent of the
days classified as Rain, 20 percent as Nice, and 40 percent as Snow
(states are mutually exclusive).
""" |
v, d = la.eig(np.transpose(P))
d = np.array(d)
# for a regular P maximum eigenvalue will be 1
mv = max(v)
# find its position
i = v.tolist().index(mv)
row = abs(d[:, i])
# normalize eigenvector corresponding to the eigenvalue 1
return row / sum(row) |
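A quick sanity check (illustrative): the returned distribution is invariant under P and sums to one.
import numpy as np

p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
pi = steady_state(p)
print(np.allclose(pi.dot(p), pi), np.isclose(pi.sum(), 1.0))   # True True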
<SYSTEM_TASK:>
Calculates the matrix of first mean passage times for an ergodic transition
<END_TASK>
<USER_TASK:>
Description:
def fmpt(P):
"""
Calculates the matrix of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
M : array
(k, k), elements are the expected value for the number of intervals
required for a chain starting in state i to first enter state j.
If i=j then this is the recurrence time.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> fm=fmpt(p)
>>> fm
array([[2.5 , 4. , 3.33333333],
[2.66666667, 5. , 2.66666667],
[3.33333333, 4. , 2.5 ]])
Thus, if it is raining today in Oz we can expect a nice day to come
along in another 4 days, on average, and snow to hit in 3.33 days. We can
expect another rainy day in 2.5 days. If it is nice today in Oz, we would
experience a change in the weather (either rain or snow) in 2.67 days from
today. (That wicked witch can only die once so I reckon that is the
ultimate absorbing state).
Notes
-----
Uses formulation (and examples on p. 218) in :cite:`Kemeny1967`.
""" |
P = np.matrix(P)
k = P.shape[0]
A = np.zeros_like(P)
ss = steady_state(P).reshape(k, 1)
for i in range(k):
A[:, i] = ss
A = A.transpose()
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
A_diag = np.diag(A)
A_diag = A_diag + (A_diag == 0)
D = np.diag(1. / A_diag)
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
return np.array(M) |
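Illustrative cross-check: the diagonal of the first-mean-passage-time matrix holds the recurrence times, which equal the reciprocals of the steady-state probabilities.
import numpy as np

p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
print(np.allclose(np.diag(fmpt(p)), 1. / steady_state(p)))   # True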
<SYSTEM_TASK:>
Variances of first mean passage times for an ergodic transition
<END_TASK>
<USER_TASK:>
Description:
def var_fmpt(P):
"""
Variances of first mean passage times for an ergodic transition
probability matrix.
Parameters
----------
P : array
(k, k), an ergodic Markov transition probability matrix.
Returns
-------
: array
(k, k), elements are the variances for the number of intervals
required for a chain starting in state i to first enter state j.
Examples
--------
>>> import numpy as np
>>> from giddy.ergodic import var_fmpt
>>> p=np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
>>> vfm=var_fmpt(p)
>>> vfm
array([[ 5.58333333, 12. , 6.88888889],
[ 6.22222222, 12. , 6.22222222],
[ 6.88888889, 12. , 5.58333333]])
Notes
-----
Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`.
""" |
P = np.matrix(P)
A = P ** 1000
n, k = A.shape
I = np.identity(k)
Z = la.inv(I - P + A)
E = np.ones_like(Z)
D = np.diag(1. / np.diag(A))
Zdg = np.diag(np.diag(Z))
M = (I - Z + E * Zdg) * D
ZM = Z * M
ZMdg = np.diag(np.diag(ZM))
W = M * (2 * Zdg * D - I) + 2 * (ZM - E * ZMdg)
return np.array(W - np.multiply(M, M)) |
<SYSTEM_TASK:>
Examine world state, returning data on what needs updating for release.
<END_TASK>
<USER_TASK:>
Description:
def _converge(c):
"""
Examine world state, returning data on what needs updating for release.
:param c: Invoke ``Context`` object or subclass.
:returns:
Two dicts (technically, dict subclasses, which allow attribute access),
``actions`` and ``state`` (in that order.)
``actions`` maps release component names to variables (usually class
constants) determining what action should be taken for that component:
- ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
``OKAY``.
- ``version``: members of `.VersionFile`.
``state`` contains the data used to calculate the actions, in case the
caller wants to do further analysis:
- ``branch``: the name of the checked-out Git branch.
- ``changelog``: the parsed project changelog, a `dict` of releases.
- ``release_type``: what type of release the branch appears to be (will
be a member of `.Release` such as ``Release.BUGFIX``.)
- ``latest_line_release``: the latest changelog release found for
current release type/line.
- ``latest_overall_release``: the absolute most recent release entry.
Useful for determining next minor/feature release.
- ``current_version``: the version string as found in the package's
``__version__``.
""" |
#
# Data/state gathering
#
# Get data about current repo context: what branch are we on & what kind of
# release does it appear to represent?
branch, release_type = _release_line(c)
# Short-circuit if type is undefined; we can't do useful work for that.
if release_type is Release.UNDEFINED:
raise UndefinedReleaseType(
"You don't seem to be on a release-related branch; "
"why are you trying to cut a release?"
)
# Parse our changelog so we can tell what's released and what's not.
# TODO: below needs to go in something doc-y somewhere; having it in a
# non-user-facing subroutine docstring isn't visible enough.
"""
.. note::
Requires that one sets the ``packaging.changelog_file`` configuration
option; it should be a relative or absolute path to your
``changelog.rst`` (or whatever it's named in your project).
"""
# TODO: allow skipping changelog if not using Releases since we have no
# other good way of detecting whether a changelog needs/got an update.
# TODO: chdir to sphinx.source, import conf.py, look at
# releases_changelog_name - that way it will honor that setting and we can
# ditch this explicit one instead. (and the docstring above)
changelog = parse_changelog(
c.packaging.changelog_file, load_extensions=True
)
# Get latest appropriate changelog release and any unreleased issues, for
# current line
line_release, issues = _release_and_issues(changelog, branch, release_type)
# Also get latest overall release, sometimes that matters (usually only
# when latest *appropriate* release doesn't exist yet)
overall_release = _versions_from_changelog(changelog)[-1]
# Obtain the project's main package & its version data
current_version = load_version(c)
# Grab all git tags
tags = _get_tags(c)
state = Lexicon(
{
"branch": branch,
"release_type": release_type,
"changelog": changelog,
"latest_line_release": Version(line_release)
if line_release
else None,
"latest_overall_release": overall_release, # already a Version
"unreleased_issues": issues,
"current_version": Version(current_version),
"tags": tags,
}
)
# Version number determinations:
# - latest actually-released version
# - the next version after that for current branch
# - which of the two is the actual version we're looking to converge on,
# depends on current changelog state.
latest_version, next_version = _latest_and_next_version(state)
state.latest_version = latest_version
state.next_version = next_version
state.expected_version = latest_version
if state.unreleased_issues:
state.expected_version = next_version
#
# Logic determination / convergence
#
actions = Lexicon()
# Changelog: needs new release entry if there are any unreleased issues for
# current branch's line.
# TODO: annotate with number of released issues [of each type?] - so not
# just "up to date!" but "all set (will release 3 features & 5 bugs)"
actions.changelog = Changelog.OKAY
if release_type in (Release.BUGFIX, Release.FEATURE) and issues:
actions.changelog = Changelog.NEEDS_RELEASE
# Version file: simply whether version file equals the target version.
# TODO: corner case of 'version file is >1 release in the future', but
# that's still wrong, just would be a different 'bad' status output.
actions.version = VersionFile.OKAY
if state.current_version != state.expected_version:
actions.version = VersionFile.NEEDS_BUMP
# Git tag: similar to version file, except the check is existence of tag
# instead of comparison to file contents. We even reuse the
# 'expected_version' variable wholesale.
actions.tag = Tag.OKAY
if state.expected_version not in state.tags:
actions.tag = Tag.NEEDS_CUTTING
#
# Return
#
return actions, state |
<SYSTEM_TASK:>
Edit changelog & version, git commit, and git tag, to set up for release.
<END_TASK>
<USER_TASK:>
Description:
def prepare(c):
"""
Edit changelog & version, git commit, and git tag, to set up for release.
""" |
# Print dry-run/status/actions-to-take data & grab programmatic result
# TODO: maybe expand the enum-based stuff to have values that split up
# textual description, command string, etc. See the TODO up by their
# definition too, re: just making them non-enum classes period.
# TODO: otherwise, we at least want derived eg changelog/version/etc paths
# transmitted from status() into here...
actions, state = status(c)
# TODO: unless nothing-to-do in which case just say that & exit 0
if not confirm("Take the above actions?"):
sys.exit("Aborting.")
# TODO: factor out what it means to edit a file:
# - $EDITOR or explicit expansion of it in case no shell involved
# - pty=True and hide=False, because otherwise things can be bad
# - what else?
# Changelog! (pty for non shite editing, eg vim sure won't like non-pty)
if actions.changelog is Changelog.NEEDS_RELEASE:
# TODO: identify top of list and inject a ready-made line? Requires vim
# assumption...GREAT opportunity for class/method based tasks!
cmd = "$EDITOR {0.packaging.changelog_file}".format(c)
c.run(cmd, pty=True, hide=False)
# TODO: add a step for checking reqs.txt / setup.py vs virtualenv contents
# Version file!
if actions.version == VersionFile.NEEDS_BUMP:
# TODO: suggest the bump and/or overwrite the entire file? Assumes a
# specific file format. Could be bad for users which expose __version__
# but have other contents as well.
version_file = os.path.join(
_find_package(c),
c.packaging.get("version_module", "_version") + ".py",
)
cmd = "$EDITOR {0}".format(version_file)
c.run(cmd, pty=True, hide=False)
if actions.tag == Tag.NEEDS_CUTTING:
# Commit, if necessary, so the tag includes everything.
# NOTE: this strips out untracked files. effort.
cmd = 'git status --porcelain | egrep -v "^\\?"'
if c.run(cmd, hide=True, warn=True).ok:
c.run(
'git commit -am "Cut {0}"'.format(state.expected_version),
hide=False,
)
# Tag!
c.run("git tag {0}".format(state.expected_version), hide=False) |
<SYSTEM_TASK:>
Examine current repo state to determine what type of release to prep.
<END_TASK>
<USER_TASK:>
Description:
def _release_line(c):
"""
Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch).
""" |
# TODO: I don't _think_ this technically overlaps with Releases (because
# that only ever deals with changelog contents, and therefore full release
# version numbers) but in case it does, move it there sometime.
# TODO: this and similar calls in this module may want to be given an
# explicit pointer-to-git-repo option (i.e. if run from outside project
# context).
# TODO: major releases? or are they big enough events we don't need to
# bother with the script? Also just hard to gauge - when is master the next
# 1.x feature vs 2.0?
branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
type_ = Release.UNDEFINED
if BUGFIX_RE.match(branch):
type_ = Release.BUGFIX
if FEATURE_RE.match(branch):
type_ = Release.FEATURE
return branch, type_ |
<SYSTEM_TASK:>
Return all released versions from given ``changelog``, sorted.
<END_TASK>
<USER_TASK:>
Description:
def _versions_from_changelog(changelog):
"""
Return all released versions from given ``changelog``, sorted.
:param dict changelog:
A changelog dict as returned by ``releases.util.parse_changelog``.
:returns: A sorted list of `semantic_version.Version` objects.
""" |
versions = [Version(x) for x in changelog if BUGFIX_RELEASE_RE.match(x)]
return sorted(versions) |
<SYSTEM_TASK:>
Return most recent branch-appropriate release, if any, and its contents.
<END_TASK>
<USER_TASK:>
Description:
def _release_and_issues(changelog, branch, release_type):
"""
Return most recent branch-appropriate release, if any, and its contents.
:param dict changelog:
Changelog contents, as returned by ``releases.util.parse_changelog``.
:param str branch:
Branch name.
:param release_type:
Member of `Release`, e.g. `Release.FEATURE`.
:returns:
Two-tuple of release (``str``) and issues (``list`` of issue numbers.)
If there is no latest release for the given branch (e.g. if it's a
feature or master branch), it will be ``None``.
""" |
# Bugfix lines just use the branch to find issues
bucket = branch
# Features need a bit more logic
if release_type is Release.FEATURE:
bucket = _latest_feature_bucket(changelog)
# Issues is simply what's in the bucket
issues = changelog[bucket]
# Latest release is undefined for feature lines
release = None
# And requires scanning changelog, for bugfix lines
if release_type is Release.BUGFIX:
versions = [text_type(x) for x in _versions_from_changelog(changelog)]
release = [x for x in versions if x.startswith(bucket)][-1]
return release, issues |
<SYSTEM_TASK:>
Return sorted list of release-style tags as semver objects.
<END_TASK>
<USER_TASK:>
Description:
def _get_tags(c):
"""
Return sorted list of release-style tags as semver objects.
""" |
tags_ = []
for tagstr in c.run("git tag", hide=True).stdout.strip().split("\n"):
try:
tags_.append(Version(tagstr))
# Ignore anything non-semver; most of the time they'll be non-release
# tags, and even if they are, we can't reason about anything
# non-semver anyways.
# TODO: perhaps log these to DEBUG
except ValueError:
pass
# Version objects sort semantically
return sorted(tags_) |
<SYSTEM_TASK:>
Try to find 'the' One True Package for this project.
<END_TASK>
<USER_TASK:>
Description:
def _find_package(c):
"""
Try to find 'the' One True Package for this project.
Mostly for obtaining the ``_version`` file within it.
Uses the ``packaging.package`` config setting if defined. If not defined,
fallback is to look for a single top-level Python package (directory
containing ``__init__.py``). (This search ignores a small blacklist of
directories like ``tests/``, ``vendor/`` etc.)
""" |
# TODO: is there a way to get this from the same place setup.py does w/o
# setup.py barfing (since setup() runs at import time and assumes CLI use)?
configured_value = c.get("packaging", {}).get("package", None)
if configured_value:
return configured_value
# TODO: tests covering this stuff here (most logic tests simply supply
# config above)
packages = [
path
for path in os.listdir(".")
if (
os.path.isdir(path)
and os.path.exists(os.path.join(path, "__init__.py"))
and path not in ("tests", "integration", "sites", "vendor")
)
]
if not packages:
sys.exit("Unable to find a local Python package!")
if len(packages) > 1:
sys.exit("Found multiple Python packages: {0!r}".format(packages))
return packages[0] |
<SYSTEM_TASK:>
Publish code to PyPI or index of choice.
<END_TASK>
<USER_TASK:>
Description:
def publish(
c,
sdist=True,
wheel=False,
index=None,
sign=False,
dry_run=False,
directory=None,
dual_wheels=False,
alt_python=None,
check_desc=False,
):
"""
Publish code to PyPI or index of choice.
All parameters save ``dry_run`` and ``directory`` honor config settings of
the same name, under the ``packaging`` tree. E.g. say
``.configure({'packaging': {'wheel': True}})`` to force building wheel
archives by default.
:param bool sdist:
Whether to upload sdists/tgzs.
:param bool wheel:
Whether to upload wheels (requires the ``wheel`` package from PyPI).
:param str index:
Custom upload index/repository name. See ``upload`` help for details.
:param bool sign:
Whether to sign the built archive(s) via GPG.
:param bool dry_run:
Skip actual publication step if ``True``.
This also prevents cleanup of the temporary build/dist directories, so
you can examine the build artifacts.
:param str directory:
Base directory within which will live the ``dist/`` and ``build/``
directories.
Defaults to a temporary directory which is cleaned up after the run
finishes.
:param bool dual_wheels:
When ``True``, builds individual wheels for Python 2 and Python 3.
Useful for situations where you can't build universal wheels, but still
want to distribute for both interpreter versions.
Requires that you have a useful ``python3`` (or ``python2``, if you're
on Python 3 already) binary in your ``$PATH``. Also requires that this
other python have the ``wheel`` package installed in its
``site-packages``; usually this will mean the global site-packages for
that interpreter.
See also the ``alt_python`` argument.
:param str alt_python:
Path to the 'alternate' Python interpreter to use when
``dual_wheels=True``.
When ``None`` (the default) will be ``python3`` or ``python2``,
depending on the currently active interpreter.
:param bool check_desc:
Whether to run ``setup.py check -r -s`` (uses ``readme_renderer``)
before trying to publish - catches long_description bugs. Default:
``False``.
""" |
# Don't hide by default, this step likes to be verbose most of the time.
c.config.run.hide = False
# Config hooks
config = c.config.get("packaging", {})
index = config.get("index", index)
sign = config.get("sign", sign)
dual_wheels = config.get("dual_wheels", dual_wheels)
check_desc = config.get("check_desc", check_desc)
# Initial sanity check, if needed. Will die usefully.
if check_desc:
c.run("python setup.py check -r -s")
# Build, into controlled temp dir (avoids attempting to re-upload old
# files)
with tmpdir(skip_cleanup=dry_run, explicit=directory) as tmp:
# Build default archives
build(c, sdist=sdist, wheel=wheel, directory=tmp)
# Build opposing interpreter archive, if necessary
if dual_wheels:
if not alt_python:
alt_python = "python2"
if sys.version_info[0] == 2:
alt_python = "python3"
build(c, sdist=False, wheel=True, directory=tmp, python=alt_python)
# Do the thing!
upload(c, directory=tmp, index=index, sign=sign, dry_run=dry_run) |
<SYSTEM_TASK:>
Context-manage a temporary directory.
<END_TASK>
<USER_TASK:>
Description:
def tmpdir(skip_cleanup=False, explicit=None):
"""
Context-manage a temporary directory.
Can be given ``skip_cleanup`` to skip cleanup, and ``explicit`` to choose a
specific location.
(If both are given, this is basically not doing anything, but it allows
code that normally requires a secure temporary directory to 'dry run'
instead.)
""" |
tmp = explicit if explicit is not None else mkdtemp()
try:
yield tmp
finally:
if not skip_cleanup:
rmtree(tmp) |
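A usage sketch, assuming the function is wrapped with contextlib.contextmanager (its yield-based body implies that); the explicit path below is an arbitrary placeholder.
with tmpdir() as tmp:
    print("building into", tmp)            # removed on exit
with tmpdir(skip_cleanup=True, explicit="/tmp/keep-me") as tmp:
    print("dry-running into", tmp)         # left in place afterwards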
<SYSTEM_TASK:>
Generate random spatial permutations for inference on LISA vectors.
<END_TASK>
<USER_TASK:>
Description:
def permute(self, permutations=99, alternative='two.sided'):
"""
        Generate random spatial permutations for inference on LISA vectors.
Parameters
----------
permutations : int, optional
Number of random permutations of observations.
alternative : string, optional
Type of alternative to form in generating p-values.
Options are: `two-sided` which tests for difference between observed
counts and those obtained from the permutation distribution;
`positive` which tests the alternative that the focal unit and its
lag move in the same direction over time; `negative` which tests
that the focal unit and its lag move in opposite directions over
the interval.
""" |
rY = self.Y.copy()
idxs = np.arange(len(rY))
counts = np.zeros((permutations, len(self.counts)))
for m in range(permutations):
np.random.shuffle(idxs)
res = self._calc(rY[idxs, :], self.w, self.k)
counts[m] = res['counts']
self.counts_perm = counts
self.larger_perm = np.array(
[(counts[:, i] >= self.counts[i]).sum() for i in range(self.k)])
self.smaller_perm = np.array(
[(counts[:, i] <= self.counts[i]).sum() for i in range(self.k)])
self.expected_perm = counts.mean(axis=0)
self.alternative = alternative
# pvalue logic
# if P is the proportion that are as large for a one sided test (larger
# than), then
# p=P.
#
# For a two-tailed test, if P < .5, p = 2 * P, else, p = 2(1-P)
# Source: Rayner, J. C. W., O. Thas, and D. J. Best. 2009. "Appendix B:
# Parametric Bootstrap P-Values." In Smooth Tests of Goodness of Fit,
# 247. John Wiley and Sons.
        # Note that the larger and smaller counts would be complements
        # (except for the shared equality) for a given bin in the circular
        # histogram. So we only need one of them.
        # We report two-sided p-values for each bin as the default
        # since a priori there could be different alternatives for each bin
        # depending on the problem at hand.
alt = alternative.upper()
if alt == 'TWO.SIDED':
P = (self.larger_perm + 1) / (permutations + 1.)
mask = P < 0.5
self.p = mask * 2 * P + (1 - mask) * 2 * (1 - P)
elif alt == 'POSITIVE':
# NE, SW sectors are higher, NW, SE are lower
POS = _POS8
if self.k == 4:
POS = _POS4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = POS * L + (1 - POS) * S
self.p = P
elif alt == 'NEGATIVE':
# NE, SW sectors are lower, NW, SE are higher
NEG = _NEG8
if self.k == 4:
NEG = _NEG4
L = (self.larger_perm + 1) / (permutations + 1.)
S = (self.smaller_perm + 1) / (permutations + 1.)
P = NEG * L + (1 - NEG) * S
self.p = P
else:
print(('Bad option for alternative: %s.' % alternative)) |
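A hedged usage sketch, assuming this method belongs to giddy's directional Rose class; the lattice weights and random data below are placeholders.
import numpy as np
import libpysal
from giddy.directional import Rose

np.random.seed(0)
w = libpysal.weights.lat2W(5, 5)        # 25 observations on a 5x5 lattice
Y = np.random.random((25, 2))           # a variable observed at two points in time
r = Rose(Y, w, k=8)
r.permute(permutations=99, alternative='two.sided')
print(r.p)                              # one pseudo p-value per circular-histogram sector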
<SYSTEM_TASK:>
Plot the rose diagram.
<END_TASK>
<USER_TASK:>
Description:
def plot(self, attribute=None, ax=None, **kwargs):
"""
Plot the rose diagram.
Parameters
----------
attribute : (n,) ndarray, optional
Variable to specify colors of the colorbars.
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None. Note, this axis should have a polar projection.
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
""" |
from splot.giddy import dynamic_lisa_rose
fig, ax = dynamic_lisa_rose(self, attribute=attribute,
ax=ax, **kwargs)
return fig, ax |
<SYSTEM_TASK:>
Plot vectors of positional transition of LISA values starting
<END_TASK>
<USER_TASK:>
Description:
def plot_origin(self): # TODO add attribute option to color vectors
"""
Plot vectors of positional transition of LISA values starting
from the same origin.
""" |
import matplotlib.cm as cm
import matplotlib.pyplot as plt
ax = plt.subplot(111)
xlim = [self._dx.min(), self._dx.max()]
ylim = [self._dy.min(), self._dy.max()]
for x, y in zip(self._dx, self._dy):
xs = [0, x]
ys = [0, y]
plt.plot(xs, ys, '-b') # TODO change this to scale with attribute
plt.axis('equal')
plt.xlim(xlim)
plt.ylim(ylim) |
<SYSTEM_TASK:>
Plot vectors of positional transition of LISA values
<END_TASK>
<USER_TASK:>
Description:
def plot_vectors(self, arrows=True):
"""
Plot vectors of positional transition of LISA values
within quadrant in scatterplot in a polar plot.
Parameters
----------
ax : Matplotlib Axes instance, optional
If given, the figure will be created inside this axis.
Default =None.
arrows : boolean, optional
If True show arrowheads of vectors. Default =True
**kwargs : keyword arguments, optional
Keywords used for creating and designing the plot.
Note: 'c' and 'color' cannot be passed when attribute is not None
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
ax : matplotlib Axes instance
Axes in which the figure is plotted
""" |
from splot.giddy import dynamic_lisa_vectors
fig, ax = dynamic_lisa_vectors(self, arrows=arrows)
return fig, ax |
<SYSTEM_TASK:>
Nuke docs build target directory so next build is clean.
<END_TASK>
<USER_TASK:>
Description:
def _clean(c):
"""
Nuke docs build target directory so next build is clean.
""" |
if isdir(c.sphinx.target):
rmtree(c.sphinx.target) |
<SYSTEM_TASK:>
Display documentation contents with the 'tree' program.
<END_TASK>
<USER_TASK:>
Description:
def tree(c):
"""
Display documentation contents with the 'tree' program.
""" |
ignore = ".git|*.pyc|*.swp|dist|*.egg-info|_static|_build|_templates"
c.run('tree -Ca -I "{0}" {1}'.format(ignore, c.sphinx.source)) |
<SYSTEM_TASK:>
Watch both doc trees & rebuild them if files change.
<END_TASK>
<USER_TASK:>
Description:
def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
Reuses the configuration values ``packaging.package`` or ``tests.package``
(the former winning over the latter if both defined) when determining which
source directory to scan for API doc updates.
""" |
# TODO: break back down into generic single-site version, then create split
# tasks as with docs/www above. Probably wants invoke#63.
# NOTE: 'www'/'docs' refer to the module level sub-collections. meh.
# Readme & WWW triggers WWW
www_c = Context(config=c.config.clone())
www_c.update(**www.configuration())
www_handler = make_handler(
ctx=www_c,
task_=www["build"],
regexes=[r"\./README.rst", r"\./sites/www"],
ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
)
# Code and docs trigger API
docs_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
regexes = [r"\./sites/docs"]
package = c.get("packaging", {}).get("package", None)
if package is None:
package = c.get("tests", {}).get("package", None)
if package:
regexes.append(r"\./{}/".format(package))
api_handler = make_handler(
ctx=docs_c,
task_=docs["build"],
regexes=regexes,
ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
)
observe(www_handler, api_handler) |
<SYSTEM_TASK:>
Random permutation of rows and columns of a matrix
<END_TASK>
<USER_TASK:>
Description:
def shuffle_matrix(X, ids):
"""
Random permutation of rows and columns of a matrix
Parameters
----------
X : array
(k, k), array to be permutated.
ids : array
range (k, ).
Returns
-------
X : array
(k, k) with rows and columns randomly shuffled.
Examples
--------
>>> import numpy as np
>>> from giddy.util import shuffle_matrix
>>> X=np.arange(16)
>>> X.shape=(4,4)
>>> np.random.seed(10)
>>> shuffle_matrix(X,list(range(4)))
array([[10, 8, 11, 9],
[ 2, 0, 3, 1],
[14, 12, 15, 13],
[ 6, 4, 7, 5]])
""" |
np.random.shuffle(ids)
return X[ids, :][:, ids] |
<SYSTEM_TASK:>
Markov-based mobility index.
<END_TASK>
<USER_TASK:>
Description:
def markov_mobility(p, measure="P", ini=None):
"""
Markov-based mobility index.
Parameters
----------
p : array
(k, k), Markov transition probability matrix.
measure : string
If measure= "P",
:math:`M_{P} = \\frac{m-\sum_{i=1}^m P_{ii}}{m-1}`;
if measure = "D",
:math:`M_{D} = 1 - |\det(P)|`,
where :math:`\det(P)` is the determinant of :math:`P`;
if measure = "L2",
:math:`M_{L2} = 1 - |\lambda_2|`,
where :math:`\lambda_2` is the second largest eigenvalue of
:math:`P`;
if measure = "B1",
:math:`M_{B1} = \\frac{m-m \sum_{i=1}^m \pi_i P_{ii}}{m-1}`,
where :math:`\pi` is the initial income distribution;
if measure == "B2",
:math:`M_{B2} = \\frac{1}{m-1} \sum_{i=1}^m \sum_{
j=1}^m \pi_i P_{ij} |i-j|`,
where :math:`\pi` is the initial income distribution.
ini : array
(k,), initial distribution. Need to be specified if
measure = "B1" or "B2". If not,
the initial distribution would be treated as a uniform
distribution.
Returns
-------
mobi : float
Mobility value.
Notes
-----
The mobility indices are based on :cite:`Formby:2004fk`.
Examples
--------
>>> import numpy as np
>>> import libpysal
>>> import mapclassify as mc
>>> from giddy.markov import Markov
>>> from giddy.mobility import markov_mobility
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
(1) Estimate Shorrock1 mobility index:
>>> mobi_1 = markov_mobility(m.p, measure="P")
>>> print("{:.5f}".format(mobi_1))
0.19759
(2) Estimate Shorrock2 mobility index:
>>> mobi_2 = markov_mobility(m.p, measure="D")
>>> print("{:.5f}".format(mobi_2))
0.60685
(3) Estimate Sommers and Conlisk mobility index:
>>> mobi_3 = markov_mobility(m.p, measure="L2")
>>> print("{:.5f}".format(mobi_3))
0.03978
(4) Estimate Bartholomew1 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_4 = markov_mobility(m.p, measure = "B1", ini=ini)
>>> print("{:.5f}".format(mobi_4))
0.22777
(5) Estimate Bartholomew2 mobility index (note that the initial
distribution should be given):
>>> ini = np.array([0.1,0.2,0.2,0.4,0.1])
>>> mobi_5 = markov_mobility(m.p, measure = "B2", ini=ini)
>>> print("{:.5f}".format(mobi_5))
0.04637
""" |
p = np.array(p)
k = p.shape[1]
if measure == "P":
t = np.trace(p)
mobi = (k - t) / (k - 1)
elif measure == "D":
mobi = 1 - abs(la.det(p))
elif measure == "L2":
w, v = la.eig(p)
eigen_value_abs = abs(w)
mobi = 1 - np.sort(eigen_value_abs)[-2]
elif measure == "B1":
if ini is None:
ini = 1.0 / k * np.ones(k)
mobi = (k - k * np.sum(ini * np.diag(p))) / (k - 1)
elif measure == "B2":
mobi = 0
if ini is None:
ini = 1.0 / k * np.ones(k)
for i in range(k):
for j in range(k):
mobi = mobi + ini[i] * p[i, j] * abs(i - j)
mobi = mobi / (k - 1)
return mobi |
<SYSTEM_TASK:>
chi-squared test of difference between two transition matrices.
<END_TASK>
<USER_TASK:>
Description:
def chi2(T1, T2):
"""
chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : array
(k, k), matrix of transitions (counts).
T2 : array
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}
Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions.
""" |
rs2 = T2.sum(axis=1)
rs1 = T1.sum(axis=1)
rs2nz = rs2 > 0
rs1nz = rs1 > 0
dof1 = sum(rs1nz)
dof2 = sum(rs2nz)
rs2 = rs2 + (rs2 == 0)
dof = (dof1 - 1) * (dof2 - 1)
p = np.diag(1 / rs2) * np.matrix(T2)
E = np.diag(rs1) * np.matrix(p)
num = T1 - E
num = np.multiply(num, num)
E = E + (E == 0)
chi2 = num / E
chi2 = chi2.sum()
pvalue = 1 - stats.chi2.cdf(chi2, dof)
return chi2, pvalue, dof |
<SYSTEM_TASK:>
Kullback information based test of Markov Homogeneity.
<END_TASK>
<USER_TASK:>
Description:
def kullback(F):
"""
Kullback information based test of Markov Homogeneity.
Parameters
----------
F : array
(s, r, r), values are transitions (not probabilities) for
s strata, r initial states, r terminal states.
Returns
-------
Results : dictionary
(key - value)
Conditional homogeneity - (float) test statistic for homogeneity
of transition probabilities across strata.
Conditional homogeneity pvalue - (float) p-value for test
statistic.
Conditional homogeneity dof - (int) degrees of freedom =
r(s-1)(r-1).
Notes
-----
Based on :cite:`Kullback1962`.
Example below is taken from Table 9.2 .
Examples
--------
>>> import numpy as np
>>> from giddy.markov import kullback
>>> s1 = np.array([
... [ 22, 11, 24, 2, 2, 7],
... [ 5, 23, 15, 3, 42, 6],
... [ 4, 21, 190, 25, 20, 34],
... [0, 2, 14, 56, 14, 28],
... [32, 15, 20, 10, 56, 14],
... [5, 22, 31, 18, 13, 134]
... ])
>>> s2 = np.array([
... [3, 6, 9, 3, 0, 8],
... [1, 9, 3, 12, 27, 5],
... [2, 9, 208, 32, 5, 18],
... [0, 14, 32, 108, 40, 40],
... [22, 14, 9, 26, 224, 14],
... [1, 5, 13, 53, 13, 116]
... ])
>>>
>>> F = np.array([s1, s2])
>>> res = kullback(F)
>>> "%8.3f"%res['Conditional homogeneity']
' 160.961'
>>> "%d"%res['Conditional homogeneity dof']
'30'
>>> "%3.1f"%res['Conditional homogeneity pvalue']
'0.0'
""" |
F1 = F == 0
F1 = F + F1
FLF = F * np.log(F1)
T1 = 2 * FLF.sum()
FdJK = F.sum(axis=0)
FdJK1 = FdJK + (FdJK == 0)
FdJKLFdJK = FdJK * np.log(FdJK1)
T2 = 2 * FdJKLFdJK.sum()
FdJd = F.sum(axis=0).sum(axis=1)
FdJd1 = FdJd + (FdJd == 0)
T3 = 2 * (FdJd * np.log(FdJd1)).sum()
FIJd = F[:, :].sum(axis=1)
FIJd1 = FIJd + (FIJd == 0)
T4 = 2 * (FIJd * np.log(FIJd1)).sum()
T6 = F.sum()
T6 = 2 * T6 * np.log(T6)
s, r, r1 = F.shape
chom = T1 - T4 - T2 + T3
cdof = r * (s - 1) * (r - 1)
results = {}
results['Conditional homogeneity'] = chom
results['Conditional homogeneity dof'] = cdof
results['Conditional homogeneity pvalue'] = 1 - stats.chi2.cdf(chom, cdof)
return results |
<SYSTEM_TASK:>
Prais conditional mobility measure.
<END_TASK>
<USER_TASK:>
Description:
def prais(pmat):
"""
Prais conditional mobility measure.
Parameters
----------
pmat : matrix
(k, k), Markov probability transition matrix.
Returns
-------
pr : matrix
(1, k), conditional mobility measures for each of the k classes.
Notes
-----
Prais' conditional mobility measure for a class is defined as:
.. math::
pr_i = 1 - p_{i,i}
Examples
--------
>>> import numpy as np
    >>> import libpysal
    >>> import mapclassify as mc
    >>> from giddy.markov import Markov, prais
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
>>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose()
>>> m = Markov(q5)
>>> m.transitions
array([[729., 71., 1., 0., 0.],
[ 72., 567., 80., 3., 0.],
[ 0., 81., 631., 86., 2.],
[ 0., 3., 86., 573., 56.],
[ 0., 0., 1., 57., 741.]])
>>> m.p
array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ],
[0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ],
[0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ],
[0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443],
[0. , 0. , 0.00125156, 0.07133917, 0.92740926]])
>>> prais(m.p)
array([0.08988764, 0.21468144, 0.21125 , 0.20194986, 0.07259074])
""" |
pmat = np.array(pmat)
pr = 1 - np.diag(pmat)
return pr |
<SYSTEM_TASK:>
Test for homogeneity of Markov transition probabilities across regimes.
<END_TASK>
<USER_TASK:>
Description:
def homogeneity(transition_matrices, regime_names=[], class_names=[],
title="Markov Homogeneity Test"):
"""
Test for homogeneity of Markov transition probabilities across regimes.
Parameters
----------
transition_matrices : list
of transition matrices for regimes, all matrices must
have same size (r, c). r is the number of rows in the
transition matrix and c is the number of columns in
the transition matrix.
regime_names : sequence
Labels for the regimes.
class_names : sequence
Labels for the classes/states of the Markov chain.
title : string
name of test.
Returns
-------
: implicit
an instance of Homogeneity_Results.
""" |
return Homogeneity_Results(transition_matrices, regime_names=regime_names,
class_names=class_names, title=title) |
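An illustrative call with two made-up 3x3 transition count matrices; the regime and class labels are arbitrary.
import numpy as np

t1 = np.array([[30., 10., 5.], [10., 40., 10.], [5., 10., 30.]])
t2 = np.array([[25., 15., 5.], [15., 30., 15.], [5., 15., 25.]])
res = homogeneity([t1, t2], regime_names=["A", "B"],
                  class_names=["low", "mid", "high"])
res.summary()    # prints the test table via Homogeneity_Results.summary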
<SYSTEM_TASK:>
Calculate sojourn time based on a given transition probability matrix.
<END_TASK>
<USER_TASK:>
Description:
def sojourn_time(p):
"""
Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
chain spends in each states before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.])
""" |
p = np.asarray(p)
pii = p.diagonal()
if not (1 - pii).all():
print("Sojourn times are infinite for absorbing states!")
return 1 / (1 - pii) |
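Illustrative edge case: with an absorbing state (p_ii = 1), the division yields an infinite sojourn time and the warning above is printed.
import numpy as np

p_abs = np.array([[1., 0.], [.5, .5]])   # state 0 is absorbing
sojourn_time(p_abs)                      # array([inf,  2.]), after the warning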
<SYSTEM_TASK:>
A summary method to call the Markov homogeneity test to test for
<END_TASK>
<USER_TASK:>
Description:
def summary(self, file_name=None):
"""
A summary method to call the Markov homogeneity test to test for
temporally lagged spatial dependence.
To learn more about the properties of the tests, refer to
:cite:`Rey2016a` and :cite:`Kang2018`.
""" |
class_names = ["C%d" % i for i in range(self.k)]
regime_names = ["LAG%d" % i for i in range(self.k)]
ht = homogeneity(self.T, class_names=class_names,
regime_names=regime_names)
title = "Spatial Markov Test"
if self.variable_name:
title = title + ": " + self.variable_name
if file_name:
ht.summary(file_name=file_name, title=title)
else:
ht.summary(title=title) |
<SYSTEM_TASK:>
Detect spillover locations for diffusion in LISA Markov.
<END_TASK>
<USER_TASK:>
Description:
def spillover(self, quadrant=1, neighbors_on=False):
"""
Detect spillover locations for diffusion in LISA Markov.
Parameters
----------
quadrant : int
which quadrant in the scatterplot should form the core
of a cluster.
neighbors_on : binary
If false, then only the 1st order neighbors of a core
location are included in the cluster.
If true, neighbors of cluster core 1st order neighbors
are included in the cluster.
Returns
-------
results : dictionary
two keys - values pairs:
'components' - array (n, t)
values are integer ids (starting at 1) indicating which
component/cluster observation i in period t belonged to.
'spillover' - array (n, t-1)
binary values indicating if the location was a
spill-over location that became a new member of a
previously existing cluster.
Examples
--------
>>> import libpysal
>>> from giddy.markov import LISA_Markov
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> np.random.seed(10)
>>> lm_random = LISA_Markov(pci, w, permutations=99)
>>> r = lm_random.spillover()
>>> (r['components'][:, 12] > 0).sum()
17
>>> (r['components'][:, 13]>0).sum()
23
>>> (r['spill_over'][:,12]>0).sum()
6
Including neighbors of core neighbors
>>> rn = lm_random.spillover(neighbors_on=True)
>>> (rn['components'][:, 12] > 0).sum()
26
>>> (rn["components"][:, 13] > 0).sum()
34
>>> (rn["spill_over"][:, 12] > 0).sum()
8
""" |
n, k = self.q.shape
if self.permutations:
spill_over = np.zeros((n, k - 1))
components = np.zeros((n, k))
i2id = {} # handle string keys
for key in list(self.w.neighbors.keys()):
idx = self.w.id2i[key]
i2id[idx] = key
sig_lisas = (self.q == quadrant) \
* (self.p_values <= self.significance_level)
sig_ids = [np.nonzero(
sig_lisas[:, i])[0].tolist() for i in range(k)]
neighbors = self.w.neighbors
for t in range(k - 1):
s1 = sig_ids[t]
s2 = sig_ids[t + 1]
g1 = Graph(undirected=True)
for i in s1:
for neighbor in neighbors[i2id[i]]:
g1.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g1.add_edge(neighbor, nn, 1.0)
components1 = g1.connected_components(op=gt)
components1 = [list(c.nodes) for c in components1]
g2 = Graph(undirected=True)
for i in s2:
for neighbor in neighbors[i2id[i]]:
g2.add_edge(i2id[i], neighbor, 1.0)
if neighbors_on:
for nn in neighbors[neighbor]:
g2.add_edge(neighbor, nn, 1.0)
components2 = g2.connected_components(op=gt)
components2 = [list(c.nodes) for c in components2]
c2 = []
c1 = []
for c in components2:
c2.extend(c)
for c in components1:
c1.extend(c)
new_ids = [j for j in c2 if j not in c1]
spill_ids = []
for j in new_ids:
# find j's component in period 2
cj = [c for c in components2 if j in c][0]
# for members of j's component in period 2, check if they
# belonged to any components in period 1
for i in cj:
if i in c1:
spill_ids.append(j)
break
for spill_id in spill_ids:
id = self.w.id2i[spill_id]
spill_over[id, t] = 1
for c, component in enumerate(components1):
for i in component:
ii = self.w.id2i[i]
components[ii, t] = c + 1
results = {}
results['components'] = components
results['spill_over'] = spill_over
return results
else:
return None |
<SYSTEM_TASK:>
Get entity property names
<END_TASK>
<USER_TASK:>
Description:
def get_entity_propnames(entity):
""" Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
""" |
ins = entity if isinstance(entity, InstanceState) else inspect(entity)
return set(
ins.mapper.column_attrs.keys() + # Columns
ins.mapper.relationships.keys() # Relationships
) |
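A hedged SQLAlchemy sketch; the model below is invented purely to show the call, and the .keys() concatenation behavior may vary across SQLAlchemy versions.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

print(get_entity_propnames(User))   # {'id', 'name'} (no relationships defined)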
<SYSTEM_TASK:>
Return a Version whose minor number is one greater than self's.
<END_TASK>
<USER_TASK:>
Description:
def next_minor(self):
"""
Return a Version whose minor number is one greater than self's.
.. note::
The new Version will always have a zeroed-out bugfix/tertiary version
number, because the "next minor release" of e.g. 1.2.1 is 1.3.0, not
1.3.1.
""" |
clone = self.clone()
clone.minor += 1
clone.patch = 0
return clone |
<SYSTEM_TASK:>
The encoding used by the text file stored in ``source_path``.
<END_TASK>
<USER_TASK:>
Description:
def encoding_for(source_path, encoding='automatic', fallback_encoding=None):
"""
The encoding used by the text file stored in ``source_path``.
The algorithm used is:
    * If ``encoding`` is ``'automatic'``, attempt the following:
1. Check BOM for UTF-8, UTF-16 and UTF-32.
2. Look for XML prolog or magic heading like ``# -*- coding: cp1252 -*-``
3. Read the file using UTF-8.
      4. If all this fails, assume the ``fallback_encoding``.
    * If ``encoding`` is ``'chardet'``, use :mod:`chardet` to obtain the encoding.
* For any other ``encoding`` simply use the specified value.
""" |
assert encoding is not None
if encoding == 'automatic':
with open(source_path, 'rb') as source_file:
heading = source_file.read(128)
result = None
if len(heading) == 0:
# File is empty, assume a dummy encoding.
result = 'utf-8'
if result is None:
# Check for known BOMs.
for bom, encoding in _BOM_TO_ENCODING_MAP.items():
if heading[:len(bom)] == bom:
result = encoding
break
if result is None:
# Look for common headings that indicate the encoding.
ascii_heading = heading.decode('ascii', errors='replace')
ascii_heading = ascii_heading.replace('\r\n', '\n')
ascii_heading = ascii_heading.replace('\r', '\n')
ascii_heading = '\n'.join(ascii_heading.split('\n')[:2]) + '\n'
coding_magic_match = _CODING_MAGIC_REGEX.match(ascii_heading)
if coding_magic_match is not None:
result = coding_magic_match.group('encoding')
else:
first_line = ascii_heading.split('\n')[0]
xml_prolog_match = _XML_PROLOG_REGEX.match(first_line)
if xml_prolog_match is not None:
result = xml_prolog_match.group('encoding')
elif encoding == 'chardet':
assert _detector is not None, \
'without chardet installed, encoding="chardet" must be rejected before calling encoding_for()'
_detector.reset()
with open(source_path, 'rb') as source_file:
for line in source_file.readlines():
_detector.feed(line)
if _detector.done:
break
result = _detector.result['encoding']
if result is None:
_log.warning(
'%s: chardet cannot determine encoding, assuming fallback encoding %s',
source_path, fallback_encoding)
result = fallback_encoding
else:
# Simply use the specified encoding.
result = encoding
if result is None:
# Encoding 'automatic' or 'chardet' failed to detect anything.
if fallback_encoding is not None:
# If defined, use the fallback encoding.
result = fallback_encoding
else:
try:
# Attempt to read the file as UTF-8.
with open(source_path, 'r', encoding='utf-8') as source_file:
source_file.read()
result = 'utf-8'
except UnicodeDecodeError:
# UTF-8 did not work out, use the default as last resort.
result = DEFAULT_FALLBACK_ENCODING
_log.debug('%s: no fallback encoding specified, using %s', source_path, result)
assert result is not None
return result |
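A brief usage sketch; 'data.csv' is a placeholder path.
encoding_for('data.csv')                                  # probe BOM / magic comment / UTF-8
encoding_for('data.csv', encoding='cp1252')               # explicit value is passed through
encoding_for('data.csv', encoding='automatic',
             fallback_encoding='latin-1')                 # used if probing fails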
<SYSTEM_TASK:>
Remove vertical lines in a boolean array if their length >= min_line_length
<END_TASK>
<USER_TASK:>
Description:
def filterVerticalLines(arr, min_line_length=4):
"""
    Remove vertical lines in a boolean array if their length >= min_line_length
""" |
    gy = arr.shape[0]
    gx = arr.shape[1]
    for i in range(gy):
        for j in range(gx):
            if arr[i, j]:
                # length of the vertical run of True cells starting at (i, j),
                # stopping at the bottom edge of the array
                run = 0
                while i + run < gy and arr[i + run, j]:
                    run += 1
                if run >= min_line_length:
                    # erase the whole run
                    for d in range(run):
                        arr[i + d, j] = 0
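A small check of the behavior (illustrative):
import numpy as np

a = np.zeros((8, 3), dtype=bool)
a[1:6, 1] = True                  # vertical run of length 5 in column 1
a[0, 0] = True                    # isolated pixel, should survive
filterVerticalLines(a, min_line_length=4)
print(a[:, 1].any(), a[0, 0])     # False True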
<SYSTEM_TASK:>
Recursively traverse schema dictionary and for each "leaf node", evaluate the fake
<END_TASK>
<USER_TASK:>
Description:
def _generate_one_fake(self, schema):
"""
Recursively traverse schema dictionary and for each "leaf node", evaluate the fake
value
Implementation:
For each key-value pair:
1) If value is not an iterable (i.e. dict or list), evaluate the fake data (base case)
2) If value is a dictionary, recurse
3) If value is a list, iteratively recurse over each item
""" |
data = {}
for k, v in schema.items():
if isinstance(v, dict):
data[k] = self._generate_one_fake(v)
elif isinstance(v, list):
data[k] = [self._generate_one_fake(item) for item in v]
else:
data[k] = getattr(self._faker, v)()
return data |
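A hedged standalone sketch of the same traversal; the real method presumably uses a Faker instance stored on self._faker, so a local Faker stands in here and the schema keys are made up.
from faker import Faker

faker = Faker()

def generate_one_fake(schema):
    data = {}
    for k, v in schema.items():
        if isinstance(v, dict):
            data[k] = generate_one_fake(v)
        elif isinstance(v, list):
            data[k] = [generate_one_fake(item) for item in v]
        else:
            data[k] = getattr(faker, v)()      # leaf: call the named Faker provider
    return data

schema = {"name": "name",
          "address": {"city": "city", "zip": "postcode"},
          "contacts": [{"email": "email"}, {"phone": "phone_number"}]}
print(generate_one_fake(schema))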
<SYSTEM_TASK:>
Fetch the given uri and return the root Element of the response.
<END_TASK>
<USER_TASK:>
Description:
def fetch_and_parse(method, uri, params_prefix=None, **params):
"""Fetch the given uri and return the root Element of the response.""" |
doc = ElementTree.parse(fetch(method, uri, params_prefix, **params))
return _parse(doc.getroot()) |
<SYSTEM_TASK:>
Recursively convert an Element into python data types
<END_TASK>
<USER_TASK:>
Description:
def _parse(root):
"""Recursively convert an Element into python data types""" |
if root.tag == "nil-classes":
return []
elif root.get("type") == "array":
return [_parse(child) for child in root]
d = {}
for child in root:
type = child.get("type") or "string"
if child.get("nil"):
value = None
elif type == "boolean":
value = True if child.text.lower() == "true" else False
elif type == "dateTime":
value = iso8601.parse_date(child.text)
elif type == "decimal":
value = decimal.Decimal(child.text)
elif type == "integer":
value = int(child.text)
else:
value = child.text
d[child.tag] = value
return d |
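An illustrative round-trip on a small Rails-style XML fragment:
from xml.etree import ElementTree

xml = ('<project>'
       '<id type="integer">7</id>'
       '<archived type="boolean">false</archived>'
       '<name>Demo</name>'
       '</project>')
print(_parse(ElementTree.fromstring(xml)))
# {'id': 7, 'archived': False, 'name': 'Demo'}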